block: move elevator queue allocation logic into blk_mq_init_sched
[ Upstream commit 49811586be ]

In preparation for allocating sched_tags before freezing the request
queue and acquiring ->elevator_lock, move the elevator queue allocation
logic from the elevator ops ->init_sched callback into blk_mq_init_sched.
As elevator_alloc is now only invoked from the block layer core, we no
longer need to export it, so unexport the elevator_alloc function.

This refactoring provides a centralized location for elevator queue
initialization, which makes it easier to store pre-allocated sched_tags
in the struct elevator_queue during later changes.

Reviewed-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Nilay Shroff <nilay@linux.ibm.com>
Link: https://lore.kernel.org/r/20250730074614.2537382-2-nilay@linux.ibm.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Stable-dep-of: 2d82f3bd89 ("blk-mq: fix lockdep warning in __blk_mq_update_nr_hw_queues")
Signed-off-by: Sasha Levin <sashal@kernel.org>
commit 1837d92267
parent 4bd3ed125c
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -7229,22 +7229,16 @@ static void bfq_init_root_group(struct bfq_group *root_group,
 	root_group->sched_data.bfq_class_idle_last_service = jiffies;
 }
 
-static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
+static int bfq_init_queue(struct request_queue *q, struct elevator_queue *eq)
 {
 	struct bfq_data *bfqd;
-	struct elevator_queue *eq;
 	unsigned int i;
 	struct blk_independent_access_ranges *ia_ranges = q->disk->ia_ranges;
 
-	eq = elevator_alloc(q, e);
-	if (!eq)
+	bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node);
+	if (!bfqd)
 		return -ENOMEM;
 
-	bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node);
-	if (!bfqd) {
-		kobject_put(&eq->kobj);
-		return -ENOMEM;
-	}
 	eq->elevator_data = bfqd;
 
 	spin_lock_irq(&q->queue_lock);
@@ -7402,7 +7396,6 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
 
 out_free:
 	kfree(bfqd);
-	kobject_put(&eq->kobj);
 	return -ENOMEM;
 }
 
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -475,10 +475,14 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
 	q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
 				   BLKDEV_DEFAULT_RQ);
 
+	eq = elevator_alloc(q, e);
+	if (!eq)
+		return -ENOMEM;
+
 	if (blk_mq_is_shared_tags(flags)) {
 		ret = blk_mq_init_sched_shared_tags(q);
 		if (ret)
-			return ret;
+			goto err_put_elevator;
 	}
 
 	queue_for_each_hw_ctx(q, hctx, i) {
@@ -487,7 +491,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
 			goto err_free_map_and_rqs;
 	}
 
-	ret = e->ops.init_sched(q, e);
+	ret = e->ops.init_sched(q, eq);
 	if (ret)
 		goto err_free_map_and_rqs;
 
@@ -508,7 +512,8 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
 err_free_map_and_rqs:
 	blk_mq_sched_free_rqs(q);
 	blk_mq_sched_tags_teardown(q, flags);
-
+err_put_elevator:
+	kobject_put(&eq->kobj);
 	q->elevator = NULL;
 	return ret;
 }
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -148,7 +148,6 @@ struct elevator_queue *elevator_alloc(struct request_queue *q,
 
 	return eq;
 }
-EXPORT_SYMBOL(elevator_alloc);
 
 static void elevator_release(struct kobject *kobj)
 {
--- a/block/elevator.h
+++ b/block/elevator.h
@@ -24,7 +24,7 @@ struct blk_mq_alloc_data;
 struct blk_mq_hw_ctx;
 
 struct elevator_mq_ops {
-	int (*init_sched)(struct request_queue *, struct elevator_type *);
+	int (*init_sched)(struct request_queue *, struct elevator_queue *);
 	void (*exit_sched)(struct elevator_queue *);
 	int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int);
 	void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -399,20 +399,13 @@ err:
 	return ERR_PTR(ret);
 }
 
-static int kyber_init_sched(struct request_queue *q, struct elevator_type *e)
+static int kyber_init_sched(struct request_queue *q, struct elevator_queue *eq)
 {
 	struct kyber_queue_data *kqd;
-	struct elevator_queue *eq;
-
-	eq = elevator_alloc(q, e);
-	if (!eq)
-		return -ENOMEM;
 
 	kqd = kyber_queue_data_alloc(q);
-	if (IS_ERR(kqd)) {
-		kobject_put(&eq->kobj);
+	if (IS_ERR(kqd))
 		return PTR_ERR(kqd);
-	}
 
 	blk_stat_enable_accounting(q);
 
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -554,20 +554,14 @@ static void dd_exit_sched(struct elevator_queue *e)
 /*
  * initialize elevator private data (deadline_data).
  */
-static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
+static int dd_init_sched(struct request_queue *q, struct elevator_queue *eq)
 {
 	struct deadline_data *dd;
-	struct elevator_queue *eq;
 	enum dd_prio prio;
-	int ret = -ENOMEM;
-
-	eq = elevator_alloc(q, e);
-	if (!eq)
-		return ret;
 
 	dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
 	if (!dd)
-		goto put_eq;
+		return -ENOMEM;
 
 	eq->elevator_data = dd;
 
@@ -594,10 +588,6 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
 
 	q->elevator = eq;
 	return 0;
-
-put_eq:
-	kobject_put(&eq->kobj);
-	return ret;
 }
 
 /*