From c76541a932113faa5b3be65a3f0a3d4ebbc86aeb Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Fri, 19 Dec 2014 17:54:13 -0700 Subject: [PATCH 01/18] blk-mq: Exit queue on alloc failure Fixes usage counter when a request could not be allocated. Signed-off-by: Keith Busch Signed-off-by: Jens Axboe --- block/blk-mq.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index da1ab5641227..bfe0f1f9cfa0 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -258,8 +258,10 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, ctx = alloc_data.ctx; } blk_mq_put_ctx(ctx); - if (!rq) + if (!rq) { + blk_mq_queue_exit(q); return ERR_PTR(-EWOULDBLOCK); + } return rq; } EXPORT_SYMBOL(blk_mq_alloc_request); From b4c6a028774bcf3f20ed1e66c27a05aa51a8cf55 Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Fri, 19 Dec 2014 17:54:14 -0700 Subject: [PATCH 02/18] blk-mq: Export freeze/unfreeze functions Let drivers prevent entering a queue that isn't available. Signed-off-by: Keith Busch Signed-off-by: Jens Axboe --- block/blk-mq.c | 6 ++++-- include/linux/blk-mq.h | 2 ++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index bfe0f1f9cfa0..97ebb84b5633 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -107,7 +107,7 @@ static void blk_mq_usage_counter_release(struct percpu_ref *ref) wake_up_all(&q->mq_freeze_wq); } -static void blk_mq_freeze_queue_start(struct request_queue *q) +void blk_mq_freeze_queue_start(struct request_queue *q) { bool freeze; @@ -120,6 +120,7 @@ static void blk_mq_freeze_queue_start(struct request_queue *q) blk_mq_run_queues(q, false); } } +EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start); static void blk_mq_freeze_queue_wait(struct request_queue *q) { @@ -136,7 +137,7 @@ void blk_mq_freeze_queue(struct request_queue *q) blk_mq_freeze_queue_wait(q); } -static void blk_mq_unfreeze_queue(struct request_queue *q) +void blk_mq_unfreeze_queue(struct request_queue *q) { bool wake; @@ -149,6 +150,7 @@ static void blk_mq_unfreeze_queue(struct request_queue *q) wake_up_all(&q->mq_freeze_wq); } } +EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue); bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx) { diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 8aded9ab2e4e..3b43f509432c 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -212,6 +212,8 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn, void *priv); +void blk_mq_unfreeze_queue(struct request_queue *q); +void blk_mq_freeze_queue_start(struct request_queue *q); /* * Driver command data is immediately after the request. So subtract request From 2b25d981790b830f0e045881386866b970bf9066 Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Mon, 22 Dec 2014 12:59:04 -0700 Subject: [PATCH 03/18] NVMe: Fix double free irq Sets the vector to an invalid value after it's freed so we don't free it twice. 
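The fix relies on the usual "poison after release" idiom: once the vector is handed back, the field is set to a value no valid vector can have, so a second suspend sees the sentinel and bails out. A stand-alone illustration of the idiom (user-space C, all names invented for this sketch; the diff below applies the same idea to nvmeq->cq_vector):

#include <stdio.h>

/* -1 plays the role nvmeq->cq_vector == -1 plays in the patch: "no vector". */
struct fake_queue {
        int vector;
};

static int suspend_queue(struct fake_queue *q)
{
        if (q->vector == -1)            /* already released: refuse a second free */
                return 1;
        printf("freeing vector %d\n", q->vector);
        q->vector = -1;                 /* poison it so the next call is a no-op */
        return 0;
}

int main(void)
{
        struct fake_queue q = { .vector = 5 };

        suspend_queue(&q);              /* frees the vector once */
        suspend_queue(&q);              /* harmless no-op, not a double free */
        return 0;
}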
Signed-off-by: Keith Busch Signed-off-by: Jens Axboe --- drivers/block/nvme-core.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index b1d5d8797315..52d0f2d9fecd 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c @@ -1131,10 +1131,16 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest) */ static int nvme_suspend_queue(struct nvme_queue *nvmeq) { - int vector = nvmeq->dev->entry[nvmeq->cq_vector].vector; + int vector; spin_lock_irq(&nvmeq->q_lock); + if (nvmeq->cq_vector == -1) { + spin_unlock_irq(&nvmeq->q_lock); + return 1; + } + vector = nvmeq->dev->entry[nvmeq->cq_vector].vector; nvmeq->dev->online_queues--; + nvmeq->cq_vector = -1; spin_unlock_irq(&nvmeq->q_lock); irq_set_affinity_hint(vector, NULL); @@ -1173,7 +1179,7 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid) } static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, - int depth, int vector) + int depth) { struct device *dmadev = &dev->pci_dev->dev; struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq), GFP_KERNEL); @@ -1199,7 +1205,6 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, nvmeq->cq_phase = 1; nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; nvmeq->q_depth = depth; - nvmeq->cq_vector = vector; nvmeq->qid = qid; dev->queue_count++; dev->queues[qid] = nvmeq; @@ -1244,6 +1249,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid) struct nvme_dev *dev = nvmeq->dev; int result; + nvmeq->cq_vector = qid - 1; result = adapter_alloc_cq(dev, qid, nvmeq); if (result < 0) return result; @@ -1416,7 +1422,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev) nvmeq = dev->queues[0]; if (!nvmeq) { - nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH, 0); + nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH); if (!nvmeq) return -ENOMEM; } @@ -1443,6 +1449,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev) if (result) goto free_nvmeq; + nvmeq->cq_vector = 0; result = queue_request_irq(dev, nvmeq, nvmeq->irqname); if (result) goto free_tags; @@ -1944,7 +1951,7 @@ static void nvme_create_io_queues(struct nvme_dev *dev) unsigned i; for (i = dev->queue_count; i <= dev->max_qid; i++) - if (!nvme_alloc_queue(dev, i, dev->q_depth, i - 1)) + if (!nvme_alloc_queue(dev, i, dev->q_depth)) break; for (i = dev->online_queues; i <= dev->queue_count - 1; i++) From aed3ea94bdd2ac0a21ed0103d34097e202ee77f6 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 22 Dec 2014 14:04:42 -0700 Subject: [PATCH 04/18] block: wake up waiters when a queue is marked dying If it's dying, we can't expect new requests to complete and come in and wake up other tasks waiting for requests. So after we have marked it as dying, wake up everybody currently waiting for a request. Once they wake, they will retry their allocation and fail appropriately due to the state of the queue.
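The ordering matters: the dying flag must be set before the wakeup, so every woken waiter re-checks the state and gives up instead of going back to sleep. A minimal user-space analogue of that pattern (pthreads, names invented; the kernel uses wait queues and wake_up_all() instead):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t rq_wait = PTHREAD_COND_INITIALIZER;
static bool queue_dying;

/* Stands in for a blocked request allocation: sleep until a tag frees up
 * (never happens in this sketch) or the queue is marked dying. */
static int alloc_request(void)
{
        int ret;

        pthread_mutex_lock(&lock);
        while (!queue_dying)
                pthread_cond_wait(&rq_wait, &lock);
        ret = -1;                       /* dying: fail the allocation */
        pthread_mutex_unlock(&lock);
        return ret;
}

/* Stands in for blk_set_queue_dying(): flip the state first, then wake every
 * sleeper so none of them keeps waiting for a completion that will never come. */
static void set_queue_dying(void)
{
        pthread_mutex_lock(&lock);
        queue_dying = true;
        pthread_cond_broadcast(&rq_wait);
        pthread_mutex_unlock(&lock);
}

static void *waiter(void *unused)
{
        printf("alloc_request() = %d\n", alloc_request());
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, waiter, NULL);
        set_queue_dying();
        pthread_join(t, NULL);
        return 0;
}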
Tested-by: Keith Busch Signed-off-by: Jens Axboe --- block/blk-core.c | 21 ++++++++++++++++++++- block/blk-mq-tag.c | 14 ++++++++++---- block/blk-mq-tag.h | 1 + block/blk-mq.c | 10 ++++++++++ block/blk-mq.h | 1 + 5 files changed, 42 insertions(+), 5 deletions(-) diff --git a/block/blk-core.c b/block/blk-core.c index 30f6153a40c2..3ad405571dcc 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -473,6 +473,25 @@ void blk_queue_bypass_end(struct request_queue *q) } EXPORT_SYMBOL_GPL(blk_queue_bypass_end); +void blk_set_queue_dying(struct request_queue *q) +{ + queue_flag_set_unlocked(QUEUE_FLAG_DYING, q); + + if (q->mq_ops) + blk_mq_wake_waiters(q); + else { + struct request_list *rl; + + blk_queue_for_each_rl(rl, q) { + if (rl->rq_pool) { + wake_up(&rl->wait[BLK_RW_SYNC]); + wake_up(&rl->wait[BLK_RW_ASYNC]); + } + } + } +} +EXPORT_SYMBOL_GPL(blk_set_queue_dying); + /** * blk_cleanup_queue - shutdown a request queue * @q: request queue to shutdown @@ -486,7 +505,7 @@ void blk_cleanup_queue(struct request_queue *q) /* mark @q DYING, no new request or merges will be allowed afterwards */ mutex_lock(&q->sysfs_lock); - queue_flag_set_unlocked(QUEUE_FLAG_DYING, q); + blk_set_queue_dying(q); spin_lock_irq(lock); /* diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 32e8dbb9ad1c..60c9d4a93fe4 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -68,9 +68,9 @@ bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) } /* - * Wakeup all potentially sleeping on normal (non-reserved) tags + * Wakeup all potentially sleeping on tags */ -static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags) +void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve) { struct blk_mq_bitmap_tags *bt; int i, wake_index; @@ -85,6 +85,12 @@ static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags) wake_index = bt_index_inc(wake_index); } + + if (include_reserve) { + bt = &tags->breserved_tags; + if (waitqueue_active(&bt->bs[0].wait)) + wake_up(&bt->bs[0].wait); + } } /* @@ -100,7 +106,7 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) atomic_dec(&tags->active_queues); - blk_mq_tag_wakeup_all(tags); + blk_mq_tag_wakeup_all(tags, false); } /* @@ -584,7 +590,7 @@ int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth) * static and should never need resizing. 
*/ bt_update_count(&tags->bitmap_tags, tdepth); - blk_mq_tag_wakeup_all(tags); + blk_mq_tag_wakeup_all(tags, false); return 0; } diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index 6206ed17ef76..a6fa0fc9d41a 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -54,6 +54,7 @@ extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags); extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page); extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag); extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth); +extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool); enum { BLK_MQ_TAG_CACHE_MIN = 1, diff --git a/block/blk-mq.c b/block/blk-mq.c index 97ebb84b5633..1a41d7aefbd5 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -152,6 +152,16 @@ void blk_mq_unfreeze_queue(struct request_queue *q) } EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue); +void blk_mq_wake_waiters(struct request_queue *q) +{ + struct blk_mq_hw_ctx *hctx; + unsigned int i; + + queue_for_each_hw_ctx(q, hctx, i) + if (blk_mq_hw_queue_mapped(hctx)) + blk_mq_tag_wakeup_all(hctx->tags, true); +} + bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx) { return blk_mq_has_free_tags(hctx->tags); diff --git a/block/blk-mq.h b/block/blk-mq.h index 206230e64f79..4f4f943c22c3 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -32,6 +32,7 @@ void blk_mq_free_queue(struct request_queue *q); void blk_mq_clone_flush_request(struct request *flush_rq, struct request *orig_rq); int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr); +void blk_mq_wake_waiters(struct request_queue *q); /* * CPU hotplug helpers From 35b489d32fcc37e8735f41aa794b24cf9d1e74f5 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Fri, 2 Jan 2015 14:25:27 +0000 Subject: [PATCH 05/18] block: fix checking return value of blk_mq_init_queue Check IS_ERR_OR_NULL(return value) instead of just return value. Signed-off-by: Ming Lei Reduced to IS_ERR() by me, we never return NULL. 
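The check matters because blk_mq_init_queue() reports failure as an encoded error pointer, not as NULL, so the old "if (!q)" test can never fire and the error escapes. A user-space model of the ERR_PTR()/IS_ERR() convention (simplified from the kernel macros) makes the difference visible:

#include <errno.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's error-pointer helpers. */
#define MAX_ERRNO       4095
#define ERR_PTR(err)    ((void *)(long)(err))
#define PTR_ERR(ptr)    ((long)(ptr))
#define IS_ERR(ptr)     ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static void *init_queue(int fail)
{
        return fail ? ERR_PTR(-ENOMEM) : "a real queue";
}

int main(void)
{
        void *q = init_queue(1);

        if (!q)                 /* never true: the failure is an encoded pointer */
                printf("NULL check caught it\n");
        if (IS_ERR(q))          /* the check the drivers are switched to */
                printf("IS_ERR() caught error %ld\n", PTR_ERR(q));
        return 0;
}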
Signed-off-by: Jens Axboe --- drivers/block/null_blk.c | 2 +- drivers/block/nvme-core.c | 2 +- drivers/block/virtio_blk.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index ae9f615382f6..aa2224aa7caa 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -530,7 +530,7 @@ static int null_add_dev(void) goto out_cleanup_queues; nullb->q = blk_mq_init_queue(&nullb->tag_set); - if (!nullb->q) { + if (IS_ERR(nullb->q)) { rv = -ENOMEM; goto out_cleanup_tags; } diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index 52d0f2d9fecd..f7d083bb3bd5 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c @@ -1376,7 +1376,7 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev) return -ENOMEM; dev->admin_q = blk_mq_init_queue(&dev->admin_tagset); - if (!dev->admin_q) { + if (IS_ERR(dev->admin_q)) { blk_mq_free_tag_set(&dev->admin_tagset); return -ENOMEM; } diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 7ef7c098708f..cdfbd21e3597 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -638,7 +638,7 @@ static int virtblk_probe(struct virtio_device *vdev) goto out_put_disk; q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set); - if (!q) { + if (IS_ERR(q)) { err = -ENOMEM; goto out_free_tags; } From 17ded320706c6316376059cfbe9dccab32c62b42 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 7 Jan 2015 10:44:04 -0700 Subject: [PATCH 06/18] blk-mq: get rid of ->cmd_size in the hardware queue We store it in the tag set, we don't need it in the hardware queue. While removing cmd_size, place ->queue_num further down to avoid a hole on 64-bit archs. It's not used in any fast paths, so we can safely move it. Signed-off-by: Jens Axboe --- block/blk-mq.c | 1 - include/linux/blk-mq.h | 3 +-- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index 1a41d7aefbd5..cbd2a55d69b8 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1615,7 +1615,6 @@ static int blk_mq_init_hctx(struct request_queue *q, hctx->queue = q; hctx->queue_num = hctx_idx; hctx->flags = set->flags; - hctx->cmd_size = set->cmd_size; blk_mq_init_cpu_notifier(&hctx->cpu_notifier, blk_mq_hctx_notify, hctx); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 3b43f509432c..d0de259e92b2 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -34,7 +34,6 @@ struct blk_mq_hw_ctx { unsigned long flags; /* BLK_MQ_F_* flags */ struct request_queue *queue; - unsigned int queue_num; struct blk_flush_queue *fq; void *driver_data; @@ -54,7 +53,7 @@ struct blk_mq_hw_ctx { unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER]; unsigned int numa_node; - unsigned int cmd_size; /* per-request extra data */ + unsigned int queue_num; atomic_t nr_active; From 3fd5940cb2e47a61dcff4108f39be4c72cb4dace Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Thu, 8 Jan 2015 08:53:56 -0700 Subject: [PATCH 07/18] blk-mq: Wake tasks entering queue on dying When the queue is set to dying, wake up tasks that are waiting on frozen queue so they realize it is dying and abandon their request. Signed-off-by: Keith Busch Modified by me to add a code comment on the need for the wakeup. 
Signed-off-by: Jens Axboe --- block/blk-mq.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/block/blk-mq.c b/block/blk-mq.c index cbd2a55d69b8..aefed96369dd 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -160,6 +160,13 @@ void blk_mq_wake_waiters(struct request_queue *q) queue_for_each_hw_ctx(q, hctx, i) if (blk_mq_hw_queue_mapped(hctx)) blk_mq_tag_wakeup_all(hctx->tags, true); + + /* + * If we are called because the queue has now been marked as + * dying, we need to ensure that processes currently waiting on + * the queue are notified as well. + */ + wake_up_all(&q->mq_freeze_wq); } bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx) From 973c01919bce7e3559b62a856b29211ec5ac325c Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Wed, 7 Jan 2015 18:55:43 -0700 Subject: [PATCH 08/18] blk-mq: Export if requests were started Drivers can iterate over all allocated request tags, but their callback needs a way to know if the driver started the request in the first place. Signed-off-by: Keith Busch Signed-off-by: Jens Axboe --- block/blk-mq.c | 6 ++++++ include/linux/blk-mq.h | 1 + 2 files changed, 7 insertions(+) diff --git a/block/blk-mq.c b/block/blk-mq.c index aefed96369dd..ce4811667d36 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -404,6 +404,12 @@ void blk_mq_complete_request(struct request *rq) } EXPORT_SYMBOL(blk_mq_complete_request); +int blk_mq_request_started(struct request *rq) +{ + return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags); +} +EXPORT_SYMBOL_GPL(blk_mq_request_started); + void blk_mq_start_request(struct request *rq) { struct request_queue *q = rq->q; diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index d0de259e92b2..addf8a19d806 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -194,6 +194,7 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag) struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index); struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int); +int blk_mq_request_started(struct request *rq); void blk_mq_start_request(struct request *rq); void blk_mq_end_request(struct request *rq, int error); void __blk_mq_end_request(struct request *rq, int error); From c68ed59f534c318716c6189050af3c5ea03b8071 Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Wed, 7 Jan 2015 18:55:44 -0700 Subject: [PATCH 09/18] blk-mq: Let drivers cancel requeue_work Kicking requeued requests will start h/w queues in a work_queue, which may alter the driver's requested state to temporarily stop them. This patch exports a method to cancel the q->requeue_work so a driver can be assured stopped h/w queues won't be started up before it is ready. 
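The intended calling sequence shows up later in this series (patch 17, nvme_freeze_queues()): freeze and stop the queue first, then flush any pending requeue kick so nothing restarts the hardware queues behind the driver's back. Condensed, with the per-namespace loop omitted:

        blk_mq_freeze_queue_start(ns->queue);

        spin_lock(ns->queue->queue_lock);
        queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
        spin_unlock(ns->queue->queue_lock);

        blk_mq_cancel_requeue_work(ns->queue);  /* flush a pending kick... */
        blk_mq_stop_hw_queues(ns->queue);       /* ...so this stays stopped */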
Signed-off-by: Keith Busch Signed-off-by: Jens Axboe --- block/blk-mq.c | 6 ++++++ include/linux/blk-mq.h | 1 + 2 files changed, 7 insertions(+) diff --git a/block/blk-mq.c b/block/blk-mq.c index ce4811667d36..e73a5dd89fc4 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -527,6 +527,12 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head) } EXPORT_SYMBOL(blk_mq_add_to_requeue_list); +void blk_mq_cancel_requeue_work(struct request_queue *q) +{ + cancel_work_sync(&q->requeue_work); +} +EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work); + void blk_mq_kick_requeue_list(struct request_queue *q) { kblockd_schedule_work(&q->requeue_work); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index addf8a19d806..18684e0bdb8a 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -201,6 +201,7 @@ void __blk_mq_end_request(struct request *rq, int error); void blk_mq_requeue_request(struct request *rq); void blk_mq_add_to_requeue_list(struct request *rq, bool at_head); +void blk_mq_cancel_requeue_work(struct request_queue *q); void blk_mq_kick_requeue_list(struct request_queue *q); void blk_mq_complete_request(struct request *rq); From 1885b24d23716e09b9c952822b05fd7f68099cdb Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 7 Jan 2015 18:55:45 -0700 Subject: [PATCH 10/18] blk-mq: Add helper to abort requeued requests Adds a helper function a driver can use to abort requeued requests in case any are pending when h/w queues are being removed. Signed-off-by: Jens Axboe --- block/blk-mq.c | 20 ++++++++++++++++++++ include/linux/blk-mq.h | 1 + 2 files changed, 21 insertions(+) diff --git a/block/blk-mq.c b/block/blk-mq.c index e73a5dd89fc4..261ccd89e15d 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -539,6 +539,26 @@ void blk_mq_kick_requeue_list(struct request_queue *q) } EXPORT_SYMBOL(blk_mq_kick_requeue_list); +void blk_mq_abort_requeue_list(struct request_queue *q) +{ + unsigned long flags; + LIST_HEAD(rq_list); + + spin_lock_irqsave(&q->requeue_lock, flags); + list_splice_init(&q->requeue_list, &rq_list); + spin_unlock_irqrestore(&q->requeue_lock, flags); + + while (!list_empty(&rq_list)) { + struct request *rq; + + rq = list_first_entry(&rq_list, struct request, queuelist); + list_del_init(&rq->queuelist); + rq->errors = -EIO; + blk_mq_end_request(rq, rq->errors); + } +} +EXPORT_SYMBOL(blk_mq_abort_requeue_list); + static inline bool is_flush_request(struct request *rq, struct blk_flush_queue *fq, unsigned int tag) { diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 18684e0bdb8a..5735e7130d63 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -203,6 +203,7 @@ void blk_mq_requeue_request(struct request *rq); void blk_mq_add_to_requeue_list(struct request *rq, bool at_head); void blk_mq_cancel_requeue_work(struct request_queue *q); void blk_mq_kick_requeue_list(struct request_queue *q); +void blk_mq_abort_requeue_list(struct request_queue *q); void blk_mq_complete_request(struct request *rq); void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); From 5b3f25fc343690cafd3e27431a69a7bdaf9df001 Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Wed, 7 Jan 2015 18:55:46 -0700 Subject: [PATCH 11/18] blk-mq: Allow requests to never expire Some types of requests may be started that are not guaranteed to ever complete. This adds a request flag that a driver can use to mark the request as such.
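Usage is a single flag set at submission time, and the block layer's timeout scan then skips the request entirely. Condensed from this patch and from patch 13 later in the series:

        /* Driver, before handing over a request that may never complete
         * (nvme_submit_async_admin_req() in patch 13): */
        req->cmd_flags |= REQ_NO_TIMEOUT;

        /* Block layer, in the timeout check added by this patch: */
        if (rq->cmd_flags & REQ_NO_TIMEOUT)
                return;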
Signed-off-by: Keith Busch Signed-off-by: Jens Axboe --- block/blk-mq.c | 4 +++- block/blk-timeout.c | 3 +++ include/linux/blk_types.h | 2 ++ 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index 261ccd89e15d..78324a2f0e10 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -619,7 +619,7 @@ void blk_mq_rq_timed_out(struct request *req, bool reserved) break; } } - + static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx, struct request *rq, void *priv, bool reserved) { @@ -627,6 +627,8 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx, if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) return; + if (rq->cmd_flags & REQ_NO_TIMEOUT) + return; if (time_after_eq(jiffies, rq->deadline)) { if (!blk_mark_rq_complete(rq)) diff --git a/block/blk-timeout.c b/block/blk-timeout.c index 56c025894cdf..246dfb16c3d9 100644 --- a/block/blk-timeout.c +++ b/block/blk-timeout.c @@ -190,6 +190,9 @@ void blk_add_timer(struct request *req) struct request_queue *q = req->q; unsigned long expiry; + if (req->cmd_flags & REQ_NO_TIMEOUT) + return; + /* blk-mq has its own handler, so we don't need ->rq_timed_out_fn */ if (!q->mq_ops && !q->rq_timed_out_fn) return; diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 445d59231bc4..c294e3e25e37 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -190,6 +190,7 @@ enum rq_flag_bits { __REQ_PM, /* runtime pm request */ __REQ_HASHED, /* on IO scheduler merge hash */ __REQ_MQ_INFLIGHT, /* track inflight for MQ */ + __REQ_NO_TIMEOUT, /* requests may never expire */ __REQ_NR_BITS, /* stops here */ }; @@ -243,5 +244,6 @@ enum rq_flag_bits { #define REQ_PM (1ULL << __REQ_PM) #define REQ_HASHED (1ULL << __REQ_HASHED) #define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT) +#define REQ_NO_TIMEOUT (1ULL << __REQ_NO_TIMEOUT) #endif /* __LINUX_BLK_TYPES_H */ From eb130dbfc40eabcd4e10797310bda6b9f6dd7e76 Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Thu, 8 Jan 2015 08:59:53 -0700 Subject: [PATCH 12/18] blk-mq: End unstarted requests on a dying queue Requests that haven't been started prior to a queue dying can be ended in error without waiting for them to start and time out. Signed-off-by: Keith Busch Added code comment to explain why this is done. Signed-off-by: Jens Axboe --- block/blk-mq.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index 78324a2f0e10..2f95747c287e 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -625,8 +625,17 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx, { struct blk_mq_timeout_data *data = priv; - if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) + if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) { + /* + * If a request wasn't started before the queue was + * marked dying, kill it here or it'll go unnoticed. + */ + if (unlikely(blk_queue_dying(rq->q))) { + rq->errors = -EIO; + blk_mq_complete_request(rq); + } return; + } if (rq->cmd_flags & REQ_NO_TIMEOUT) return; From c917dfe52834979610d45022226445d1dc7c67d8 Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Wed, 7 Jan 2015 18:55:48 -0700 Subject: [PATCH 13/18] NVMe: Start all requests Once the nvme callback is set for a request, the driver can start it and make it available for timeout handling. For timed out commands on a device that is not initialized, this fixes potential deadlocks that can occur on startup and shutdown when a device is unresponsive since they can now be cancelled. 
Asynchronous requests do not have any expected timeout, so these are using the new "REQ_NO_TIMEOUT" request flags. Signed-off-by: Keith Busch Signed-off-by: Jens Axboe --- drivers/block/nvme-core.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index f7d083bb3bd5..286fa4cfc937 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c @@ -215,6 +215,7 @@ static void nvme_set_info(struct nvme_cmd_info *cmd, void *ctx, cmd->fn = handler; cmd->ctx = ctx; cmd->aborted = 0; + blk_mq_start_request(blk_mq_rq_from_pdu(cmd)); } /* Special values must be less than 0x1000 */ @@ -664,8 +665,6 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx, } } - blk_mq_start_request(req); - nvme_set_info(cmd, iod, req_completion); spin_lock_irq(&nvmeq->q_lock); if (req->cmd_flags & REQ_DISCARD) @@ -835,6 +834,7 @@ static int nvme_submit_async_admin_req(struct nvme_dev *dev) if (IS_ERR(req)) return PTR_ERR(req); + req->cmd_flags |= REQ_NO_TIMEOUT; cmd_info = blk_mq_rq_to_pdu(req); nvme_set_info(cmd_info, req, async_req_completion); @@ -1086,8 +1086,16 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag, nvmeq->qid); - if (nvmeq->dev->initialized) - nvme_abort_req(req); + + if (!nvmeq->dev->initialized) { + /* + * Force cancelled command frees the request, which requires we + * return BLK_EH_NOT_HANDLED. + */ + nvme_cancel_queue_ios(nvmeq->hctx, req, nvmeq, reserved); + return BLK_EH_NOT_HANDLED; + } + nvme_abort_req(req); /* * The aborted req will be completed on receiving the abort req. From ea191d2f36b0f577ce5377c3e72aedc34282969d Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Wed, 7 Jan 2015 18:55:49 -0700 Subject: [PATCH 14/18] NVMe: Reference count admin queue usage Since there is no gendisk associated with the admin queue, the driver needs to hold a reference to it until all open references to the controller are closed. This also combines queue cleanup with freeing the tag set since these should not be separate. 
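The resulting lifetime rule is the familiar get/put pairing: take a queue reference as soon as the admin queue exists, and drop it only in the controller's final release. Condensed from the diff below:

        /* nvme_alloc_admin_tags(): pin the queue for the controller's lifetime. */
        if (!blk_get_queue(dev->admin_q)) {
                nvme_dev_remove_admin(dev);
                return -ENODEV;
        }

        /* nvme_free_dev(): drop it only when the last kref goes away. */
        blk_put_queue(dev->admin_q);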
Signed-off-by: Keith Busch Signed-off-by: Jens Axboe --- drivers/block/nvme-core.c | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index 286fa4cfc937..beb8d48f8560 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c @@ -1369,6 +1369,14 @@ static struct blk_mq_ops nvme_mq_ops = { .timeout = nvme_timeout, }; +static void nvme_dev_remove_admin(struct nvme_dev *dev) +{ + if (dev->admin_q && !blk_queue_dying(dev->admin_q)) { + blk_cleanup_queue(dev->admin_q); + blk_mq_free_tag_set(&dev->admin_tagset); + } +} + static int nvme_alloc_admin_tags(struct nvme_dev *dev) { if (!dev->admin_q) { @@ -1388,17 +1396,15 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev) blk_mq_free_tag_set(&dev->admin_tagset); return -ENOMEM; } + if (!blk_get_queue(dev->admin_q)) { + nvme_dev_remove_admin(dev); + return -ENODEV; + } } return 0; } -static void nvme_free_admin_tags(struct nvme_dev *dev) -{ - if (dev->admin_q) - blk_mq_free_tag_set(&dev->admin_tagset); -} - static int nvme_configure_admin_queue(struct nvme_dev *dev) { int result; @@ -1465,7 +1471,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev) return result; free_tags: - nvme_free_admin_tags(dev); + nvme_dev_remove_admin(dev); free_nvmeq: nvme_free_queues(dev, 0); return result; @@ -2415,12 +2421,6 @@ static void nvme_dev_shutdown(struct nvme_dev *dev) nvme_dev_unmap(dev); } -static void nvme_dev_remove_admin(struct nvme_dev *dev) -{ - if (dev->admin_q && !blk_queue_dying(dev->admin_q)) - blk_cleanup_queue(dev->admin_q); -} - static void nvme_dev_remove(struct nvme_dev *dev) { struct nvme_ns *ns; @@ -2510,6 +2510,7 @@ static void nvme_free_dev(struct kref *kref) nvme_free_namespaces(dev); nvme_release_instance(dev); blk_mq_free_tag_set(&dev->tagset); + blk_put_queue(dev->admin_q); kfree(dev->queues); kfree(dev->entry); kfree(dev); @@ -2795,7 +2796,6 @@ static void nvme_remove(struct pci_dev *pdev) nvme_dev_shutdown(dev); nvme_dev_remove_admin(dev); nvme_free_queues(dev, 0); - nvme_free_admin_tags(dev); nvme_release_prp_pools(dev); kref_put(&dev->kref, nvme_free_dev); } From 0fb59cbc5f133207535b25ec7d16fba24d549ee2 Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Wed, 7 Jan 2015 18:55:50 -0700 Subject: [PATCH 15/18] NVMe: Admin queue removal handling This protects admin queue access on shutdown. When the controller is disabled, the queue is frozen to prevent new entry, and unfrozen on resume, and fixes cq_vector signedness to not suspend a queue twice. Since unfreezing the queue makes it available for commands, it requires the queue be initialized, so this moves this part after that. Special handling is done when the device is unresponsive during shutdown. This can be optimized to not require subsequent commands to timeout, but saving that fix for later. This patch also removes the kill signals in this path that were left-over artifacts from the blk-mq conversion and no longer necessary. 
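Stripped of the error handling, the freeze/unfreeze pairing in the diff below is symmetric: freeze the admin queue when its hardware queue is torn down, and thaw it on resume once the controller can accept commands again.

        /* nvme_disable_queue(), shutdown side: qid 0 is the admin queue. */
        if (!qid && dev->admin_q)
                blk_mq_freeze_queue_start(dev->admin_q);

        /* nvme_alloc_admin_tags(), resume side: the queue already exists,
         * so thaw it instead of allocating a new one. */
        blk_mq_unfreeze_queue(dev->admin_q);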
Signed-off-by: Keith Busch Signed-off-by: Jens Axboe --- drivers/block/nvme-core.c | 34 ++++++++++++++++++++-------------- 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index beb8d48f8560..5fcb993fc6c9 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c @@ -1183,6 +1183,8 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid) adapter_delete_sq(dev, qid); adapter_delete_cq(dev, qid); } + if (!qid && dev->admin_q) + blk_mq_freeze_queue_start(dev->admin_q); nvme_clear_queue(nvmeq); } @@ -1400,7 +1402,8 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev) nvme_dev_remove_admin(dev); return -ENODEV; } - } + } else + blk_mq_unfreeze_queue(dev->admin_q); return 0; } @@ -1459,19 +1462,13 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev) if (result) goto free_nvmeq; - result = nvme_alloc_admin_tags(dev); - if (result) - goto free_nvmeq; - nvmeq->cq_vector = 0; result = queue_request_irq(dev, nvmeq, nvmeq->irqname); if (result) - goto free_tags; + goto free_nvmeq; return result; - free_tags: - nvme_dev_remove_admin(dev); free_nvmeq: nvme_free_queues(dev, 0); return result; @@ -2256,13 +2253,18 @@ static void nvme_wait_dq(struct nvme_delq_ctx *dq, struct nvme_dev *dev) break; if (!schedule_timeout(ADMIN_TIMEOUT) || fatal_signal_pending(current)) { + /* + * Disable the controller first since we can't trust it + * at this point, but leave the admin queue enabled + * until all queue deletion requests are flushed. + * FIXME: This may take a while if there are more h/w + * queues than admin tags. + */ set_current_state(TASK_RUNNING); - nvme_disable_ctrl(dev, readq(&dev->bar->cap)); - nvme_disable_queue(dev, 0); - - send_sig(SIGKILL, dq->worker->task, 1); + nvme_clear_queue(dev->queues[0]); flush_kthread_worker(dq->worker); + nvme_disable_queue(dev, 0); return; } } @@ -2339,7 +2341,6 @@ static void nvme_del_queue_start(struct kthread_work *work) { struct nvme_queue *nvmeq = container_of(work, struct nvme_queue, cmdinfo.work); - allow_signal(SIGKILL); if (nvme_delete_sq(nvmeq)) nvme_del_queue_end(nvmeq); } @@ -2607,15 +2608,20 @@ static int nvme_dev_start(struct nvme_dev *dev) } nvme_init_queue(dev->queues[0], 0); + result = nvme_alloc_admin_tags(dev); + if (result) + goto disable; result = nvme_setup_io_queues(dev); if (result) - goto disable; + goto free_tags; nvme_set_irq_hints(dev); return result; + free_tags: + nvme_dev_remove_admin(dev); disable: nvme_disable_queue(dev, 0); nvme_dev_list_remove(dev); From cef6a948271d5437f96e731878f2e9cb8c9820b7 Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Wed, 7 Jan 2015 18:55:51 -0700 Subject: [PATCH 16/18] NVMe: Command abort handling fixes Aborts all requeued commands prior to killing the request_queue. For commands that time out on a dying request queue, set the "Do Not Retry" bit on the command status so the command cannot be requeued. Finally, if the driver is requested to abort a command it did not start, do nothing.
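The status selection is the core of it: a command cancelled on a dying queue carries the DNR bit, so the retry logic in req_completion() cannot put it back on the requeue list, and the requeue list itself is drained before the queue is destroyed. Condensed from the diff below:

        /* nvme_cancel_queue_ios(): never touch a request we did not start. */
        if (!blk_mq_request_started(req))
                return;

        if (blk_queue_dying(req->q))
                cqe.status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1);
        else
                cqe.status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);

        /* nvme_dev_remove(): abort anything still parked on the requeue list
         * before the queue is cleaned up. */
        blk_mq_abort_requeue_list(ns->queue);
        blk_cleanup_queue(ns->queue);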
Signed-off-by: Keith Busch Signed-off-by: Jens Axboe --- drivers/block/nvme-core.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index 5fcb993fc6c9..ad9a9b61fc1d 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c @@ -1064,15 +1064,22 @@ static void nvme_cancel_queue_ios(struct blk_mq_hw_ctx *hctx, void *ctx; nvme_completion_fn fn; struct nvme_cmd_info *cmd; - static struct nvme_completion cqe = { - .status = cpu_to_le16(NVME_SC_ABORT_REQ << 1), - }; + struct nvme_completion cqe; + + if (!blk_mq_request_started(req)) + return; cmd = blk_mq_rq_to_pdu(req); if (cmd->ctx == CMD_CTX_CANCELLED) return; + if (blk_queue_dying(req->q)) + cqe.status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1); + else + cqe.status = cpu_to_le16(NVME_SC_ABORT_REQ << 1); + + dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n", req->tag, nvmeq->qid); ctx = cancel_cmd_info(cmd, &fn); @@ -2429,8 +2436,10 @@ static void nvme_dev_remove(struct nvme_dev *dev) list_for_each_entry(ns, &dev->namespaces, list) { if (ns->disk->flags & GENHD_FL_UP) del_gendisk(ns->disk); - if (!blk_queue_dying(ns->queue)) + if (!blk_queue_dying(ns->queue)) { + blk_mq_abort_requeue_list(ns->queue); blk_cleanup_queue(ns->queue); + } } } From c9d3bf8810514b1d32b49254a8f3485f36380eed Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Wed, 7 Jan 2015 18:55:52 -0700 Subject: [PATCH 17/18] NVMe: Start and stop h/w queues on reset This freezes and stops all the queues on device shutdown and restarts them on resume. This fixes hotplug and reset issues when the controller is actively being used. Signed-off-by: Keith Busch Signed-off-by: Jens Axboe --- drivers/block/nvme-core.c | 44 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 41 insertions(+), 3 deletions(-) diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index ad9a9b61fc1d..ff4ff0999f02 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c @@ -432,8 +432,13 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx, if (unlikely(status)) { if (!(status & NVME_SC_DNR || blk_noretry_request(req)) && (jiffies - req->start_time) < req->timeout) { + unsigned long flags; + blk_mq_requeue_request(req); - blk_mq_kick_requeue_list(req->q); + spin_lock_irqsave(req->q->queue_lock, flags); + if (!blk_queue_stopped(req->q)) + blk_mq_kick_requeue_list(req->q); + spin_unlock_irqrestore(req->q->queue_lock, flags); return; } req->errors = nvme_error_status(status); @@ -2405,6 +2410,34 @@ static void nvme_dev_list_remove(struct nvme_dev *dev) kthread_stop(tmp); } +static void nvme_freeze_queues(struct nvme_dev *dev) +{ + struct nvme_ns *ns; + + list_for_each_entry(ns, &dev->namespaces, list) { + blk_mq_freeze_queue_start(ns->queue); + + spin_lock(ns->queue->queue_lock); + queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue); + spin_unlock(ns->queue->queue_lock); + + blk_mq_cancel_requeue_work(ns->queue); + blk_mq_stop_hw_queues(ns->queue); + } +} + +static void nvme_unfreeze_queues(struct nvme_dev *dev) +{ + struct nvme_ns *ns; + + list_for_each_entry(ns, &dev->namespaces, list) { + queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue); + blk_mq_unfreeze_queue(ns->queue); + blk_mq_start_stopped_hw_queues(ns->queue, true); + blk_mq_kick_requeue_list(ns->queue); + } +} + static void nvme_dev_shutdown(struct nvme_dev *dev) { int i; @@ -2413,8 +2446,10 @@ static void nvme_dev_shutdown(struct nvme_dev *dev) dev->initialized = 0; nvme_dev_list_remove(dev); - 
if (dev->bar) + if (dev->bar) { + nvme_freeze_queues(dev); csts = readl(&dev->bar->csts); + } if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) { for (i = dev->queue_count - 1; i >= 0; i--) { struct nvme_queue *nvmeq = dev->queues[i]; @@ -2670,6 +2705,9 @@ static int nvme_dev_resume(struct nvme_dev *dev) dev->reset_workfn = nvme_remove_disks; queue_work(nvme_workq, &dev->reset_work); spin_unlock(&dev_list_lock); + } else { + nvme_unfreeze_queues(dev); + nvme_set_irq_hints(dev); } dev->initialized = 1; return 0; @@ -2807,8 +2845,8 @@ static void nvme_remove(struct pci_dev *pdev) pci_set_drvdata(pdev, NULL); flush_work(&dev->reset_work); misc_deregister(&dev->miscdev); - nvme_dev_remove(dev); nvme_dev_shutdown(dev); + nvme_dev_remove(dev); nvme_dev_remove_admin(dev); nvme_free_queues(dev, 0); nvme_release_prp_pools(dev); From 7a509a6b07dd5a08d91f8a7e0cccb9a6438ce439 Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Wed, 7 Jan 2015 18:55:53 -0700 Subject: [PATCH 18/18] NVMe: Fix locking on abort handling The queues and device need to be locked when messing with them. Signed-off-by: Keith Busch Signed-off-by: Jens Axboe --- drivers/block/nvme-core.c | 29 +++++++++++++++++++---------- 1 file changed, 19 insertions(+), 10 deletions(-) diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index ff4ff0999f02..cb529e9a82dd 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c @@ -1021,14 +1021,19 @@ static void nvme_abort_req(struct request *req) struct nvme_command cmd; if (!nvmeq->qid || cmd_rq->aborted) { + unsigned long flags; + + spin_lock_irqsave(&dev_list_lock, flags); if (work_busy(&dev->reset_work)) - return; + goto out; list_del_init(&dev->node); dev_warn(&dev->pci_dev->dev, "I/O %d QID %d timeout, reset controller\n", req->tag, nvmeq->qid); dev->reset_workfn = nvme_reset_failed_dev; queue_work(nvme_workq, &dev->reset_work); + out: + spin_unlock_irqrestore(&dev_list_lock, flags); return; } @@ -1096,25 +1101,29 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req); struct nvme_queue *nvmeq = cmd->nvmeq; + /* + * The aborted req will be completed on receiving the abort req. + * We enable the timer again. If hit twice, it'll cause a device reset, + * as the device then is in a faulty state. + */ + int ret = BLK_EH_RESET_TIMER; + dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag, nvmeq->qid); + spin_lock_irq(&nvmeq->q_lock); if (!nvmeq->dev->initialized) { /* * Force cancelled command frees the request, which requires we * return BLK_EH_NOT_HANDLED. */ nvme_cancel_queue_ios(nvmeq->hctx, req, nvmeq, reserved); - return BLK_EH_NOT_HANDLED; - } - nvme_abort_req(req); + ret = BLK_EH_NOT_HANDLED; + } else + nvme_abort_req(req); + spin_unlock_irq(&nvmeq->q_lock); - /* - * The aborted req will be completed on receiving the abort req. - * We enable the timer again. If hit twice, it'll cause a device reset, - * as the device then is in a faulty state. - */ - return BLK_EH_RESET_TIMER; + return ret; } static void nvme_free_queue(struct nvme_queue *nvmeq)