aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMike Snitzer <snitzer@redhat.com>2015-02-24 11:03:22 -0500
committerMike Snitzer <snitzer@redhat.com>2015-02-24 17:44:47 -0500
commit9068f2f15d5fc7a5c9dc6e6963913216ede0c745 (patch)
treebf3e127510adc5d46c180e85267ce9042c4df232
parent41e546935a2f8a2a6797bd108b712b1314ea20a2 (diff)
downloadlinux-dm-9068f2f15d5fc7a5c9dc6e6963913216ede0c745.tar.gz
dm: only run the queue on completion if congested or no requests pending
Notice: this object is not reachable from any branch.
On really fast storage it can be beneficial to delay running the request_queue to allow the elevator more opportunity to merge requests. Otherwise, it has been observed that requests are being sent to q->request_fn much quicker than is ideal on IOPS-bound backends.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Notice: this object is not reachable from any branch.
-rw-r--r--drivers/md/dm.c12
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index fc928994028a5..6ffed8004b27c 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1021,10 +1021,13 @@ static void end_clone_bio(struct bio *clone, int error)
*/
static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
{
+ int nr_requests_pending;
+
atomic_dec(&md->pending[rw]);
/* nudge anyone waiting on suspend queue */
- if (!md_in_flight(md))
+ nr_requests_pending = md_in_flight(md);
+ if (!nr_requests_pending)
wake_up(&md->wait);
/*
@@ -1033,8 +1036,11 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
* back into ->request_fn() could deadlock attempting to grab the
* queue lock again.
*/
- if (run_queue)
- blk_run_queue_async(md->queue);
+ if (run_queue) {
+ if (!nr_requests_pending ||
+ (nr_requests_pending >= md->queue->nr_congestion_on))
+ blk_run_queue_async(md->queue);
+ }
/*
* dm_put() must be at the end of this function. See the comment above