| author | Mike Snitzer <snitzer@redhat.com> | 2015-02-26 00:50:28 -0500 |
|---|---|---|
| committer | Mike Snitzer <snitzer@redhat.com> | 2015-03-02 17:47:03 -0500 |
| commit | f975bc0d522d1e57b108bf9e2e0293ee3f78f821 (patch) | |
| tree | 5c8a0fe4f0645137a5c177c9e9f4236e8219d11a | |
| parent | 48c47cfa7711112283eea0a065ef28e62d8526f2 (diff) | |
| download | linux-dm-f975bc0d522d1e57b108bf9e2e0293ee3f78f821.tar.gz | |
dm: impose configurable deadline for dm_request_fn's merge heuristic
Otherwise, for sequential workloads, the dm_request_fn can perform
excessive request merging at the expense of increased service time.
Add a per-device sysfs parameter to allow the user to control how long a
request that is a reasonable merge candidate can be queued on the
request queue. The resolution of this request dispatch deadline is in
microseconds (ranging from 1 to 100000 usecs). For example, to set a 0.3 ms deadline:
echo 300 > /sys/block/dm-7/dm/rq_based_queue_deadline
This parameter is not applicable to bio-based DM devices, so it will only
ever report 0 for them.
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
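As a rough illustration of driving the new attribute from userspace (a sketch only: the dm-7 path is just the example device from the message above, and this helper is not part of the patch):

```c
#include <stdio.h>

/* Sketch: set the request-dispatch deadline for the example device dm-7
 * to 0.3 ms and read back the value the kernel actually applied.
 * The sysfs path comes from the example above; error handling is minimal. */
int main(void)
{
	const char *path = "/sys/block/dm-7/dm/rq_based_queue_deadline";
	unsigned long applied = 0;
	FILE *f;

	f = fopen(path, "w");
	if (!f)
		return 1;
	fprintf(f, "300\n");		/* 300 usecs == 0.3 ms */
	fclose(f);

	f = fopen(path, "r");
	if (!f)
		return 1;
	if (fscanf(f, "%lu", &applied) == 1)
		printf("deadline now %lu usecs\n", applied);
	fclose(f);

	return 0;
}
```

Reading the attribute back is useful because the kernel may clamp or substitute the written value, and bio-based devices simply report 0.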
| mode | file | lines |
|---|---|---|
| -rw-r--r-- | drivers/md/dm-sysfs.c | 2 |
| -rw-r--r-- | drivers/md/dm.c | 51 |
| -rw-r--r-- | drivers/md/dm.h | 4 |

3 files changed, 56 insertions(+), 1 deletion(-)
diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
index 1271c31709fd7..cf5f83b4ef37f 100644
--- a/drivers/md/dm-sysfs.c
+++ b/drivers/md/dm-sysfs.c
@@ -92,11 +92,13 @@ static ssize_t dm_attr_suspended_show(struct mapped_device *md, char *buf)
 static DM_ATTR_RO(name);
 static DM_ATTR_RO(uuid);
 static DM_ATTR_RO(suspended);
+static DM_ATTR_RW(rq_based_queue_deadline);
 
 static struct attribute *dm_attrs[] = {
 	&dm_attr_name.attr,
 	&dm_attr_uuid.attr,
 	&dm_attr_suspended.attr,
+	&dm_attr_rq_based_queue_deadline.attr,
 	NULL,
 };
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 3242f4ca796ef..75f695c59645f 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -21,6 +21,7 @@
 #include <linux/delay.h>
 #include <linux/wait.h>
 #include <linux/kthread.h>
+#include <linux/ktime.h>
 #include <linux/elevator.h> /* for rq_end_sector() */
 
 #include <trace/events/block.h>
@@ -219,6 +220,8 @@ struct mapped_device {
 	struct task_struct *kworker_task;
 
 	/* for request-based merge heuristic in dm_request_fn() */
+	ktime_t rq_based_queue_deadline;
+	ktime_t last_rq_start_time;
 	sector_t last_rq_pos;
 	int last_rq_rw;
 };
@@ -1934,6 +1937,7 @@ static void dm_start_request(struct mapped_device *md, struct request *orig)
 
 	md->last_rq_pos = rq_end_sector(orig);
 	md->last_rq_rw = rq_data_dir(orig);
+	md->last_rq_start_time = ktime_get();
 
 	/*
 	 * Hold the md reference here for the in-flight I/O.
@@ -1945,6 +1949,47 @@ static void dm_start_request(struct mapped_device *md, struct request *orig)
 	dm_get(md);
 }
 
+#define DEF_QUEUE_DEADLINE_USECS	500	/* 0.5 ms */
+#define MAX_QUEUE_DEADLINE_USECS	100000	/* 100 ms */
+
+ssize_t dm_attr_rq_based_queue_deadline_show(struct mapped_device *md, char *buf)
+{
+	return sprintf(buf, "%lu\n",
+		       (unsigned long)ktime_to_us(md->rq_based_queue_deadline));
+}
+
+ssize_t dm_attr_rq_based_queue_deadline_store(struct mapped_device *md,
+					      const char *buf, size_t count)
+{
+	int err;
+	u64 deadline;
+
+	if (!dm_request_based(md))
+		return count;
+
+	err = kstrtou64(buf, 10, &deadline);
+	if (err)
+		return -EINVAL;
+
+	if (!deadline)
+		deadline = DEF_QUEUE_DEADLINE_USECS;
+	else if (deadline > MAX_QUEUE_DEADLINE_USECS)
+		deadline = MAX_QUEUE_DEADLINE_USECS;
+
+	md->rq_based_queue_deadline = ns_to_ktime(deadline * NSEC_PER_USEC);
+
+	return count;
+}
+
+static bool dm_request_dispatched_before_queue_deadline(struct mapped_device *md)
+{
+	ktime_t kt_now = ktime_get();
+	ktime_t kt_deadline = ktime_add_safe(md->last_rq_start_time,
+					     md->rq_based_queue_deadline);
+
+	return !ktime_after(kt_now, kt_deadline);
+}
+
 /*
  * q->request_fn for request-based dm.
  * Called with the queue lock held.
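The store path above clamps whatever userspace writes; a minimal standalone model of that clamping, assuming plain microsecond integers in place of ktime_t (the function name here is invented for illustration):

```c
#include <stdint.h>

#define DEF_QUEUE_DEADLINE_USECS	500	/* 0.5 ms default */
#define MAX_QUEUE_DEADLINE_USECS	100000	/* 100 ms cap */

/* Model of the value the store function ends up applying: writing 0 falls
 * back to the default, anything above the cap is clamped to the cap. */
uint64_t applied_deadline_usecs(uint64_t requested)
{
	if (!requested)
		return DEF_QUEUE_DEADLINE_USECS;
	if (requested > MAX_QUEUE_DEADLINE_USECS)
		return MAX_QUEUE_DEADLINE_USECS;
	return requested;
}
```

So echoing 300 applies 300 usecs, echoing 0 re-applies the 500 usec default, and anything above 100000 is capped at 100 ms.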
@@ -1987,7 +2032,8 @@ static void dm_request_fn(struct request_queue *q)
 			continue;
 		}
 
-		if (md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
+		if (dm_request_dispatched_before_queue_deadline(md) &&
+		    md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
 		    md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq))
 			goto delay_and_out;
 
@@ -2527,6 +2573,9 @@ static int dm_init_request_based_queue(struct mapped_device *md)
 	if (!q)
 		return 0;
 
+	md->rq_based_queue_deadline =
+		ns_to_ktime(DEF_QUEUE_DEADLINE_USECS * NSEC_PER_USEC);
+
 	md->queue = q;
 	dm_init_md_queue(md);
 	blk_queue_softirq_done(md->queue, dm_softirq_done);
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index db495863fa5f4..722fc0f03c768 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -234,4 +234,8 @@ static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen
 	return !maxlen || strlen(result) + 1 >= maxlen;
 }
 
+ssize_t dm_attr_rq_based_queue_deadline_show(struct mapped_device *md, char *buf);
+ssize_t dm_attr_rq_based_queue_deadline_store(struct mapped_device *md,
+					      const char *buf, size_t count);
+
 #endif
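A condensed model of how the deadline gates the existing merge heuristic in dm_request_fn() (the struct and function names below are invented for illustration; the real check is the if-condition in the hunk above):

```c
#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-ins for the per-device state dm_request_fn() consults. */
struct merge_state {
	uint64_t deadline_us;	/* rq_based_queue_deadline, in microseconds */
	uint64_t last_start_us;	/* last_rq_start_time, in microseconds */
	uint64_t last_end_pos;	/* last_rq_pos: end sector of the last request */
	int	 last_rw;	/* last_rq_rw: direction of the last request */
	int	 in_flight;	/* md_in_flight(): requests still in flight */
};

/* A request is held back (requeued briefly) to wait for merges only while
 * the deadline has not expired AND it still looks like a sequential,
 * single-segment continuation of the previous request. */
bool hold_for_merge(const struct merge_state *s, uint64_t now_us,
		    uint64_t rq_pos, int rq_rw, int rq_segments)
{
	return now_us <= s->last_start_us + s->deadline_us &&
	       s->in_flight > 0 &&
	       rq_segments == 1 &&
	       s->last_end_pos == rq_pos &&
	       s->last_rw == rq_rw;
}
```

Once the deadline expires for the request at the head of the queue, the first condition fails and dm_request_fn() dispatches it rather than requeueing it, which bounds the extra latency the merge heuristic can add.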
