md/raid5: only add to wq if reshape is in progress
Now that actual overlaps are no longer handled on the wait_for_overlap wq, the remaining cases where we wait on this wq are limited to reshape. If reshape is not in progress, don't add to the wq in raid5_make_request(), because the add_wait_queue() / remove_wait_queue() operations take a spinlock and cause noticeable contention when multiple threads are submitting requests to the mddev.

Signed-off-by: Artur Paszkiewicz <artur.paszkiewicz@intel.com>
Link: https://lore.kernel.org/r/20240827153536.6743-3-artur.paszkiewicz@intel.com
Signed-off-by: Song Liu <song@kernel.org>
This commit is contained in:
parent e6a03207b9
commit 0e4aac7366
1 changed file with 10 additions and 3 deletions
drivers/md/raid5.c

@@ -6070,6 +6070,7 @@ static sector_t raid5_bio_lowest_chunk_sector(struct r5conf *conf,
 static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
 {
 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
+	bool on_wq;
 	struct r5conf *conf = mddev->private;
 	sector_t logical_sector;
 	struct stripe_request_ctx ctx = {};
@@ -6143,11 +6144,15 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
 	 * sequential IO pattern. We don't bother with the optimization when
 	 * reshaping as the performance benefit is not worth the complexity.
 	 */
-	if (likely(conf->reshape_progress == MaxSector))
+	if (likely(conf->reshape_progress == MaxSector)) {
 		logical_sector = raid5_bio_lowest_chunk_sector(conf, bi);
+		on_wq = false;
+	} else {
+		add_wait_queue(&conf->wait_for_overlap, &wait);
+		on_wq = true;
+	}
 	s = (logical_sector - ctx.first_sector) >> RAID5_STRIPE_SHIFT(conf);
 
-	add_wait_queue(&conf->wait_for_overlap, &wait);
 	while (1) {
 		res = make_stripe_request(mddev, conf, &ctx, logical_sector,
 					  bi);
@@ -6158,6 +6163,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
 			continue;
 
 		if (res == STRIPE_SCHEDULE_AND_RETRY) {
+			WARN_ON_ONCE(!on_wq);
 			/*
 			 * Must release the reference to batch_last before
 			 * scheduling and waiting for work to be done,
@@ -6182,7 +6188,8 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
 		logical_sector = ctx.first_sector +
 			(s << RAID5_STRIPE_SHIFT(conf));
 	}
-	remove_wait_queue(&conf->wait_for_overlap, &wait);
+	if (unlikely(on_wq))
+		remove_wait_queue(&conf->wait_for_overlap, &wait);
 
 	if (ctx.batch_last)
 		raid5_release_stripe(ctx.batch_last);
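As the commit message notes, the point of the change is that add_wait_queue() / remove_wait_queue() take the wait queue's spinlock, so skipping them entirely in the common (non-reshape) case removes a source of contention. Below is a minimal userspace sketch of that pattern, not kernel code: a pthread mutex models the wait queue spinlock, and fake_wait_queue, reshaping, and submit_request() are hypothetical stand-ins for conf->wait_for_overlap, the conf->reshape_progress check, and raid5_make_request().

/*
 * Userspace analogue of the patch's pattern: only pay for registering
 * on a shared waiter list (and taking its lock) in the rare path.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_wait_queue {
	pthread_mutex_t lock;	/* models the wait queue spinlock */
	int nr_waiters;
};

static struct fake_wait_queue wq = { PTHREAD_MUTEX_INITIALIZER, 0 };
static bool reshaping;		/* models conf->reshape_progress != MaxSector */

static void add_waiter(struct fake_wait_queue *q)
{
	pthread_mutex_lock(&q->lock);	/* contended when many threads submit */
	q->nr_waiters++;
	pthread_mutex_unlock(&q->lock);
}

static void remove_waiter(struct fake_wait_queue *q)
{
	pthread_mutex_lock(&q->lock);
	q->nr_waiters--;
	pthread_mutex_unlock(&q->lock);
}

static void submit_request(void)
{
	bool on_wq;

	if (!reshaping) {	/* common case: no lock taken at all */
		on_wq = false;
	} else {		/* rare case: register as a waiter */
		add_waiter(&wq);
		on_wq = true;
	}

	/* ... issue the request, sleeping on the queue only if on_wq ... */

	if (on_wq)		/* unregister only if we registered */
		remove_waiter(&wq);
}

int main(void)
{
	submit_request();
	printf("waiters after submit: %d\n", wq.nr_waiters);
	return 0;
}

The new WARN_ON_ONCE(!on_wq) in the STRIPE_SCHEDULE_AND_RETRY path reflects the same invariant: waiting there is only expected while a reshape is in progress, i.e. when the thread actually registered on the wait queue.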