From ab713cdc6f70da62c254c4acf77a0cfcda87b7f5 Mon Sep 17 00:00:00 2001
From: Nate Dailey
Date: Thu, 12 Feb 2015 12:02:09 -0500
Subject: md/raid1: round up to bdev_logical_block_size in narrow_write_error

This modifies raid1's narrow_write_error to round up block_sectors to the
device's logical block size.

This prevents sd complaining about "Bad block number requested" for
non-512-byte sector disks.

Signed-off-by: Nate Dailey
Signed-off-by: NeilBrown
---
 drivers/md/raid1.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'drivers/md')

diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 5dd0c2e59ab9..4153da5d4011 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -2196,7 +2196,8 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
 	if (rdev->badblocks.shift < 0)
 		return 0;
 
-	block_sectors = 1 << rdev->badblocks.shift;
+	block_sectors = roundup(1 << rdev->badblocks.shift,
+				bdev_logical_block_size(rdev->bdev) >> 9);
 	sector = r1_bio->sector;
 	sectors = ((sector + block_sectors)
 		   & ~(sector_t)(block_sectors - 1))
--
cgit v1.2.3


From f04ebb0be74283b32f539c7f6b84d5cfcaa0ead3 Mon Sep 17 00:00:00 2001
From: NeilBrown
Date: Mon, 16 Feb 2015 14:51:54 +1100
Subject: md/raid10: round up to bdev_logical_block_size in narrow_write_error.

RAID10 version of earlier fix for RAID1.

We must never initiate IO with sizes less than logical_block_size.

Signed-off-by: NeilBrown
---
 drivers/md/raid10.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'drivers/md')

diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index b8d76b1fba64..a7196c49d15d 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2572,7 +2572,8 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
 	if (rdev->badblocks.shift < 0)
 		return 0;
 
-	block_sectors = 1 << rdev->badblocks.shift;
+	block_sectors = roundup(1 << rdev->badblocks.shift,
+				bdev_logical_block_size(rdev->bdev) >> 9);
 	sector = r10_bio->sector;
 	sectors = ((r10_bio->sector + block_sectors)
 		   & ~(sector_t)(block_sectors - 1))
--
cgit v1.2.3


From 26ac107378c4742978216be1005b7291b799c7b2 Mon Sep 17 00:00:00 2001
From: NeilBrown
Date: Wed, 18 Feb 2015 11:35:14 +1100
Subject: md/raid5: Fix livelock when array is both resyncing and degraded.

Commit a7854487cd7128a30a7f4f5259de9f67d5efb95f:
  md: When RAID5 is dirty, force reconstruct-write instead of read-modify-write.

Causes an RCW cycle to be forced even when the array is degraded.
A degraded array cannot support RCW as that requires reading all data
blocks, and one may be missing.

Forcing an RCW when it is not possible causes a live-lock and the code
spins, repeatedly deciding to do something that cannot succeed.

So change the condition to only force RCW on non-degraded arrays.

Reported-by: Manibalan P
Bisected-by: Jes Sorensen
Tested-by: Jes Sorensen
Signed-off-by: NeilBrown
Fixes: a7854487cd7128a30a7f4f5259de9f67d5efb95f
Cc: stable@vger.kernel.org (v3.7+)
---
 drivers/md/raid5.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'drivers/md')

diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index aa76865b804b..e75d48c0421a 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3170,7 +3170,8 @@ static void handle_stripe_dirtying(struct r5conf *conf,
 	 * generate correct data from the parity.
 	 */
 	if (conf->max_degraded == 2 ||
-	    (recovery_cp < MaxSector && sh->sector >= recovery_cp)) {
+	    (recovery_cp < MaxSector && sh->sector >= recovery_cp &&
+	     s->failed == 0)) {
 		/* Calculate the real rcw later - for now make it
 		 * look like rcw is cheaper
 		 */
--
cgit v1.2.3
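
The common thread in the first two patches is that narrow_write_error retries a
failed write in bad-block-sized chunks, and a chunk smaller than the device's
logical block size can never be issued. Below is a minimal userspace sketch
(not kernel code) of that arithmetic: roundup() is defined locally to mirror
the kernel macro, and the example values (a 4096-byte logical block,
badblocks.shift == 0, a logical-block-aligned starting sector) are assumptions
chosen only to illustrate why the roundup matters.

#include <stdio.h>

/* Local stand-in for the kernel's roundup() macro. */
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

int main(void)
{
	unsigned long logical_block_size = 4096; /* e.g. a 4Kn disk */
	int badblocks_shift = 0;                 /* one 512-byte sector per bad-block unit */
	unsigned long sector = 10000;            /* logical-block-aligned start of the failed write */

	/* Before the fix: one bad-block unit, i.e. a single 512-byte sector. */
	unsigned long old_bs = 1UL << badblocks_shift;

	/* After the fix: rounded up to the logical block size expressed in
	 * 512-byte sectors, so every retried chunk covers whole logical blocks. */
	unsigned long new_bs = roundup(1UL << badblocks_shift,
				       logical_block_size >> 9);

	/* Same alignment step as narrow_write_error(): length of the first
	 * chunk, up to the next block_sectors boundary. */
	unsigned long old_len = ((sector + old_bs) & ~(old_bs - 1)) - sector;
	unsigned long new_len = ((sector + new_bs) & ~(new_bs - 1)) - sector;

	printf("old: block_sectors=%lu, first chunk=%lu sectors (%lu bytes)\n",
	       old_bs, old_len, old_len * 512);
	printf("new: block_sectors=%lu, first chunk=%lu sectors (%lu bytes)\n",
	       new_bs, new_len, new_len * 512);
	return 0;
}

With these assumed values the old code would issue a 1-sector (512-byte) write
to a 4096-byte-sector disk, which is what made sd log "Bad block number
requested"; after the roundup the chunk is 8 sectors (4096 bytes). On a
512-byte-sector disk the roundup is a no-op, so behaviour there is unchanged.
The raid5 change needs no such arithmetic and is fully captured by the extra
s->failed == 0 test in the diff above.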