Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/bitmap.c   |  2
-rw-r--r--  drivers/md/dm-table.c | 29
-rw-r--r--  drivers/md/md.c       | 41
-rw-r--r--  drivers/md/raid10.c   | 12
-rw-r--r--  drivers/md/raid5.c    | 61
5 files changed, 93 insertions(+), 52 deletions(-)
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 621a272a2c74..7e65bad522cb 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1234,7 +1234,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind)
case 0:
bitmap_file_set_bit(bitmap, offset);
bitmap_count_page(bitmap, offset, 1);
- blk_plug_device(bitmap->mddev->queue);
+ blk_plug_device_unlocked(bitmap->mddev->queue);
/* fall through */
case 1:
*bmc = 2;
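
The fix above swaps blk_plug_device(), which must be called with the queue lock held, for blk_plug_device_unlocked(), which takes the lock itself; bitmap_startwrite() does not hold the queue lock at this point. A minimal userspace sketch of that locked/unlocked convention, with hypothetical names (struct queue, plug_device) standing in for the block-layer symbols:

#include <pthread.h>
#include <stdio.h>

struct queue {
	pthread_mutex_t lock;
	int plugged;
};

/* caller must already hold q->lock (the blk_plug_device() contract) */
static void plug_device(struct queue *q)
{
	q->plugged = 1;
}

/* takes the lock itself, so it is safe from contexts that do not
 * hold it (the blk_plug_device_unlocked() contract) */
static void plug_device_unlocked(struct queue *q)
{
	pthread_mutex_lock(&q->lock);
	plug_device(q);
	pthread_mutex_unlock(&q->lock);
}

int main(void)
{
	struct queue q = { PTHREAD_MUTEX_INITIALIZER, 0 };

	plug_device_unlocked(&q);	/* what bitmap_startwrite() needs */
	printf("plugged=%d\n", q.plugged);
	return 0;
}
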
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 798e468103b8..61f441409234 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -316,29 +316,12 @@ static inline int check_space(struct dm_table *t)
*/
static int lookup_device(const char *path, dev_t *dev)
{
- int r;
- struct nameidata nd;
- struct inode *inode;
-
- if ((r = path_lookup(path, LOOKUP_FOLLOW, &nd)))
- return r;
-
- inode = nd.path.dentry->d_inode;
- if (!inode) {
- r = -ENOENT;
- goto out;
- }
-
- if (!S_ISBLK(inode->i_mode)) {
- r = -ENOTBLK;
- goto out;
- }
-
- *dev = inode->i_rdev;
-
- out:
- path_put(&nd.path);
- return r;
+ struct block_device *bdev = lookup_bdev(path);
+ if (IS_ERR(bdev))
+ return PTR_ERR(bdev);
+ *dev = bdev->bd_dev;
+ bdput(bdev);
+ return 0;
}
/*
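
The rewritten lookup_device() hands the path walk and the S_ISBLK check to lookup_bdev(), which returns an ERR_PTR on failure (including -ENOTBLK for a non-block file). A rough userspace analogue of the resulting behaviour, using stat(2) in place of the kernel path lookup; the device path in main() is just an example:

#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <sys/types.h>

/* Resolve 'path' (following symlinks) to the device number of a
 * block special file. */
static int lookup_device(const char *path, dev_t *dev)
{
	struct stat st;

	if (stat(path, &st))
		return -errno;
	if (!S_ISBLK(st.st_mode))
		return -ENOTBLK;
	*dev = st.st_rdev;
	return 0;
}

int main(void)
{
	dev_t dev;
	int r = lookup_device("/dev/sda", &dev);	/* example path */

	if (r)
		fprintf(stderr, "lookup failed: %d\n", r);
	else
		printf("major %u minor %u\n", major(dev), minor(dev));
	return 0;
}
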
diff --git a/drivers/md/md.c b/drivers/md/md.c
index c2ff77ccec50..8cfadc5bd2ba 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2393,6 +2393,8 @@ static void analyze_sbs(mddev_t * mddev)
}
+static void md_safemode_timeout(unsigned long data);
+
static ssize_t
safe_delay_show(mddev_t *mddev, char *page)
{
@@ -2432,9 +2434,12 @@ safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
if (msec == 0)
mddev->safemode_delay = 0;
else {
+ unsigned long old_delay = mddev->safemode_delay;
mddev->safemode_delay = (msec*HZ)/1000;
if (mddev->safemode_delay == 0)
mddev->safemode_delay = 1;
+ if (mddev->safemode_delay < old_delay)
+ md_safemode_timeout((unsigned long)mddev);
}
return len;
}
@@ -3483,7 +3488,7 @@ static void md_safemode_timeout(unsigned long data)
if (!atomic_read(&mddev->writes_pending)) {
mddev->safemode = 1;
if (mddev->external)
- sysfs_notify(&mddev->kobj, NULL, "array_state");
+ set_bit(MD_NOTIFY_ARRAY_STATE, &mddev->flags);
}
md_wakeup_thread(mddev->thread);
}
@@ -4634,6 +4639,11 @@ static int update_size(mddev_t *mddev, sector_t num_sectors)
*/
if (mddev->sync_thread)
return -EBUSY;
+ if (mddev->bitmap)
+ /* Sorry, cannot grow a bitmap yet, just remove it,
+ * grow, and re-add.
+ */
+ return -EBUSY;
rdev_for_each(rdev, tmp, mddev) {
sector_t avail;
avail = rdev->size * 2;
@@ -5993,10 +6003,11 @@ static int remove_and_add_spares(mddev_t *mddev)
}
}
- if (mddev->degraded) {
+ if (mddev->degraded && ! mddev->ro) {
rdev_for_each(rdev, rtmp, mddev) {
if (rdev->raid_disk >= 0 &&
- !test_bit(In_sync, &rdev->flags))
+ !test_bit(In_sync, &rdev->flags) &&
+ !test_bit(Blocked, &rdev->flags))
spares++;
if (rdev->raid_disk < 0
&& !test_bit(Faulty, &rdev->flags)) {
@@ -6051,6 +6062,9 @@ void md_check_recovery(mddev_t *mddev)
if (mddev->bitmap)
bitmap_daemon_work(mddev->bitmap);
+ if (test_and_clear_bit(MD_NOTIFY_ARRAY_STATE, &mddev->flags))
+ sysfs_notify(&mddev->kobj, NULL, "array_state");
+
if (mddev->ro)
return;
@@ -6063,6 +6077,8 @@ void md_check_recovery(mddev_t *mddev)
flush_signals(current);
}
+ if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
+ return;
if ( ! (
(mddev->flags && !mddev->external) ||
test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
@@ -6076,6 +6092,15 @@ void md_check_recovery(mddev_t *mddev)
if (mddev_trylock(mddev)) {
int spares = 0;
+ if (mddev->ro) {
+ /* Only thing we do on a ro array is remove
+ * failed devices.
+ */
+ remove_and_add_spares(mddev);
+ clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ goto unlock;
+ }
+
if (!mddev->external) {
int did_change = 0;
spin_lock_irq(&mddev->write_lock);
@@ -6113,7 +6138,8 @@ void md_check_recovery(mddev_t *mddev)
/* resync has finished, collect result */
md_unregister_thread(mddev->sync_thread);
mddev->sync_thread = NULL;
- if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
+ if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
+ !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
/* success...*/
/* activate any spares */
if (mddev->pers->spare_active(mddev))
@@ -6165,6 +6191,7 @@ void md_check_recovery(mddev_t *mddev)
} else if ((spares = remove_and_add_spares(mddev))) {
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+ clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
} else if (mddev->recovery_cp < MaxSector) {
set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
@@ -6228,7 +6255,11 @@ static int md_notify_reboot(struct notifier_block *this,
for_each_mddev(mddev, tmp)
if (mddev_trylock(mddev)) {
- do_md_stop (mddev, 1, 0);
+ /* Force a switch to readonly even if the array
+ * appears to still be in use. Hence
+ * the '100'.
+ */
+ do_md_stop (mddev, 1, 100);
mddev_unlock(mddev);
}
/*
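
Two of the md.c hunks cooperate: md_safemode_timeout() runs in timer context, where sysfs_notify() (which can sleep) is unsafe, so it now only sets MD_NOTIFY_ARRAY_STATE, and md_check_recovery() performs the notification later from process context. A compilable C11 sketch of that deferred-flag pattern; FLAG_NOTIFY, timeout_handler() and worker() are illustrative names, not kernel symbols:

#include <stdatomic.h>
#include <stdio.h>

#define FLAG_NOTIFY (1u << 0)	/* stand-in for MD_NOTIFY_ARRAY_STATE */

static atomic_uint flags;

/* "timer context": only sets a bit, never sleeps */
static void timeout_handler(void)
{
	atomic_fetch_or(&flags, FLAG_NOTIFY);
}

/* "process context": test_and_clear_bit(), then do the sleepable work */
static void worker(void)
{
	if (atomic_fetch_and(&flags, ~FLAG_NOTIFY) & FLAG_NOTIFY)
		printf("sysfs_notify(array_state)\n");	/* stand-in */
}

int main(void)
{
	timeout_handler();
	worker();	/* performs the deferred notification */
	worker();	/* flag already cleared: does nothing */
	return 0;
}
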
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 159535d73567..e34cd0e62473 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -76,11 +76,13 @@ static void r10bio_pool_free(void *r10_bio, void *data)
kfree(r10_bio);
}
+/* Maximum size of each resync request */
#define RESYNC_BLOCK_SIZE (64*1024)
-//#define RESYNC_BLOCK_SIZE PAGE_SIZE
-#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
-#define RESYNC_WINDOW (2048*1024)
+/* amount of memory to reserve for resync requests */
+#define RESYNC_WINDOW (1024*1024)
+/* maximum number of concurrent requests, memory permitting */
+#define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
/*
* When performing a resync, we need to read and compare, so
@@ -215,6 +217,9 @@ static void reschedule_retry(r10bio_t *r10_bio)
conf->nr_queued ++;
spin_unlock_irqrestore(&conf->device_lock, flags);
+ /* wake up frozen array... */
+ wake_up(&conf->wait_barrier);
+
md_wakeup_thread(mddev->thread);
}
@@ -687,7 +692,6 @@ static int flush_pending_writes(conf_t *conf)
* there is no normal IO happening. It must arrange to call
* lower_barrier when the particular background IO completes.
*/
-#define RESYNC_DEPTH 32
static void raise_barrier(conf_t *conf, int force)
{
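
Taking the new raid10 constants at face value: each resync request covers 64 KiB, RESYNC_WINDOW reserves 16 such buffers in the mempool, and RESYNC_DEPTH caps outstanding requests at 512. A quick arithmetic check, assuming PAGE_SIZE is 4096 for the example:

#include <stdio.h>

#define PAGE_SIZE 4096	/* assumed for this example */
#define RESYNC_BLOCK_SIZE (64*1024)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
#define RESYNC_WINDOW (1024*1024)
#define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)

int main(void)
{
	printf("pages per request: %d\n", RESYNC_PAGES);			/* 16 */
	printf("reserved buffers:  %d\n", RESYNC_WINDOW / RESYNC_BLOCK_SIZE);	/* 16 */
	printf("max in flight:     %d\n", RESYNC_DEPTH);			/* 512 */
	return 0;
}
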
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 55e7c56045a0..224de022e7c5 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2507,7 +2507,7 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh, struct stripe_head *sh2)
*
*/
-static void handle_stripe5(struct stripe_head *sh)
+static bool handle_stripe5(struct stripe_head *sh)
{
raid5_conf_t *conf = sh->raid_conf;
int disks = sh->disks, i;
@@ -2568,10 +2568,10 @@ static void handle_stripe5(struct stripe_head *sh)
if (dev->written)
s.written++;
rdev = rcu_dereference(conf->disks[i].rdev);
- if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
+ if (blocked_rdev == NULL &&
+ rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
blocked_rdev = rdev;
atomic_inc(&rdev->nr_pending);
- break;
}
if (!rdev || !test_bit(In_sync, &rdev->flags)) {
/* The ReadError flag will just be confusing now */
@@ -2588,8 +2588,14 @@ static void handle_stripe5(struct stripe_head *sh)
rcu_read_unlock();
if (unlikely(blocked_rdev)) {
- set_bit(STRIPE_HANDLE, &sh->state);
- goto unlock;
+ if (s.syncing || s.expanding || s.expanded ||
+ s.to_write || s.written) {
+ set_bit(STRIPE_HANDLE, &sh->state);
+ goto unlock;
+ }
+ /* There is nothing for the blocked_rdev to block */
+ rdev_dec_pending(blocked_rdev, conf->mddev);
+ blocked_rdev = NULL;
}
if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
@@ -2717,10 +2723,11 @@ static void handle_stripe5(struct stripe_head *sh)
if (sh->reconstruct_state == reconstruct_state_result) {
sh->reconstruct_state = reconstruct_state_idle;
clear_bit(STRIPE_EXPANDING, &sh->state);
- for (i = conf->raid_disks; i--; )
+ for (i = conf->raid_disks; i--; ) {
set_bit(R5_Wantwrite, &sh->dev[i].flags);
- set_bit(R5_LOCKED, &dev->flags);
+ set_bit(R5_LOCKED, &sh->dev[i].flags);
s.locked++;
+ }
}
if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
@@ -2754,9 +2761,11 @@ static void handle_stripe5(struct stripe_head *sh)
ops_run_io(sh, &s);
return_io(return_bi);
+
+ return blocked_rdev == NULL;
}
-static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
+static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
{
raid6_conf_t *conf = sh->raid_conf;
int disks = sh->disks;
@@ -2829,10 +2838,10 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
if (dev->written)
s.written++;
rdev = rcu_dereference(conf->disks[i].rdev);
- if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
+ if (blocked_rdev == NULL &&
+ rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
blocked_rdev = rdev;
atomic_inc(&rdev->nr_pending);
- break;
}
if (!rdev || !test_bit(In_sync, &rdev->flags)) {
/* The ReadError flag will just be confusing now */
@@ -2850,9 +2859,16 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
rcu_read_unlock();
if (unlikely(blocked_rdev)) {
- set_bit(STRIPE_HANDLE, &sh->state);
- goto unlock;
+ if (s.syncing || s.expanding || s.expanded ||
+ s.to_write || s.written) {
+ set_bit(STRIPE_HANDLE, &sh->state);
+ goto unlock;
+ }
+ /* There is nothing for the blocked_rdev to block */
+ rdev_dec_pending(blocked_rdev, conf->mddev);
+ blocked_rdev = NULL;
}
+
pr_debug("locked=%d uptodate=%d to_read=%d"
" to_write=%d failed=%d failed_num=%d,%d\n",
s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
@@ -2967,14 +2983,17 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
ops_run_io(sh, &s);
return_io(return_bi);
+
+ return blocked_rdev == NULL;
}
-static void handle_stripe(struct stripe_head *sh, struct page *tmp_page)
+/* returns true if the stripe was handled */
+static bool handle_stripe(struct stripe_head *sh, struct page *tmp_page)
{
if (sh->raid_conf->level == 6)
- handle_stripe6(sh, tmp_page);
+ return handle_stripe6(sh, tmp_page);
else
- handle_stripe5(sh);
+ return handle_stripe5(sh);
}
@@ -3692,7 +3711,9 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
clear_bit(STRIPE_INSYNC, &sh->state);
spin_unlock(&sh->lock);
- handle_stripe(sh, NULL);
+ /* wait for any blocked device to be handled */
+ while(unlikely(!handle_stripe(sh, NULL)))
+ ;
release_stripe(sh);
return STRIPE_SECTORS;
@@ -3811,10 +3832,8 @@ static void raid5d(mddev_t *mddev)
sh = __get_priority_stripe(conf);
- if (!sh) {
- async_tx_issue_pending_all();
+ if (!sh)
break;
- }
spin_unlock_irq(&conf->device_lock);
handled++;
@@ -3827,6 +3846,7 @@ static void raid5d(mddev_t *mddev)
spin_unlock_irq(&conf->device_lock);
+ async_tx_issue_pending_all();
unplug_slaves(mddev);
pr_debug("--- raid5d inactive\n");
@@ -4439,6 +4459,9 @@ static int raid5_check_reshape(mddev_t *mddev)
return -EINVAL; /* Cannot shrink array or change level yet */
if (mddev->delta_disks == 0)
return 0; /* nothing to do */
+ if (mddev->bitmap)
+ /* Cannot grow a bitmap yet */
+ return -EBUSY;
/* Can only proceed if there are plenty of stripe_heads.
* We need a minimum of one full stripe, and for sensible progress
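
Across the raid5.c hunks, handle_stripe() now reports whether the stripe was actually handled, returning false when a Blocked rdev forced an early exit, and the resync path in sync_request() retries until it succeeds. A toy model of that retry contract, with a stub that pretends the device unblocks after a few polls (all names illustrative):

#include <stdbool.h>
#include <stdio.h>

static int blocked_polls = 3;	/* fake device: unblocks after 3 polls */

/* returns true if the stripe was handled, false if a blocked
 * device forced an early exit (the new handle_stripe() contract) */
static bool handle_stripe(void)
{
	if (blocked_polls > 0) {
		blocked_polls--;
		return false;
	}
	return true;
}

int main(void)
{
	int tries = 0;

	/* wait for any blocked device to be handled */
	while (!handle_stripe())
		tries++;
	printf("handled after %d retries\n", tries);
	return 0;
}
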