path: root/drivers/block/drbd/drbd_main.c
author    Lars Ellenberg <lars.ellenberg@linbit.com>    2010-12-17 21:14:23 +0100
committer Philipp Reisner <philipp.reisner@linbit.com>  2011-03-10 11:43:35 +0100
commit    5a22db8968a69bec835d1ed9a96ab3381719e0c0 (patch)
tree      6dd29c3cec008a2f846f54a02cdb139c4ce94be3 /drivers/block/drbd/drbd_main.c
parent    f735e3635430c6d1c319664d82b34376e3f9aa17 (diff)
drbd: serialize sending of resync uuid with pending w_send_oos
To improve the latency of IO requests during bitmap exchange, we recently
allowed writes while waiting for the bitmap, sending "set out-of-sync"
information packets for any newly dirtied bits.

We have to make sure that the new resync-uuid does not overtake these
"set oos" packets. Once the resync-uuid is received, the sync target starts
the resync process, and expects the bitmap to only be cleared, not re-set.

If we use this protocol extension, we queue the generation and sending of
the resync-uuid on the worker, which naturally serializes with all previously
queued packets.

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
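The serialization argument above is purely about FIFO ordering on the single
worker thread that drains the device's work queue. Below is a minimal
stand-alone sketch of that idea; it is not DRBD code, and work_item,
queue_work and worker_drain are illustrative names, but it shows why an item
queued behind pending "set out-of-sync" work can never reach the wire ahead
of it:

/*
 * Stand-alone sketch (not kernel code): models a single worker draining a
 * FIFO work queue.  Because the resync-uuid item is appended behind any
 * pending "set out-of-sync" items, it cannot overtake them on the wire.
 */
#include <stdio.h>
#include <stdlib.h>

struct work_item {
	struct work_item *next;
	const char *what;	/* e.g. "P_OUT_OF_SYNC" or "P_SYNC_UUID" */
};

struct work_queue {
	struct work_item *head, *tail;
};

/* Append at the tail, the way a work item is queued for the worker. */
static void queue_work(struct work_queue *q, const char *what)
{
	struct work_item *w = malloc(sizeof(*w));

	if (!w)
		exit(1);
	w->next = NULL;
	w->what = what;
	if (q->tail)
		q->tail->next = w;
	else
		q->head = w;
	q->tail = w;
}

/* The single worker: "sends" packets strictly in queue order. */
static void worker_drain(struct work_queue *q)
{
	while (q->head) {
		struct work_item *w = q->head;

		q->head = w->next;
		if (!q->head)
			q->tail = NULL;
		printf("send %s\n", w->what);
		free(w);
	}
}

int main(void)
{
	struct work_queue q = { NULL, NULL };

	/* Writes during bitmap exchange queue "set out-of-sync" packets. */
	queue_work(&q, "P_OUT_OF_SYNC (bit 17)");
	queue_work(&q, "P_OUT_OF_SYNC (bit 42)");

	/* Becoming SyncSource queues the uuid behind them, never ahead. */
	queue_work(&q, "P_SYNC_UUID");

	worker_drain(&q);	/* two OOS packets first, the uuid last */
	return 0;
}

Compiled with any C compiler, the sketch prints the two out-of-sync packets
first and the sync uuid last, mirroring the order the peer must observe.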
Diffstat (limited to 'drivers/block/drbd/drbd_main.c')
-rw-r--r--  drivers/block/drbd/drbd_main.c  22
1 file changed, 19 insertions(+), 3 deletions(-)
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 4da6f11cc82e..2190064d59bd 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1387,6 +1387,17 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 		spin_unlock_irq(&mdev->req_lock);
 	}
 
+	/* Became sync source. With protocol >= 96, we still need to send out
+	 * the sync uuid now. Need to do that before any drbd_send_state, or
+	 * the other side may go "paused sync" before receiving the sync uuids,
+	 * which is unexpected. */
+	if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) &&
+	    (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) &&
+	    mdev->agreed_pro_version >= 96 && get_ldev(mdev)) {
+		drbd_gen_and_send_sync_uuid(mdev);
+		put_ldev(mdev);
+	}
+
 	/* Do not change the order of the if above and the two below... */
 	if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) {	/* attach on the peer */
 		drbd_send_uuids(mdev);
@@ -1980,12 +1991,17 @@ int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
 	return _drbd_send_uuids(mdev, 8);
 }
 
-
-int drbd_send_sync_uuid(struct drbd_conf *mdev, u64 val)
+int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
 {
 	struct p_rs_uuid p;
+	u64 uuid;
+
+	D_ASSERT(mdev->state.disk == D_UP_TO_DATE);
 
-	p.uuid = cpu_to_be64(val);
+	get_random_bytes(&uuid, sizeof(u64));
+	drbd_uuid_set(mdev, UI_BITMAP, uuid);
+	drbd_md_sync(mdev);
+	p.uuid = cpu_to_be64(uuid);
 
 	return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID,
 			     (struct p_header80 *)&p, sizeof(p));