author     Pavel Begunkov <asml.silence@gmail.com>    2020-12-06 15:56:21 +0000
committer  David Sterba <dsterba@suse.com>            2020-12-18 14:59:53 +0100
commit     1ea2872fc6f2aaee0a4b4f1578b83ffd9f55c6a7 (patch)
tree       7aed7e743d59c9efb9e1b2430918426b2bedbd28 /fs
parent     ea9ed87c73e87e044b2c58d658eb4ba5216bc488 (diff)
btrfs: fix racy access to discard_ctl data
Because only one discard worker may be running at any given point, it could have been safe to modify ->prev_discard, etc. without synchronization, if not for the @override flag in btrfs_discard_schedule_work() and delayed_work_pending() returning false while the workfn is running. That may lead to torn reads of u64 on some architectures, but that's not a big problem as it only slightly affects the discard rate.

Suggested-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
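For illustration, the pattern the patch applies can be sketched in plain userspace C. This is a stand-alone sketch under stated assumptions, not btrfs code: the struct, the helper names, and the pthread mutex are invented stand-ins, with the mutex playing the role of discard_ctl->lock and the two u64 fields mirroring prev_discard and prev_discard_time.

/*
 * Illustrative userspace sketch only, not btrfs code: the struct, helper
 * names and pthread mutex are invented stand-ins.  The point is the
 * pattern the patch applies: the worker publishes both 64-bit values
 * under the same lock that the reading side takes, so no reader can
 * observe a torn (half-updated) u64.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct discard_stats {			/* stand-in for struct btrfs_discard_ctl */
	pthread_mutex_t lock;		/* plays the role of discard_ctl->lock */
	uint64_t prev_discard;		/* bytes trimmed by the last work run */
	uint64_t prev_discard_time;	/* timestamp of the last work run (ns) */
};

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

/* Worker side: sample the clock outside the lock, publish inside it. */
static void publish_stats(struct discard_stats *s, uint64_t trimmed)
{
	uint64_t now = now_ns();	/* taken before the lock, as in the patch */

	pthread_mutex_lock(&s->lock);
	s->prev_discard = trimmed;
	s->prev_discard_time = now;
	pthread_mutex_unlock(&s->lock);
}

/* Reader side (the scheduling path in btrfs): sample both values consistently. */
static void read_stats(struct discard_stats *s, uint64_t *trimmed, uint64_t *when)
{
	pthread_mutex_lock(&s->lock);
	*trimmed = s->prev_discard;
	*when = s->prev_discard_time;
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	struct discard_stats s = { .lock = PTHREAD_MUTEX_INITIALIZER };
	uint64_t trimmed, when;

	publish_stats(&s, 4096);
	read_stats(&s, &trimmed, &when);
	printf("trimmed=%llu bytes at %llu ns\n",
	       (unsigned long long)trimmed, (unsigned long long)when);
	return 0;
}

As in the patch itself, the timestamp is sampled before the lock is taken, so the critical section is reduced to the two stores; build the sketch with -pthread.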
Diffstat (limited to 'fs')
-rw-r--r--  fs/btrfs/discard.c  |  10
1 file changed, 3 insertions, 7 deletions
diff --git a/fs/btrfs/discard.c b/fs/btrfs/discard.c
index 36431d7e1334..d641f451f840 100644
--- a/fs/btrfs/discard.c
+++ b/fs/btrfs/discard.c
@@ -477,13 +477,6 @@ static void btrfs_discard_workfn(struct work_struct *work)
 		discard_ctl->discard_extent_bytes += trimmed;
 	}
 
-	/*
-	 * Updated without locks as this is inside the workfn and nothing else
-	 * is reading the values
-	 */
-	discard_ctl->prev_discard = trimmed;
-	discard_ctl->prev_discard_time = ktime_get_ns();
-
 	/* Determine next steps for a block_group */
 	if (block_group->discard_cursor >= btrfs_block_group_end(block_group)) {
 		if (discard_state == BTRFS_DISCARD_BITMAPS) {
@@ -499,7 +492,10 @@ static void btrfs_discard_workfn(struct work_struct *work)
 		}
 	}
 
+	now = ktime_get_ns();
 	spin_lock(&discard_ctl->lock);
+	discard_ctl->prev_discard = trimmed;
+	discard_ctl->prev_discard_time = now;
 	discard_ctl->block_group = NULL;
 	spin_unlock(&discard_ctl->lock);