author    Qu Wenruo <wqu@suse.com>    2021-01-26 16:33:49 +0800
committer David Sterba <dsterba@suse.com>    2021-02-08 22:59:01 +0100
commit    819822107d8837fc3363ceaeb172b981c8600a2b (patch)
tree      13e5ef85ffbe8ea9ab2e4320c479cdc65018d376 /fs/btrfs
parent    760f991f1428f25fd18b8638004c95f0a2a43b2f (diff)
btrfs: make grab_extent_buffer_from_page() handle subpage case
For the subpage case, grab_extent_buffer() can't really get an extent buffer
just from btrfs_subpage.

We have the radix tree lock protecting us from inserting the same eb into the
tree, so we don't really need the extra hassle; just let alloc_extent_buffer()
handle the existing eb in the radix tree.

Now if two ebs are being allocated at the same time, one will fail with
-EEXIST when inserting into the radix tree.

So for grab_extent_buffer(), just always return NULL for the subpage case.

Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
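As background for the message above, the fallback it relies on is the radix
tree insertion in alloc_extent_buffer(): when two tasks race to allocate an
eb for the same bytenr, the second insert fails with -EEXIST and the existing
eb is looked up and reused. A rough, paraphrased sketch of that pattern is
shown below (helper and field names follow fs/btrfs/extent_io.c of that era;
this is an illustration, not the verbatim upstream code):

	/*
	 * Paraphrased sketch of the racing-insert handling inside
	 * alloc_extent_buffer(); illustrative only, not the exact upstream
	 * code.  'eb' is the freshly allocated extent buffer, 'start' is
	 * its bytenr.
	 */
	ret = radix_tree_preload(GFP_NOFS);
	if (ret) {
		exists = ERR_PTR(ret);
		goto free_eb;
	}

	spin_lock(&fs_info->buffer_lock);
	ret = radix_tree_insert(&fs_info->buffer_radix,
				start >> fs_info->sectorsize_bits, eb);
	spin_unlock(&fs_info->buffer_lock);
	radix_tree_preload_end();
	if (ret == -EEXIST) {
		/* Someone else inserted an eb for this bytenr first: reuse it. */
		exists = find_extent_buffer(fs_info, start);
		if (exists)
			goto free_eb;
		/* The winning eb was already released again, retry the lookup. */
		goto again;
	}

With grab_extent_buffer() returning NULL for subpage, this -EEXIST path is
what deduplicates concurrent allocations of the same eb.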
Diffstat (limited to 'fs/btrfs')
-rw-r--r--  fs/btrfs/extent_io.c  13
1 file changed, 11 insertions(+), 2 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index e498d496560b..133ff4531472 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -5284,10 +5284,19 @@ free_eb:
 }
 #endif
 
-static struct extent_buffer *grab_extent_buffer(struct page *page)
+static struct extent_buffer *grab_extent_buffer(
+		struct btrfs_fs_info *fs_info, struct page *page)
 {
 	struct extent_buffer *exists;
 
+	/*
+	 * For subpage case, we completely rely on radix tree to ensure we
+	 * don't try to insert two ebs for the same bytenr. So here we always
+	 * return NULL and just continue.
+	 */
+	if (fs_info->sectorsize < PAGE_SIZE)
+		return NULL;
+
 	/* Page not yet attached to an extent buffer */
 	if (!PagePrivate(page))
 		return NULL;
@@ -5373,7 +5382,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 	}
 
 	spin_lock(&mapping->private_lock);
-	exists = grab_extent_buffer(p);
+	exists = grab_extent_buffer(fs_info, p);
 	if (exists) {
 		spin_unlock(&mapping->private_lock);
 		unlock_page(p);