author    Linus Torvalds <torvalds@linux-foundation.org>  2019-08-18 09:51:48 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-08-18 09:51:48 -0700
commit    3039fadf2bfdc104dc963820c305778c7c1a6229 (patch)
tree      1adbef5e37da3f3feece08b8c7dfa6e1bab4ce74
parent    c332f3a70e7a094e4a60d68a2c4c6f051ed7f04d (diff)
parent    07301df7d2fc220d3de5f7ad804dcb941400cb00 (diff)
Merge tag 'for-5.3-rc4-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux
Pull btrfs fixes from David Sterba:
 "Two fixes that popped up during testing:

   - fix for sysfs-related code that adds/removes block groups, warnings
     appear during several fstests in connection with sysfs updates in
     5.3, the fix essentially replaces a workaround with scope NOFS and
     applies to 5.2-based branch too

   - add sanity check of trim range"

* tag 'for-5.3-rc4-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  btrfs: trim: Check the range passed into to prevent overflow
  Btrfs: fix sysfs warning and missing raid sysfs directories
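The first fix relies on the kernel's scoped NOFS API from <linux/sched/mm.h>: every allocation made between memalloc_nofs_save() and memalloc_nofs_restore() implicitly behaves as GFP_NOFS, so deep callees such as kobject_add() can keep passing GFP_KERNEL without risking recursion into filesystem reclaim. A minimal standalone sketch of that pattern (alloc_in_nofs_scope() is an illustrative helper, not code from this merge):

#include <linux/sched/mm.h>
#include <linux/slab.h>

static void *alloc_in_nofs_scope(size_t size)
{
	unsigned int nofs_flag;
	void *p;

	nofs_flag = memalloc_nofs_save();	/* enter NOFS scope */
	p = kzalloc(size, GFP_KERNEL);		/* treated as GFP_NOFS here */
	memalloc_nofs_restore(nofs_flag);	/* leave NOFS scope */
	return p;
}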
-rw-r--r--  fs/btrfs/ctree.h        4
-rw-r--r--  fs/btrfs/disk-io.c      2
-rw-r--r--  fs/btrfs/extent-tree.c 71
-rw-r--r--  fs/btrfs/volumes.c     13
4 files changed, 35 insertions(+), 55 deletions(-)
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 299e11e6c554..94660063a162 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -401,7 +401,6 @@ struct btrfs_dev_replace {
struct raid_kobject {
u64 flags;
struct kobject kobj;
- struct list_head list;
};
/*
@@ -915,8 +914,6 @@ struct btrfs_fs_info {
u32 thread_pool_size;
struct kobject *space_info_kobj;
- struct list_head pending_raid_kobjs;
- spinlock_t pending_raid_kobjs_lock; /* uncontended */
u64 total_pinned;
@@ -2698,7 +2695,6 @@ int btrfs_can_relocate(struct btrfs_fs_info *fs_info, u64 bytenr);
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
u64 bytes_used, u64 type, u64 chunk_offset,
u64 size);
-void btrfs_add_raid_kobjects(struct btrfs_fs_info *fs_info);
struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
struct btrfs_fs_info *fs_info,
const u64 chunk_offset);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 5f7ee70b3d1a..97beb351a10c 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2683,8 +2683,6 @@ int open_ctree(struct super_block *sb,
INIT_LIST_HEAD(&fs_info->delayed_iputs);
INIT_LIST_HEAD(&fs_info->delalloc_roots);
INIT_LIST_HEAD(&fs_info->caching_block_groups);
- INIT_LIST_HEAD(&fs_info->pending_raid_kobjs);
- spin_lock_init(&fs_info->pending_raid_kobjs_lock);
spin_lock_init(&fs_info->delalloc_root_lock);
spin_lock_init(&fs_info->trans_lock);
spin_lock_init(&fs_info->fs_roots_radix_lock);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index d3b58e388535..8b7eb22d508a 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -4,6 +4,7 @@
*/
#include <linux/sched.h>
+#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
@@ -7888,33 +7889,6 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
return 0;
}
-/* link_block_group will queue up kobjects to add when we're reclaim-safe */
-void btrfs_add_raid_kobjects(struct btrfs_fs_info *fs_info)
-{
- struct btrfs_space_info *space_info;
- struct raid_kobject *rkobj;
- LIST_HEAD(list);
- int ret = 0;
-
- spin_lock(&fs_info->pending_raid_kobjs_lock);
- list_splice_init(&fs_info->pending_raid_kobjs, &list);
- spin_unlock(&fs_info->pending_raid_kobjs_lock);
-
- list_for_each_entry(rkobj, &list, list) {
- space_info = btrfs_find_space_info(fs_info, rkobj->flags);
-
- ret = kobject_add(&rkobj->kobj, &space_info->kobj,
- "%s", btrfs_bg_type_to_raid_name(rkobj->flags));
- if (ret) {
- kobject_put(&rkobj->kobj);
- break;
- }
- }
- if (ret)
- btrfs_warn(fs_info,
- "failed to add kobject for block cache, ignoring");
-}
-
static void link_block_group(struct btrfs_block_group_cache *cache)
{
struct btrfs_space_info *space_info = cache->space_info;
@@ -7929,18 +7903,36 @@ static void link_block_group(struct btrfs_block_group_cache *cache)
up_write(&space_info->groups_sem);
if (first) {
- struct raid_kobject *rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
+ struct raid_kobject *rkobj;
+ unsigned int nofs_flag;
+ int ret;
+
+ /*
+ * Setup a NOFS context because kobject_add(), deep in its call
+ * chain, does GFP_KERNEL allocations, and we are often called
+ * in a context where if reclaim is triggered we can deadlock
+ * (we are either holding a transaction handle or some lock
+ * required for a transaction commit).
+ */
+ nofs_flag = memalloc_nofs_save();
+ rkobj = kzalloc(sizeof(*rkobj), GFP_KERNEL);
if (!rkobj) {
+ memalloc_nofs_restore(nofs_flag);
btrfs_warn(cache->fs_info,
"couldn't alloc memory for raid level kobject");
return;
}
rkobj->flags = cache->flags;
kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
-
- spin_lock(&fs_info->pending_raid_kobjs_lock);
- list_add_tail(&rkobj->list, &fs_info->pending_raid_kobjs);
- spin_unlock(&fs_info->pending_raid_kobjs_lock);
+ ret = kobject_add(&rkobj->kobj, &space_info->kobj, "%s",
+ btrfs_bg_type_to_raid_name(rkobj->flags));
+ memalloc_nofs_restore(nofs_flag);
+ if (ret) {
+ kobject_put(&rkobj->kobj);
+ btrfs_warn(fs_info,
+ "failed to add kobject for block cache, ignoring");
+ return;
+ }
space_info->block_group_kobjs[index] = &rkobj->kobj;
}
}
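The rewritten link_block_group() above also follows the standard kobject lifecycle rule: once kobject_init() has run, a failed kobject_add() must be unwound with kobject_put(), which drops the embedded refcount and frees the object through its ktype's release method, never with a direct kfree(). A minimal sketch of that init/add/put pattern (parent, my_ktype and the "child" name are hypothetical stand-ins):

#include <linux/errno.h>
#include <linux/kobject.h>
#include <linux/slab.h>

static int add_child_kobject(struct kobject *parent, struct kobj_type *my_ktype)
{
	struct kobject *kobj;
	int ret;

	kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
	if (!kobj)
		return -ENOMEM;

	kobject_init(kobj, my_ktype);
	ret = kobject_add(kobj, parent, "%s", "child");
	if (ret) {
		/* after kobject_init(), free via the refcount, not kfree() */
		kobject_put(kobj);
		return ret;
	}
	return 0;
}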
@@ -8206,7 +8198,6 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
inc_block_group_ro(cache, 1);
}
- btrfs_add_raid_kobjects(info);
btrfs_init_global_block_rsv(info);
ret = check_chunk_block_group_mappings(info);
error:
@@ -8975,6 +8966,7 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
struct btrfs_device *device;
struct list_head *devices;
u64 group_trimmed;
+ u64 range_end = U64_MAX;
u64 start;
u64 end;
u64 trimmed = 0;
@@ -8984,16 +8976,23 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
int dev_ret = 0;
int ret = 0;
+ /*
+ * Check range overflow if range->len is set.
+ * The default range->len is U64_MAX.
+ */
+ if (range->len != U64_MAX &&
+ check_add_overflow(range->start, range->len, &range_end))
+ return -EINVAL;
+
cache = btrfs_lookup_first_block_group(fs_info, range->start);
for (; cache; cache = next_block_group(cache)) {
- if (cache->key.objectid >= (range->start + range->len)) {
+ if (cache->key.objectid >= range_end) {
btrfs_put_block_group(cache);
break;
}
start = max(range->start, cache->key.objectid);
- end = min(range->start + range->len,
- cache->key.objectid + cache->key.offset);
+ end = min(range_end, cache->key.objectid + cache->key.offset);
if (end - start >= range->minlen) {
if (!block_group_cache_done(cache)) {
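The second fix is built on check_add_overflow() from <linux/overflow.h>, a macro that stores a + b in *d and returns true when the addition wraps, which is what lets btrfs_trim_fs() reject a bogus range before using it as a clamp. A standalone sketch of the validation pattern above (validate_trim_range() is a hypothetical helper, not part of the patch):

#include <linux/errno.h>
#include <linux/kernel.h>	/* U64_MAX */
#include <linux/overflow.h>	/* check_add_overflow() */
#include <linux/types.h>

static int validate_trim_range(u64 start, u64 len, u64 *range_end)
{
	*range_end = U64_MAX;

	/* len == U64_MAX means "trim to the end", so there is no sum to check */
	if (len != U64_MAX && check_add_overflow(start, len, range_end))
		return -EINVAL;	/* start + len wrapped past U64_MAX */

	return 0;
}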
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index d74b74ca07af..a447d3ec48d5 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -3087,16 +3087,6 @@ static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
if (ret)
return ret;
- /*
- * We add the kobjects here (and after forcing data chunk creation)
- * since relocation is the only place we'll create chunks of a new
- * type at runtime. The only place where we'll remove the last
- * chunk of a type is the call immediately below this one. Even
- * so, we're protected against races with the cleaner thread since
- * we're covered by the delete_unused_bgs_mutex.
- */
- btrfs_add_raid_kobjects(fs_info);
-
trans = btrfs_start_trans_remove_block_group(root->fs_info,
chunk_offset);
if (IS_ERR(trans)) {
@@ -3223,9 +3213,6 @@ static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
btrfs_end_transaction(trans);
if (ret < 0)
return ret;
-
- btrfs_add_raid_kobjects(fs_info);
-
return 1;
}
}