for-7.0-rc1-tag

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEE8rQSAMVO+zA4DBdWxWXV+ddtWDsFAmmYpGMACgkQxWXV+ddt
 WDvKNQ//cy7gb2nItc9hbYXUM/88Ks3Usu94u/4gREL9I97u2vHer6sT8cRfWA2k
 OSOZF2L7yPWIcxcj39YC66ubs7uebSwt/bZAL1TKAyA7wUvR/kdhD7DUTWX4ySf3
 2+1BANv1Bng8C7vGnWDhYPHcb1u8LvKxKcn+9h8SzBGpW5dyx3k4xUrneaMYq+jf
 D9sPjkkM6fxsKn+S3OJP/zFUIQ2DQiv7nF+Jv4Ke2h9c9nCVfn8fRK0AuTlYXFY/
 mWkKWo1ATGVd0fBg/otRp/ZlZczoKs3/1YBUMYTxZZngyweIms4Q6I4/GIGHO+RD
 QFFoIQ7OQd0aqBGhuKTDlYMlc6OS2jwoTgVYr6vSIxSRUsCK/grHdPL+s+9dLc3h
 p7+/URH9Gpfad46wFypb5w7zmmc8jCRkR1Ff+jf6Pi8GgffqocCro3C3HlGRKwcf
 CAj6gI3ypNPNFfYidcKbS+ehXhjmMVb9xhNa8YwCC1CdgM54ZMmEs/ksAN+uBc/u
 EfcAbB3T15LQgzUJs2WKvCI3E/0XUYEi54ng8UwCJ6P01p3egfvQo8t6jZal9Vx8
 ba/LUG50W1xRRjxgG1AU5s42GmGkO8WNyIixmLlT+Pwog0I2auPVDQBudbXZK4ps
 +FOtNnN9hYLmuZyRSTT03MHHf0Rqtckdjvq3413KMFILVh+ZM+Q=
 =CQsu
 -----END PGP SIGNATURE-----

Merge tag 'for-7.0-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs fixes from David Sterba:

 - multiple error handling fixes of unexpected conditions

 - reset block group size class once it becomes empty so that
   its class can be changed

 - error message level adjustments

 - fixes of returned error values

 - use correct block reserve for delayed refs

* tag 'for-7.0-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  btrfs: fix invalid leaf access in btrfs_quota_enable() if ref key not found
  btrfs: fix lost error return in btrfs_find_orphan_roots()
  btrfs: fix lost return value on error in finish_verity()
  btrfs: change unaligned root messages to error level in btrfs_validate_super()
  btrfs: use the correct type to initialize block reserve for delayed refs
  btrfs: do not ASSERT() when the fs flips RO inside btrfs_repair_io_failure()
  btrfs: reset block group size class when it becomes empty
  btrfs: replace BUG() with error handling in __btrfs_balance()
  btrfs: handle unexpected exact match in btrfs_set_inode_index_count()
This commit is contained in:
Linus Torvalds 2026-02-20 14:57:09 -08:00
commit b3f1da2a4d
10 changed files with 56 additions and 21 deletions

View file

@@ -934,7 +934,6 @@ int btrfs_repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 fileoff,
struct bio *bio = NULL;
int ret = 0;
ASSERT(!(fs_info->sb->s_flags & SB_RDONLY));
BUG_ON(!mirror_num);
/* Basic alignment checks. */
@@ -946,6 +945,13 @@ int btrfs_repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 fileoff,
ASSERT(step <= length);
ASSERT(is_power_of_2(step));
/*
* The fs either mounted RO or hit critical errors, no need
* to continue repairing.
*/
if (unlikely(sb_rdonly(fs_info->sb)))
return 0;
if (btrfs_repair_one_zone(fs_info, logical))
return 0;

View file

@@ -3760,6 +3760,14 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
return ret;
}
static void btrfs_maybe_reset_size_class(struct btrfs_block_group *bg)
{
lockdep_assert_held(&bg->lock);
if (btrfs_block_group_should_use_size_class(bg) &&
bg->used == 0 && bg->reserved == 0)
bg->size_class = BTRFS_BG_SZ_NONE;
}
int btrfs_update_block_group(struct btrfs_trans_handle *trans,
u64 bytenr, u64 num_bytes, bool alloc)
{
@@ -3824,6 +3832,7 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
old_val -= num_bytes;
cache->used = old_val;
cache->pinned += num_bytes;
btrfs_maybe_reset_size_class(cache);
btrfs_space_info_update_bytes_pinned(space_info, num_bytes);
space_info->bytes_used -= num_bytes;
space_info->disk_used -= num_bytes * factor;
@@ -3952,6 +3961,7 @@ void btrfs_free_reserved_bytes(struct btrfs_block_group *cache, u64 num_bytes,
spin_lock(&cache->lock);
bg_ro = cache->ro;
cache->reserved -= num_bytes;
btrfs_maybe_reset_size_class(cache);
if (is_delalloc)
cache->delalloc_bytes -= num_bytes;
spin_unlock(&cache->lock);

View file

@@ -276,10 +276,11 @@ u64 btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *target = NULL;
/*
* If we are a delayed block reserve then push to the global rsv,
* otherwise dump into the global delayed reserve if it is not full.
* If we are a delayed refs block reserve then push to the global
* reserve, otherwise dump into the global delayed refs reserve if it is
* not full.
*/
if (block_rsv->type == BTRFS_BLOCK_RSV_DELOPS)
if (block_rsv->type == BTRFS_BLOCK_RSV_DELREFS)
target = global_rsv;
else if (block_rsv != global_rsv && !btrfs_block_rsv_full(delayed_rsv))
target = delayed_rsv;

View file

@@ -2416,18 +2416,18 @@ int btrfs_validate_super(const struct btrfs_fs_info *fs_info,
/* Root alignment check */
if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) {
btrfs_warn(fs_info, "tree_root block unaligned: %llu",
btrfs_super_root(sb));
btrfs_err(fs_info, "tree_root block unaligned: %llu",
btrfs_super_root(sb));
ret = -EINVAL;
}
if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) {
btrfs_warn(fs_info, "chunk_root block unaligned: %llu",
btrfs_err(fs_info, "chunk_root block unaligned: %llu",
btrfs_super_chunk_root(sb));
ret = -EINVAL;
}
if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) {
btrfs_warn(fs_info, "log_root block unaligned: %llu",
btrfs_super_log_root(sb));
btrfs_err(fs_info, "log_root block unaligned: %llu",
btrfs_super_log_root(sb));
ret = -EINVAL;
}

View file

@@ -6146,9 +6146,18 @@ static int btrfs_set_inode_index_count(struct btrfs_inode *inode)
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
return ret;
/* FIXME: we should be able to handle this */
if (ret == 0)
return ret;
if (unlikely(ret == 0)) {
/*
* Key with offset -1 found, there would have to exist a dir
* index item with such offset, but this is out of the valid
* range.
*/
btrfs_err(root->fs_info,
"unexpected exact match for DIR_INDEX key, inode %llu",
btrfs_ino(inode));
return -EUCLEAN;
}
if (path->slots[0] == 0) {
inode->index_cnt = BTRFS_DIR_START_INDEX;

View file

@@ -1169,11 +1169,14 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info,
}
if (ret > 0) {
/*
* Shouldn't happen, but in case it does we
* don't need to do the btrfs_next_item, just
* continue.
* Shouldn't happen because the key should still
* be there (return 0), but in case it does it
* means we have reached the end of the tree -
* there are no more leaves with items that have
* a key greater than or equals to @found_key,
* so just stop the search loop.
*/
continue;
break;
}
}
ret = btrfs_next_item(tree_root, path);

View file

@@ -257,7 +257,7 @@ int btrfs_find_orphan_roots(struct btrfs_fs_info *fs_info)
root = btrfs_get_fs_root(fs_info, root_objectid, false);
ret = PTR_ERR_OR_ZERO(root);
if (ret && ret != -ENOENT) {
break;
return ret;
} else if (ret == -ENOENT) {
struct btrfs_trans_handle *trans;

View file

@@ -726,7 +726,7 @@ again:
h->type = type;
INIT_LIST_HEAD(&h->new_bgs);
btrfs_init_metadata_block_rsv(fs_info, &h->delayed_rsv, BTRFS_BLOCK_RSV_DELOPS);
btrfs_init_metadata_block_rsv(fs_info, &h->delayed_rsv, BTRFS_BLOCK_RSV_DELREFS);
smp_mb();
if (cur_trans->state >= TRANS_STATE_COMMIT_START &&

View file

@@ -552,7 +552,7 @@ static int finish_verity(struct btrfs_inode *inode, const void *desc,
btrfs_set_fs_compat_ro(root->fs_info, VERITY);
end_trans:
btrfs_end_transaction(trans);
return 0;
return ret;
}

View file

@@ -4367,8 +4367,14 @@ again:
* this shouldn't happen, it means the last relocate
* failed
*/
if (ret == 0)
BUG(); /* FIXME break ? */
if (unlikely(ret == 0)) {
btrfs_err(fs_info,
"unexpected exact match of CHUNK_ITEM in chunk tree, offset 0x%llx",
key.offset);
mutex_unlock(&fs_info->reclaim_bgs_lock);
ret = -EUCLEAN;
goto error;
}
ret = btrfs_previous_item(chunk_root, path, 0,
BTRFS_CHUNK_ITEM_KEY);