mirror of https://github.com/torvalds/linux.git
synced 2026-03-08 01:24:47 +01:00

Merge branch 'block-6.19' into for-7.0/block

Merge in fixes that went to 6.19 after for-7.0/block was branched.
The pending ublk changes depend in particular on the async scan work.

* block-6.19:
  block: zero non-PI portion of auto integrity buffer
  ublk: fix use-after-free in ublk_partition_scan_work
  blk-mq: avoid stall during boot due to synchronize_rcu_expedited
  loop: add missing bd_abort_claiming in loop_set_status
  block: don't merge bios with different app_tags
  blk-rq-qos: Remove unlikely() hints from QoS checks
  loop: don't change loop device under exclusive opener in loop_set_status
  block, bfq: update outdated comment
  blk-mq: skip CPU offline notify on unmapped hctx
  selftests/ublk: fix Makefile to rebuild on header changes
  selftests/ublk: add test for async partition scan
  ublk: scan partition in async way
  block,bfq: fix aux stat accumulation destination
  md: Fix forward incompatibility from configurable logical block size
  md: Fix logical_block_size configuration being overwritten
  md: suspend array while updating raid_disks via sysfs
  md/raid5: fix possible null-pointer dereferences in raid5_store_group_thread_cnt()
  md: Fix static checker warning in analyze_sbs
commit 5df832ba5f
13 changed files with 243 additions and 63 deletions
block/bfq-cgroup.c
@@ -380,7 +380,7 @@ static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
 	blkg_rwstat_add_aux(&to->merged, &from->merged);
 	blkg_rwstat_add_aux(&to->service_time, &from->service_time);
 	blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
-	bfq_stat_add_aux(&from->time, &from->time);
+	bfq_stat_add_aux(&to->time, &from->time);
 	bfq_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
 	bfq_stat_add_aux(&to->avg_queue_size_samples,
			 &from->avg_queue_size_samples);
block/bfq-iosched.h
@@ -984,7 +984,7 @@ struct bfq_group_data {
	 * unused for the root group. Used to know whether there
	 * are groups with more than one active @bfq_entity
	 * (see the comments to the function
-	 * bfq_bfqq_may_idle()).
+	 * bfq_better_to_idle()).
	 * @rq_pos_tree: rbtree sorted by next_request position, used when
	 *	determining if two or more queues have interleaving
	 *	requests (see bfq_find_close_cooperator()).
block/bio-integrity-auto.c
@@ -128,7 +128,7 @@ bool bio_integrity_prep(struct bio *bio)
			return true;
		set_flags = false;
		gfp |= __GFP_ZERO;
-	} else if (bi->csum_type == BLK_INTEGRITY_CSUM_NONE)
+	} else if (bi->metadata_size > bi->pi_tuple_size)
		gfp |= __GFP_ZERO;
		break;
	default:
block/blk-integrity.c
@@ -140,14 +140,21 @@ EXPORT_SYMBOL_GPL(blk_rq_integrity_map_user);
 bool blk_integrity_merge_rq(struct request_queue *q, struct request *req,
			    struct request *next)
 {
+	struct bio_integrity_payload *bip, *bip_next;
+
	if (blk_integrity_rq(req) == 0 && blk_integrity_rq(next) == 0)
		return true;

	if (blk_integrity_rq(req) == 0 || blk_integrity_rq(next) == 0)
		return false;

-	if (bio_integrity(req->bio)->bip_flags !=
-	    bio_integrity(next->bio)->bip_flags)
+	bip = bio_integrity(req->bio);
+	bip_next = bio_integrity(next->bio);
+	if (bip->bip_flags != bip_next->bip_flags)
+		return false;
+
+	if (bip->bip_flags & BIP_CHECK_APPTAG &&
+	    bip->app_tag != bip_next->app_tag)
		return false;

	if (req->nr_integrity_segments + next->nr_integrity_segments >
@@ -163,15 +170,21 @@ bool blk_integrity_merge_rq(struct request_queue *q, struct request *req,
 bool blk_integrity_merge_bio(struct request_queue *q, struct request *req,
			     struct bio *bio)
 {
+	struct bio_integrity_payload *bip, *bip_bio = bio_integrity(bio);
	int nr_integrity_segs;

-	if (blk_integrity_rq(req) == 0 && bio_integrity(bio) == NULL)
+	if (blk_integrity_rq(req) == 0 && bip_bio == NULL)
		return true;

-	if (blk_integrity_rq(req) == 0 || bio_integrity(bio) == NULL)
+	if (blk_integrity_rq(req) == 0 || bip_bio == NULL)
		return false;

-	if (bio_integrity(req->bio)->bip_flags != bio_integrity(bio)->bip_flags)
+	bip = bio_integrity(req->bio);
+	if (bip->bip_flags != bip_bio->bip_flags)
		return false;

+	if (bip->bip_flags & BIP_CHECK_APPTAG &&
+	    bip->app_tag != bip_bio->app_tag)
+		return false;
+
	nr_integrity_segs = blk_rq_count_integrity_sg(q, bio);
block/blk-mq.c
@@ -3721,7 +3721,7 @@ static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
			struct blk_mq_hw_ctx, cpuhp_online);
	int ret = 0;

-	if (blk_mq_hctx_has_online_cpu(hctx, cpu))
+	if (!hctx->nr_ctx || blk_mq_hctx_has_online_cpu(hctx, cpu))
		return 0;

	/*
@@ -4553,8 +4553,7 @@ static void __blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
			 * Make sure reading the old queue_hw_ctx from other
			 * context concurrently won't trigger uaf.
			 */
-			synchronize_rcu_expedited();
-			kfree(hctxs);
+			kfree_rcu_mightsleep(hctxs);
			hctxs = new_hctxs;
		}

block/blk-rq-qos.h
@@ -112,29 +112,26 @@ void __rq_qos_queue_depth_changed(struct rq_qos *rqos);

 static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
 {
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos)
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
		__rq_qos_cleanup(q->rq_qos, bio);
 }

 static inline void rq_qos_done(struct request_queue *q, struct request *rq)
 {
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) &&
	    q->rq_qos && !blk_rq_is_passthrough(rq))
		__rq_qos_done(q->rq_qos, rq);
 }

 static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
 {
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos)
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
		__rq_qos_issue(q->rq_qos, rq);
 }

 static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
 {
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos)
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
		__rq_qos_requeue(q->rq_qos, rq);
 }

@@ -162,8 +159,7 @@ static inline void rq_qos_done_bio(struct bio *bio)

 static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
 {
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos) {
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos) {
		bio_set_flag(bio, BIO_QOS_THROTTLED);
		__rq_qos_throttle(q->rq_qos, bio);
	}
@@ -172,16 +168,14 @@ static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
 static inline void rq_qos_track(struct request_queue *q, struct request *rq,
				struct bio *bio)
 {
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos)
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
		__rq_qos_track(q->rq_qos, rq, bio);
 }

 static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
				struct bio *bio)
 {
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos) {
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos) {
		bio_set_flag(bio, BIO_QOS_MERGED);
		__rq_qos_merge(q->rq_qos, rq, bio);
	}
@@ -189,8 +183,7 @@ static inline void rq_qos_merge(struct request_queue *q, struct request *rq,

 static inline void rq_qos_queue_depth_changed(struct request_queue *q)
 {
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos)
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
		__rq_qos_queue_depth_changed(q->rq_qos);
 }

drivers/block/loop.c
@@ -1225,16 +1225,28 @@ static int loop_clr_fd(struct loop_device *lo)
 }

 static int
-loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
+loop_set_status(struct loop_device *lo, blk_mode_t mode,
+		struct block_device *bdev, const struct loop_info64 *info)
 {
	int err;
	bool partscan = false;
	bool size_changed = false;
	unsigned int memflags;

+	/*
+	 * If we don't hold exclusive handle for the device, upgrade to it
+	 * here to avoid changing device under exclusive owner.
+	 */
+	if (!(mode & BLK_OPEN_EXCL)) {
+		err = bd_prepare_to_claim(bdev, loop_set_status, NULL);
+		if (err)
+			goto out_reread_partitions;
+	}
+
	err = mutex_lock_killable(&lo->lo_mutex);
	if (err)
-		return err;
+		goto out_abort_claiming;
+
	if (lo->lo_state != Lo_bound) {
		err = -ENXIO;
		goto out_unlock;
@@ -1273,6 +1285,10 @@ out_unfreeze:
	}
 out_unlock:
	mutex_unlock(&lo->lo_mutex);
+out_abort_claiming:
+	if (!(mode & BLK_OPEN_EXCL))
+		bd_abort_claiming(bdev, loop_set_status);
+out_reread_partitions:
	if (partscan)
		loop_reread_partitions(lo);

@@ -1352,7 +1368,9 @@ loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info)
 }

 static int
-loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg)
+loop_set_status_old(struct loop_device *lo, blk_mode_t mode,
+		    struct block_device *bdev,
+		    const struct loop_info __user *arg)
 {
	struct loop_info info;
	struct loop_info64 info64;
@@ -1360,17 +1378,19 @@ loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg)
	if (copy_from_user(&info, arg, sizeof (struct loop_info)))
		return -EFAULT;
	loop_info64_from_old(&info, &info64);
-	return loop_set_status(lo, &info64);
+	return loop_set_status(lo, mode, bdev, &info64);
 }

 static int
-loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg)
+loop_set_status64(struct loop_device *lo, blk_mode_t mode,
+		  struct block_device *bdev,
+		  const struct loop_info64 __user *arg)
 {
	struct loop_info64 info64;

	if (copy_from_user(&info64, arg, sizeof (struct loop_info64)))
		return -EFAULT;
-	return loop_set_status(lo, &info64);
+	return loop_set_status(lo, mode, bdev, &info64);
 }

 static int
@@ -1549,14 +1569,14 @@ static int lo_ioctl(struct block_device *bdev, blk_mode_t mode,
	case LOOP_SET_STATUS:
		err = -EPERM;
		if ((mode & BLK_OPEN_WRITE) || capable(CAP_SYS_ADMIN))
-			err = loop_set_status_old(lo, argp);
+			err = loop_set_status_old(lo, mode, bdev, argp);
		break;
	case LOOP_GET_STATUS:
		return loop_get_status_old(lo, argp);
	case LOOP_SET_STATUS64:
		err = -EPERM;
		if ((mode & BLK_OPEN_WRITE) || capable(CAP_SYS_ADMIN))
-			err = loop_set_status64(lo, argp);
+			err = loop_set_status64(lo, mode, bdev, argp);
		break;
	case LOOP_GET_STATUS64:
		return loop_get_status64(lo, argp);
@@ -1650,7 +1670,8 @@ loop_info64_to_compat(const struct loop_info64 *info64,
 }

 static int
-loop_set_status_compat(struct loop_device *lo,
+loop_set_status_compat(struct loop_device *lo, blk_mode_t mode,
+		       struct block_device *bdev,
		       const struct compat_loop_info __user *arg)
 {
	struct loop_info64 info64;
@@ -1659,7 +1680,7 @@ loop_set_status_compat(struct loop_device *lo,
	ret = loop_info64_from_compat(arg, &info64);
	if (ret < 0)
		return ret;
-	return loop_set_status(lo, &info64);
+	return loop_set_status(lo, mode, bdev, &info64);
 }

 static int
@@ -1685,7 +1706,7 @@ static int lo_compat_ioctl(struct block_device *bdev, blk_mode_t mode,

	switch(cmd) {
	case LOOP_SET_STATUS:
-		err = loop_set_status_compat(lo,
+		err = loop_set_status_compat(lo, mode, bdev,
			(const struct compat_loop_info __user *)arg);
		break;
	case LOOP_GET_STATUS:
drivers/block/ublk_drv.c
@@ -237,6 +237,7 @@ struct ublk_device {
	bool canceling;
	pid_t ublksrv_tgid;
	struct delayed_work exit_work;
+	struct work_struct partition_scan_work;

	struct ublk_queue *queues[];
 };
@@ -1582,6 +1583,27 @@ static void ublk_put_disk(struct gendisk *disk)
	put_device(disk_to_dev(disk));
 }

+static void ublk_partition_scan_work(struct work_struct *work)
+{
+	struct ublk_device *ub =
+		container_of(work, struct ublk_device, partition_scan_work);
+	/* Hold disk reference to prevent UAF during concurrent teardown */
+	struct gendisk *disk = ublk_get_disk(ub);
+
+	if (!disk)
+		return;
+
+	if (WARN_ON_ONCE(!test_and_clear_bit(GD_SUPPRESS_PART_SCAN,
+					     &disk->state)))
+		goto out;
+
+	mutex_lock(&disk->open_mutex);
+	bdev_disk_changed(disk, false);
+	mutex_unlock(&disk->open_mutex);
+out:
+	ublk_put_disk(disk);
+}
+
 /*
  * Use this function to ensure that ->canceling is consistently set for
  * the device and all queues. Do not set these flags directly.
@@ -2026,6 +2048,7 @@ static void ublk_stop_dev(struct ublk_device *ub)
	mutex_lock(&ub->mutex);
	ublk_stop_dev_unlocked(ub);
	mutex_unlock(&ub->mutex);
+	cancel_work_sync(&ub->partition_scan_work);
	ublk_cancel_dev(ub);
 }

@@ -2954,8 +2977,16 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub,

	ublk_apply_params(ub);

-	/* don't probe partitions if any daemon task is un-trusted */
-	if (ub->unprivileged_daemons)
+	/*
+	 * Suppress partition scan to avoid potential IO hang.
+	 *
+	 * If ublk server error occurs during partition scan, the IO may
+	 * wait while holding ub->mutex, which can deadlock with other
+	 * operations that need the mutex. Defer partition scan to async
+	 * work.
+	 * For unprivileged daemons, keep GD_SUPPRESS_PART_SCAN set
+	 * permanently.
+	 */
	set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);

	ublk_get_device(ub);
@@ -2973,6 +3004,10 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub,

	set_bit(UB_STATE_USED, &ub->state);

+	/* Schedule async partition scan for trusted daemons */
+	if (!ub->unprivileged_daemons)
+		schedule_work(&ub->partition_scan_work);
+
 out_put_cdev:
	if (ret) {
		ublk_detach_disk(ub);
@@ -3138,6 +3173,7 @@ static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header)
	mutex_init(&ub->mutex);
	spin_lock_init(&ub->lock);
	mutex_init(&ub->cancel_mutex);
+	INIT_WORK(&ub->partition_scan_work, ublk_partition_scan_work);

	ret = ublk_alloc_dev_number(ub, header->dev_id);
	if (ret < 0)
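The ublk_ctrl_start_dev comment above explains the motivation: the partition scan used to issue IO while ub->mutex was held, so a misbehaving ublk server could deadlock against other operations needing the mutex. With the scan moved to partition_scan_work, device creation returns without waiting on that IO. A rough way to observe this from userspace, sketched under the assumption that the selftests' kublk server is built and the new device comes up as id 0:

    # Device creation should return promptly even if the backing server is
    # slow; the partition scan runs later from the async work item.
    time ./kublk add -t null -q 1 -d 128
    udevadm settle                # wait for the deferred scan to be processed
    ls /dev/ublkb0*               # device (and any partitions) appear here
    ./kublk del -n 0              # same cleanup invocation the selftests use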
drivers/md/md.c
@@ -1999,7 +1999,6 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *freshest, struct md_rdev *rdev)
		mddev->layout = le32_to_cpu(sb->layout);
		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
		mddev->dev_sectors = le64_to_cpu(sb->size);
-		mddev->logical_block_size = le32_to_cpu(sb->logical_block_size);
		mddev->events = ev1;
		mddev->bitmap_info.offset = 0;
		mddev->bitmap_info.space = 0;
@@ -2015,6 +2014,9 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *freshest, struct md_rdev *rdev)

		mddev->max_disks = (4096-256)/2;

+		if (!mddev->logical_block_size)
+			mddev->logical_block_size = le32_to_cpu(sb->logical_block_size);
+
		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
		    mddev->bitmap_info.file == NULL) {
			mddev->bitmap_info.offset =
@@ -3882,7 +3884,6 @@ out_free_rdev:

 static int analyze_sbs(struct mddev *mddev)
 {
-	int i;
	struct md_rdev *rdev, *freshest, *tmp;

	freshest = NULL;
@@ -3909,11 +3910,9 @@ static int analyze_sbs(struct mddev *mddev)
	super_types[mddev->major_version].
		validate_super(mddev, NULL/*freshest*/, freshest);

-	i = 0;
	rdev_for_each_safe(rdev, tmp, mddev) {
		if (mddev->max_disks &&
-		    (rdev->desc_nr >= mddev->max_disks ||
-		     i > mddev->max_disks)) {
+		    rdev->desc_nr >= mddev->max_disks) {
			pr_warn("md: %s: %pg: only %d devices permitted\n",
				mdname(mddev), rdev->bdev,
				mddev->max_disks);
@@ -4407,7 +4406,7 @@ raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
	if (err < 0)
		return err;

-	err = mddev_lock(mddev);
+	err = mddev_suspend_and_lock(mddev);
	if (err)
		return err;
	if (mddev->pers)
@@ -4432,7 +4431,7 @@ raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
	} else
		mddev->raid_disks = n;
 out_unlock:
-	mddev_unlock(mddev);
+	mddev_unlock_and_resume(mddev);
	return err ? err : len;
 }
 static struct md_sysfs_entry md_raid_disks =
@@ -5981,13 +5980,33 @@ lbs_store(struct mddev *mddev, const char *buf, size_t len)
	if (mddev->major_version == 0)
		return -EINVAL;

-	if (mddev->pers)
-		return -EBUSY;
-
	err = kstrtouint(buf, 10, &lbs);
	if (err < 0)
		return -EINVAL;

+	if (mddev->pers) {
+		unsigned int curr_lbs;
+
+		if (mddev->logical_block_size)
+			return -EBUSY;
+		/*
+		 * To fix forward compatibility issues, LBS is not
+		 * configured for arrays from old kernels (<=6.18) by default.
+		 * If the user confirms no rollback to old kernels, enable
+		 * LBS by writing the current LBS, to prevent data loss
+		 * from LBS changes.
+		 */
+		curr_lbs = queue_logical_block_size(mddev->gendisk->queue);
+		if (lbs != curr_lbs)
+			return -EINVAL;
+
+		mddev->logical_block_size = curr_lbs;
+		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
+		pr_info("%s: logical block size configured successfully, array will not be assembled in old kernels (<= 6.18)\n",
+			mdname(mddev));
+		return len;
+	}
+
	err = mddev_lock(mddev);
	if (err)
		goto unlock;
@@ -6163,7 +6182,27 @@ int mddev_stack_rdev_limits(struct mddev *mddev, struct queue_limits *lim,
			mdname(mddev));
		return -EINVAL;
	}
+
+	/* Only 1.x meta needs to set logical block size */
+	if (mddev->major_version == 0)
+		return 0;
+
+	/*
+	 * Fix forward compatibility issue. Only set LBS by default for
+	 * new arrays; mddev->events == 0 indicates the array was just
+	 * created. When assembling an array, read LBS from the superblock
+	 * instead, since LBS is 0 in superblocks created by old kernels.
+	 */
+	if (!mddev->events) {
+		pr_info("%s: array will not be assembled in old kernels that lack configurable LBS support (<= 6.18)\n",
+			mdname(mddev));
+		mddev->logical_block_size = lim->logical_block_size;
+	}
+
+	if (!mddev->logical_block_size)
+		pr_warn("%s: echo current LBS to md/logical_block_size to prevent data loss issues from LBS changes.\n"
+			"\tNote: After setting, array will not be assembled in old kernels (<= 6.18)\n",
+			mdname(mddev));
+
	return 0;
 }
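The lbs_store change above makes LBS configuration on a live array a confirm-only operation: the write succeeds only when the value matches what the request queue already reports, and it then records the LBS in the v1.x superblock. A minimal sketch of that confirmation step, assuming an array named md0 and the md/logical_block_size sysfs attribute added by this series:

    # Read the LBS the array currently reports, then write the same value
    # back so it gets recorded in the superblock.
    lbs=$(cat /sys/block/md0/queue/logical_block_size)
    echo "$lbs" > /sys/block/md0/md/logical_block_size
    # Per the pr_info above, the array will no longer assemble on
    # kernels <= 6.18 after this write.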
drivers/md/raid5.c
@@ -7187,12 +7187,14 @@ raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
	err = mddev_suspend_and_lock(mddev);
	if (err)
		return err;
+	conf = mddev->private;
+	if (!conf) {
+		mddev_unlock_and_resume(mddev);
+		return -ENODEV;
+	}
	raid5_quiesce(mddev, true);

-	conf = mddev->private;
-	if (!conf)
-		err = -ENODEV;
-	else if (new != conf->worker_cnt_per_group) {
+	if (new != conf->worker_cnt_per_group) {
		old_groups = conf->worker_groups;
		if (old_groups)
			flush_workqueue(raid5_wq);
tools/testing/selftests/ublk/Makefile
@@ -22,6 +22,7 @@ TEST_PROGS += test_generic_11.sh
 TEST_PROGS += test_generic_12.sh
 TEST_PROGS += test_generic_13.sh
 TEST_PROGS += test_generic_14.sh
+TEST_PROGS += test_generic_15.sh

 TEST_PROGS += test_null_01.sh
 TEST_PROGS += test_null_02.sh
@@ -50,10 +51,10 @@ TEST_PROGS += test_stress_07.sh

 TEST_GEN_PROGS_EXTENDED = kublk

+LOCAL_HDRS += $(wildcard *.h)
 include ../lib.mk

-$(TEST_GEN_PROGS_EXTENDED): kublk.c null.c file_backed.c common.c stripe.c \
-	fault_inject.c
+$(TEST_GEN_PROGS_EXTENDED): $(wildcard *.c)

 check:
	shellcheck -x -f gcc *.sh
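The Makefile fix replaces the hard-coded source list with $(wildcard *.c) and registers every header in LOCAL_HDRS, which the selftests' common lib.mk treats as extra prerequisites for generated programs. A quick manual check, assuming an in-tree build and that kublk.h is among the headers picked up by the wildcard:

    cd tools/testing/selftests/ublk
    make            # build kublk once
    touch kublk.h   # header edits previously left kublk "up to date"
    make            # kublk is now rebuilt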
tools/testing/selftests/ublk/test_common.sh
@@ -178,8 +178,9 @@ _have_feature()
 _create_ublk_dev() {
	local dev_id;
	local cmd=$1
+	local settle=$2

-	shift 1
+	shift 2

	if [ ! -c /dev/ublk-control ]; then
		return ${UBLK_SKIP_CODE}
@@ -194,7 +195,10 @@ _create_ublk_dev() {
		echo "fail to add ublk dev $*"
		return 255
	fi
-	udevadm settle
+
+	if [ "$settle" = "yes" ]; then
+		udevadm settle
+	fi

	if [[ "$dev_id" =~ ^[0-9]+$ ]]; then
		echo "${dev_id}"
@@ -204,14 +208,18 @@ _create_ublk_dev() {
 }

 _add_ublk_dev() {
-	_create_ublk_dev "add" "$@"
+	_create_ublk_dev "add" "yes" "$@"
+}
+
+_add_ublk_dev_no_settle() {
+	_create_ublk_dev "add" "no" "$@"
 }

 _recover_ublk_dev() {
	local dev_id
	local state

-	dev_id=$(_create_ublk_dev "recover" "$@")
+	dev_id=$(_create_ublk_dev "recover" "yes" "$@")
	for ((j=0;j<20;j++)); do
		state=$(_get_ublk_dev_state "${dev_id}")
		[ "$state" == "LIVE" ] && break
tools/testing/selftests/ublk/test_generic_15.sh (new executable file)
@@ -0,0 +1,68 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+. "$(cd "$(dirname "$0")" && pwd)"/test_common.sh
+
+TID="generic_15"
+ERR_CODE=0
+
+_test_partition_scan_no_hang()
+{
+	local recovery_flag=$1
+	local expected_state=$2
+	local dev_id
+	local state
+	local daemon_pid
+	local start_time
+	local elapsed
+
+	# Create ublk device with fault_inject target and very large delay
+	# to simulate hang during partition table read
+	# --delay_us 60000000 = 60 seconds delay
+	# Use _add_ublk_dev_no_settle to avoid udevadm settle hang waiting
+	# for partition scan events to complete
+	if [ "$recovery_flag" = "yes" ]; then
+		echo "Testing partition scan with recovery support..."
+		dev_id=$(_add_ublk_dev_no_settle -t fault_inject -q 1 -d 1 --delay_us 60000000 -r 1)
+	else
+		echo "Testing partition scan without recovery..."
+		dev_id=$(_add_ublk_dev_no_settle -t fault_inject -q 1 -d 1 --delay_us 60000000)
+	fi
+
+	_check_add_dev "$TID" $?
+
+	# The add command should return quickly because partition scan is async.
+	# Now sleep briefly to let the async partition scan work start and hit
+	# the delay in the fault_inject handler.
+	sleep 1
+
+	# Kill the ublk daemon while partition scan is potentially blocked
+	# and check that the state transitions properly
+	start_time=${SECONDS}
+	daemon_pid=$(_get_ublk_daemon_pid "${dev_id}")
+	state=$(__ublk_kill_daemon "${dev_id}" "${expected_state}")
+	elapsed=$((SECONDS - start_time))
+
+	# Verify the device transitioned to the expected state
+	if [ "$state" != "${expected_state}" ]; then
+		echo "FAIL: Device state is $state, expected ${expected_state}"
+		ERR_CODE=255
+		${UBLK_PROG} del -n "${dev_id}" > /dev/null 2>&1
+		return
+	fi
+	echo "PASS: Device transitioned to ${expected_state} in ${elapsed}s without hanging"
+
+	# Clean up the device
+	${UBLK_PROG} del -n "${dev_id}" > /dev/null 2>&1
+}
+
+_prep_test "partition_scan" "verify async partition scan prevents IO hang"
+
+# Test 1: Without recovery support - should transition to DEAD
+_test_partition_scan_no_hang "no" "DEAD"
+
+# Test 2: With recovery support - should transition to QUIESCED
+_test_partition_scan_no_hang "yes" "QUIESCED"
+
+_cleanup_test "partition_scan"
+_show_result $TID $ERR_CODE