Convert 'alloc_flex' family to use the new default GFP_KERNEL argument

This is the exact same thing as the 'alloc_obj()' version, only much
smaller because there are a lot fewer users of the *alloc_flex()
interface.

As with the alloc_obj() version, this was done entirely with mindless
brute force, using the same script, except using 'flex' in the pattern
rather than 'objs*'.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Linus Torvalds 2026-02-21 17:06:51 -08:00
parent bf4afc53b7
commit 323bbfcf1e
310 changed files with 352 additions and 352 deletions

View file

@ -54,7 +54,7 @@ static int __init init_atags_procfs(void)
WARN_ON(tag->hdr.tag != ATAG_NONE);
b = kmalloc_flex(*b, data, size, GFP_KERNEL);
b = kmalloc_flex(*b, data, size);
if (!b)
goto nomem;

View file

@ -52,7 +52,7 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
for_each_mem_range(i, &start, &end)
nr_ranges++;
cmem = kmalloc_flex(*cmem, ranges, nr_ranges, GFP_KERNEL);
cmem = kmalloc_flex(*cmem, ranges, nr_ranges);
if (!cmem)
return -ENOMEM;

View file

@ -68,7 +68,7 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
for_each_mem_range(i, &start, &end)
nr_ranges++;
cmem = kmalloc_flex(*cmem, ranges, nr_ranges, GFP_KERNEL);
cmem = kmalloc_flex(*cmem, ranges, nr_ranges);
if (!cmem)
return -ENOMEM;

View file

@ -344,7 +344,7 @@ static int ps3_setup_storage_dev(const struct ps3_repository_device *repo,
repo->dev_index, repo->dev_type, port, blk_size, num_blocks,
num_regions);
p = kzalloc_flex(*p, regions, num_regions, GFP_KERNEL);
p = kzalloc_flex(*p, regions, num_regions);
if (!p) {
result = -ENOMEM;
goto fail_malloc;

View file

@ -64,7 +64,7 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
nr_ranges = 1; /* For exclusion of crashkernel region */
walk_system_ram_res(0, -1, &nr_ranges, get_nr_ram_ranges_callback);
cmem = kmalloc_flex(*cmem, ranges, nr_ranges, GFP_KERNEL);
cmem = kmalloc_flex(*cmem, ranges, nr_ranges);
if (!cmem)
return -ENOMEM;

View file

@ -1016,7 +1016,7 @@ static int __init uncore_type_init(struct intel_uncore_type *type)
} *attr_group;
for (i = 0; type->event_descs[i].attr.attr.name; i++);
attr_group = kzalloc_flex(*attr_group, attrs, i + 1, GFP_KERNEL);
attr_group = kzalloc_flex(*attr_group, attrs, i + 1);
if (!attr_group)
goto err;

View file

@ -742,7 +742,7 @@ static int __init init_rapl_pmus(struct rapl_pmus **rapl_pmus_ptr, int rapl_pmu_
else if (rapl_pmu_scope != PERF_PMU_SCOPE_PKG)
return -EINVAL;
rapl_pmus = kzalloc_flex(*rapl_pmus, rapl_pmu, nr_rapl_pmu, GFP_KERNEL);
rapl_pmus = kzalloc_flex(*rapl_pmus, rapl_pmu, nr_rapl_pmu);
if (!rapl_pmus)
return -ENOMEM;

View file

@ -338,7 +338,7 @@ static __init int dev_mcelog_init_device(void)
int err;
mce_log_len = max(MCE_LOG_MIN_LEN, num_online_cpus());
mcelog = kzalloc_flex(*mcelog, entry, mce_log_len, GFP_KERNEL);
mcelog = kzalloc_flex(*mcelog, entry, mce_log_len);
if (!mcelog)
return -ENOMEM;

View file

@ -40,7 +40,7 @@ static void *deflate_alloc_stream(void)
DEFLATE_DEF_MEMLEVEL));
struct deflate_stream *ctx;
ctx = kvmalloc_flex(*ctx, workspace, size, GFP_KERNEL);
ctx = kvmalloc_flex(*ctx, workspace, size);
if (!ctx)
return ERR_PTR(-ENOMEM);

View file

@ -44,7 +44,7 @@ static void *zstd_alloc_stream(void)
if (!wksp_size)
return ERR_PTR(-EINVAL);
ctx = kvmalloc_flex(*ctx, wksp, wksp_size, GFP_KERNEL);
ctx = kvmalloc_flex(*ctx, wksp, wksp_size);
if (!ctx)
return ERR_PTR(-ENOMEM);

View file

@ -350,7 +350,7 @@ int aie2_error_async_events_alloc(struct amdxdna_dev_hdl *ndev)
struct async_events *events;
int i, ret;
events = kzalloc_flex(*events, event, total_col, GFP_KERNEL);
events = kzalloc_flex(*events, event, total_col);
if (!events)
return -ENOMEM;

View file

@ -266,7 +266,7 @@ static struct solver_node *create_solver_node(struct solver_state *xrs,
struct solver_node *node;
int ret;
node = kzalloc_flex(*node, start_cols, cdop->cols_len, GFP_KERNEL);
node = kzalloc_flex(*node, start_cols, cdop->cols_len);
if (!node)
return ERR_PTR(-ENOMEM);

View file

@ -436,7 +436,7 @@ int amdxdna_cmd_submit(struct amdxdna_client *client,
int ret, idx;
XDNA_DBG(xdna, "Command BO hdl %d, Arg BO count %d", cmd_bo_hdl, arg_bo_cnt);
job = kzalloc_flex(*job, bos, arg_bo_cnt, GFP_KERNEL);
job = kzalloc_flex(*job, bos, arg_bo_cnt);
if (!job)
return -ENOMEM;

View file

@ -525,7 +525,7 @@ ivpu_job_create(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count)
struct ivpu_device *vdev = file_priv->vdev;
struct ivpu_job *job;
job = kzalloc_flex(*job, bos, bo_count, GFP_KERNEL);
job = kzalloc_flex(*job, bos, bo_count);
if (!job)
return NULL;

View file

@ -2832,7 +2832,7 @@ static void ata_dev_config_cpr(struct ata_device *dev)
if (!nr_cpr)
goto out;
cpr_log = kzalloc_flex(*cpr_log, cpr, nr_cpr, GFP_KERNEL);
cpr_log = kzalloc_flex(*cpr_log, cpr, nr_cpr);
if (!cpr_log)
goto out;

View file

@ -4613,7 +4613,7 @@ static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header)
goto out_unlock;
ret = -ENOMEM;
ub = kzalloc_flex(*ub, queues, info.nr_hw_queues, GFP_KERNEL);
ub = kzalloc_flex(*ub, queues, info.nr_hw_queues);
if (!ub)
goto out_unlock;
mutex_init(&ub->mutex);

View file

@ -997,7 +997,7 @@ static int zloop_ctl_add(struct zloop_options *opts)
goto out;
}
zlo = kvzalloc_flex(*zlo, zones, nr_zones, GFP_KERNEL);
zlo = kvzalloc_flex(*zlo, zones, nr_zones);
if (!zlo) {
ret = -ENOMEM;
goto out;

View file

@ -284,7 +284,7 @@ static struct regmap *vexpress_syscfg_regmap_init(struct device *dev,
val = energy_quirk;
}
func = kzalloc_flex(*func, template, num, GFP_KERNEL);
func = kzalloc_flex(*func, template, num);
if (!func)
return ERR_PTR(-ENOMEM);

View file

@ -823,7 +823,7 @@ int hpet_alloc(struct hpet_data *hdp)
return 0;
}
hpetp = kzalloc_flex(*hpetp, hp_dev, hdp->hd_nirqs, GFP_KERNEL);
hpetp = kzalloc_flex(*hpetp, hp_dev, hdp->hd_nirqs);
if (!hpetp)
return -ENOMEM;

View file

@ -412,7 +412,7 @@ static struct port_buffer *alloc_buf(struct virtio_device *vdev, size_t buf_size
* Allocate buffer and the sg list. The sg list array is allocated
* directly after the port_buffer struct.
*/
buf = kmalloc_flex(*buf, sg, pages, GFP_KERNEL);
buf = kmalloc_flex(*buf, sg, pages);
if (!buf)
goto fail;

View file

@ -87,7 +87,7 @@ struct pmc_data *pmc_data_allocate(unsigned int ncore, unsigned int nsystem,
unsigned int num_clks = ncore + nsystem + nperiph + ngck + npck;
struct pmc_data *pmc_data;
pmc_data = kzalloc_flex(*pmc_data, hwtable, num_clks, GFP_KERNEL);
pmc_data = kzalloc_flex(*pmc_data, hwtable, num_clks);
if (!pmc_data)
return NULL;

View file

@ -502,7 +502,7 @@ static void __init of_sam9x60_sckc_setup(struct device_node *np)
if (IS_ERR(slow_osc))
goto unregister_slow_rc;
clk_data = kzalloc_flex(*clk_data, hws, 2, GFP_KERNEL);
clk_data = kzalloc_flex(*clk_data, hws, 2);
if (!clk_data)
goto unregister_slow_osc;

View file

@ -735,7 +735,7 @@ void iproc_pll_clk_setup(struct device_node *node,
if (WARN_ON(!pll))
return;
clk_data = kzalloc_flex(*clk_data, hws, num_clks, GFP_KERNEL);
clk_data = kzalloc_flex(*clk_data, hws, num_clks);
if (WARN_ON(!clk_data))
goto err_clk_data;
clk_data->num = num_clks;

View file

@ -499,7 +499,7 @@ static void __init berlin2_clock_setup(struct device_node *np)
u8 avpll_flags = 0;
int n, ret;
clk_data = kzalloc_flex(*clk_data, hws, MAX_CLKS, GFP_KERNEL);
clk_data = kzalloc_flex(*clk_data, hws, MAX_CLKS);
if (!clk_data) {
of_node_put(parent_np);
return;

View file

@ -285,7 +285,7 @@ static void __init berlin2q_clock_setup(struct device_node *np)
struct clk_hw **hws;
int n, ret;
clk_data = kzalloc_flex(*clk_data, hws, MAX_CLKS, GFP_KERNEL);
clk_data = kzalloc_flex(*clk_data, hws, MAX_CLKS);
if (!clk_data) {
of_node_put(parent_np);
return;

View file

@ -262,7 +262,7 @@ static void __init asm9260_acc_init(struct device_node *np)
u32 rate;
int n;
clk_data = kzalloc_flex(*clk_data, hws, MAX_CLKS, GFP_KERNEL);
clk_data = kzalloc_flex(*clk_data, hws, MAX_CLKS);
if (!clk_data)
return;
clk_data->num = MAX_CLKS;

View file

@ -400,7 +400,7 @@ static int eqc_probe(struct platform_device *pdev)
clk_count = data->pll_count + data->div_count +
data->fixed_factor_count + data->early_clk_count;
cells = kzalloc_flex(*cells, hws, clk_count, GFP_KERNEL);
cells = kzalloc_flex(*cells, hws, clk_count);
if (!cells)
return -ENOMEM;
@ -738,7 +738,7 @@ static void __init eqc_early_init(struct device_node *np,
clk_count = early_data->early_pll_count + early_data->early_fixed_factor_count +
early_data->late_clk_count;
cells = kzalloc_flex(*cells, hws, clk_count, GFP_KERNEL);
cells = kzalloc_flex(*cells, hws, clk_count);
if (!cells) {
ret = -ENOMEM;
goto err;

View file

@ -1200,7 +1200,7 @@ static void __init stm32h7_rcc_init(struct device_node *np)
const char *hse_clk, *lse_clk, *i2s_clk;
struct regmap *pdrm;
clk_data = kzalloc_flex(*clk_data, hws, STM32H7_MAX_CLKS, GFP_KERNEL);
clk_data = kzalloc_flex(*clk_data, hws, STM32H7_MAX_CLKS);
if (!clk_data)
return;

View file

@ -58,7 +58,7 @@ static void __init clk_boston_setup(struct device_node *np)
cpu_div = ext_field(mmcmdiv, BOSTON_PLAT_MMCMDIV_CLK1DIV);
cpu_freq = mult_frac(in_freq, mul, cpu_div);
onecell = kzalloc_flex(*onecell, hws, BOSTON_CLK_COUNT, GFP_KERNEL);
onecell = kzalloc_flex(*onecell, hws, BOSTON_CLK_COUNT);
if (!onecell)
return;

View file

@ -382,7 +382,7 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
struct device_node *np;
void __iomem *base;
clk_hw_data = kzalloc_flex(*clk_hw_data, hws, IMX7D_CLK_END, GFP_KERNEL);
clk_hw_data = kzalloc_flex(*clk_hw_data, hws, IMX7D_CLK_END);
if (WARN_ON(!clk_hw_data))
return;

View file

@ -379,7 +379,7 @@ static int __init ingenic_tcu_probe(struct device_node *np)
}
}
tcu->clocks = kzalloc_flex(*tcu->clocks, hws, TCU_CLK_COUNT, GFP_KERNEL);
tcu->clocks = kzalloc_flex(*tcu->clocks, hws, TCU_CLK_COUNT);
if (!tcu->clocks) {
ret = -ENOMEM;
goto err_clk_disable;

View file

@ -67,7 +67,7 @@ struct clk_hw_onecell_data *mtk_alloc_clk_data(unsigned int clk_num)
{
struct clk_hw_onecell_data *clk_data;
clk_data = kzalloc_flex(*clk_data, hws, clk_num, GFP_KERNEL);
clk_data = kzalloc_flex(*clk_data, hws, clk_num);
if (!clk_data)
return NULL;

View file

@ -372,7 +372,7 @@ static void __init mt7621_clk_init(struct device_node *node)
count = ARRAY_SIZE(mt7621_clks_base) +
ARRAY_SIZE(mt7621_fixed_clks) + ARRAY_SIZE(mt7621_gates);
clk_data = kzalloc_flex(*clk_data, hws, count, GFP_KERNEL);
clk_data = kzalloc_flex(*clk_data, hws, count);
if (!clk_data)
goto free_clk_priv;

View file

@ -936,7 +936,7 @@ static void __init mtmips_clk_init(struct device_node *node)
priv->data = data;
count = priv->data->num_clk_base + priv->data->num_clk_fixed +
priv->data->num_clk_factor + priv->data->num_clk_periph;
clk_data = kzalloc_flex(*clk_data, hws, count, GFP_KERNEL);
clk_data = kzalloc_flex(*clk_data, hws, count);
if (!clk_data)
goto free_clk_priv;

View file

@ -251,7 +251,7 @@ struct clk * __init cpg_div6_register(const char *name,
struct clk *clk;
unsigned int i;
clock = kzalloc_flex(*clock, parents, num_parents, GFP_KERNEL);
clock = kzalloc_flex(*clock, parents, num_parents);
if (!clock)
return ERR_PTR(-ENOMEM);

View file

@ -184,7 +184,7 @@ static void __init cpg_mstp_clocks_init(struct device_node *np)
struct clk **clks;
unsigned int i;
group = kzalloc_flex(*group, clks, MSTP_MAX_CLOCKS, GFP_KERNEL);
group = kzalloc_flex(*group, clks, MSTP_MAX_CLOCKS);
if (!group)
return;

View file

@ -1258,7 +1258,7 @@ static int __init cpg_mssr_common_init(struct device *dev,
}
nclks = info->num_total_core_clks + info->num_hw_mod_clks;
priv = kzalloc_flex(*priv, clks, nclks, GFP_KERNEL);
priv = kzalloc_flex(*priv, clks, nclks);
if (!priv)
return -ENOMEM;

View file

@ -82,7 +82,7 @@ struct samsung_clk_provider * __init samsung_clk_init(struct device *dev,
struct samsung_clk_provider *ctx;
int i;
ctx = kzalloc_flex(*ctx, clk_data.hws, nr_clks, GFP_KERNEL);
ctx = kzalloc_flex(*ctx, clk_data.hws, nr_clks);
if (!ctx)
panic("could not allocate clock provider context.\n");

View file

@ -330,7 +330,7 @@ struct visconti_pll_provider * __init visconti_init_pll(struct device_node *np,
struct visconti_pll_provider *ctx;
int i;
ctx = kzalloc_flex(*ctx, clk_data.hws, nr_plls, GFP_KERNEL);
ctx = kzalloc_flex(*ctx, clk_data.hws, nr_plls);
if (!ctx)
return ERR_PTR(-ENOMEM);

View file

@ -759,7 +759,7 @@ static int zynqmp_clk_setup(struct device_node *np)
if (ret)
return ret;
zynqmp_data = kzalloc_flex(*zynqmp_data, hws, clock_max_idx, GFP_KERNEL);
zynqmp_data = kzalloc_flex(*zynqmp_data, hws, clock_max_idx);
if (!zynqmp_data)
return -ENOMEM;

View file

@ -286,7 +286,7 @@ static int __init ingenic_tcu_init(struct device_node *np)
if (IS_ERR(map))
return PTR_ERR(map);
tcu = kzalloc_flex(*tcu, timers, num_possible_cpus(), GFP_KERNEL);
tcu = kzalloc_flex(*tcu, timers, num_possible_cpus());
if (!tcu)
return -ENOMEM;

View file

@ -320,7 +320,7 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
states = 2;
/* Allocate private data and frequency table for current cpu */
centaur = kzalloc_flex(*centaur, freq_table, states + 1, GFP_KERNEL);
centaur = kzalloc_flex(*centaur, freq_table, states + 1);
if (!centaur)
return -ENOMEM;
eps_cpu[0] = centaur;

View file

@ -94,7 +94,7 @@ get_supported_features(struct cxl_features_state *cxlfs)
return NULL;
struct cxl_feat_entries *entries __free(kvfree) =
kvmalloc_flex(*entries, ent, count, GFP_KERNEL);
kvmalloc_flex(*entries, ent, count);
if (!entries)
return NULL;

View file

@ -831,7 +831,7 @@ static int cxl_mem_abort_fw_xfer(struct cxl_memdev_state *mds)
struct cxl_mbox_cmd mbox_cmd;
int rc;
transfer = kzalloc_flex(*transfer, data, 0, GFP_KERNEL);
transfer = kzalloc_flex(*transfer, data, 0);
if (!transfer)
return -ENOMEM;

View file

@ -2017,7 +2017,7 @@ struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
if (!is_cxl_root(port))
return ERR_PTR(-EINVAL);
cxlrd = kzalloc_flex(*cxlrd, cxlsd.target, nr_targets, GFP_KERNEL);
cxlrd = kzalloc_flex(*cxlrd, cxlsd.target, nr_targets);
if (!cxlrd)
return ERR_PTR(-ENOMEM);
@ -2070,7 +2070,7 @@ struct cxl_switch_decoder *cxl_switch_decoder_alloc(struct cxl_port *port,
if (is_cxl_root(port) || is_cxl_endpoint(port))
return ERR_PTR(-EINVAL);
cxlsd = kzalloc_flex(*cxlsd, target, nr_targets, GFP_KERNEL);
cxlsd = kzalloc_flex(*cxlsd, target, nr_targets);
if (!cxlsd)
return ERR_PTR(-ENOMEM);

View file

@ -3464,7 +3464,7 @@ static int cxl_pmem_region_alloc(struct cxl_region *cxlr)
return -ENXIO;
struct cxl_pmem_region *cxlr_pmem __free(kfree) =
kzalloc_flex(*cxlr_pmem, mapping, p->nr_targets, GFP_KERNEL);
kzalloc_flex(*cxlr_pmem, mapping, p->nr_targets);
if (!cxlr_pmem)
return -ENOMEM;

View file

@ -234,7 +234,7 @@ static int cxl_pmem_set_config_data(struct cxl_memdev_state *mds,
return -EINVAL;
set_lsa =
kvzalloc_flex(*set_lsa, data, cmd->in_length, GFP_KERNEL);
kvzalloc_flex(*set_lsa, data, cmd->in_length);
if (!set_lsa)
return -ENOMEM;

View file

@ -121,7 +121,7 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
init_node_memory_type(numa_node, mtype);
rc = -ENOMEM;
data = kzalloc_flex(*data, res, dev_dax->nr_range, GFP_KERNEL);
data = kzalloc_flex(*data, res, dev_dax->nr_range);
if (!data)
goto err_dax_kmem_data;

View file

@ -179,7 +179,7 @@ struct dma_fence_array *dma_fence_array_alloc(int num_fences)
{
struct dma_fence_array *array;
return kzalloc_flex(*array, callbacks, num_fences, GFP_KERNEL);
return kzalloc_flex(*array, callbacks, num_fences);
}
EXPORT_SYMBOL(dma_fence_array_alloc);

View file

@ -635,7 +635,7 @@ static int td_probe(struct platform_device *pdev)
DRIVER_NAME))
return -EBUSY;
td = kzalloc_flex(*td, channels, pdata->nr_channels, GFP_KERNEL);
td = kzalloc_flex(*td, channels, pdata->nr_channels);
if (!td) {
err = -ENOMEM;
goto err_release_region;

View file

@ -257,7 +257,7 @@ static int imh_get_all_mmio_base_h(struct res_config *cfg, struct list_head *eda
struct skx_dev *d;
for (i = 0; i < n; i++) {
d = kzalloc_flex(*d, imc, imc_num, GFP_KERNEL);
d = kzalloc_flex(*d, imc, imc_num);
if (!d)
return -ENOMEM;

View file

@ -346,7 +346,7 @@ int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list)
if (!pdev)
break;
ndev++;
d = kzalloc_flex(*d, imc, imc_num, GFP_KERNEL);
d = kzalloc_flex(*d, imc, imc_num);
if (!d) {
pci_dev_put(pdev);
return -ENOMEM;

View file

@ -68,7 +68,7 @@ static int cros_ec_pd_command(struct cros_ec_extcon_info *info,
struct cros_ec_command *msg;
int ret;
msg = kzalloc_flex(*msg, data, max(outsize, insize), GFP_KERNEL);
msg = kzalloc_flex(*msg, data, max(outsize, insize));
if (!msg)
return -ENOMEM;

View file

@ -941,7 +941,7 @@ static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg)
if (a->length > 256)
return -EINVAL;
r = kmalloc_flex(*r, data, a->length, GFP_KERNEL);
r = kmalloc_flex(*r, data, a->length);
if (r == NULL)
return -ENOMEM;

View file

@ -916,7 +916,7 @@ static int gpio_aggregator_activate(struct gpio_aggregator *aggr)
if (gpio_aggregator_count_lines(aggr) == 0)
return -EINVAL;
aggr->lookups = kzalloc_flex(*aggr->lookups, table, 1, GFP_KERNEL);
aggr->lookups = kzalloc_flex(*aggr->lookups, table, 1);
if (!aggr->lookups)
return -ENOMEM;
@ -1456,7 +1456,7 @@ static ssize_t gpio_aggregator_new_device_store(struct device_driver *driver,
memcpy(aggr->args, buf, count + 1);
aggr->init_via_sysfs = true;
aggr->lookups = kzalloc_flex(*aggr->lookups, table, 1, GFP_KERNEL);
aggr->lookups = kzalloc_flex(*aggr->lookups, table, 1);
if (!aggr->lookups) {
res = -ENOMEM;
goto free_ga;

View file

@ -1388,7 +1388,7 @@ gpio_virtuser_make_lookup_table(struct gpio_virtuser_device *dev)
lockdep_assert_held(&dev->lock);
struct gpiod_lookup_table *table __free(kfree) =
kzalloc_flex(*table, table, num_entries + 1, GFP_KERNEL);
kzalloc_flex(*table, table, num_entries + 1);
if (!table)
return -ENOMEM;

View file

@ -1610,7 +1610,7 @@ static int linereq_create(struct gpio_device *gdev, void __user *ip)
if (ret)
return ret;
lr = kvzalloc_flex(*lr, lines, ulr.num_lines, GFP_KERNEL);
lr = kvzalloc_flex(*lr, lines, ulr.num_lines);
if (!lr)
return -ENOMEM;
lr->num_lines = ulr.num_lines;

View file

@ -478,7 +478,7 @@ int gpio_shared_add_proxy_lookup(struct device *consumer, const char *con_id,
if (!key)
return -ENOMEM;
lookup = kzalloc_flex(*lookup, table, 2, GFP_KERNEL);
lookup = kzalloc_flex(*lookup, table, 2);
if (!lookup)
return -ENOMEM;

View file

@ -147,7 +147,7 @@ static int desc_set_label(struct gpio_desc *desc, const char *label)
struct gpio_desc_label *new = NULL, *old;
if (label) {
new = kzalloc_flex(*new, str, strlen(label) + 1, GFP_KERNEL);
new = kzalloc_flex(*new, str, strlen(label) + 1);
if (!new)
return -ENOMEM;

View file

@ -76,7 +76,7 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
unsigned i;
int r;
list = kvzalloc_flex(*list, entries, num_entries, GFP_KERNEL);
list = kvzalloc_flex(*list, entries, num_entries);
if (!list)
return -ENOMEM;

View file

@ -212,7 +212,7 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
int32_t ctx_prio;
int r;
entity = kzalloc_flex(*entity, fences, amdgpu_sched_jobs, GFP_KERNEL);
entity = kzalloc_flex(*entity, fences, amdgpu_sched_jobs);
if (!entity)
return -ENOMEM;

View file

@ -122,7 +122,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
struct ttm_range_mgr_node *node;
int r;
node = kzalloc_flex(*node, mm_nodes, 1, GFP_KERNEL);
node = kzalloc_flex(*node, mm_nodes, 1);
if (!node)
return -ENOMEM;

View file

@ -198,7 +198,7 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (num_ibs == 0)
return -EINVAL;
*job = kzalloc_flex(**job, ibs, num_ibs, GFP_KERNEL);
*job = kzalloc_flex(**job, ibs, num_ibs);
if (!*job)
return -ENOMEM;

View file

@ -165,7 +165,7 @@ static int get_vddc_lookup_table(
PP_ASSERT_WITH_CODE((0 != vddc_lookup_pp_tables->ucNumEntries),
"Invalid CAC Leakage PowerPlay Table!", return 1);
table = kzalloc_flex(*table, entries, max_levels, GFP_KERNEL);
table = kzalloc_flex(*table, entries, max_levels);
if (!table)
return -ENOMEM;

View file

@ -133,7 +133,7 @@ static int smu10_init_dynamic_state_adjustment_rule_settings(
int count = 8;
struct phm_clock_voltage_dependency_table *table_clk_vlt;
table_clk_vlt = kzalloc_flex(*table_clk_vlt, entries, count, GFP_KERNEL);
table_clk_vlt = kzalloc_flex(*table_clk_vlt, entries, count);
if (NULL == table_clk_vlt) {
pr_err("Can not allocate memory!\n");
@ -472,7 +472,7 @@ static int smu10_get_clock_voltage_dependency_table(struct pp_hwmgr *hwmgr,
uint32_t i;
struct smu10_voltage_dependency_table *ptable;
ptable = kzalloc_flex(*ptable, entries, num_entry, GFP_KERNEL);
ptable = kzalloc_flex(*ptable, entries, num_entry);
if (NULL == ptable)
return -ENOMEM;

View file

@ -276,7 +276,7 @@ static int smu8_init_dynamic_state_adjustment_rule_settings(
{
struct phm_clock_voltage_dependency_table *table_clk_vlt;
table_clk_vlt = kzalloc_flex(*table_clk_vlt, entries, 8, GFP_KERNEL);
table_clk_vlt = kzalloc_flex(*table_clk_vlt, entries, 8);
if (NULL == table_clk_vlt) {
pr_err("Can not allocate memory!\n");

View file

@ -495,7 +495,7 @@ int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr
struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
/* initialize vddc_dep_on_dal_pwrl table */
table_clk_vlt = kzalloc_flex(*table_clk_vlt, entries, 4, GFP_KERNEL);
table_clk_vlt = kzalloc_flex(*table_clk_vlt, entries, 4);
if (NULL == table_clk_vlt) {
pr_err("Can not allocate space for vddc_dep_on_dal_pwrl! \n");

View file

@ -755,7 +755,7 @@ static int get_dcefclk_voltage_dependency_table(
num_entries = clk_dep_table->ucNumEntries;
clk_table = kzalloc_flex(*clk_table, entries, num_entries, GFP_KERNEL);
clk_table = kzalloc_flex(*clk_table, entries, num_entries);
if (!clk_table)
return -ENOMEM;
@ -1040,7 +1040,7 @@ static int get_vddc_lookup_table(
PP_ASSERT_WITH_CODE((vddc_lookup_pp_tables->ucNumEntries != 0),
"Invalid SOC_VDDD Lookup Table!", return 1);
table = kzalloc_flex(*table, entries, max_levels, GFP_KERNEL);
table = kzalloc_flex(*table, entries, max_levels);
if (!table)
return -ENOMEM;

View file

@ -231,7 +231,7 @@ static void soc_opaque_gpio_set_value(struct intel_connector *connector,
{
struct gpiod_lookup_table *lookup;
lookup = kzalloc_flex(*lookup, table, 2, GFP_KERNEL);
lookup = kzalloc_flex(*lookup, table, 2);
if (!lookup)
return;

View file

@ -1103,7 +1103,7 @@ static struct i915_gem_engines *alloc_engines(unsigned int count)
{
struct i915_gem_engines *e;
e = kzalloc_flex(*e, engines, count, GFP_KERNEL);
e = kzalloc_flex(*e, engines, count);
if (!e)
return NULL;

View file

@ -103,7 +103,7 @@ static struct dma_buf *mock_dmabuf(int npages)
struct dma_buf *dmabuf;
int i;
mock = kmalloc_flex(*mock, pages, npages, GFP_KERNEL);
mock = kmalloc_flex(*mock, pages, npages);
if (!mock)
return ERR_PTR(-ENOMEM);

View file

@ -3934,7 +3934,7 @@ execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
unsigned int n;
int err;
ve = kzalloc_flex(*ve, siblings, count, GFP_KERNEL);
ve = kzalloc_flex(*ve, siblings, count);
if (!ve)
return ERR_PTR(-ENOMEM);

View file

@ -1183,7 +1183,7 @@ static int intel_vgpu_ioctl_get_region_info(struct vfio_device *vfio_dev,
VFIO_REGION_INFO_FLAG_WRITE;
info->size = gvt_aperture_sz(vgpu->gvt);
sparse = kzalloc_flex(*sparse, areas, nr_areas, GFP_KERNEL);
sparse = kzalloc_flex(*sparse, areas, nr_areas);
if (!sparse)
return -ENOMEM;

View file

@ -197,7 +197,7 @@ __sync_alloc_leaf(struct i915_syncmap *parent, u64 id)
{
struct i915_syncmap *p;
p = kmalloc_flex(*p, seqno, KSYNCMAP, GFP_KERNEL);
p = kmalloc_flex(*p, seqno, KSYNCMAP);
if (unlikely(!p))
return NULL;
@ -279,7 +279,7 @@ static noinline int __sync_set(struct i915_syncmap **root, u64 id, u32 seqno)
unsigned int above;
/* Insert a join above the current layer */
next = kzalloc_flex(*next, child, KSYNCMAP, GFP_KERNEL);
next = kzalloc_flex(*next, child, KSYNCMAP);
if (unlikely(!next))
return -ENOMEM;

View file

@ -2841,7 +2841,7 @@ static int perf_series_engines(void *arg)
if (!stats)
return -ENOMEM;
ps = kzalloc_flex(*ps, ce, nengines, GFP_KERNEL);
ps = kzalloc_flex(*ps, ce, nengines);
if (!ps) {
kfree(stats);
return -ENOMEM;

View file

@ -900,7 +900,7 @@ nouveau_pfns_alloc(unsigned long npages)
{
struct nouveau_pfnmap_args *args;
args = kzalloc_flex(*args, p.phys, npages, GFP_KERNEL);
args = kzalloc_flex(*args, p.phys, npages);
if (!args)
return NULL;
@ -1063,7 +1063,7 @@ nouveau_svm_init(struct nouveau_drm *drm)
if (drm->client.device.info.family > NV_DEVICE_INFO_V0_PASCAL)
return;
drm->svm = svm = kzalloc_flex(*drm->svm, buffer, 1, GFP_KERNEL);
drm->svm = svm = kzalloc_flex(*drm->svm, buffer, 1);
if (!drm->svm)
return;

View file

@ -89,7 +89,7 @@ nvkm_chid_new(const struct nvkm_event_func *func, struct nvkm_subdev *subdev,
struct nvkm_chid *chid;
int id;
if (!(chid = *pchid = kzalloc_flex(*chid, used, nr, GFP_KERNEL)))
if (!(chid = *pchid = kzalloc_flex(*chid, used, nr)))
return -ENOMEM;
kref_init(&chid->kref);

View file

@ -106,7 +106,7 @@ nvkm_rm_engine_ctor(void *(*dtor)(struct nvkm_engine *), struct nvkm_rm *rm,
{
struct nvkm_engine_func *func;
func = kzalloc_flex(*func, sclass, nclass + 1, GFP_KERNEL);
func = kzalloc_flex(*func, sclass, nclass + 1);
if (!func)
return -ENOMEM;

View file

@ -56,7 +56,7 @@ nvkm_rm_gr_new(struct nvkm_rm *rm)
struct nvkm_gr_func *func;
struct r535_gr *gr;
func = kzalloc_flex(*func, sclass, ARRAY_SIZE(classes) + 1, GFP_KERNEL);
func = kzalloc_flex(*func, sclass, ARRAY_SIZE(classes) + 1);
if (!func)
return -ENOMEM;

View file

@ -73,7 +73,7 @@ static int ttm_range_man_alloc(struct ttm_resource_manager *man,
if (!lpfn)
lpfn = man->size;
node = kzalloc_flex(*node, mm_nodes, 1, GFP_KERNEL);
node = kzalloc_flex(*node, mm_nodes, 1);
if (!node)
return -ENOMEM;

View file

@ -353,7 +353,7 @@ int v3d_perfmon_create_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
perfmon = kzalloc_flex(*perfmon, values, req->ncounters, GFP_KERNEL);
perfmon = kzalloc_flex(*perfmon, values, req->ncounters);
if (!perfmon)
return -ENOMEM;

View file

@ -172,7 +172,7 @@ int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
perfmon = kzalloc_flex(*perfmon, counters, req->ncounters, GFP_KERNEL);
perfmon = kzalloc_flex(*perfmon, counters, req->ncounters);
if (!perfmon)
return -ENOMEM;
perfmon->dev = vc4;

View file

@ -167,7 +167,7 @@ struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents)
{
struct virtio_gpu_object_array *objs;
objs = kmalloc_flex(*objs, objs, nents, GFP_KERNEL);
objs = kmalloc_flex(*objs, objs, nents);
if (!objs)
return NULL;

View file

@ -208,7 +208,7 @@ static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
/* only kernel queues can be permanent */
XE_WARN_ON((flags & EXEC_QUEUE_FLAG_PERMANENT) && !(flags & EXEC_QUEUE_FLAG_KERNEL));
q = kzalloc_flex(*q, lrc, width, GFP_KERNEL);
q = kzalloc_flex(*q, lrc, width);
if (!q)
return ERR_PTR(-ENOMEM);

View file

@ -710,7 +710,7 @@ static int config_blob_open(struct inode *inode, struct file *file)
if (ret < 0)
return ret;
cbd = kzalloc_flex(*cbd, blob, ret, GFP_KERNEL);
cbd = kzalloc_flex(*cbd, blob, ret);
if (!cbd)
return -ENOMEM;

View file

@ -64,7 +64,7 @@ static struct xe_lmtt_pt *lmtt_pt_alloc(struct xe_lmtt *lmtt, unsigned int level
struct xe_bo *bo;
int err;
pt = kzalloc_flex(*pt, entries, num_entries, GFP_KERNEL);
pt = kzalloc_flex(*pt, entries, num_entries);
if (!pt) {
err = -ENOMEM;
goto out;

View file

@ -33,7 +33,7 @@ static int xe_ttm_sys_mgr_new(struct ttm_resource_manager *man,
struct xe_ttm_sys_node *node;
int r;
node = kzalloc_flex(*node, base.mm_nodes, 1, GFP_KERNEL);
node = kzalloc_flex(*node, base.mm_nodes, 1);
if (!node)
return -ENOMEM;

View file

@ -93,7 +93,7 @@ struct gb_module *gb_module_create(struct gb_host_device *hd, u8 module_id,
struct gb_module *module;
int i;
module = kzalloc_flex(*module, interfaces, num_interfaces, GFP_KERNEL);
module = kzalloc_flex(*module, interfaces, num_interfaces);
if (!module)
return NULL;

View file

@ -850,7 +850,7 @@ static int hte_register_chip(struct hte_chip *chip)
return -EINVAL;
}
gdev = kzalloc_flex(*gdev, ei, chip->nlines, GFP_KERNEL);
gdev = kzalloc_flex(*gdev, ei, chip->nlines);
if (!gdev)
return -ENOMEM;

View file

@ -724,7 +724,7 @@ struct i2c_atr *i2c_atr_new(struct i2c_adapter *parent, struct device *dev,
if (!ops || !ops->attach_addr || !ops->detach_addr)
return ERR_PTR(-EINVAL);
atr = kzalloc_flex(*atr, adapter, max_adapters, GFP_KERNEL);
atr = kzalloc_flex(*atr, adapter, max_adapters);
if (!atr)
return ERR_PTR(-ENOMEM);

View file

@ -187,7 +187,7 @@ static struct adi_i3c_xfer *adi_i3c_master_alloc_xfer(struct adi_i3c_master *mas
{
struct adi_i3c_xfer *xfer;
xfer = kzalloc_flex(*xfer, cmds, ncmds, GFP_KERNEL);
xfer = kzalloc_flex(*xfer, cmds, ncmds);
if (!xfer)
return NULL;

View file

@ -382,7 +382,7 @@ dw_i3c_master_alloc_xfer(struct dw_i3c_master *master, unsigned int ncmds)
{
struct dw_i3c_xfer *xfer;
xfer = kzalloc_flex(*xfer, cmds, ncmds, GFP_KERNEL);
xfer = kzalloc_flex(*xfer, cmds, ncmds);
if (!xfer)
return NULL;

View file

@ -498,7 +498,7 @@ cdns_i3c_master_alloc_xfer(struct cdns_i3c_master *master, unsigned int ncmds)
{
struct cdns_i3c_xfer *xfer;
xfer = kzalloc_flex(*xfer, cmds, ncmds, GFP_KERNEL);
xfer = kzalloc_flex(*xfer, cmds, ncmds);
if (!xfer)
return NULL;

View file

@ -328,7 +328,7 @@ static int hci_dma_init(struct i3c_hci *hci)
}
if (nr_rings > XFER_RINGS)
nr_rings = XFER_RINGS;
rings = kzalloc_flex(*rings, headers, nr_rings, GFP_KERNEL);
rings = kzalloc_flex(*rings, headers, nr_rings);
if (!rings)
return -ENOMEM;
hci->io_data = rings;

View file

@ -345,7 +345,7 @@ static struct renesas_i3c_xfer *renesas_i3c_alloc_xfer(struct renesas_i3c *i3c,
{
struct renesas_i3c_xfer *xfer;
xfer = kzalloc_flex(*xfer, cmds, ncmds, GFP_KERNEL);
xfer = kzalloc_flex(*xfer, cmds, ncmds);
if (!xfer)
return NULL;

View file

@ -1504,7 +1504,7 @@ svc_i3c_master_alloc_xfer(struct svc_i3c_master *master, unsigned int ncmds)
{
struct svc_i3c_xfer *xfer;
xfer = kzalloc_flex(*xfer, cmds, ncmds, GFP_KERNEL);
xfer = kzalloc_flex(*xfer, cmds, ncmds);
if (!xfer)
return NULL;

View file

@ -60,7 +60,7 @@ static struct hw_consumer_buffer *iio_hw_consumer_get_buffer(
return buf;
}
buf = kzalloc_flex(*buf, scan_mask, mask_longs, GFP_KERNEL);
buf = kzalloc_flex(*buf, scan_mask, mask_longs);
if (!buf)
return NULL;

View file

@ -4050,7 +4050,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
atomic_long_inc(&port->counters[CM_RECV][attr_id - CM_ATTR_ID_OFFSET]);
work = kmalloc_flex(*work, path, paths, GFP_KERNEL);
work = kmalloc_flex(*work, path, paths);
if (!work) {
ib_free_recv_mad(mad_recv_wc);
return;

View file

@ -823,7 +823,7 @@ static int mcast_add_one(struct ib_device *device)
int i;
int count = 0;
dev = kmalloc_flex(*dev, port, device->phys_port_cnt, GFP_KERNEL);
dev = kmalloc_flex(*dev, port, device->phys_port_cnt);
if (!dev)
return -ENOMEM;

View file

@ -3205,7 +3205,7 @@ struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
{
struct rdma_hw_stats *stats;
stats = kzalloc_flex(*stats, value, num_counters, GFP_KERNEL);
stats = kzalloc_flex(*stats, value, num_counters);
if (!stats)
return NULL;

Some files were not shown because too many files have changed in this diff Show more