Convert more 'alloc_obj' cases to default GFP_KERNEL arguments

This converts some of the visually simpler cases that have been split
over multiple lines.  I only did the ones where the resulting diff is
easy to verify because the removed line has just that final GFP_KERNEL
argument on it.

Somebody should probably do a proper coccinelle script for this, but for
me the trivial script actually resulted in an assertion failure in the
middle of the script.  I probably had made it a bit _too_ trivial.

So after fighting that for a while, I decided to just do some of the
syntactically simpler cases with variations of the previous 'sed'
scripts.

The more syntactically complex multi-line cases would mostly really want
whitespace cleanup anyway.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Linus Torvalds 2026-02-21 20:03:00 -08:00
parent 323bbfcf1e
commit 32a92f8c89
826 changed files with 1211 additions and 2422 deletions

View file

@ -294,8 +294,7 @@ static void omap_mcbsp_register_board_cfg(struct resource *res, int res_count,
{
int i;
omap_mcbsp_devices = kzalloc_objs(struct platform_device *, size,
GFP_KERNEL);
omap_mcbsp_devices = kzalloc_objs(struct platform_device *, size);
if (!omap_mcbsp_devices) {
printk(KERN_ERR "Could not register McBSP devices\n");
return;

View file

@ -81,8 +81,7 @@ static int __init __vdso_init(enum vdso_abi abi)
vdso_info[abi].vdso_code_start) >>
PAGE_SHIFT;
vdso_pagelist = kzalloc_objs(struct page *, vdso_info[abi].vdso_pages,
GFP_KERNEL);
vdso_pagelist = kzalloc_objs(struct page *, vdso_info[abi].vdso_pages);
if (vdso_pagelist == NULL)
return -ENOMEM;

View file

@ -52,8 +52,7 @@ static int __init init_vdso(void)
vdso_info.size = PAGE_ALIGN(vdso_end - vdso_start);
vdso_info.code_mapping.pages =
kzalloc_objs(struct page *, vdso_info.size / PAGE_SIZE,
GFP_KERNEL);
kzalloc_objs(struct page *, vdso_info.size / PAGE_SIZE);
if (!vdso_info.code_mapping.pages)
return -ENOMEM;

View file

@ -1738,8 +1738,7 @@ static int kvmppc_core_vcpu_create_pr(struct kvm_vcpu *vcpu)
vcpu->arch.book3s = vcpu_book3s;
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
vcpu->arch.shadow_vcpu = kzalloc_obj(*vcpu->arch.shadow_vcpu,
GFP_KERNEL);
vcpu->arch.shadow_vcpu = kzalloc_obj(*vcpu->arch.shadow_vcpu);
if (!vcpu->arch.shadow_vcpu)
goto free_vcpu3s;
#endif

View file

@ -96,8 +96,7 @@ static int hash__init_new_context(struct mm_struct *mm)
{
int index;
mm->context.hash_context = kmalloc_obj(struct hash_mm_context,
GFP_KERNEL);
mm->context.hash_context = kmalloc_obj(struct hash_mm_context);
if (!mm->context.hash_context)
return -ENOMEM;
@ -124,8 +123,7 @@ static int hash__init_new_context(struct mm_struct *mm)
#ifdef CONFIG_PPC_SUBPAGE_PROT
/* inherit subpage prot details if we have one. */
if (current->mm->context.hash_context->spt) {
mm->context.hash_context->spt = kmalloc_obj(struct subpage_prot_table,
GFP_KERNEL);
mm->context.hash_context->spt = kmalloc_obj(struct subpage_prot_table);
if (!mm->context.hash_context->spt) {
kfree(mm->context.hash_context);
return -ENOMEM;

View file

@ -917,8 +917,7 @@ static int create_events_from_catalog(struct attribute ***events_,
goto e_event_attrs;
}
event_long_descs = kmalloc_objs(*event_long_descs, event_idx + 1,
GFP_KERNEL);
event_long_descs = kmalloc_objs(*event_long_descs, event_idx + 1);
if (!event_long_descs) {
ret = -ENOMEM;
goto e_event_descs;

View file

@ -1527,8 +1527,7 @@ static int init_nest_pmu_ref(void)
{
int nid, i, cpu;
nest_imc_refc = kzalloc_objs(*nest_imc_refc, num_possible_nodes(),
GFP_KERNEL);
nest_imc_refc = kzalloc_objs(*nest_imc_refc, num_possible_nodes());
if (!nest_imc_refc)
return -ENOMEM;
@ -1714,14 +1713,12 @@ static int imc_mem_init(struct imc_pmu *pmu_ptr, struct device_node *parent,
goto err;
nr_cores = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);
pmu_ptr->mem_info = kzalloc_objs(struct imc_mem_info, nr_cores,
GFP_KERNEL);
pmu_ptr->mem_info = kzalloc_objs(struct imc_mem_info, nr_cores);
if (!pmu_ptr->mem_info)
goto err;
core_imc_refc = kzalloc_objs(struct imc_pmu_ref, nr_cores,
GFP_KERNEL);
core_imc_refc = kzalloc_objs(struct imc_pmu_ref, nr_cores);
if (!core_imc_refc) {
kfree(pmu_ptr->mem_info);
@ -1754,8 +1751,7 @@ static int imc_mem_init(struct imc_pmu *pmu_ptr, struct device_node *parent,
return -ENOMEM;
nr_cores = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);
trace_imc_refc = kzalloc_objs(struct imc_pmu_ref, nr_cores,
GFP_KERNEL);
trace_imc_refc = kzalloc_objs(struct imc_pmu_ref, nr_cores);
if (!trace_imc_refc)
return -ENOMEM;

View file

@ -1336,8 +1336,7 @@ static int __init pnv_parse_cpuidle_dt(void)
nr_idle_states = of_property_count_u32_elems(np,
"ibm,cpu-idle-state-flags");
pnv_idle_states = kzalloc_objs(*pnv_idle_states, nr_idle_states,
GFP_KERNEL);
pnv_idle_states = kzalloc_objs(*pnv_idle_states, nr_idle_states);
temp_u32 = kcalloc(nr_idle_states, sizeof(u32), GFP_KERNEL);
temp_u64 = kcalloc(nr_idle_states, sizeof(u64), GFP_KERNEL);
temp_string = kcalloc(nr_idle_states, sizeof(char *), GFP_KERNEL);

View file

@ -133,8 +133,7 @@ static int memtrace_init_regions_runtime(u64 size)
u32 nid;
u64 m;
memtrace_array = kzalloc_objs(struct memtrace_entry, num_online_nodes(),
GFP_KERNEL);
memtrace_array = kzalloc_objs(struct memtrace_entry, num_online_nodes());
if (!memtrace_array) {
pr_err("Failed to allocate memtrace_array\n");
return -EINVAL;

View file

@ -108,8 +108,7 @@ static int imc_get_mem_addr_nest(struct device_node *node,
nr_chips))
goto error;
pmu_ptr->mem_info = kzalloc_objs(*pmu_ptr->mem_info, nr_chips + 1,
GFP_KERNEL);
pmu_ptr->mem_info = kzalloc_objs(*pmu_ptr->mem_info, nr_chips + 1);
if (!pmu_ptr->mem_info)
goto error;

View file

@ -181,13 +181,11 @@ void __init opal_powercap_init(void)
has_cur = true;
}
pcaps[i].pattrs = kzalloc_objs(struct powercap_attr, j,
GFP_KERNEL);
pcaps[i].pattrs = kzalloc_objs(struct powercap_attr, j);
if (!pcaps[i].pattrs)
goto out_pcaps_pattrs;
pcaps[i].pg.attrs = kzalloc_objs(struct attribute *, j + 1,
GFP_KERNEL);
pcaps[i].pg.attrs = kzalloc_objs(struct attribute *, j + 1);
if (!pcaps[i].pg.attrs) {
kfree(pcaps[i].pattrs);
goto out_pcaps_pattrs;

View file

@ -132,8 +132,7 @@ void __init opal_psr_init(void)
return;
}
psr_attrs = kzalloc_objs(*psr_attrs, of_get_child_count(psr),
GFP_KERNEL);
psr_attrs = kzalloc_objs(*psr_attrs, of_get_child_count(psr));
if (!psr_attrs)
goto out_put_psr;

View file

@ -190,13 +190,11 @@ void __init opal_sensor_groups_init(void)
if (!nr_attrs)
continue;
sgs[i].sgattrs = kzalloc_objs(*sgs[i].sgattrs, nr_attrs,
GFP_KERNEL);
sgs[i].sgattrs = kzalloc_objs(*sgs[i].sgattrs, nr_attrs);
if (!sgs[i].sgattrs)
goto out_sgs_sgattrs;
sgs[i].sg.attrs = kzalloc_objs(*sgs[i].sg.attrs, nr_attrs + 1,
GFP_KERNEL);
sgs[i].sg.attrs = kzalloc_objs(*sgs[i].sg.attrs, nr_attrs + 1);
if (!sgs[i].sg.attrs) {
kfree(sgs[i].sgattrs);

View file

@ -1639,8 +1639,7 @@ void __init mpic_init(struct mpic *mpic)
#ifdef CONFIG_PM
/* allocate memory to save mpic state */
mpic->save_data = kmalloc_objs(*mpic->save_data, mpic->num_sources,
GFP_KERNEL);
mpic->save_data = kmalloc_objs(*mpic->save_data, mpic->num_sources);
BUG_ON(mpic->save_data == NULL);
#endif

View file

@ -188,8 +188,7 @@ static int mpic_msgr_probe(struct platform_device *dev)
dev_info(&dev->dev, "Found %d message registers\n",
mpic_msgr_count);
mpic_msgrs = kzalloc_objs(*mpic_msgrs, mpic_msgr_count,
GFP_KERNEL);
mpic_msgrs = kzalloc_objs(*mpic_msgrs, mpic_msgr_count);
if (!mpic_msgrs) {
dev_err(&dev->dev,
"No memory for message register blocks\n");

View file

@ -55,8 +55,7 @@ static void __init __vdso_init(struct __vdso_info *vdso_info)
vdso_info->vdso_code_start) >>
PAGE_SHIFT;
vdso_pagelist = kzalloc_objs(struct page *, vdso_info->vdso_pages,
GFP_KERNEL);
vdso_pagelist = kzalloc_objs(struct page *, vdso_info->vdso_pages);
if (vdso_pagelist == NULL)
panic("vDSO kcalloc failed!\n");

View file

@ -126,8 +126,7 @@ int kvm_s390_pci_aen_init(u8 nisc)
return -EPERM;
mutex_lock(&aift->aift_lock);
aift->kzdev = kzalloc_objs(struct kvm_zdev *, ZPCI_NR_DEVICES,
GFP_KERNEL);
aift->kzdev = kzalloc_objs(struct kvm_zdev *, ZPCI_NR_DEVICES);
if (!aift->kzdev) {
rc = -ENOMEM;
goto unlock;

View file

@ -1073,8 +1073,7 @@ static int zpci_mem_init(void)
if (!zdev_fmb_cache)
goto error_fmb;
zpci_iomap_start = kzalloc_objs(*zpci_iomap_start, ZPCI_IOMAP_ENTRIES,
GFP_KERNEL);
zpci_iomap_start = kzalloc_objs(*zpci_iomap_start, ZPCI_IOMAP_ENTRIES);
if (!zpci_iomap_start)
goto error_iomap;

View file

@ -558,8 +558,7 @@ static int __init sh7786_pcie_init(void)
if (unlikely(nr_ports == 0))
return -ENODEV;
sh7786_pcie_ports = kzalloc_objs(struct sh7786_pcie_port, nr_ports,
GFP_KERNEL);
sh7786_pcie_ports = kzalloc_objs(struct sh7786_pcie_port, nr_ports);
if (unlikely(!sh7786_pcie_ports))
return -ENOMEM;

View file

@ -297,8 +297,7 @@ static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg,
unsigned long hv_err;
int i;
hdesc = kzalloc_flex(*hdesc, maps, num_kernel_image_mappings,
GFP_KERNEL);
hdesc = kzalloc_flex(*hdesc, maps, num_kernel_image_mappings);
if (!hdesc) {
printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
"hvtramp_descr.\n");

View file

@ -647,8 +647,7 @@ SYSCALL_DEFINE5(utrap_install, utrap_entry_t, type,
}
if (!current_thread_info()->utraps) {
current_thread_info()->utraps =
kzalloc_objs(long, UT_TRAP_INSTRUCTION_31 + 1,
GFP_KERNEL);
kzalloc_objs(long, UT_TRAP_INSTRUCTION_31 + 1);
if (!current_thread_info()->utraps)
return -ENOMEM;
current_thread_info()->utraps[0] = 1;
@ -658,8 +657,7 @@ SYSCALL_DEFINE5(utrap_install, utrap_entry_t, type,
unsigned long *p = current_thread_info()->utraps;
current_thread_info()->utraps =
kmalloc_objs(long, UT_TRAP_INSTRUCTION_31 + 1,
GFP_KERNEL);
kmalloc_objs(long, UT_TRAP_INSTRUCTION_31 + 1);
if (!current_thread_info()->utraps) {
current_thread_info()->utraps = p;
return -ENOMEM;

View file

@ -544,11 +544,9 @@ static struct vector_queue *create_queue(
result->max_iov_frags = num_extra_frags;
for (i = 0; i < max_size; i++) {
if (vp->header_size > 0)
iov = kmalloc_objs(struct iovec, 3 + num_extra_frags,
GFP_KERNEL);
iov = kmalloc_objs(struct iovec, 3 + num_extra_frags);
else
iov = kmalloc_objs(struct iovec, 2 + num_extra_frags,
GFP_KERNEL);
iov = kmalloc_objs(struct iovec, 2 + num_extra_frags);
if (iov == NULL)
goto out_fail;
mmsg_vector->msg_hdr.msg_iov = iov;

View file

@ -3753,8 +3753,7 @@ static int pmu_alloc_topology(struct intel_uncore_type *type, int topology_type)
goto err;
for (die = 0; die < uncore_max_dies(); die++) {
topology[die] = kzalloc_objs(**topology, type->num_boxes,
GFP_KERNEL);
topology[die] = kzalloc_objs(**topology, type->num_boxes);
if (!topology[die])
goto clear;
for (idx = 0; idx < type->num_boxes; idx++) {

View file

@ -467,8 +467,7 @@ void __init hyperv_init(void)
if (hv_isolation_type_tdx())
hv_vp_assist_page = NULL;
else
hv_vp_assist_page = kzalloc_objs(*hv_vp_assist_page, nr_cpu_ids,
GFP_KERNEL);
hv_vp_assist_page = kzalloc_objs(*hv_vp_assist_page, nr_cpu_ids);
if (!hv_vp_assist_page) {
ms_hyperv.hints &= ~HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;

View file

@ -68,8 +68,7 @@ static int amd_cache_northbridges(void)
amd_northbridges.num = amd_num_nodes();
nb = kzalloc_objs(struct amd_northbridge, amd_northbridges.num,
GFP_KERNEL);
nb = kzalloc_objs(struct amd_northbridge, amd_northbridges.num);
if (!nb)
return -ENOMEM;

View file

@ -798,8 +798,7 @@ static bool __init sgx_page_cache_init(void)
int nid;
int i;
sgx_numa_nodes = kmalloc_objs(*sgx_numa_nodes, num_possible_nodes(),
GFP_KERNEL);
sgx_numa_nodes = kmalloc_objs(*sgx_numa_nodes, num_possible_nodes());
if (!sgx_numa_nodes)
return false;

View file

@ -1991,8 +1991,7 @@ int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
if (sanity_check_entries(entries, cpuid->nent, type))
return -EINVAL;
array.entries = kvzalloc_objs(struct kvm_cpuid_entry2, cpuid->nent,
GFP_KERNEL);
array.entries = kvzalloc_objs(struct kvm_cpuid_entry2, cpuid->nent);
if (!array.entries)
return -ENOMEM;

View file

@ -2218,8 +2218,7 @@ static int tdx_get_capabilities(struct kvm_tdx_cmd *cmd)
if (nr_user_entries < td_conf->num_cpuid_config)
return -E2BIG;
caps = kzalloc_flex(*caps, cpuid.entries, td_conf->num_cpuid_config,
GFP_KERNEL);
caps = kzalloc_flex(*caps, cpuid.entries, td_conf->num_cpuid_config);
if (!caps)
return -ENOMEM;

View file

@ -546,8 +546,7 @@ static int blk_crypto_fallback_init(void)
goto out;
/* Dynamic allocation is needed because of lockdep_register_key(). */
blk_crypto_fallback_profile = kzalloc_obj(*blk_crypto_fallback_profile,
GFP_KERNEL);
blk_crypto_fallback_profile = kzalloc_obj(*blk_crypto_fallback_profile);
if (!blk_crypto_fallback_profile) {
err = -ENOMEM;
goto fail_free_bioset;

View file

@ -120,8 +120,7 @@ int blk_crypto_profile_init(struct blk_crypto_profile *profile,
profile->log_slot_ht_size = ilog2(slot_hashtable_size);
profile->slot_hashtable =
kvmalloc_objs(profile->slot_hashtable[0], slot_hashtable_size,
GFP_KERNEL);
kvmalloc_objs(profile->slot_hashtable[0], slot_hashtable_size);
if (!profile->slot_hashtable)
goto err_destroy;
for (i = 0; i < slot_hashtable_size; i++)

View file

@ -210,8 +210,7 @@ static int amdxdna_hmm_register(struct amdxdna_gem_obj *abo,
return -ENOMEM;
nr_pages = (PAGE_ALIGN(addr + len) - (addr & PAGE_MASK)) >> PAGE_SHIFT;
mapp->range.hmm_pfns = kvzalloc_objs(*mapp->range.hmm_pfns, nr_pages,
GFP_KERNEL);
mapp->range.hmm_pfns = kvzalloc_objs(*mapp->range.hmm_pfns, nr_pages);
if (!mapp->range.hmm_pfns) {
ret = -ENOMEM;
goto free_map;

View file

@ -144,8 +144,7 @@ static int ethosu_open(struct drm_device *ddev, struct drm_file *file)
if (!try_module_get(THIS_MODULE))
return -EINVAL;
struct ethosu_file_priv __free(kfree) *priv = kzalloc_obj(*priv,
GFP_KERNEL);
struct ethosu_file_priv __free(kfree) *priv = kzalloc_obj(*priv);
if (!priv) {
ret = -ENOMEM;
goto err_put_mod;

View file

@ -352,8 +352,7 @@ static int ethosu_gem_cmdstream_copy_and_validate(struct drm_device *ddev,
struct ethosu_gem_object *bo,
u32 size)
{
struct ethosu_validated_cmdstream_info __free(kfree) *info = kzalloc_obj(*info,
GFP_KERNEL);
struct ethosu_validated_cmdstream_info __free(kfree) *info = kzalloc_obj(*info);
struct ethosu_device *edev = to_ethosu_device(ddev);
u32 *bocmds = bo->base.vaddr;
struct cmd_state st;

View file

@ -1422,8 +1422,7 @@ static int hl_cs_copy_chunk_array(struct hl_device *hdev,
*cs_chunk_array = kmalloc_objs(**cs_chunk_array, num_chunks, GFP_ATOMIC);
if (!*cs_chunk_array)
*cs_chunk_array = kmalloc_objs(**cs_chunk_array, num_chunks,
GFP_KERNEL);
*cs_chunk_array = kmalloc_objs(**cs_chunk_array, num_chunks);
if (!*cs_chunk_array) {
atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);

View file

@ -2052,8 +2052,7 @@ int hl_debugfs_device_init(struct hl_device *hdev)
int count = ARRAY_SIZE(hl_debugfs_list);
dev_entry->hdev = hdev;
dev_entry->entry_arr = kmalloc_objs(struct hl_debugfs_entry, count,
GFP_KERNEL);
dev_entry->entry_arr = kmalloc_objs(struct hl_debugfs_entry, count);
if (!dev_entry->entry_arr)
return -ENOMEM;

View file

@ -837,8 +837,7 @@ static int ext_and_cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
q->kernel_address = p;
q->shadow_queue = kmalloc_objs(struct hl_cs_job *, HL_QUEUE_LENGTH,
GFP_KERNEL);
q->shadow_queue = kmalloc_objs(struct hl_cs_job *, HL_QUEUE_LENGTH);
if (!q->shadow_queue) {
dev_err(hdev->dev,
"Failed to allocate shadow queue for H/W queue %d\n",

View file

@ -843,8 +843,7 @@ int hl_mmu_hr_init(struct hl_device *hdev, struct hl_mmu_hr_priv *hr_priv, u32 h
return -ENOMEM;
}
hr_priv->mmu_asid_hop0 = kvzalloc_objs(struct pgt_info, prop->max_asid,
GFP_KERNEL);
hr_priv->mmu_asid_hop0 = kvzalloc_objs(struct pgt_info, prop->max_asid);
if (ZERO_OR_NULL_PTR(hr_priv->mmu_asid_hop0)) {
dev_err(hdev->dev, "Failed to allocate hr-mmu hop0 table\n");
rc = -ENOMEM;

View file

@ -312,8 +312,7 @@ int hl_init_pb_with_mask(struct hl_device *hdev, u32 num_dcores,
int i, j;
struct hl_block_glbl_sec *glbl_sec;
glbl_sec = kzalloc_objs(struct hl_block_glbl_sec, blocks_array_size,
GFP_KERNEL);
glbl_sec = kzalloc_objs(struct hl_block_glbl_sec, blocks_array_size);
if (!glbl_sec)
return -ENOMEM;
@ -392,8 +391,7 @@ int hl_init_pb_ranges_with_mask(struct hl_device *hdev, u32 num_dcores,
int i, j, rc = 0;
struct hl_block_glbl_sec *glbl_sec;
glbl_sec = kzalloc_objs(struct hl_block_glbl_sec, blocks_array_size,
GFP_KERNEL);
glbl_sec = kzalloc_objs(struct hl_block_glbl_sec, blocks_array_size);
if (!glbl_sec)
return -ENOMEM;
@ -474,8 +472,7 @@ int hl_init_pb_single_dcore(struct hl_device *hdev, u32 dcore_offset,
int i, rc = 0;
struct hl_block_glbl_sec *glbl_sec;
glbl_sec = kzalloc_objs(struct hl_block_glbl_sec, blocks_array_size,
GFP_KERNEL);
glbl_sec = kzalloc_objs(struct hl_block_glbl_sec, blocks_array_size);
if (!glbl_sec)
return -ENOMEM;
@ -521,8 +518,7 @@ int hl_init_pb_ranges_single_dcore(struct hl_device *hdev, u32 dcore_offset,
int i;
struct hl_block_glbl_sec *glbl_sec;
glbl_sec = kzalloc_objs(struct hl_block_glbl_sec, blocks_array_size,
GFP_KERNEL);
glbl_sec = kzalloc_objs(struct hl_block_glbl_sec, blocks_array_size);
if (!glbl_sec)
return -ENOMEM;

View file

@ -2620,8 +2620,7 @@ static int gaudi2_init_pb_tpc(struct hl_device *hdev)
block_array_size = ARRAY_SIZE(gaudi2_pb_dcr0_tpc0);
glbl_sec = kzalloc_objs(struct hl_block_glbl_sec, block_array_size,
GFP_KERNEL);
glbl_sec = kzalloc_objs(struct hl_block_glbl_sec, block_array_size);
if (!glbl_sec)
return -ENOMEM;

View file

@ -457,8 +457,7 @@ static struct ssr_dump_info *alloc_dump_info(struct qaic_device *qdev,
}
/* Buffer used to send MEMORY READ request to device via MHI */
dump_info->read_buf_req = kzalloc_obj(*dump_info->read_buf_req,
GFP_KERNEL);
dump_info->read_buf_req = kzalloc_obj(*dump_info->read_buf_req);
if (!dump_info->read_buf_req) {
ret = -ENOMEM;
goto free_dump_info;

View file

@ -1021,8 +1021,7 @@ static bool setup_einjv2_component_files(void)
{
char name[32];
syndrome_data = kzalloc_objs(syndrome_data[0], max_nr_components,
GFP_KERNEL);
syndrome_data = kzalloc_objs(syndrome_data[0], max_nr_components);
if (!syndrome_data)
return false;

View file

@ -636,8 +636,7 @@ static int pcc_data_alloc(int pcc_ss_id)
if (pcc_data[pcc_ss_id]) {
pcc_data[pcc_ss_id]->refcount++;
} else {
pcc_data[pcc_ss_id] = kzalloc_obj(struct cppc_pcc_data,
GFP_KERNEL);
pcc_data[pcc_ss_id] = kzalloc_obj(struct cppc_pcc_data);
if (!pcc_data[pcc_ss_id])
return -ENOMEM;
pcc_data[pcc_ss_id]->refcount++;

View file

@ -91,8 +91,7 @@ static acpi_status parse_csi2_resource(struct acpi_resource *res, void *context)
return AE_OK;
}
conn = kmalloc_flex(*conn, remote_name, csi2_res_src_length + 1,
GFP_KERNEL);
conn = kmalloc_flex(*conn, remote_name, csi2_res_src_length + 1);
if (!conn)
return AE_OK;

View file

@ -560,8 +560,7 @@ struct device *platform_profile_register(struct device *dev, const char *name,
!ops->profile_set || !ops->probe))
return ERR_PTR(-EINVAL);
struct platform_profile_handler *pprof __free(kfree) = kzalloc_obj(*pprof,
GFP_KERNEL);
struct platform_profile_handler *pprof __free(kfree) = kzalloc_obj(*pprof);
if (!pprof)
return ERR_PTR(-ENOMEM);

View file

@ -341,8 +341,7 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr)
pr->performance->state_count = pss->package.count;
pr->performance->states =
kmalloc_objs(struct acpi_processor_px, pss->package.count,
GFP_KERNEL);
kmalloc_objs(struct acpi_processor_px, pss->package.count);
if (!pr->performance->states) {
result = -ENOMEM;
goto end;

View file

@ -512,8 +512,7 @@ static int acpi_processor_get_throttling_states(struct acpi_processor *pr)
pr->throttling.state_count = tss->package.count;
pr->throttling.states_tss =
kmalloc_objs(struct acpi_processor_tx_tss, tss->package.count,
GFP_KERNEL);
kmalloc_objs(struct acpi_processor_tx_tss, tss->package.count);
if (!pr->throttling.states_tss) {
result = -ENOMEM;
goto end;

View file

@ -384,8 +384,7 @@ static u32 riscv_acpi_add_irq_dep(acpi_handle handle)
riscv_acpi_irq_get_dep(handle, i, &gsi_handle);
i++) {
dep_devices.count = 1;
dep_devices.handles = kzalloc_objs(*dep_devices.handles, 1,
GFP_KERNEL);
dep_devices.handles = kzalloc_objs(*dep_devices.handles, 1);
if (!dep_devices.handles) {
acpi_handle_err(handle, "failed to allocate memory\n");
continue;

View file

@ -757,8 +757,7 @@ int acpi_device_add(struct acpi_device *device)
if (result)
goto err_unlock;
} else {
acpi_device_bus_id = kzalloc_obj(*acpi_device_bus_id,
GFP_KERNEL);
acpi_device_bus_id = kzalloc_obj(*acpi_device_bus_id);
if (!acpi_device_bus_id) {
result = -ENOMEM;
goto err_unlock;

View file

@ -594,8 +594,7 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
* We cannot use devm_ here, since ahci_platform_put_resources() uses
* target_pwrs after devm_ have freed memory
*/
hpriv->target_pwrs = kzalloc_objs(*hpriv->target_pwrs, hpriv->nports,
GFP_KERNEL);
hpriv->target_pwrs = kzalloc_objs(*hpriv->target_pwrs, hpriv->nports);
if (!hpriv->target_pwrs) {
rc = -ENOMEM;
goto err_out;

View file

@ -786,8 +786,7 @@ static int he_init_group(struct he_dev *he_dev, int group)
}
/* rbpl_virt 64-bit pointers */
he_dev->rbpl_virt = kmalloc_objs(*he_dev->rbpl_virt, RBPL_TABLE_SIZE,
GFP_KERNEL);
he_dev->rbpl_virt = kmalloc_objs(*he_dev->rbpl_virt, RBPL_TABLE_SIZE);
if (!he_dev->rbpl_virt) {
hprintk("unable to allocate rbpl virt table\n");
goto out_free_rbpl_table;

View file

@ -1978,8 +1978,7 @@ static int tx_init(struct atm_dev *dev)
buf_desc_ptr++;
tx_pkt_start += iadev->tx_buf_sz;
}
iadev->tx_buf = kmalloc_objs(*iadev->tx_buf, iadev->num_tx_desc,
GFP_KERNEL);
iadev->tx_buf = kmalloc_objs(*iadev->tx_buf, iadev->num_tx_desc);
if (!iadev->tx_buf) {
printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
goto err_free_dle;
@ -1999,8 +1998,7 @@ static int tx_init(struct atm_dev *dev)
sizeof(*cpcs),
DMA_TO_DEVICE);
}
iadev->desc_tbl = kmalloc_objs(*iadev->desc_tbl, iadev->num_tx_desc,
GFP_KERNEL);
iadev->desc_tbl = kmalloc_objs(*iadev->desc_tbl, iadev->num_tx_desc);
if (!iadev->desc_tbl) {
printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
goto err_free_all_tx_bufs;
@ -2128,8 +2126,7 @@ static int tx_init(struct atm_dev *dev)
memset((caddr_t)(iadev->seg_ram+i), 0, iadev->num_vc*4);
vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
iadev->testTable = kmalloc_objs(*iadev->testTable, iadev->num_vc,
GFP_KERNEL);
iadev->testTable = kmalloc_objs(*iadev->testTable, iadev->num_vc);
if (!iadev->testTable) {
printk("Get freepage failed\n");
goto err_free_desc_tbl;
@ -2138,8 +2135,7 @@ static int tx_init(struct atm_dev *dev)
{
memset((caddr_t)vc, 0, sizeof(*vc));
memset((caddr_t)evc, 0, sizeof(*evc));
iadev->testTable[i] = kmalloc_obj(struct testTable_t,
GFP_KERNEL);
iadev->testTable[i] = kmalloc_obj(struct testTable_t);
if (!iadev->testTable[i])
goto err_free_test_tables;
iadev->testTable[i]->lastTime = 0;

View file

@ -21,8 +21,7 @@ bool dev_add_physical_location(struct device *dev)
if (!acpi_get_physical_device_location(ACPI_HANDLE(dev), &pld))
return false;
dev->physical_location = kzalloc_obj(*dev->physical_location,
GFP_KERNEL);
dev->physical_location = kzalloc_obj(*dev->physical_location);
if (!dev->physical_location) {
ACPI_FREE(pld);
return false;

View file

@ -3932,8 +3932,7 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
if (my_usize != p_usize) {
struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
new_disk_conf = kzalloc_obj(struct disk_conf,
GFP_KERNEL);
new_disk_conf = kzalloc_obj(struct disk_conf);
if (!new_disk_conf) {
put_ldev(device);
return -ENOMEM;

View file

@ -401,8 +401,7 @@ static int ps3vram_cache_init(struct ps3_system_bus_device *dev)
priv->cache.page_count = CACHE_PAGE_COUNT;
priv->cache.page_size = CACHE_PAGE_SIZE;
priv->cache.tags = kzalloc_objs(struct ps3vram_tag, CACHE_PAGE_COUNT,
GFP_KERNEL);
priv->cache.tags = kzalloc_objs(struct ps3vram_tag, CACHE_PAGE_COUNT);
if (!priv->cache.tags)
return -ENOMEM;

View file

@ -846,8 +846,7 @@ again:
* We are using persistent grants, the grant is
* not mapped but we might have room for it.
*/
persistent_gnt = kmalloc_obj(struct persistent_gnt,
GFP_KERNEL);
persistent_gnt = kmalloc_obj(struct persistent_gnt);
if (!persistent_gnt) {
/*
* If we don't have enough memory to

View file

@ -131,8 +131,7 @@ static int xen_blkif_alloc_rings(struct xen_blkif *blkif)
{
unsigned int r;
blkif->rings = kzalloc_objs(struct xen_blkif_ring, blkif->nr_rings,
GFP_KERNEL);
blkif->rings = kzalloc_objs(struct xen_blkif_ring, blkif->nr_rings);
if (!blkif->rings)
return -ENOMEM;
@ -1014,14 +1013,12 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
goto fail;
list_add_tail(&req->free_list, &ring->pending_free);
for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
req->segments[j] = kzalloc_obj(*req->segments[0],
GFP_KERNEL);
req->segments[j] = kzalloc_obj(*req->segments[0]);
if (!req->segments[j])
goto fail;
}
for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
req->indirect_pages[j] = kzalloc_obj(*req->indirect_pages[0],
GFP_KERNEL);
req->indirect_pages[j] = kzalloc_obj(*req->indirect_pages[0]);
if (!req->indirect_pages[j])
goto fail;
}

View file

@ -2207,8 +2207,7 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
for (i = 0; i < BLK_RING_SIZE(info); i++) {
rinfo->shadow[i].grants_used =
kvzalloc_objs(rinfo->shadow[i].grants_used[0], grants,
GFP_KERNEL);
kvzalloc_objs(rinfo->shadow[i].grants_used[0], grants);
rinfo->shadow[i].sg = kvzalloc_objs(rinfo->shadow[i].sg[0],
psegs, GFP_KERNEL);
if (info->max_indirect_segments)

View file

@ -187,8 +187,7 @@ static int z2_open(struct gendisk *disk, blk_mode_t mode)
(unsigned long)z_remap_nocache_nonser(paddr, size);
#endif
z2ram_map =
kmalloc_objs(z2ram_map[0], size / Z2RAM_CHUNKSIZE,
GFP_KERNEL);
kmalloc_objs(z2ram_map[0], size / Z2RAM_CHUNKSIZE);
if (z2ram_map == NULL) {
printk(KERN_ERR DEVICE_NAME
": cannot get mem for z2ram_map\n");

View file

@ -1459,8 +1459,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
if (ret)
return ret;
mhi_cntrl->mhi_cmd = kzalloc_objs(*mhi_cntrl->mhi_cmd, NR_OF_CMD_RINGS,
GFP_KERNEL);
mhi_cntrl->mhi_cmd = kzalloc_objs(*mhi_cntrl->mhi_cmd, NR_OF_CMD_RINGS);
if (!mhi_cntrl->mhi_cmd) {
ret = -ENOMEM;
goto err_free_ch;

View file

@ -380,8 +380,7 @@ int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
return -ENOMEM;
/* Allocate memory for entries */
img_info->mhi_buf = kzalloc_objs(*img_info->mhi_buf, segments,
GFP_KERNEL);
img_info->mhi_buf = kzalloc_objs(*img_info->mhi_buf, segments);
if (!img_info->mhi_buf)
goto error_alloc_mhi_buf;

View file

@ -699,8 +699,7 @@ static int parse_ev_cfg(struct mhi_controller *mhi_cntrl,
num = config->num_events;
mhi_cntrl->total_ev_rings = num;
mhi_cntrl->mhi_event = kzalloc_objs(*mhi_cntrl->mhi_event, num,
GFP_KERNEL);
mhi_cntrl->mhi_event = kzalloc_objs(*mhi_cntrl->mhi_event, num);
if (!mhi_cntrl->mhi_event)
return -ENOMEM;
@ -938,8 +937,7 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
if (ret)
return -EINVAL;
mhi_cntrl->mhi_cmd = kzalloc_objs(*mhi_cntrl->mhi_cmd, NR_OF_CMD_RINGS,
GFP_KERNEL);
mhi_cntrl->mhi_cmd = kzalloc_objs(*mhi_cntrl->mhi_cmd, NR_OF_CMD_RINGS);
if (!mhi_cntrl->mhi_cmd) {
ret = -ENOMEM;
goto err_free_event;

View file

@ -96,8 +96,7 @@ static int serverworks_create_gatt_pages(int nr_tables)
int retval = 0;
int i;
tables = kzalloc_objs(struct serverworks_page_map *, nr_tables + 1,
GFP_KERNEL);
tables = kzalloc_objs(struct serverworks_page_map *, nr_tables + 1);
if (tables == NULL)
return -ENOMEM;

View file

@ -404,8 +404,7 @@ static int uninorth_create_gatt_table(struct agp_bridge_data *bridge)
if (table == NULL)
return -ENOMEM;
uninorth_priv.pages_arr = kmalloc_objs(struct page *, 1 << page_order,
GFP_KERNEL);
uninorth_priv.pages_arr = kmalloc_objs(struct page *, 1 << page_order);
if (uninorth_priv.pages_arr == NULL)
goto enomem;

View file

@ -1812,8 +1812,7 @@ static int init_vqs(struct ports_device *portdev)
vqs = kmalloc_objs(struct virtqueue *, nr_queues);
vqs_info = kzalloc_objs(*vqs_info, nr_queues);
portdev->in_vqs = kmalloc_objs(struct virtqueue *, nr_ports);
portdev->out_vqs = kmalloc_objs(struct virtqueue *, nr_ports,
GFP_KERNEL);
portdev->out_vqs = kmalloc_objs(struct virtqueue *, nr_ports);
if (!vqs || !vqs_info || !portdev->in_vqs || !portdev->out_vqs) {
err = -ENOMEM;
goto free;

View file

@ -698,8 +698,7 @@ static void __init aspeed_cc_init(struct device_node *np)
if (!scu_base)
return;
aspeed_clk_data = kzalloc_flex(*aspeed_clk_data, hws, ASPEED_NUM_CLKS,
GFP_KERNEL);
aspeed_clk_data = kzalloc_flex(*aspeed_clk_data, hws, ASPEED_NUM_CLKS);
if (!aspeed_clk_data)
return;
aspeed_clk_data->num = ASPEED_NUM_CLKS;

View file

@ -192,8 +192,7 @@ void __init iproc_asiu_setup(struct device_node *node,
if (WARN_ON(!asiu))
return;
asiu->clk_data = kzalloc_flex(*asiu->clk_data, hws, num_clks,
GFP_KERNEL);
asiu->clk_data = kzalloc_flex(*asiu->clk_data, hws, num_clks);
if (WARN_ON(!asiu->clk_data))
goto err_clks;
asiu->clk_data->num = num_clks;

View file

@ -398,8 +398,7 @@ static void __init gemini_cc_init(struct device_node *np)
int ret;
int i;
gemini_clk_data = kzalloc_flex(*gemini_clk_data, hws, GEMINI_NUM_CLKS,
GFP_KERNEL);
gemini_clk_data = kzalloc_flex(*gemini_clk_data, hws, GEMINI_NUM_CLKS);
if (!gemini_clk_data)
return;
gemini_clk_data->num = GEMINI_NUM_CLKS;

View file

@ -611,8 +611,7 @@ static void __init m10v_cc_init(struct device_node *np)
const char *parent_name;
struct clk_hw *hw;
m10v_clk_data = kzalloc_flex(*m10v_clk_data, hws, M10V_NUM_CLKS,
GFP_KERNEL);
m10v_clk_data = kzalloc_flex(*m10v_clk_data, hws, M10V_NUM_CLKS);
if (!m10v_clk_data)
return;

View file

@ -1855,8 +1855,7 @@ static void __init stm32f4_rcc_init(struct device_node *np)
stm32fx_end_primary_clk = data->end_primary;
clks = kmalloc_objs(*clks, data->gates_num + stm32fx_end_primary_clk,
GFP_KERNEL);
clks = kmalloc_objs(*clks, data->gates_num + stm32fx_end_primary_clk);
if (!clks)
goto fail;

View file

@ -439,8 +439,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
void __iomem *anatop_base, *base;
int ret;
clk_hw_data = kzalloc_flex(*clk_hw_data, hws, IMX6QDL_CLK_END,
GFP_KERNEL);
clk_hw_data = kzalloc_flex(*clk_hw_data, hws, IMX6QDL_CLK_END);
if (WARN_ON(!clk_hw_data))
return;

View file

@ -185,8 +185,7 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node)
void __iomem *base;
int ret;
clk_hw_data = kzalloc_flex(*clk_hw_data, hws, IMX6SL_CLK_END,
GFP_KERNEL);
clk_hw_data = kzalloc_flex(*clk_hw_data, hws, IMX6SL_CLK_END);
if (WARN_ON(!clk_hw_data))
return;

View file

@ -81,8 +81,7 @@ static void __init imx6sll_clocks_init(struct device_node *ccm_node)
struct device_node *np;
void __iomem *base;
clk_hw_data = kzalloc_flex(*clk_hw_data, hws, IMX6SLL_CLK_END,
GFP_KERNEL);
clk_hw_data = kzalloc_flex(*clk_hw_data, hws, IMX6SLL_CLK_END);
if (WARN_ON(!clk_hw_data))
return;

View file

@ -123,8 +123,7 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
void __iomem *base;
bool lcdif1_assigned_clk;
clk_hw_data = kzalloc_flex(*clk_hw_data, hws, IMX6SX_CLK_CLK_END,
GFP_KERNEL);
clk_hw_data = kzalloc_flex(*clk_hw_data, hws, IMX6SX_CLK_CLK_END);
if (WARN_ON(!clk_hw_data))
return;

View file

@ -130,8 +130,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
struct device_node *np;
void __iomem *base;
clk_hw_data = kzalloc_flex(*clk_hw_data, hws, IMX6UL_CLK_END,
GFP_KERNEL);
clk_hw_data = kzalloc_flex(*clk_hw_data, hws, IMX6UL_CLK_END);
if (WARN_ON(!clk_hw_data))
return;

View file

@ -49,8 +49,7 @@ static void __init imx7ulp_clk_scg1_init(struct device_node *np)
struct clk_hw **hws;
void __iomem *base;
clk_data = kzalloc_flex(*clk_data, hws, IMX7ULP_CLK_SCG1_END,
GFP_KERNEL);
clk_data = kzalloc_flex(*clk_data, hws, IMX7ULP_CLK_SCG1_END);
if (!clk_data)
return;
@ -138,8 +137,7 @@ static void __init imx7ulp_clk_pcc2_init(struct device_node *np)
struct clk_hw **hws;
void __iomem *base;
clk_data = kzalloc_flex(*clk_data, hws, IMX7ULP_CLK_PCC2_END,
GFP_KERNEL);
clk_data = kzalloc_flex(*clk_data, hws, IMX7ULP_CLK_PCC2_END);
if (!clk_data)
return;
@ -186,8 +184,7 @@ static void __init imx7ulp_clk_pcc3_init(struct device_node *np)
struct clk_hw **hws;
void __iomem *base;
clk_data = kzalloc_flex(*clk_data, hws, IMX7ULP_CLK_PCC3_END,
GFP_KERNEL);
clk_data = kzalloc_flex(*clk_data, hws, IMX7ULP_CLK_PCC3_END);
if (!clk_data)
return;
@ -233,8 +230,7 @@ static void __init imx7ulp_clk_smc1_init(struct device_node *np)
struct clk_hw **hws;
void __iomem *base;
clk_data = kzalloc_flex(*clk_data, hws, IMX7ULP_CLK_SMC1_END,
GFP_KERNEL);
clk_data = kzalloc_flex(*clk_data, hws, IMX7ULP_CLK_SMC1_END);
if (!clk_data)
return;

View file

@ -303,8 +303,7 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
void __iomem *base;
int ret;
clk_hw_data = kzalloc_flex(*clk_hw_data, hws, IMX8MM_CLK_END,
GFP_KERNEL);
clk_hw_data = kzalloc_flex(*clk_hw_data, hws, IMX8MM_CLK_END);
if (WARN_ON(!clk_hw_data))
return -ENOMEM;

View file

@ -819,8 +819,7 @@ int ingenic_cgu_register_clocks(struct ingenic_cgu *cgu)
unsigned i;
int err;
cgu->clocks.clks = kzalloc_objs(struct clk *, cgu->clocks.clk_num,
GFP_KERNEL);
cgu->clocks.clks = kzalloc_objs(struct clk *, cgu->clocks.clk_num);
if (!cgu->clocks.clks) {
err = -ENOMEM;
goto err_out;

View file

@ -124,8 +124,7 @@ void __init mvebu_coreclk_setup(struct device_node *np,
if (desc->get_refclk_freq)
clk_data.clk_num += 1;
clk_data.clks = kzalloc_objs(*clk_data.clks, clk_data.clk_num,
GFP_KERNEL);
clk_data.clks = kzalloc_objs(*clk_data.clks, clk_data.clk_num);
if (WARN_ON(!clk_data.clks)) {
iounmap(base);
return;

View file

@ -612,8 +612,7 @@ static void __init st_of_flexgen_setup(struct device_node *np)
} else
clk_data->clk_num = data->outputs_nb;
clk_data->clks = kzalloc_objs(struct clk *, clk_data->clk_num,
GFP_KERNEL);
clk_data->clks = kzalloc_objs(struct clk *, clk_data->clk_num);
if (!clk_data->clks)
goto err;

View file

@ -788,8 +788,7 @@ static void __init clkgen_c32_pll_setup(struct device_node *np,
return;
clk_data->clk_num = num_odfs;
clk_data->clks = kzalloc_objs(struct clk *, clk_data->clk_num,
GFP_KERNEL);
clk_data->clks = kzalloc_objs(struct clk *, clk_data->clk_num);
if (!clk_data->clks)
goto err;

View file

@ -227,8 +227,7 @@ struct clk ** __init tegra_clk_init(void __iomem *regs, int num, int banks)
if (WARN_ON(banks > ARRAY_SIZE(periph_regs)))
return NULL;
periph_clk_enb_refcnt = kzalloc_objs(*periph_clk_enb_refcnt, 32 * banks,
GFP_KERNEL);
periph_clk_enb_refcnt = kzalloc_objs(*periph_clk_enb_refcnt, 32 * banks);
if (!periph_clk_enb_refcnt)
return NULL;

View file

@ -222,8 +222,7 @@ struct dw_apb_clock_event_device *
dw_apb_clockevent_init(int cpu, const char *name, unsigned rating,
void __iomem *base, int irq, unsigned long freq)
{
struct dw_apb_clock_event_device *dw_ced = kzalloc_obj(*dw_ced,
GFP_KERNEL);
struct dw_apb_clock_event_device *dw_ced = kzalloc_obj(*dw_ced);
int err;
if (!dw_ced)

View file

@ -1084,8 +1084,7 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
/* Allocate and setup the channels. */
cmt->num_channels = hweight8(cmt->hw_channels);
cmt->channels = kzalloc_objs(*cmt->channels, cmt->num_channels,
GFP_KERNEL);
cmt->channels = kzalloc_objs(*cmt->channels, cmt->num_channels);
if (cmt->channels == NULL) {
ret = -ENOMEM;
goto err_unmap;

View file

@ -420,8 +420,7 @@ static int sh_mtu2_setup(struct sh_mtu2_device *mtu,
mtu->num_channels = min_t(unsigned int, ret,
ARRAY_SIZE(sh_mtu2_channel_offsets));
mtu->channels = kzalloc_objs(*mtu->channels, mtu->num_channels,
GFP_KERNEL);
mtu->channels = kzalloc_objs(*mtu->channels, mtu->num_channels);
if (mtu->channels == NULL) {
ret = -ENOMEM;
goto err_unmap;

View file

@ -546,8 +546,7 @@ static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
}
/* Allocate and setup the channels. */
tmu->channels = kzalloc_objs(*tmu->channels, tmu->num_channels,
GFP_KERNEL);
tmu->channels = kzalloc_objs(*tmu->channels, tmu->num_channels);
if (tmu->channels == NULL) {
ret = -ENOMEM;
goto err_unmap;

View file

@ -198,8 +198,7 @@ static int ni_670x_auto_attach(struct comedi_device *dev,
if (s->n_chan == 32) {
const struct comedi_lrange **range_table_list;
range_table_list = kmalloc_objs(*range_table_list, 32,
GFP_KERNEL);
range_table_list = kmalloc_objs(*range_table_list, 32);
if (!range_table_list)
return -ENOMEM;
s->range_table_list = range_table_list;

View file

@ -429,8 +429,7 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
if (nsp->sk) {
sk = nsp->sk;
if (sk->sk_user_data == NULL) {
sk->sk_user_data = kzalloc_obj(struct proc_input,
GFP_KERNEL);
sk->sk_user_data = kzalloc_obj(struct proc_input);
if (sk->sk_user_data == NULL) {
err = ENOMEM;
goto out;

View file

@ -798,8 +798,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
goto err_unreg;
}
freq_table = kzalloc_objs(*freq_table, perf->state_count + 1,
GFP_KERNEL);
freq_table = kzalloc_objs(*freq_table, perf->state_count + 1);
if (!freq_table) {
result = -ENOMEM;
goto err_unreg;

View file

@ -467,8 +467,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
return -EINVAL;
}
armada37xx_cpufreq_state = kmalloc_obj(*armada37xx_cpufreq_state,
GFP_KERNEL);
armada37xx_cpufreq_state = kmalloc_obj(*armada37xx_cpufreq_state);
if (!armada37xx_cpufreq_state) {
clk_put(clk);
return -ENOMEM;

View file

@ -475,8 +475,7 @@ static int longhaul_get_ranges(void)
return -EINVAL;
}
longhaul_table = kzalloc_objs(*longhaul_table, numscales + 1,
GFP_KERNEL);
longhaul_table = kzalloc_objs(*longhaul_table, numscales + 1);
if (!longhaul_table)
return -ENOMEM;

View file

@ -323,8 +323,7 @@ static int __init us2e_freq_init(void)
impl = ((ver >> 32) & 0xffff);
if (manuf == 0x17 && impl == 0x13) {
us2e_freq_table = kzalloc_objs(*us2e_freq_table, NR_CPUS,
GFP_KERNEL);
us2e_freq_table = kzalloc_objs(*us2e_freq_table, NR_CPUS);
if (!us2e_freq_table)
return -ENOMEM;

View file

@ -171,8 +171,7 @@ static int __init us3_freq_init(void)
impl == CHEETAH_PLUS_IMPL ||
impl == JAGUAR_IMPL ||
impl == PANTHER_IMPL)) {
us3_freq_table = kzalloc_objs(*us3_freq_table, NR_CPUS,
GFP_KERNEL);
us3_freq_table = kzalloc_objs(*us3_freq_table, NR_CPUS);
if (!us3_freq_table)
return -ENOMEM;

View file

@ -173,8 +173,7 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
if (!dev->pdr)
return -ENOMEM;
dev->pdr_uinfo = kzalloc_objs(struct pd_uinfo, PPC4XX_NUM_PD,
GFP_KERNEL);
dev->pdr_uinfo = kzalloc_objs(struct pd_uinfo, PPC4XX_NUM_PD);
if (!dev->pdr_uinfo) {
dma_free_coherent(dev->core_dev->device,
sizeof(struct ce_pd) * PPC4XX_NUM_PD,

View file

@ -181,8 +181,7 @@ int nitrox_mbox_init(struct nitrox_device *ndev)
struct nitrox_vfdev *vfdev;
int i;
ndev->iov.vfdev = kzalloc_objs(struct nitrox_vfdev, ndev->iov.num_vfs,
GFP_KERNEL);
ndev->iov.vfdev = kzalloc_objs(struct nitrox_vfdev, ndev->iov.num_vfs);
if (!ndev->iov.vfdev)
return -ENOMEM;

View file

@ -5771,8 +5771,7 @@ static int hisi_qp_alloc_memory(struct hisi_qm *qm)
if (!qm->qp_array)
return -ENOMEM;
qm->poll_data = kzalloc_objs(struct hisi_qm_poll_data, qm->qp_num,
GFP_KERNEL);
qm->poll_data = kzalloc_objs(struct hisi_qm_poll_data, qm->qp_num);
if (!qm->poll_data) {
kfree(qm->qp_array);
return -ENOMEM;
@ -5837,8 +5836,7 @@ static int hisi_qm_memory_init(struct hisi_qm *qm)
if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) {
total_func = pci_sriov_get_totalvfs(qm->pdev) + 1;
qm->factor = kzalloc_objs(struct qm_shaper_factor, total_func,
GFP_KERNEL);
qm->factor = kzalloc_objs(struct qm_shaper_factor, total_func);
if (!qm->factor)
return -ENOMEM;

View file

@ -672,8 +672,7 @@ static int sec_ctx_base_init(struct sec_ctx *ctx)
ctx->hlf_q_num = sec->ctx_q_num >> 1;
ctx->pbuf_supported = ctx->sec->iommu_used;
ctx->qp_ctx = kzalloc_objs(struct sec_qp_ctx, sec->ctx_q_num,
GFP_KERNEL);
ctx->qp_ctx = kzalloc_objs(struct sec_qp_ctx, sec->ctx_q_num);
if (!ctx->qp_ctx) {
ret = -ENOMEM;
goto err_destroy_qps;

View file

@ -479,8 +479,7 @@ static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx)
}
spin_lock_init(&req_q->req_lock);
req_q->q = kzalloc_objs(struct hisi_zip_req, req_q->size,
GFP_KERNEL);
req_q->q = kzalloc_objs(struct hisi_zip_req, req_q->size);
if (!req_q->q) {
ret = -ENOMEM;
if (i == 0)

View file

@ -83,8 +83,7 @@ static struct adf_fw_counters *adf_fw_counters_allocate(unsigned long ae_count)
if (unlikely(!ae_count))
return ERR_PTR(-EINVAL);
fw_counters = kmalloc_flex(*fw_counters, ae_counters, ae_count,
GFP_KERNEL);
fw_counters = kmalloc_flex(*fw_counters, ae_counters, ae_count);
if (!fw_counters)
return ERR_PTR(-ENOMEM);

View file

@ -173,8 +173,7 @@ static int adf_do_enable_sriov(struct adf_accel_dev *accel_dev)
goto err_del_cfg;
/* Allocate memory for VF info structs */
accel_dev->pf.vf_info = kzalloc_objs(struct adf_accel_vf_info, totalvfs,
GFP_KERNEL);
accel_dev->pf.vf_info = kzalloc_objs(struct adf_accel_vf_info, totalvfs);
ret = -ENOMEM;
if (!accel_dev->pf.vf_info)
goto err_del_cfg;

View file

@ -340,8 +340,7 @@ static int cptpf_flr_wq_init(struct otx2_cptpf_dev *cptpf, int num_vfs)
if (!cptpf->flr_wq)
return -ENOMEM;
cptpf->flr_work = kzalloc_objs(struct cptpf_flr_work, num_vfs,
GFP_KERNEL);
cptpf->flr_work = kzalloc_objs(struct cptpf_flr_work, num_vfs);
if (!cptpf->flr_work)
goto destroy_wq;

Some files were not shown because too many files have changed in this diff Show more