Convert remaining multi-line kmalloc_obj/flex GFP_KERNEL uses

Conversion performed via this Coccinelle script:

  // SPDX-License-Identifier: GPL-2.0-only
  // Options: --include-headers-for-types --all-includes --include-headers --keep-comments
  virtual patch

  @gfp depends on patch && !(file in "tools") && !(file in "samples")@
  identifier ALLOC = {kmalloc_obj,kmalloc_objs,kmalloc_flex,
                      kzalloc_obj,kzalloc_objs,kzalloc_flex,
                      kvmalloc_obj,kvmalloc_objs,kvmalloc_flex,
                      kvzalloc_obj,kvzalloc_objs,kvzalloc_flex};
  @@

  	ALLOC(...
  -		, GFP_KERNEL
  	)

  $ make coccicheck MODE=patch COCCI=gfp.cocci

Build and boot tested x86_64 with Fedora 42's GCC and Clang:

Linux version 6.19.0+ (user@host) (gcc (GCC) 15.2.1 20260123 (Red Hat 15.2.1-7), GNU ld version 2.44-12.fc42) #1 SMP PREEMPT_DYNAMIC 1970-01-01
Linux version 6.19.0+ (user@host) (clang version 20.1.8 (Fedora 20.1.8-4.fc42), LLD 20.1.8) #1 SMP PREEMPT_DYNAMIC 1970-01-01

Signed-off-by: Kees Cook <kees@kernel.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Kees Cook 2026-02-21 23:46:04 -08:00 committed by Linus Torvalds
parent 32a92f8c89
commit 189f164e57
415 changed files with 612 additions and 833 deletions

View file

@ -764,7 +764,7 @@ static int coverage_start_fn(const struct decode_header *h, void *args)
static int coverage_start(const union decode_item *table) static int coverage_start(const union decode_item *table)
{ {
coverage.base = kmalloc_objs(struct coverage_entry, coverage.base = kmalloc_objs(struct coverage_entry,
MAX_COVERAGE_ENTRIES, GFP_KERNEL); MAX_COVERAGE_ENTRIES);
coverage.num_entries = 0; coverage.num_entries = 0;
coverage.nesting = 0; coverage.nesting = 0;
return table_iter(table, coverage_start_fn, &coverage); return table_iter(table, coverage_start_fn, &coverage);

View file

@ -342,7 +342,7 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
/* Allocate cluster boot configuration structs */ /* Allocate cluster boot configuration structs */
nclusters = mips_cps_numclusters(); nclusters = mips_cps_numclusters();
mips_cps_cluster_bootcfg = kzalloc_objs(*mips_cps_cluster_bootcfg, mips_cps_cluster_bootcfg = kzalloc_objs(*mips_cps_cluster_bootcfg,
nclusters, GFP_KERNEL); nclusters);
if (!mips_cps_cluster_bootcfg) if (!mips_cps_cluster_bootcfg)
goto err_out; goto err_out;
@ -368,8 +368,7 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
int v; int v;
core_vpes = core_vpe_count(cl, c); core_vpes = core_vpe_count(cl, c);
core_bootcfg[c].vpe_config = kzalloc_objs(*core_bootcfg[c].vpe_config, core_bootcfg[c].vpe_config = kzalloc_objs(*core_bootcfg[c].vpe_config,
core_vpes, core_vpes);
GFP_KERNEL);
for (v = 0; v < core_vpes; v++) for (v = 0; v < core_vpes; v++)
cpumask_set_cpu(nvpe++, &mips_cps_cluster_bootcfg[cl].cpumask); cpumask_set_cpu(nvpe++, &mips_cps_cluster_bootcfg[cl].cpumask);
if (!core_bootcfg[c].vpe_config) if (!core_bootcfg[c].vpe_config)

View file

@ -913,8 +913,7 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
vcpu_e500->gtlb_params[1].sets = 1; vcpu_e500->gtlb_params[1].sets = 1;
vcpu_e500->gtlb_arch = kmalloc_objs(*vcpu_e500->gtlb_arch, vcpu_e500->gtlb_arch = kmalloc_objs(*vcpu_e500->gtlb_arch,
KVM_E500_TLB0_SIZE + KVM_E500_TLB1_SIZE, KVM_E500_TLB0_SIZE + KVM_E500_TLB1_SIZE);
GFP_KERNEL);
if (!vcpu_e500->gtlb_arch) if (!vcpu_e500->gtlb_arch)
return -ENOMEM; return -ENOMEM;
@ -922,14 +921,12 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
vcpu_e500->gtlb_offset[1] = KVM_E500_TLB0_SIZE; vcpu_e500->gtlb_offset[1] = KVM_E500_TLB0_SIZE;
vcpu_e500->gtlb_priv[0] = kzalloc_objs(struct tlbe_ref, vcpu_e500->gtlb_priv[0] = kzalloc_objs(struct tlbe_ref,
vcpu_e500->gtlb_params[0].entries, vcpu_e500->gtlb_params[0].entries);
GFP_KERNEL);
if (!vcpu_e500->gtlb_priv[0]) if (!vcpu_e500->gtlb_priv[0])
goto free_vcpu; goto free_vcpu;
vcpu_e500->gtlb_priv[1] = kzalloc_objs(struct tlbe_ref, vcpu_e500->gtlb_priv[1] = kzalloc_objs(struct tlbe_ref,
vcpu_e500->gtlb_params[1].entries, vcpu_e500->gtlb_params[1].entries);
GFP_KERNEL);
if (!vcpu_e500->gtlb_priv[1]) if (!vcpu_e500->gtlb_priv[1])
goto free_vcpu; goto free_vcpu;

View file

@ -1699,8 +1699,7 @@ static int imc_mem_init(struct imc_pmu *pmu_ptr, struct device_node *parent,
/* Needed for hotplug/migration */ /* Needed for hotplug/migration */
if (!per_nest_pmu_arr) { if (!per_nest_pmu_arr) {
per_nest_pmu_arr = kzalloc_objs(struct imc_pmu *, per_nest_pmu_arr = kzalloc_objs(struct imc_pmu *,
get_max_nest_dev() + 1, get_max_nest_dev() + 1);
GFP_KERNEL);
if (!per_nest_pmu_arr) if (!per_nest_pmu_arr)
goto err; goto err;
} }

View file

@ -2282,7 +2282,7 @@ static int spufs_switch_log_open(struct inode *inode, struct file *file)
} }
ctx->switch_log = kmalloc_flex(*ctx->switch_log, log, ctx->switch_log = kmalloc_flex(*ctx->switch_log, log,
SWITCH_LOG_BUFSIZE, GFP_KERNEL); SWITCH_LOG_BUFSIZE);
if (!ctx->switch_log) { if (!ctx->switch_log) {
rc = -ENOMEM; rc = -ENOMEM;

View file

@ -266,7 +266,7 @@ int __init opal_async_comp_init(void)
opal_max_async_tokens = be32_to_cpup(async); opal_max_async_tokens = be32_to_cpup(async);
opal_async_tokens = kzalloc_objs(*opal_async_tokens, opal_async_tokens = kzalloc_objs(*opal_async_tokens,
opal_max_async_tokens, GFP_KERNEL); opal_max_async_tokens);
if (!opal_async_tokens) { if (!opal_async_tokens) {
err = -ENOMEM; err = -ENOMEM;
goto out_opal_node; goto out_opal_node;

View file

@ -314,8 +314,7 @@ retry:
/* Allocate the groups before registering */ /* Allocate the groups before registering */
for (idx = 0; idx < num_attrs; idx++) { for (idx = 0; idx < num_attrs; idx++) {
papr_groups[idx].pg.attrs = kzalloc_objs(*papr_groups[idx].pg.attrs, papr_groups[idx].pg.attrs = kzalloc_objs(*papr_groups[idx].pg.attrs,
KOBJ_MAX_ATTRS + 1, KOBJ_MAX_ATTRS + 1);
GFP_KERNEL);
if (!papr_groups[idx].pg.attrs) if (!papr_groups[idx].pg.attrs)
goto out_pgattrs; goto out_pgattrs;

View file

@ -754,7 +754,7 @@ initialize_relocation_hashtable(unsigned int num_relocations,
/* Number of relocations may be large, so kvmalloc it */ /* Number of relocations may be large, so kvmalloc it */
*relocation_hashtable = kvmalloc_objs(**relocation_hashtable, *relocation_hashtable = kvmalloc_objs(**relocation_hashtable,
hashtable_size, GFP_KERNEL); hashtable_size);
if (!*relocation_hashtable) if (!*relocation_hashtable)
return 0; return 0;

View file

@ -353,7 +353,7 @@ static int kvm_sbi_ext_fwft_init(struct kvm_vcpu *vcpu)
int i; int i;
fwft->configs = kzalloc_objs(struct kvm_sbi_fwft_config, fwft->configs = kzalloc_objs(struct kvm_sbi_fwft_config,
ARRAY_SIZE(features), GFP_KERNEL); ARRAY_SIZE(features));
if (!fwft->configs) if (!fwft->configs)
return -ENOMEM; return -ENOMEM;

View file

@ -282,8 +282,7 @@ static int pai_alloc_cpu(struct perf_event *event, int cpu)
need_paiext_cb = true; need_paiext_cb = true;
} }
cpump->save = kvmalloc_objs(struct pai_userdata, cpump->save = kvmalloc_objs(struct pai_userdata,
pai_pmu[idx].num_avail + 1, pai_pmu[idx].num_avail + 1);
GFP_KERNEL);
if (!cpump->area || !cpump->save || if (!cpump->area || !cpump->save ||
(need_paiext_cb && !cpump->paiext_cb)) { (need_paiext_cb && !cpump->paiext_cb)) {
pai_free(mp); pai_free(mp);

View file

@ -1070,7 +1070,7 @@ static int __init ubd_init(void)
return -1; return -1;
irq_req_buffer = kmalloc_objs(struct io_thread_req *, irq_req_buffer = kmalloc_objs(struct io_thread_req *,
UBD_REQ_BUFFER_SIZE, GFP_KERNEL); UBD_REQ_BUFFER_SIZE);
irq_remainder = 0; irq_remainder = 0;
if (irq_req_buffer == NULL) { if (irq_req_buffer == NULL) {
@ -1078,7 +1078,7 @@ static int __init ubd_init(void)
return -ENOMEM; return -ENOMEM;
} }
io_req_buffer = kmalloc_objs(struct io_thread_req *, io_req_buffer = kmalloc_objs(struct io_thread_req *,
UBD_REQ_BUFFER_SIZE, GFP_KERNEL); UBD_REQ_BUFFER_SIZE);
io_remainder = 0; io_remainder = 0;

View file

@ -515,7 +515,7 @@ static void uml_vfio_open_device(struct uml_vfio_device *dev)
} }
dev->intr_ctx = kmalloc_objs(struct uml_vfio_intr_ctx, dev->intr_ctx = kmalloc_objs(struct uml_vfio_intr_ctx,
dev->udev.irq_count, GFP_KERNEL); dev->udev.irq_count);
if (!dev->intr_ctx) { if (!dev->intr_ctx) {
pr_err("Failed to allocate interrupt context (%s)\n", pr_err("Failed to allocate interrupt context (%s)\n",
dev->name); dev->name);

View file

@ -7379,7 +7379,7 @@ static __always_inline int intel_pmu_init_hybrid(enum hybrid_pmu_type pmus)
x86_pmu.num_hybrid_pmus = hweight_long(pmus_mask); x86_pmu.num_hybrid_pmus = hweight_long(pmus_mask);
x86_pmu.hybrid_pmu = kzalloc_objs(struct x86_hybrid_pmu, x86_pmu.hybrid_pmu = kzalloc_objs(struct x86_hybrid_pmu,
x86_pmu.num_hybrid_pmus, GFP_KERNEL); x86_pmu.num_hybrid_pmus);
if (!x86_pmu.hybrid_pmu) if (!x86_pmu.hybrid_pmu)
return -ENOMEM; return -ENOMEM;

View file

@ -745,8 +745,7 @@ intel_uncore_generic_init_uncores(enum uncore_access_type type_id, int num_extra
int i = 0; int i = 0;
uncores = kzalloc_objs(struct intel_uncore_type *, uncores = kzalloc_objs(struct intel_uncore_type *,
num_discovered_types[type_id] + num_extra + 1, num_discovered_types[type_id] + num_extra + 1);
GFP_KERNEL);
if (!uncores) if (!uncores)
return empty_uncore; return empty_uncore;

View file

@ -2407,7 +2407,7 @@ static int __tdx_td_init(struct kvm *kvm, struct td_params *td_params,
/* TDVPS = TDVPR(4K page) + TDCX(multiple 4K pages), -1 for TDVPR. */ /* TDVPS = TDVPR(4K page) + TDCX(multiple 4K pages), -1 for TDVPR. */
kvm_tdx->td.tdcx_nr_pages = tdx_sysinfo->td_ctrl.tdvps_base_size / PAGE_SIZE - 1; kvm_tdx->td.tdcx_nr_pages = tdx_sysinfo->td_ctrl.tdvps_base_size / PAGE_SIZE - 1;
tdcs_pages = kzalloc_objs(*kvm_tdx->td.tdcs_pages, tdcs_pages = kzalloc_objs(*kvm_tdx->td.tdcs_pages,
kvm_tdx->td.tdcs_nr_pages, GFP_KERNEL); kvm_tdx->td.tdcs_nr_pages);
if (!tdcs_pages) if (!tdcs_pages)
goto free_tdr; goto free_tdr;

View file

@ -574,7 +574,7 @@ static int blk_crypto_fallback_init(void)
goto fail_destroy_profile; goto fail_destroy_profile;
blk_crypto_keyslots = kzalloc_objs(blk_crypto_keyslots[0], blk_crypto_keyslots = kzalloc_objs(blk_crypto_keyslots[0],
blk_crypto_num_keyslots, GFP_KERNEL); blk_crypto_num_keyslots);
if (!blk_crypto_keyslots) if (!blk_crypto_keyslots)
goto fail_free_wq; goto fail_free_wq;

View file

@ -1805,7 +1805,7 @@ static int disk_alloc_zone_resources(struct gendisk *disk,
disk->zone_wplugs_hash = disk->zone_wplugs_hash =
kzalloc_objs(struct hlist_head, kzalloc_objs(struct hlist_head,
disk_zone_wplugs_hash_size(disk), GFP_KERNEL); disk_zone_wplugs_hash_size(disk));
if (!disk->zone_wplugs_hash) if (!disk->zone_wplugs_hash)
return -ENOMEM; return -ENOMEM;

View file

@ -210,8 +210,7 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
atomic_set(&ctx->thread_ctx_switch_token, 1); atomic_set(&ctx->thread_ctx_switch_token, 1);
ctx->thread_ctx_switch_wait_token = 0; ctx->thread_ctx_switch_wait_token = 0;
ctx->cs_pending = kzalloc_objs(struct hl_fence *, ctx->cs_pending = kzalloc_objs(struct hl_fence *,
hdev->asic_prop.max_pending_cs, hdev->asic_prop.max_pending_cs);
GFP_KERNEL);
if (!ctx->cs_pending) if (!ctx->cs_pending)
return -ENOMEM; return -ENOMEM;

View file

@ -893,8 +893,7 @@ static int device_early_init(struct hl_device *hdev)
if (hdev->asic_prop.completion_queues_count) { if (hdev->asic_prop.completion_queues_count) {
hdev->cq_wq = kzalloc_objs(struct workqueue_struct *, hdev->cq_wq = kzalloc_objs(struct workqueue_struct *,
hdev->asic_prop.completion_queues_count, hdev->asic_prop.completion_queues_count);
GFP_KERNEL);
if (!hdev->cq_wq) { if (!hdev->cq_wq) {
rc = -ENOMEM; rc = -ENOMEM;
goto asid_fini; goto asid_fini;
@ -2159,8 +2158,7 @@ int hl_device_init(struct hl_device *hdev)
if (user_interrupt_cnt) { if (user_interrupt_cnt) {
hdev->user_interrupt = kzalloc_objs(*hdev->user_interrupt, hdev->user_interrupt = kzalloc_objs(*hdev->user_interrupt,
user_interrupt_cnt, user_interrupt_cnt);
GFP_KERNEL);
if (!hdev->user_interrupt) { if (!hdev->user_interrupt) {
rc = -ENOMEM; rc = -ENOMEM;
goto early_fini; goto early_fini;
@ -2227,7 +2225,7 @@ int hl_device_init(struct hl_device *hdev)
*/ */
if (cq_cnt) { if (cq_cnt) {
hdev->completion_queue = kzalloc_objs(*hdev->completion_queue, hdev->completion_queue = kzalloc_objs(*hdev->completion_queue,
cq_cnt, GFP_KERNEL); cq_cnt);
if (!hdev->completion_queue) { if (!hdev->completion_queue) {
dev_err(hdev->dev, dev_err(hdev->dev,
@ -2249,8 +2247,7 @@ int hl_device_init(struct hl_device *hdev)
} }
hdev->shadow_cs_queue = kzalloc_objs(struct hl_cs *, hdev->shadow_cs_queue = kzalloc_objs(struct hl_cs *,
hdev->asic_prop.max_pending_cs, hdev->asic_prop.max_pending_cs);
GFP_KERNEL);
if (!hdev->shadow_cs_queue) { if (!hdev->shadow_cs_queue) {
rc = -ENOMEM; rc = -ENOMEM;
goto cq_fini; goto cq_fini;

View file

@ -1083,7 +1083,7 @@ int hl_hw_queues_create(struct hl_device *hdev)
int i, rc, q_ready_cnt; int i, rc, q_ready_cnt;
hdev->kernel_queues = kzalloc_objs(*hdev->kernel_queues, hdev->kernel_queues = kzalloc_objs(*hdev->kernel_queues,
asic->max_queues, GFP_KERNEL); asic->max_queues);
if (!hdev->kernel_queues) { if (!hdev->kernel_queues) {
dev_err(hdev->dev, "Not enough memory for H/W queues\n"); dev_err(hdev->dev, "Not enough memory for H/W queues\n");

View file

@ -196,7 +196,7 @@ int hl_build_hwmon_channel_info(struct hl_device *hdev, struct cpucp_sensor *sen
} }
channels_info = kzalloc_objs(struct hwmon_channel_info *, channels_info = kzalloc_objs(struct hwmon_channel_info *,
num_active_sensor_types + 1, GFP_KERNEL); num_active_sensor_types + 1);
if (!channels_info) { if (!channels_info) {
rc = -ENOMEM; rc = -ENOMEM;
goto channels_info_array_err; goto channels_info_array_err;

View file

@ -540,7 +540,7 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev)
prop->max_queues = GAUDI_QUEUE_ID_SIZE; prop->max_queues = GAUDI_QUEUE_ID_SIZE;
prop->hw_queues_props = kzalloc_objs(struct hw_queue_properties, prop->hw_queues_props = kzalloc_objs(struct hw_queue_properties,
prop->max_queues, GFP_KERNEL); prop->max_queues);
if (!prop->hw_queues_props) if (!prop->hw_queues_props)
return -ENOMEM; return -ENOMEM;

View file

@ -2763,7 +2763,7 @@ static int gaudi2_set_fixed_properties(struct hl_device *hdev)
prop->max_queues = GAUDI2_QUEUE_ID_SIZE; prop->max_queues = GAUDI2_QUEUE_ID_SIZE;
prop->hw_queues_props = kzalloc_objs(struct hw_queue_properties, prop->hw_queues_props = kzalloc_objs(struct hw_queue_properties,
prop->max_queues, GFP_KERNEL); prop->max_queues);
if (!prop->hw_queues_props) if (!prop->hw_queues_props)
return -ENOMEM; return -ENOMEM;
@ -3944,8 +3944,7 @@ static int gaudi2_special_blocks_config(struct hl_device *hdev)
prop->glbl_err_max_cause_num = GAUDI2_GLBL_ERR_MAX_CAUSE_NUM; prop->glbl_err_max_cause_num = GAUDI2_GLBL_ERR_MAX_CAUSE_NUM;
prop->num_of_special_blocks = ARRAY_SIZE(gaudi2_special_blocks); prop->num_of_special_blocks = ARRAY_SIZE(gaudi2_special_blocks);
prop->special_blocks = kmalloc_objs(*prop->special_blocks, prop->special_blocks = kmalloc_objs(*prop->special_blocks,
prop->num_of_special_blocks, prop->num_of_special_blocks);
GFP_KERNEL);
if (!prop->special_blocks) if (!prop->special_blocks)
return -ENOMEM; return -ENOMEM;
@ -3960,8 +3959,7 @@ static int gaudi2_special_blocks_config(struct hl_device *hdev)
if (ARRAY_SIZE(gaudi2_iterator_skip_block_types)) { if (ARRAY_SIZE(gaudi2_iterator_skip_block_types)) {
prop->skip_special_blocks_cfg.block_types = prop->skip_special_blocks_cfg.block_types =
kmalloc_objs(gaudi2_iterator_skip_block_types[0], kmalloc_objs(gaudi2_iterator_skip_block_types[0],
ARRAY_SIZE(gaudi2_iterator_skip_block_types), ARRAY_SIZE(gaudi2_iterator_skip_block_types));
GFP_KERNEL);
if (!prop->skip_special_blocks_cfg.block_types) { if (!prop->skip_special_blocks_cfg.block_types) {
rc = -ENOMEM; rc = -ENOMEM;
goto free_special_blocks; goto free_special_blocks;
@ -3977,8 +3975,7 @@ static int gaudi2_special_blocks_config(struct hl_device *hdev)
if (ARRAY_SIZE(gaudi2_iterator_skip_block_ranges)) { if (ARRAY_SIZE(gaudi2_iterator_skip_block_ranges)) {
prop->skip_special_blocks_cfg.block_ranges = prop->skip_special_blocks_cfg.block_ranges =
kmalloc_objs(gaudi2_iterator_skip_block_ranges[0], kmalloc_objs(gaudi2_iterator_skip_block_ranges[0],
ARRAY_SIZE(gaudi2_iterator_skip_block_ranges), ARRAY_SIZE(gaudi2_iterator_skip_block_ranges));
GFP_KERNEL);
if (!prop->skip_special_blocks_cfg.block_ranges) { if (!prop->skip_special_blocks_cfg.block_ranges) {
rc = -ENOMEM; rc = -ENOMEM;
goto free_skip_special_blocks_types; goto free_skip_special_blocks_types;

View file

@ -364,7 +364,7 @@ int goya_set_fixed_properties(struct hl_device *hdev)
prop->max_queues = GOYA_QUEUE_ID_SIZE; prop->max_queues = GOYA_QUEUE_ID_SIZE;
prop->hw_queues_props = kzalloc_objs(struct hw_queue_properties, prop->hw_queues_props = kzalloc_objs(struct hw_queue_properties,
prop->max_queues, GFP_KERNEL); prop->max_queues);
if (!prop->hw_queues_props) if (!prop->hw_queues_props)
return -ENOMEM; return -ENOMEM;

View file

@ -497,8 +497,7 @@ int rocket_job_open(struct rocket_file_priv *rocket_priv)
{ {
struct rocket_device *rdev = rocket_priv->rdev; struct rocket_device *rdev = rocket_priv->rdev;
struct drm_gpu_scheduler **scheds = kmalloc_objs(*scheds, struct drm_gpu_scheduler **scheds = kmalloc_objs(*scheds,
rdev->num_cores, rdev->num_cores);
GFP_KERNEL);
unsigned int core; unsigned int core;
int ret; int ret;

View file

@ -837,8 +837,7 @@ int acpi_video_get_levels(struct acpi_device *device,
* special levels (see below) * special levels (see below)
*/ */
br->levels = kmalloc_objs(*br->levels, br->levels = kmalloc_objs(*br->levels,
obj->package.count + ACPI_VIDEO_FIRST_LEVEL, obj->package.count + ACPI_VIDEO_FIRST_LEVEL);
GFP_KERNEL);
if (!br->levels) { if (!br->levels) {
result = -ENOMEM; result = -ENOMEM;
goto out_free; goto out_free;
@ -1331,7 +1330,7 @@ static int acpi_video_device_enumerate(struct acpi_video_bus *video)
dod->package.count); dod->package.count);
active_list = kzalloc_objs(struct acpi_video_enumerated_device, active_list = kzalloc_objs(struct acpi_video_enumerated_device,
1 + dod->package.count, GFP_KERNEL); 1 + dod->package.count);
if (!active_list) { if (!active_list) {
status = -ENOMEM; status = -ENOMEM;
goto out; goto out;

View file

@ -343,7 +343,7 @@ static u32 riscv_acpi_add_prt_dep(acpi_handle handle)
acpi_get_handle(handle, entry->source, &link_handle); acpi_get_handle(handle, entry->source, &link_handle);
dep_devices.count = 1; dep_devices.count = 1;
dep_devices.handles = kzalloc_objs(*dep_devices.handles, dep_devices.handles = kzalloc_objs(*dep_devices.handles,
1, GFP_KERNEL); 1);
if (!dep_devices.handles) { if (!dep_devices.handles) {
acpi_handle_err(handle, "failed to allocate memory\n"); acpi_handle_err(handle, "failed to allocate memory\n");
continue; continue;
@ -355,7 +355,7 @@ static u32 riscv_acpi_add_prt_dep(acpi_handle handle)
gsi_handle = riscv_acpi_get_gsi_handle(entry->source_index); gsi_handle = riscv_acpi_get_gsi_handle(entry->source_index);
dep_devices.count = 1; dep_devices.count = 1;
dep_devices.handles = kzalloc_objs(*dep_devices.handles, dep_devices.handles = kzalloc_objs(*dep_devices.handles,
1, GFP_KERNEL); 1);
if (!dep_devices.handles) { if (!dep_devices.handles) {
acpi_handle_err(handle, "failed to allocate memory\n"); acpi_handle_err(handle, "failed to allocate memory\n");
continue; continue;

View file

@ -130,8 +130,7 @@ static void lpi_device_get_constraints_amd(void)
} }
lpi_constraints_table = kzalloc_objs(*lpi_constraints_table, lpi_constraints_table = kzalloc_objs(*lpi_constraints_table,
package->package.count, package->package.count);
GFP_KERNEL);
if (!lpi_constraints_table) if (!lpi_constraints_table)
goto free_acpi_buffer; goto free_acpi_buffer;
@ -210,7 +209,7 @@ static void lpi_device_get_constraints(void)
return; return;
lpi_constraints_table = kzalloc_objs(*lpi_constraints_table, lpi_constraints_table = kzalloc_objs(*lpi_constraints_table,
out_obj->package.count, GFP_KERNEL); out_obj->package.count);
if (!lpi_constraints_table) if (!lpi_constraints_table)
goto free_acpi_buffer; goto free_acpi_buffer;

View file

@ -5903,7 +5903,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
} }
target_procs = kzalloc_objs(struct binder_proc *, target_procs = kzalloc_objs(struct binder_proc *,
target_procs_count, GFP_KERNEL); target_procs_count);
if (!target_procs) { if (!target_procs) {
mutex_unlock(&binder_procs_lock); mutex_unlock(&binder_procs_lock);

View file

@ -917,7 +917,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
alloc->vm_start = vma->vm_start; alloc->vm_start = vma->vm_start;
alloc->pages = kvzalloc_objs(alloc->pages[0], alloc->pages = kvzalloc_objs(alloc->pages[0],
alloc->buffer_size / PAGE_SIZE, GFP_KERNEL); alloc->buffer_size / PAGE_SIZE);
if (!alloc->pages) { if (!alloc->pages) {
ret = -ENOMEM; ret = -ENOMEM;
failure_string = "alloc page array"; failure_string = "alloc page array";

View file

@ -1846,8 +1846,7 @@ static int eni_start(struct atm_dev *dev)
buffer_mem = eni_dev->mem - (buf - eni_dev->ram); buffer_mem = eni_dev->mem - (buf - eni_dev->ram);
eni_dev->free_list_size = buffer_mem/MID_MIN_BUF_SIZE/2; eni_dev->free_list_size = buffer_mem/MID_MIN_BUF_SIZE/2;
eni_dev->free_list = kmalloc_objs(*eni_dev->free_list, eni_dev->free_list = kmalloc_objs(*eni_dev->free_list,
eni_dev->free_list_size + 1, eni_dev->free_list_size + 1);
GFP_KERNEL);
if (!eni_dev->free_list) { if (!eni_dev->free_list) {
printk(KERN_ERR DEV_LABEL "(itf %d): couldn't get free page\n", printk(KERN_ERR DEV_LABEL "(itf %d): couldn't get free page\n",
dev->number); dev->number);

View file

@ -884,7 +884,7 @@ static int cpu_cache_sysfs_init(unsigned int cpu)
/* Allocate all required memory */ /* Allocate all required memory */
per_cpu_index_dev(cpu) = kzalloc_objs(struct device *, per_cpu_index_dev(cpu) = kzalloc_objs(struct device *,
cache_leaves(cpu), GFP_KERNEL); cache_leaves(cpu));
if (unlikely(per_cpu_index_dev(cpu) == NULL)) if (unlikely(per_cpu_index_dev(cpu) == NULL))
goto err_out; goto err_out;

View file

@ -1418,7 +1418,7 @@ static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
* nr_poll_queues: the number of polling queues * nr_poll_queues: the number of polling queues
*/ */
dev->hw_queues = kzalloc_objs(*dev->hw_queues, dev->hw_queues = kzalloc_objs(*dev->hw_queues,
nr_cpu_ids + nr_poll_queues, GFP_KERNEL); nr_cpu_ids + nr_poll_queues);
if (!dev->hw_queues) { if (!dev->hw_queues) {
ret = -ENOMEM; ret = -ENOMEM;
goto out_alloc; goto out_alloc;

View file

@ -2209,12 +2209,11 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
rinfo->shadow[i].grants_used = rinfo->shadow[i].grants_used =
kvzalloc_objs(rinfo->shadow[i].grants_used[0], grants); kvzalloc_objs(rinfo->shadow[i].grants_used[0], grants);
rinfo->shadow[i].sg = kvzalloc_objs(rinfo->shadow[i].sg[0], rinfo->shadow[i].sg = kvzalloc_objs(rinfo->shadow[i].sg[0],
psegs, GFP_KERNEL); psegs);
if (info->max_indirect_segments) if (info->max_indirect_segments)
rinfo->shadow[i].indirect_grants = rinfo->shadow[i].indirect_grants =
kvzalloc_objs(rinfo->shadow[i].indirect_grants[0], kvzalloc_objs(rinfo->shadow[i].indirect_grants[0],
INDIRECT_GREFS(grants), INDIRECT_GREFS(grants));
GFP_KERNEL);
if ((rinfo->shadow[i].grants_used == NULL) || if ((rinfo->shadow[i].grants_used == NULL) ||
(rinfo->shadow[i].sg == NULL) || (rinfo->shadow[i].sg == NULL) ||
(info->max_indirect_segments && (info->max_indirect_segments &&

View file

@ -1137,7 +1137,7 @@ int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl)
mhi_ep_mmio_init(mhi_cntrl); mhi_ep_mmio_init(mhi_cntrl);
mhi_cntrl->mhi_event = kzalloc_objs(*mhi_cntrl->mhi_event, mhi_cntrl->mhi_event = kzalloc_objs(*mhi_cntrl->mhi_event,
mhi_cntrl->event_rings, GFP_KERNEL); mhi_cntrl->event_rings);
if (!mhi_cntrl->mhi_event) if (!mhi_cntrl->mhi_event)
return -ENOMEM; return -ENOMEM;
@ -1400,7 +1400,7 @@ static int mhi_ep_chan_init(struct mhi_ep_cntrl *mhi_cntrl,
* only the defined channels * only the defined channels
*/ */
mhi_cntrl->mhi_chan = kzalloc_objs(*mhi_cntrl->mhi_chan, mhi_cntrl->mhi_chan = kzalloc_objs(*mhi_cntrl->mhi_chan,
mhi_cntrl->max_chan, GFP_KERNEL); mhi_cntrl->max_chan);
if (!mhi_cntrl->mhi_chan) if (!mhi_cntrl->mhi_chan)
return -ENOMEM; return -ENOMEM;

View file

@ -206,7 +206,7 @@ int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
/* Allocate ring cache memory for holding the copy of host ring */ /* Allocate ring cache memory for holding the copy of host ring */
ring->ring_cache = kzalloc_objs(struct mhi_ring_element, ring->ring_cache = kzalloc_objs(struct mhi_ring_element,
ring->ring_size, GFP_KERNEL); ring->ring_size);
if (!ring->ring_cache) if (!ring->ring_cache)
return -ENOMEM; return -ENOMEM;

View file

@ -839,7 +839,7 @@ static void __init aspeed_g6_cc_init(struct device_node *np)
soc_rev = (readl(scu_g6_base + ASPEED_G6_SILICON_REV) & CHIP_REVISION_ID) >> 16; soc_rev = (readl(scu_g6_base + ASPEED_G6_SILICON_REV) & CHIP_REVISION_ID) >> 16;
aspeed_g6_clk_data = kzalloc_flex(*aspeed_g6_clk_data, hws, aspeed_g6_clk_data = kzalloc_flex(*aspeed_g6_clk_data, hws,
ASPEED_G6_NUM_CLKS, GFP_KERNEL); ASPEED_G6_NUM_CLKS);
if (!aspeed_g6_clk_data) if (!aspeed_g6_clk_data)
return; return;
aspeed_g6_clk_data->num = ASPEED_G6_NUM_CLKS; aspeed_g6_clk_data->num = ASPEED_G6_NUM_CLKS;

View file

@ -54,7 +54,7 @@ static void __init clps711x_clk_init_dt(struct device_node *np)
BUG_ON(!base); BUG_ON(!base);
clps711x_clk = kzalloc_flex(*clps711x_clk, clk_data.hws, clps711x_clk = kzalloc_flex(*clps711x_clk, clk_data.hws,
CLPS711X_CLK_MAX, GFP_KERNEL); CLPS711X_CLK_MAX);
BUG_ON(!clps711x_clk); BUG_ON(!clps711x_clk);
spin_lock_init(&clps711x_clk->lock); spin_lock_init(&clps711x_clk->lock);

View file

@ -422,7 +422,7 @@ static void __init npcm7xx_clk_init(struct device_node *clk_np)
goto npcm7xx_init_error; goto npcm7xx_init_error;
npcm7xx_clk_data = kzalloc_flex(*npcm7xx_clk_data, hws, npcm7xx_clk_data = kzalloc_flex(*npcm7xx_clk_data, hws,
NPCM7XX_NUM_CLOCKS, GFP_KERNEL); NPCM7XX_NUM_CLOCKS);
if (!npcm7xx_clk_data) if (!npcm7xx_clk_data)
goto npcm7xx_init_np_err; goto npcm7xx_init_np_err;

View file

@ -459,7 +459,7 @@ static int __init ingenic_ost_probe(struct device_node *np)
ost->soc_info = id->data; ost->soc_info = id->data;
ost->clocks = kzalloc_flex(*ost->clocks, hws, ost->clocks = kzalloc_flex(*ost->clocks, hws,
ost->soc_info->num_channels, GFP_KERNEL); ost->soc_info->num_channels);
if (!ost->clocks) { if (!ost->clocks) {
ret = -ENOMEM; ret = -ENOMEM;
goto err_clk_disable; goto err_clk_disable;

View file

@ -1795,7 +1795,7 @@ ni_gpct_device_construct(struct comedi_device *dev,
counter_dev->counters = kzalloc_objs(*counter, num_counters); counter_dev->counters = kzalloc_objs(*counter, num_counters);
counter_dev->regs = kzalloc_objs(*counter_dev->regs, counter_dev->regs = kzalloc_objs(*counter_dev->regs,
counter_dev->num_chips, GFP_KERNEL); counter_dev->num_chips);
if (!counter_dev->regs || !counter_dev->counters) { if (!counter_dev->regs || !counter_dev->counters) {
kfree(counter_dev->regs); kfree(counter_dev->regs);
kfree(counter_dev->counters); kfree(counter_dev->counters);

View file

@ -76,7 +76,7 @@ static int adf_tl_alloc_mem(struct adf_accel_dev *accel_dev)
goto err_free_tl; goto err_free_tl;
telemetry->regs_hist_buff = kmalloc_objs(*telemetry->regs_hist_buff, telemetry->regs_hist_buff = kmalloc_objs(*telemetry->regs_hist_buff,
tl_data->num_hbuff, GFP_KERNEL); tl_data->num_hbuff);
if (!telemetry->regs_hist_buff) if (!telemetry->regs_hist_buff)
goto err_free_rp_indexes; goto err_free_rp_indexes;

View file

@ -1200,8 +1200,7 @@ static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle,
if (suof_handle->img_table.num_simgs != 0) { if (suof_handle->img_table.num_simgs != 0) {
suof_img_hdr = kzalloc_objs(img_header, suof_img_hdr = kzalloc_objs(img_header,
suof_handle->img_table.num_simgs, suof_handle->img_table.num_simgs);
GFP_KERNEL);
if (!suof_img_hdr) if (!suof_img_hdr)
return -ENOMEM; return -ENOMEM;
suof_handle->img_table.simg_hdr = suof_img_hdr; suof_handle->img_table.simg_hdr = suof_img_hdr;
@ -1892,8 +1891,7 @@ static int qat_uclo_map_objs_from_mof(struct icp_qat_mof_handle *mobj_handle)
sobj_chunk_num = sobj_hdr->num_chunks; sobj_chunk_num = sobj_hdr->num_chunks;
mobj_hdr = kzalloc_objs(*mobj_hdr, mobj_hdr = kzalloc_objs(*mobj_hdr,
size_add(uobj_chunk_num, sobj_chunk_num), size_add(uobj_chunk_num, sobj_chunk_num));
GFP_KERNEL);
if (!mobj_hdr) if (!mobj_hdr)
return -ENOMEM; return -ENOMEM;

View file

@ -78,7 +78,7 @@ edac_device_alloc_ctl_info(unsigned pvt_sz, char *dev_name, unsigned nr_instance
dev_ctl->instances = dev_inst; dev_ctl->instances = dev_inst;
dev_blk = kzalloc_objs(struct edac_device_block, dev_blk = kzalloc_objs(struct edac_device_block,
nr_instances * nr_blocks, GFP_KERNEL); nr_instances * nr_blocks);
if (!dev_blk) if (!dev_blk)
goto free; goto free;

View file

@ -776,7 +776,7 @@ static struct sbridge_dev *alloc_sbridge_dev(int seg, u8 bus, enum domain dom,
return NULL; return NULL;
sbridge_dev->pdev = kzalloc_objs(*sbridge_dev->pdev, sbridge_dev->pdev = kzalloc_objs(*sbridge_dev->pdev,
table->n_devs_per_imc, GFP_KERNEL); table->n_devs_per_imc);
if (!sbridge_dev->pdev) { if (!sbridge_dev->pdev) {
kfree(sbridge_dev); kfree(sbridge_dev);
return NULL; return NULL;

View file

@ -1208,8 +1208,7 @@ static int extcon_alloc_groups(struct extcon_dev *edev)
return 0; return 0;
edev->extcon_dev_type.groups = kzalloc_objs(*edev->extcon_dev_type.groups, edev->extcon_dev_type.groups = kzalloc_objs(*edev->extcon_dev_type.groups,
edev->max_supported + 2, edev->max_supported + 2);
GFP_KERNEL);
if (!edev->extcon_dev_type.groups) if (!edev->extcon_dev_type.groups)
return -ENOMEM; return -ENOMEM;

View file

@ -31,8 +31,7 @@
int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count) int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count)
{ {
struct page **page_array __free(kfree) = kzalloc_objs(page_array[0], struct page **page_array __free(kfree) = kzalloc_objs(page_array[0],
page_count, page_count);
GFP_KERNEL);
if (!page_array) if (!page_array)
return -ENOMEM; return -ENOMEM;
@ -58,8 +57,7 @@ int fw_iso_buffer_map_dma(struct fw_iso_buffer *buffer, struct fw_card *card,
enum dma_data_direction direction) enum dma_data_direction direction)
{ {
dma_addr_t *dma_addrs __free(kfree) = kzalloc_objs(dma_addrs[0], dma_addr_t *dma_addrs __free(kfree) = kzalloc_objs(dma_addrs[0],
buffer->page_count, buffer->page_count);
GFP_KERNEL);
int i; int i;
if (!dma_addrs) if (!dma_addrs)

View file

@ -1179,8 +1179,7 @@ static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
*/ */
ip_hw_instance = kzalloc_flex(*ip_hw_instance, ip_hw_instance = kzalloc_flex(*ip_hw_instance,
base_addr, base_addr,
ip->num_base_address, ip->num_base_address);
GFP_KERNEL);
if (!ip_hw_instance) { if (!ip_hw_instance) {
DRM_ERROR("no memory for ip_hw_instance"); DRM_ERROR("no memory for ip_hw_instance");
return -ENOMEM; return -ENOMEM;

View file

@ -1728,8 +1728,7 @@ int amdgpu_gmc_init_mem_ranges(struct amdgpu_device *adev)
bool valid; bool valid;
adev->gmc.mem_partitions = kzalloc_objs(struct amdgpu_mem_partition_info, adev->gmc.mem_partitions = kzalloc_objs(struct amdgpu_mem_partition_info,
AMDGPU_MAX_MEM_RANGES, AMDGPU_MAX_MEM_RANGES);
GFP_KERNEL);
if (!adev->gmc.mem_partitions) if (!adev->gmc.mem_partitions)
return -ENOMEM; return -ENOMEM;

View file

@ -437,7 +437,7 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev,
if (!adev->irq.client[client_id].sources) { if (!adev->irq.client[client_id].sources) {
adev->irq.client[client_id].sources = adev->irq.client[client_id].sources =
kzalloc_objs(struct amdgpu_irq_src *, kzalloc_objs(struct amdgpu_irq_src *,
AMDGPU_MAX_IRQ_SRC_ID, GFP_KERNEL); AMDGPU_MAX_IRQ_SRC_ID);
if (!adev->irq.client[client_id].sources) if (!adev->irq.client[client_id].sources)
return -ENOMEM; return -ENOMEM;
} }

View file

@ -452,8 +452,7 @@ static int amdgpu_pmu_alloc_pmu_attrs(
return -ENOMEM; return -ENOMEM;
fmt_attr_group->attrs = kzalloc_objs(*fmt_attr_group->attrs, fmt_attr_group->attrs = kzalloc_objs(*fmt_attr_group->attrs,
config->num_formats + 1, config->num_formats + 1);
GFP_KERNEL);
if (!fmt_attr_group->attrs) if (!fmt_attr_group->attrs)
goto err_fmt_attr_grp; goto err_fmt_attr_grp;
@ -464,7 +463,7 @@ static int amdgpu_pmu_alloc_pmu_attrs(
goto err_evt_attr; goto err_evt_attr;
evt_attr_group->attrs = kzalloc_objs(*evt_attr_group->attrs, evt_attr_group->attrs = kzalloc_objs(*evt_attr_group->attrs,
config->num_events + 1, GFP_KERNEL); config->num_events + 1);
if (!evt_attr_group->attrs) if (!evt_attr_group->attrs)
goto err_evt_attr_grp; goto err_evt_attr_grp;

View file

@ -3239,7 +3239,7 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
if (from_rom) { if (from_rom) {
err_data.err_addr = err_data.err_addr =
kzalloc_objs(struct eeprom_table_record, kzalloc_objs(struct eeprom_table_record,
adev->umc.retire_unit, GFP_KERNEL); adev->umc.retire_unit);
if (!err_data.err_addr) { if (!err_data.err_addr) {
dev_warn(adev->dev, "Failed to alloc UMC error address record in mca2pa conversion!\n"); dev_warn(adev->dev, "Failed to alloc UMC error address record in mca2pa conversion!\n");
return -ENOMEM; return -ENOMEM;

View file

@ -1881,8 +1881,7 @@ static int amdgpu_ttm_pools_init(struct amdgpu_device *adev)
return 0; return 0;
adev->mman.ttm_pools = kzalloc_objs(*adev->mman.ttm_pools, adev->mman.ttm_pools = kzalloc_objs(*adev->mman.ttm_pools,
adev->gmc.num_mem_partitions, adev->gmc.num_mem_partitions);
GFP_KERNEL);
if (!adev->mman.ttm_pools) if (!adev->mman.ttm_pools)
return -ENOMEM; return -ENOMEM;

View file

@ -59,7 +59,7 @@ int amdgpu_umc_page_retirement_mca(struct amdgpu_device *adev,
err_data.err_addr = err_data.err_addr =
kzalloc_objs(struct eeprom_table_record, kzalloc_objs(struct eeprom_table_record,
adev->umc.max_ras_err_cnt_per_query, GFP_KERNEL); adev->umc.max_ras_err_cnt_per_query);
if (!err_data.err_addr) { if (!err_data.err_addr) {
dev_warn(adev->dev, dev_warn(adev->dev,
"Failed to alloc memory for umc error record in MCA notifier!\n"); "Failed to alloc memory for umc error record in MCA notifier!\n");
@ -106,7 +106,7 @@ void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
err_data->err_addr = err_data->err_addr =
kzalloc_objs(struct eeprom_table_record, kzalloc_objs(struct eeprom_table_record,
adev->umc.max_ras_err_cnt_per_query, GFP_KERNEL); adev->umc.max_ras_err_cnt_per_query);
/* still call query_ras_error_address to clear error status /* still call query_ras_error_address to clear error status
* even NOMEM error is encountered * even NOMEM error is encountered
@ -132,8 +132,7 @@ void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
adev->umc.max_ras_err_cnt_per_query) { adev->umc.max_ras_err_cnt_per_query) {
err_data->err_addr = err_data->err_addr =
kzalloc_objs(struct eeprom_table_record, kzalloc_objs(struct eeprom_table_record,
adev->umc.max_ras_err_cnt_per_query, adev->umc.max_ras_err_cnt_per_query);
GFP_KERNEL);
/* still call query_ras_error_address to clear error status /* still call query_ras_error_address to clear error status
* even NOMEM error is encountered * even NOMEM error is encountered
@ -163,8 +162,7 @@ void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
adev->umc.max_ras_err_cnt_per_query) { adev->umc.max_ras_err_cnt_per_query) {
err_data->err_addr = err_data->err_addr =
kzalloc_objs(struct eeprom_table_record, kzalloc_objs(struct eeprom_table_record,
adev->umc.max_ras_err_cnt_per_query, adev->umc.max_ras_err_cnt_per_query);
GFP_KERNEL);
/* still call query_ras_error_address to clear error status /* still call query_ras_error_address to clear error status
* even NOMEM error is encountered * even NOMEM error is encountered
@ -554,7 +552,7 @@ int amdgpu_umc_lookup_bad_pages_in_a_row(struct amdgpu_device *adev,
struct ras_err_data err_data; struct ras_err_data err_data;
err_data.err_addr = kzalloc_objs(struct eeprom_table_record, err_data.err_addr = kzalloc_objs(struct eeprom_table_record,
adev->umc.retire_unit, GFP_KERNEL); adev->umc.retire_unit);
if (!err_data.err_addr) { if (!err_data.err_addr) {
dev_warn(adev->dev, "Failed to alloc memory in bad page lookup!\n"); dev_warn(adev->dev, "Failed to alloc memory in bad page lookup!\n");
return 0; return 0;

View file

@ -500,8 +500,7 @@ static int amdgpu_vkms_sw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev; struct amdgpu_device *adev = ip_block->adev;
adev->amdgpu_vkms_output = kzalloc_objs(struct amdgpu_vkms_output, adev->amdgpu_vkms_output = kzalloc_objs(struct amdgpu_vkms_output,
adev->mode_info.num_crtc, adev->mode_info.num_crtc);
GFP_KERNEL);
if (!adev->amdgpu_vkms_output) if (!adev->amdgpu_vkms_output)
return -ENOMEM; return -ENOMEM;

View file

@ -781,7 +781,7 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
* the amount of memory allocated by user * the amount of memory allocated by user
*/ */
pa = kzalloc_objs(struct kfd_process_device_apertures, pa = kzalloc_objs(struct kfd_process_device_apertures,
args->num_of_nodes, GFP_KERNEL); args->num_of_nodes);
if (!pa) if (!pa)
return -ENOMEM; return -ENOMEM;

View file

@ -959,7 +959,7 @@ void amdgpu_dm_crtc_secure_display_create_contexts(struct amdgpu_device *adev)
int i; int i;
crtc_ctx = kzalloc_objs(struct secure_display_crtc_context, crtc_ctx = kzalloc_objs(struct secure_display_crtc_context,
adev->mode_info.num_crtc, GFP_KERNEL); adev->mode_info.num_crtc);
if (!crtc_ctx) { if (!crtc_ctx) {
adev->dm.secure_display_ctx.crtc_ctx = NULL; adev->dm.secure_display_ctx.crtc_ctx = NULL;

View file

@ -2131,7 +2131,7 @@ enum dc_status dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes; display_e2e_pipe_params_st *pipes;
pipes = kzalloc_objs(display_e2e_pipe_params_st, pipes = kzalloc_objs(display_e2e_pipe_params_st,
dc->res_pool->pipe_count, GFP_KERNEL); dc->res_pool->pipe_count);
if (!pipes) if (!pipes)
return DC_FAIL_BANDWIDTH_VALIDATE; return DC_FAIL_BANDWIDTH_VALIDATE;

View file

@ -931,7 +931,7 @@ static enum dc_status dcn21_validate_bandwidth(struct dc *dc, struct dc_state *c
display_e2e_pipe_params_st *pipes; display_e2e_pipe_params_st *pipes;
pipes = kzalloc_objs(display_e2e_pipe_params_st, pipes = kzalloc_objs(display_e2e_pipe_params_st,
dc->res_pool->pipe_count, GFP_KERNEL); dc->res_pool->pipe_count);
if (!pipes) if (!pipes)
return DC_FAIL_BANDWIDTH_VALIDATE; return DC_FAIL_BANDWIDTH_VALIDATE;

View file

@ -2045,8 +2045,7 @@ enum dc_status dcn30_validate_bandwidth(struct dc *dc,
int vlevel = 0; int vlevel = 0;
int pipe_cnt = 0; int pipe_cnt = 0;
display_e2e_pipe_params_st *pipes = kzalloc_objs(display_e2e_pipe_params_st, display_e2e_pipe_params_st *pipes = kzalloc_objs(display_e2e_pipe_params_st,
dc->res_pool->pipe_count, dc->res_pool->pipe_count);
GFP_KERNEL);
DC_LOGGER_INIT(dc->ctx->logger); DC_LOGGER_INIT(dc->ctx->logger);
BW_VAL_TRACE_COUNT(); BW_VAL_TRACE_COUNT();

View file

@ -1768,8 +1768,7 @@ enum dc_status dcn31_validate_bandwidth(struct dc *dc,
int vlevel = 0; int vlevel = 0;
int pipe_cnt = 0; int pipe_cnt = 0;
display_e2e_pipe_params_st *pipes = kzalloc_objs(display_e2e_pipe_params_st, display_e2e_pipe_params_st *pipes = kzalloc_objs(display_e2e_pipe_params_st,
dc->res_pool->pipe_count, dc->res_pool->pipe_count);
GFP_KERNEL);
DC_LOGGER_INIT(dc->ctx->logger); DC_LOGGER_INIT(dc->ctx->logger);
BW_VAL_TRACE_COUNT(); BW_VAL_TRACE_COUNT();

View file

@ -1706,8 +1706,7 @@ enum dc_status dcn314_validate_bandwidth(struct dc *dc,
int vlevel = 0; int vlevel = 0;
int pipe_cnt = 0; int pipe_cnt = 0;
display_e2e_pipe_params_st *pipes = kzalloc_objs(display_e2e_pipe_params_st, display_e2e_pipe_params_st *pipes = kzalloc_objs(display_e2e_pipe_params_st,
dc->res_pool->pipe_count, dc->res_pool->pipe_count);
GFP_KERNEL);
DC_LOGGER_INIT(dc->ctx->logger); DC_LOGGER_INIT(dc->ctx->logger);
BW_VAL_TRACE_COUNT(); BW_VAL_TRACE_COUNT();

View file

@ -1751,8 +1751,7 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, enum dc_valid
int vlevel = 0; int vlevel = 0;
int pipe_cnt = 0; int pipe_cnt = 0;
display_e2e_pipe_params_st *pipes = kzalloc_objs(display_e2e_pipe_params_st, display_e2e_pipe_params_st *pipes = kzalloc_objs(display_e2e_pipe_params_st,
dc->res_pool->pipe_count, dc->res_pool->pipe_count);
GFP_KERNEL);
/* To handle Freesync properly, setting FreeSync DML parameters /* To handle Freesync properly, setting FreeSync DML parameters
* to its default state for the first stage of validation * to its default state for the first stage of validation

View file

@ -1715,14 +1715,12 @@ bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps,
if (map_user_ramp && ramp && ramp->type == GAMMA_RGB_256) { if (map_user_ramp && ramp && ramp->type == GAMMA_RGB_256) {
rgb_user = kvzalloc_objs(*rgb_user, rgb_user = kvzalloc_objs(*rgb_user,
ramp->num_entries + _EXTRA_POINTS, ramp->num_entries + _EXTRA_POINTS);
GFP_KERNEL);
if (!rgb_user) if (!rgb_user)
goto rgb_user_alloc_fail; goto rgb_user_alloc_fail;
axis_x = kvzalloc_objs(*axis_x, axis_x = kvzalloc_objs(*axis_x,
ramp->num_entries + _EXTRA_POINTS, ramp->num_entries + _EXTRA_POINTS);
GFP_KERNEL);
if (!axis_x) if (!axis_x)
goto axis_x_alloc_fail; goto axis_x_alloc_fail;
@ -1940,8 +1938,7 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
if (ramp && ramp->type != GAMMA_CS_TFM_1D && if (ramp && ramp->type != GAMMA_CS_TFM_1D &&
(map_user_ramp || ramp->type != GAMMA_RGB_256)) { (map_user_ramp || ramp->type != GAMMA_RGB_256)) {
rgb_user = kvzalloc_objs(*rgb_user, rgb_user = kvzalloc_objs(*rgb_user,
ramp->num_entries + _EXTRA_POINTS, ramp->num_entries + _EXTRA_POINTS);
GFP_KERNEL);
if (!rgb_user) if (!rgb_user)
goto rgb_user_alloc_fail; goto rgb_user_alloc_fail;

View file

@ -2725,7 +2725,7 @@ static int kv_parse_power_table(struct amdgpu_device *adev)
le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
adev->pm.dpm.ps = kzalloc_objs(struct amdgpu_ps, adev->pm.dpm.ps = kzalloc_objs(struct amdgpu_ps,
state_array->ucNumEntries, GFP_KERNEL); state_array->ucNumEntries);
if (!adev->pm.dpm.ps) if (!adev->pm.dpm.ps)
return -ENOMEM; return -ENOMEM;
power_state_offset = (u8 *)state_array->states; power_state_offset = (u8 *)state_array->states;

View file

@ -303,7 +303,7 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries = adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
kzalloc_objs(struct amdgpu_phase_shedding_limits_entry, kzalloc_objs(struct amdgpu_phase_shedding_limits_entry,
psl->ucNumEntries, GFP_KERNEL); psl->ucNumEntries);
if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries)
return -ENOMEM; return -ENOMEM;

View file

@ -7342,7 +7342,7 @@ static int si_parse_power_table(struct amdgpu_device *adev)
le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
adev->pm.dpm.ps = kzalloc_objs(struct amdgpu_ps, adev->pm.dpm.ps = kzalloc_objs(struct amdgpu_ps,
state_array->ucNumEntries, GFP_KERNEL); state_array->ucNumEntries);
if (!adev->pm.dpm.ps) if (!adev->pm.dpm.ps)
return -ENOMEM; return -ENOMEM;
power_state_offset = (u8 *)state_array->states; power_state_offset = (u8 *)state_array->states;

View file

@ -371,7 +371,7 @@ static int get_mclk_voltage_dependency_table(
"Invalid PowerPlay Table!", return -1); "Invalid PowerPlay Table!", return -1);
mclk_table = kzalloc_flex(*mclk_table, entries, mclk_table = kzalloc_flex(*mclk_table, entries,
mclk_dep_table->ucNumEntries, GFP_KERNEL); mclk_dep_table->ucNumEntries);
if (!mclk_table) if (!mclk_table)
return -ENOMEM; return -ENOMEM;
@ -415,7 +415,7 @@ static int get_sclk_voltage_dependency_table(
"Invalid PowerPlay Table!", return -1); "Invalid PowerPlay Table!", return -1);
sclk_table = kzalloc_flex(*sclk_table, entries, sclk_table = kzalloc_flex(*sclk_table, entries,
tonga_table->ucNumEntries, GFP_KERNEL); tonga_table->ucNumEntries);
if (!sclk_table) if (!sclk_table)
return -ENOMEM; return -ENOMEM;
@ -444,8 +444,7 @@ static int get_sclk_voltage_dependency_table(
"Invalid PowerPlay Table!", return -1); "Invalid PowerPlay Table!", return -1);
sclk_table = kzalloc_flex(*sclk_table, entries, sclk_table = kzalloc_flex(*sclk_table, entries,
polaris_table->ucNumEntries, polaris_table->ucNumEntries);
GFP_KERNEL);
if (!sclk_table) if (!sclk_table)
return -ENOMEM; return -ENOMEM;
@ -492,8 +491,7 @@ static int get_pcie_table(
"Invalid PowerPlay Table!", return -1); "Invalid PowerPlay Table!", return -1);
pcie_table = kzalloc_flex(*pcie_table, entries, pcie_table = kzalloc_flex(*pcie_table, entries,
atom_pcie_table->ucNumEntries, atom_pcie_table->ucNumEntries);
GFP_KERNEL);
if (!pcie_table) if (!pcie_table)
return -ENOMEM; return -ENOMEM;
@ -529,8 +527,7 @@ static int get_pcie_table(
"Invalid PowerPlay Table!", return -1); "Invalid PowerPlay Table!", return -1);
pcie_table = kzalloc_flex(*pcie_table, entries, pcie_table = kzalloc_flex(*pcie_table, entries,
atom_pcie_table->ucNumEntries, atom_pcie_table->ucNumEntries);
GFP_KERNEL);
if (!pcie_table) if (!pcie_table)
return -ENOMEM; return -ENOMEM;
@ -725,7 +722,7 @@ static int get_mm_clock_voltage_table(
PP_ASSERT_WITH_CODE((0 != mm_dependency_table->ucNumEntries), PP_ASSERT_WITH_CODE((0 != mm_dependency_table->ucNumEntries),
"Invalid PowerPlay Table!", return -1); "Invalid PowerPlay Table!", return -1);
mm_table = kzalloc_flex(*mm_table, entries, mm_table = kzalloc_flex(*mm_table, entries,
mm_dependency_table->ucNumEntries, GFP_KERNEL); mm_dependency_table->ucNumEntries);
if (!mm_table) if (!mm_table)
return -ENOMEM; return -ENOMEM;

View file

@ -1483,7 +1483,7 @@ static int get_cac_leakage_table(struct pp_hwmgr *hwmgr,
return -EINVAL; return -EINVAL;
cac_leakage_table = kzalloc_flex(*cac_leakage_table, entries, cac_leakage_table = kzalloc_flex(*cac_leakage_table, entries,
table->ucNumEntries, GFP_KERNEL); table->ucNumEntries);
if (!cac_leakage_table) if (!cac_leakage_table)
return -ENOMEM; return -ENOMEM;
@ -1621,7 +1621,7 @@ static int init_phase_shedding_table(struct pp_hwmgr *hwmgr,
table = kzalloc_flex(*table, entries, table = kzalloc_flex(*table, entries,
ptable->ucNumEntries, GFP_KERNEL); ptable->ucNumEntries);
if (!table) if (!table)
return -ENOMEM; return -ENOMEM;

View file

@ -351,7 +351,7 @@ static int get_mm_clock_voltage_table(
"Invalid PowerPlay Table!", return -1); "Invalid PowerPlay Table!", return -1);
mm_table = kzalloc_flex(*mm_table, entries, mm_table = kzalloc_flex(*mm_table, entries,
mm_dependency_table->ucNumEntries, GFP_KERNEL); mm_dependency_table->ucNumEntries);
if (!mm_table) if (!mm_table)
return -ENOMEM; return -ENOMEM;
@ -574,7 +574,7 @@ static int get_socclk_voltage_dependency_table(
"Invalid PowerPlay Table!", return -1); "Invalid PowerPlay Table!", return -1);
clk_table = kzalloc_flex(*clk_table, entries, clk_table = kzalloc_flex(*clk_table, entries,
clk_dep_table->ucNumEntries, GFP_KERNEL); clk_dep_table->ucNumEntries);
if (!clk_table) if (!clk_table)
return -ENOMEM; return -ENOMEM;
@ -604,7 +604,7 @@ static int get_mclk_voltage_dependency_table(
"Invalid PowerPlay Table!", return -1); "Invalid PowerPlay Table!", return -1);
mclk_table = kzalloc_flex(*mclk_table, entries, mclk_table = kzalloc_flex(*mclk_table, entries,
mclk_dep_table->ucNumEntries, GFP_KERNEL); mclk_dep_table->ucNumEntries);
if (!mclk_table) if (!mclk_table)
return -ENOMEM; return -ENOMEM;
@ -641,7 +641,7 @@ static int get_gfxclk_voltage_dependency_table(
"Invalid PowerPlay Table!", return -1); "Invalid PowerPlay Table!", return -1);
clk_table = kzalloc_flex(*clk_table, entries, clk_table = kzalloc_flex(*clk_table, entries,
clk_dep_table->ucNumEntries, GFP_KERNEL); clk_dep_table->ucNumEntries);
if (!clk_table) if (!clk_table)
return -ENOMEM; return -ENOMEM;
@ -703,7 +703,7 @@ static int get_pix_clk_voltage_dependency_table(
"Invalid PowerPlay Table!", return -1); "Invalid PowerPlay Table!", return -1);
clk_table = kzalloc_flex(*clk_table, entries, clk_table = kzalloc_flex(*clk_table, entries,
clk_dep_table->ucNumEntries, GFP_KERNEL); clk_dep_table->ucNumEntries);
if (!clk_table) if (!clk_table)
return -ENOMEM; return -ENOMEM;
@ -794,7 +794,7 @@ static int get_pcie_table(struct pp_hwmgr *hwmgr,
return 0); return 0);
pcie_table = kzalloc_flex(*pcie_table, entries, pcie_table = kzalloc_flex(*pcie_table, entries,
atom_pcie_table->ucNumEntries, GFP_KERNEL); atom_pcie_table->ucNumEntries);
if (!pcie_table) if (!pcie_table)
return -ENOMEM; return -ENOMEM;

View file

@ -2400,8 +2400,7 @@ static int smu_v13_0_7_get_power_profile_mode(struct smu_context *smu, char *buf
return -EINVAL; return -EINVAL;
activity_monitor_external = kzalloc_objs(*activity_monitor_external, activity_monitor_external = kzalloc_objs(*activity_monitor_external,
PP_SMC_POWER_PROFILE_COUNT, PP_SMC_POWER_PROFILE_COUNT);
GFP_KERNEL);
if (!activity_monitor_external) if (!activity_monitor_external)
return -ENOMEM; return -ENOMEM;

View file

@ -4605,8 +4605,7 @@ int drm_dp_mst_atomic_setup_commit(struct drm_atomic_state *state)
num_commit_deps = hweight32(mst_state->pending_crtc_mask); num_commit_deps = hweight32(mst_state->pending_crtc_mask);
mst_state->commit_deps = kmalloc_objs(*mst_state->commit_deps, mst_state->commit_deps = kmalloc_objs(*mst_state->commit_deps,
num_commit_deps, num_commit_deps);
GFP_KERNEL);
if (!mst_state->commit_deps) if (!mst_state->commit_deps)
return -ENOMEM; return -ENOMEM;
mst_state->num_commit_deps = num_commit_deps; mst_state->num_commit_deps = num_commit_deps;

View file

@ -136,12 +136,11 @@ drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
if (!state->crtcs) if (!state->crtcs)
goto fail; goto fail;
state->planes = kzalloc_objs(*state->planes, state->planes = kzalloc_objs(*state->planes,
dev->mode_config.num_total_plane, dev->mode_config.num_total_plane);
GFP_KERNEL);
if (!state->planes) if (!state->planes)
goto fail; goto fail;
state->colorops = kzalloc_objs(*state->colorops, state->colorops = kzalloc_objs(*state->colorops,
dev->mode_config.num_colorop, GFP_KERNEL); dev->mode_config.num_colorop);
if (!state->colorops) if (!state->colorops)
goto fail; goto fail;

View file

@ -326,7 +326,7 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
for_each_free_tree(i) { for_each_free_tree(i) {
mm->free_trees[i] = kmalloc_objs(struct rb_root, mm->free_trees[i] = kmalloc_objs(struct rb_root,
mm->max_order + 1, GFP_KERNEL); mm->max_order + 1);
if (!mm->free_trees[i]) if (!mm->free_trees[i])
goto out_free_tree; goto out_free_tree;

View file

@ -59,8 +59,7 @@ int drm_client_modeset_create(struct drm_client_dev *client)
for (modeset = client->modesets; modeset->crtc; modeset++) { for (modeset = client->modesets; modeset->crtc; modeset++) {
modeset->connectors = kzalloc_objs(*modeset->connectors, modeset->connectors = kzalloc_objs(*modeset->connectors,
max_connector_count, max_connector_count);
GFP_KERNEL);
if (!modeset->connectors) if (!modeset->connectors)
goto err_free; goto err_free;
} }

View file

@ -846,8 +846,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
} }
connector_set = kmalloc_objs(struct drm_connector *, connector_set = kmalloc_objs(struct drm_connector *,
crtc_req->count_connectors, crtc_req->count_connectors);
GFP_KERNEL);
if (!connector_set) { if (!connector_set) {
ret = -ENOMEM; ret = -ENOMEM;
goto out; goto out;

View file

@ -603,14 +603,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set,
* connector data. * connector data.
*/ */
save_encoder_crtcs = kzalloc_objs(struct drm_crtc *, save_encoder_crtcs = kzalloc_objs(struct drm_crtc *,
dev->mode_config.num_encoder, dev->mode_config.num_encoder);
GFP_KERNEL);
if (!save_encoder_crtcs) if (!save_encoder_crtcs)
return -ENOMEM; return -ENOMEM;
save_connector_encoders = kzalloc_objs(struct drm_encoder *, save_connector_encoders = kzalloc_objs(struct drm_encoder *,
dev->mode_config.num_connector, dev->mode_config.num_connector);
GFP_KERNEL);
if (!save_connector_encoders) { if (!save_connector_encoders) {
kfree(save_encoder_crtcs); kfree(save_encoder_crtcs);
return -ENOMEM; return -ENOMEM;

View file

@ -1824,7 +1824,7 @@ __set_power_wells(struct i915_power_domains *power_domains,
power_domains->power_well_count = power_well_count; power_domains->power_well_count = power_well_count;
power_domains->power_wells = power_domains->power_wells =
kzalloc_objs(*power_domains->power_wells, kzalloc_objs(*power_domains->power_wells,
power_well_count, GFP_KERNEL); power_well_count);
if (!power_domains->power_wells) if (!power_domains->power_wells)
return -ENOMEM; return -ENOMEM;

View file

@ -2328,8 +2328,7 @@ static int initialize_hdcp_port_data(struct intel_connector *connector,
if (!data->streams) if (!data->streams)
data->streams = kzalloc_objs(struct hdcp2_streamid_type, data->streams = kzalloc_objs(struct hdcp2_streamid_type,
INTEL_NUM_PIPES(display), INTEL_NUM_PIPES(display));
GFP_KERNEL);
if (!data->streams) { if (!data->streams) {
drm_err(display->drm, "Out of Memory\n"); drm_err(display->drm, "Out of Memory\n");
return -ENOMEM; return -ENOMEM;

View file

@ -990,8 +990,7 @@ guc_capture_alloc_one_node(struct intel_guc *guc)
for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) { for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) {
new->reginfo[i].regs = kzalloc_objs(struct guc_mmio_reg, new->reginfo[i].regs = kzalloc_objs(struct guc_mmio_reg,
guc->capture->max_mmio_per_node, guc->capture->max_mmio_per_node);
GFP_KERNEL);
if (!new->reginfo[i].regs) { if (!new->reginfo[i].regs) {
while (i) while (i)
kfree(new->reginfo[--i].regs); kfree(new->reginfo[--i].regs);

View file

@ -70,16 +70,14 @@ nouveau_job_init(struct nouveau_job *job,
} }
job->out_sync.objs = kzalloc_objs(*job->out_sync.objs, job->out_sync.objs = kzalloc_objs(*job->out_sync.objs,
job->out_sync.count, job->out_sync.count);
GFP_KERNEL);
if (!job->out_sync.objs) { if (!job->out_sync.objs) {
ret = -ENOMEM; ret = -ENOMEM;
goto err_free_out_sync; goto err_free_out_sync;
} }
job->out_sync.chains = kzalloc_objs(*job->out_sync.chains, job->out_sync.chains = kzalloc_objs(*job->out_sync.chains,
job->out_sync.count, job->out_sync.count);
GFP_KERNEL);
if (!job->out_sync.chains) { if (!job->out_sync.chains) {
ret = -ENOMEM; ret = -ENOMEM;
goto err_free_objs; goto err_free_objs;

View file

@ -886,7 +886,7 @@ static int omap_dmm_probe(struct platform_device *dev)
/* alloc engines */ /* alloc engines */
omap_dmm->engines = kzalloc_objs(*omap_dmm->engines, omap_dmm->engines = kzalloc_objs(*omap_dmm->engines,
omap_dmm->num_engines, GFP_KERNEL); omap_dmm->num_engines);
if (!omap_dmm->engines) { if (!omap_dmm->engines) {
ret = -ENOMEM; ret = -ENOMEM;
goto fail; goto fail;

View file

@ -1258,7 +1258,7 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
((ALIGN(va + size, 1ull << 21) - ALIGN_DOWN(va, 1ull << 21)) >> 21); ((ALIGN(va + size, 1ull << 21) - ALIGN_DOWN(va, 1ull << 21)) >> 21);
op_ctx->rsvd_page_tables.pages = kzalloc_objs(*op_ctx->rsvd_page_tables.pages, op_ctx->rsvd_page_tables.pages = kzalloc_objs(*op_ctx->rsvd_page_tables.pages,
pt_count, GFP_KERNEL); pt_count);
if (!op_ctx->rsvd_page_tables.pages) { if (!op_ctx->rsvd_page_tables.pages) {
ret = -ENOMEM; ret = -ENOMEM;
goto err_cleanup; goto err_cleanup;
@ -1312,8 +1312,7 @@ static int panthor_vm_prepare_unmap_op_ctx(struct panthor_vm_op_ctx *op_ctx,
if (pt_count) { if (pt_count) {
op_ctx->rsvd_page_tables.pages = kzalloc_objs(*op_ctx->rsvd_page_tables.pages, op_ctx->rsvd_page_tables.pages = kzalloc_objs(*op_ctx->rsvd_page_tables.pages,
pt_count, pt_count);
GFP_KERNEL);
if (!op_ctx->rsvd_page_tables.pages) { if (!op_ctx->rsvd_page_tables.pages) {
ret = -ENOMEM; ret = -ENOMEM;
goto err_cleanup; goto err_cleanup;

View file

@ -59,8 +59,7 @@ static int qxl_alloc_client_monitors_config(struct qxl_device *qdev,
} }
if (!qdev->client_monitors_config) { if (!qdev->client_monitors_config) {
qdev->client_monitors_config = kzalloc_flex(*qdev->client_monitors_config, qdev->client_monitors_config = kzalloc_flex(*qdev->client_monitors_config,
heads, count, heads, count);
GFP_KERNEL);
if (!qdev->client_monitors_config) if (!qdev->client_monitors_config)
return -ENOMEM; return -ENOMEM;
} }

View file

@ -5518,7 +5518,7 @@ static int ci_parse_power_table(struct radeon_device *rdev)
le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
rdev->pm.dpm.ps = kzalloc_objs(struct radeon_ps, rdev->pm.dpm.ps = kzalloc_objs(struct radeon_ps,
state_array->ucNumEntries, GFP_KERNEL); state_array->ucNumEntries);
if (!rdev->pm.dpm.ps) if (!rdev->pm.dpm.ps)
return -ENOMEM; return -ENOMEM;
power_state_offset = (u8 *)state_array->states; power_state_offset = (u8 *)state_array->states;

View file

@ -2458,7 +2458,7 @@ static int kv_parse_power_table(struct radeon_device *rdev)
le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
rdev->pm.dpm.ps = kzalloc_objs(struct radeon_ps, rdev->pm.dpm.ps = kzalloc_objs(struct radeon_ps,
state_array->ucNumEntries, GFP_KERNEL); state_array->ucNumEntries);
if (!rdev->pm.dpm.ps) if (!rdev->pm.dpm.ps)
return -ENOMEM; return -ENOMEM;
power_state_offset = (u8 *)state_array->states; power_state_offset = (u8 *)state_array->states;

View file

@ -4001,8 +4001,7 @@ static int ni_parse_power_table(struct radeon_device *rdev)
power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
rdev->pm.dpm.ps = kzalloc_objs(struct radeon_ps, rdev->pm.dpm.ps = kzalloc_objs(struct radeon_ps,
power_info->pplib.ucNumStates, power_info->pplib.ucNumStates);
GFP_KERNEL);
if (!rdev->pm.dpm.ps) if (!rdev->pm.dpm.ps)
return -ENOMEM; return -ENOMEM;

View file

@ -822,8 +822,7 @@ static int r600_parse_clk_voltage_dep_table(struct radeon_clock_voltage_dependen
ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry; ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;
radeon_table->entries = kzalloc_objs(struct radeon_clock_voltage_dependency_entry, radeon_table->entries = kzalloc_objs(struct radeon_clock_voltage_dependency_entry,
atom_table->ucNumEntries, atom_table->ucNumEntries);
GFP_KERNEL);
if (!radeon_table->entries) if (!radeon_table->entries)
return -ENOMEM; return -ENOMEM;
@ -989,7 +988,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries = rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
kzalloc_objs(struct radeon_phase_shedding_limits_entry, kzalloc_objs(struct radeon_phase_shedding_limits_entry,
psl->ucNumEntries, GFP_KERNEL); psl->ucNumEntries);
if (!rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) { if (!rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
r600_free_extended_power_table(rdev); r600_free_extended_power_table(rdev);
return -ENOMEM; return -ENOMEM;

View file

@ -2118,7 +2118,7 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
if (num_modes == 0) if (num_modes == 0)
return state_index; return state_index;
rdev->pm.power_state = kzalloc_objs(struct radeon_power_state, rdev->pm.power_state = kzalloc_objs(struct radeon_power_state,
num_modes, GFP_KERNEL); num_modes);
if (!rdev->pm.power_state) if (!rdev->pm.power_state)
return state_index; return state_index;
/* last mode is usually default, array is low to high */ /* last mode is usually default, array is low to high */
@ -2590,8 +2590,7 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
if (power_info->pplib.ucNumStates == 0) if (power_info->pplib.ucNumStates == 0)
return state_index; return state_index;
rdev->pm.power_state = kzalloc_objs(struct radeon_power_state, rdev->pm.power_state = kzalloc_objs(struct radeon_power_state,
power_info->pplib.ucNumStates, power_info->pplib.ucNumStates);
GFP_KERNEL);
if (!rdev->pm.power_state) if (!rdev->pm.power_state)
return state_index; return state_index;
/* first mode is usually default, followed by low to high */ /* first mode is usually default, followed by low to high */
@ -2608,8 +2607,7 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
power_info->pplib.ucNonClockSize)); power_info->pplib.ucNonClockSize));
rdev->pm.power_state[i].clock_info = rdev->pm.power_state[i].clock_info =
kzalloc_objs(struct radeon_pm_clock_info, kzalloc_objs(struct radeon_pm_clock_info,
(power_info->pplib.ucStateEntrySize - 1) ? (power_info->pplib.ucStateEntrySize - 1) : 1, (power_info->pplib.ucStateEntrySize - 1) ? (power_info->pplib.ucStateEntrySize - 1) : 1);
GFP_KERNEL);
if (!rdev->pm.power_state[i].clock_info) if (!rdev->pm.power_state[i].clock_info)
return state_index; return state_index;
if (power_info->pplib.ucStateEntrySize - 1) { if (power_info->pplib.ucStateEntrySize - 1) {
@ -2692,8 +2690,7 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
if (state_array->ucNumEntries == 0) if (state_array->ucNumEntries == 0)
return state_index; return state_index;
rdev->pm.power_state = kzalloc_objs(struct radeon_power_state, rdev->pm.power_state = kzalloc_objs(struct radeon_power_state,
state_array->ucNumEntries, state_array->ucNumEntries);
GFP_KERNEL);
if (!rdev->pm.power_state) if (!rdev->pm.power_state)
return state_index; return state_index;
power_state_offset = (u8 *)state_array->states; power_state_offset = (u8 *)state_array->states;
@ -2705,8 +2702,7 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
&non_clock_info_array->nonClockInfo[non_clock_array_index]; &non_clock_info_array->nonClockInfo[non_clock_array_index];
rdev->pm.power_state[i].clock_info = rdev->pm.power_state[i].clock_info =
kzalloc_objs(struct radeon_pm_clock_info, kzalloc_objs(struct radeon_pm_clock_info,
power_state->v2.ucNumDPMLevels ? power_state->v2.ucNumDPMLevels : 1, power_state->v2.ucNumDPMLevels ? power_state->v2.ucNumDPMLevels : 1);
GFP_KERNEL);
if (!rdev->pm.power_state[i].clock_info) if (!rdev->pm.power_state[i].clock_info)
return state_index; return state_index;
if (power_state->v2.ucNumDPMLevels) { if (power_state->v2.ucNumDPMLevels) {

View file

@ -805,8 +805,7 @@ static int rs780_parse_power_table(struct radeon_device *rdev)
power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
rdev->pm.dpm.ps = kzalloc_objs(struct radeon_ps, rdev->pm.dpm.ps = kzalloc_objs(struct radeon_ps,
power_info->pplib.ucNumStates, power_info->pplib.ucNumStates);
GFP_KERNEL);
if (!rdev->pm.dpm.ps) if (!rdev->pm.dpm.ps)
return -ENOMEM; return -ENOMEM;

View file

@ -1888,8 +1888,7 @@ static int rv6xx_parse_power_table(struct radeon_device *rdev)
power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
rdev->pm.dpm.ps = kzalloc_objs(struct radeon_ps, rdev->pm.dpm.ps = kzalloc_objs(struct radeon_ps,
power_info->pplib.ucNumStates, power_info->pplib.ucNumStates);
GFP_KERNEL);
if (!rdev->pm.dpm.ps) if (!rdev->pm.dpm.ps)
return -ENOMEM; return -ENOMEM;

View file

@ -2284,8 +2284,7 @@ int rv7xx_parse_power_table(struct radeon_device *rdev)
power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
rdev->pm.dpm.ps = kzalloc_objs(struct radeon_ps, rdev->pm.dpm.ps = kzalloc_objs(struct radeon_ps,
power_info->pplib.ucNumStates, power_info->pplib.ucNumStates);
GFP_KERNEL);
if (!rdev->pm.dpm.ps) if (!rdev->pm.dpm.ps)
return -ENOMEM; return -ENOMEM;

View file

@ -6779,7 +6779,7 @@ static int si_parse_power_table(struct radeon_device *rdev)
le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
rdev->pm.dpm.ps = kzalloc_objs(struct radeon_ps, rdev->pm.dpm.ps = kzalloc_objs(struct radeon_ps,
state_array->ucNumEntries, GFP_KERNEL); state_array->ucNumEntries);
if (!rdev->pm.dpm.ps) if (!rdev->pm.dpm.ps)
return -ENOMEM; return -ENOMEM;
power_state_offset = (u8 *)state_array->states; power_state_offset = (u8 *)state_array->states;

View file

@ -1480,7 +1480,7 @@ static int sumo_parse_power_table(struct radeon_device *rdev)
le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
rdev->pm.dpm.ps = kzalloc_objs(struct radeon_ps, rdev->pm.dpm.ps = kzalloc_objs(struct radeon_ps,
state_array->ucNumEntries, GFP_KERNEL); state_array->ucNumEntries);
if (!rdev->pm.dpm.ps) if (!rdev->pm.dpm.ps)
return -ENOMEM; return -ENOMEM;
power_state_offset = (u8 *)state_array->states; power_state_offset = (u8 *)state_array->states;

View file

@ -1711,7 +1711,7 @@ static int trinity_parse_power_table(struct radeon_device *rdev)
le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
rdev->pm.dpm.ps = kzalloc_objs(struct radeon_ps, rdev->pm.dpm.ps = kzalloc_objs(struct radeon_ps,
state_array->ucNumEntries, GFP_KERNEL); state_array->ucNumEntries);
if (!rdev->pm.dpm.ps) if (!rdev->pm.dpm.ps)
return -ENOMEM; return -ENOMEM;
power_state_offset = (u8 *)state_array->states; power_state_offset = (u8 *)state_array->states;

View file

@ -485,7 +485,7 @@ v3d_get_cpu_timestamp_query_params(struct drm_file *file_priv,
job->job_type = V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY; job->job_type = V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY;
query_info->queries = kvmalloc_objs(struct v3d_timestamp_query, query_info->queries = kvmalloc_objs(struct v3d_timestamp_query,
timestamp.count, GFP_KERNEL); timestamp.count);
if (!query_info->queries) if (!query_info->queries)
return -ENOMEM; return -ENOMEM;
@ -543,7 +543,7 @@ v3d_get_cpu_reset_timestamp_params(struct drm_file *file_priv,
job->job_type = V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY; job->job_type = V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY;
query_info->queries = kvmalloc_objs(struct v3d_timestamp_query, query_info->queries = kvmalloc_objs(struct v3d_timestamp_query,
reset.count, GFP_KERNEL); reset.count);
if (!query_info->queries) if (!query_info->queries)
return -ENOMEM; return -ENOMEM;
@ -599,7 +599,7 @@ v3d_get_cpu_copy_query_results_params(struct drm_file *file_priv,
job->job_type = V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY; job->job_type = V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY;
query_info->queries = kvmalloc_objs(struct v3d_timestamp_query, query_info->queries = kvmalloc_objs(struct v3d_timestamp_query,
copy.count, GFP_KERNEL); copy.count);
if (!query_info->queries) if (!query_info->queries)
return -ENOMEM; return -ENOMEM;

View file

@ -982,8 +982,7 @@ int vmw_mksstat_add_ioctl(struct drm_device *dev, void *data,
/* Allocate statically-sized temp arrays for pages -- too big to keep in frame */ /* Allocate statically-sized temp arrays for pages -- too big to keep in frame */
pages_stat = (struct page **) kmalloc_objs(*pages_stat, pages_stat = (struct page **) kmalloc_objs(*pages_stat,
ARRAY_SIZE(pdesc->statPPNs) + ARRAY_SIZE(pdesc->infoPPNs) + ARRAY_SIZE(pdesc->strsPPNs), ARRAY_SIZE(pdesc->statPPNs) + ARRAY_SIZE(pdesc->infoPPNs) + ARRAY_SIZE(pdesc->strsPPNs));
GFP_KERNEL);
if (!pages_stat) if (!pages_stat)
goto err_nomem; goto err_nomem;

View file

@ -213,7 +213,7 @@ int xen_drm_front_evtchnl_create_all(struct xen_drm_front_info *front_info)
front_info->evt_pairs = front_info->evt_pairs =
kzalloc_objs(struct xen_drm_front_evtchnl_pair, kzalloc_objs(struct xen_drm_front_evtchnl_pair,
cfg->num_connectors, GFP_KERNEL); cfg->num_connectors);
if (!front_info->evt_pairs) { if (!front_info->evt_pairs) {
ret = -ENOMEM; ret = -ENOMEM;
goto fail; goto fail;

View file

@ -1320,8 +1320,7 @@ int hid_open_report(struct hid_device *device)
end = start + size; end = start + size;
device->collection = kzalloc_objs(struct hid_collection, device->collection = kzalloc_objs(struct hid_collection,
HID_DEFAULT_NUM_COLLECTIONS, HID_DEFAULT_NUM_COLLECTIONS);
GFP_KERNEL);
if (!device->collection) { if (!device->collection) {
ret = -ENOMEM; ret = -ENOMEM;
goto err; goto err;

View file

@ -314,7 +314,7 @@ int vmbus_connect(void)
version >> 16, version & 0xFFFF); version >> 16, version & 0xFFFF);
vmbus_connection.channels = kzalloc_objs(struct vmbus_channel *, vmbus_connection.channels = kzalloc_objs(struct vmbus_channel *,
MAX_CHANNEL_RELIDS, GFP_KERNEL); MAX_CHANNEL_RELIDS);
if (vmbus_connection.channels == NULL) { if (vmbus_connection.channels == NULL) {
ret = -ENOMEM; ret = -ENOMEM;
goto cleanup; goto cleanup;

Some files were not shown because too many files have changed in this diff Show more