mirror of
https://github.com/torvalds/linux.git
synced 2026-03-08 01:24:47 +01:00
Convert 'alloc_obj' family to use the new default GFP_KERNEL argument
This was done entirely with mindless brute force, using
git grep -l '\<k[vmz]*alloc_objs*(.*, GFP_KERNEL)' |
xargs sed -i 's/\(alloc_objs*(.*\), GFP_KERNEL)/\1)/'
to convert the new alloc_obj() users that had a simple GFP_KERNEL
argument to just drop that argument.
Note that due to the extreme simplicity of the scripting, any slightly
more complex cases spread over multiple lines would not be triggered:
they definitely exist, but this covers the vast bulk of the cases, and
the resulting diff is also then easier to check automatically.
For the same reason the 'flex' versions will be done as a separate
conversion.
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
e19e1b480a
commit
bf4afc53b7
6673 changed files with 13013 additions and 13013 deletions
|
|
@@ -127,7 +127,7 @@ int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg,
|
|||
if (copy_from_user(&fd, fds, sizeof(*fds)))
|
||||
return -EFAULT;
|
||||
|
||||
ev_fd = kmalloc_obj(*ev_fd, GFP_KERNEL);
|
||||
ev_fd = kmalloc_obj(*ev_fd);
|
||||
if (!ev_fd)
|
||||
return -ENOMEM;
|
||||
|
||||
|
|
|
|||
|
|
@@ -897,7 +897,7 @@ static bool create_io_worker(struct io_wq *wq, struct io_wq_acct *acct)
|
|||
|
||||
__set_current_state(TASK_RUNNING);
|
||||
|
||||
worker = kzalloc_obj(*worker, GFP_KERNEL);
|
||||
worker = kzalloc_obj(*worker);
|
||||
if (!worker) {
|
||||
fail:
|
||||
atomic_dec(&acct->nr_running);
|
||||
|
|
@@ -1255,7 +1255,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
|
|||
if (WARN_ON_ONCE(!bounded))
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
wq = kzalloc_obj(struct io_wq, GFP_KERNEL);
|
||||
wq = kzalloc_obj(struct io_wq);
|
||||
if (!wq)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
|
|
|
|||
|
|
@@ -226,7 +226,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
|
|||
int hash_bits;
|
||||
bool ret;
|
||||
|
||||
ctx = kzalloc_obj(*ctx, GFP_KERNEL);
|
||||
ctx = kzalloc_obj(*ctx);
|
||||
if (!ctx)
|
||||
return NULL;
|
||||
|
||||
|
|
|
|||
|
|
@@ -265,7 +265,7 @@ static int io_ring_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg,
|
|||
* a speculative peek operation.
|
||||
*/
|
||||
if (arg->mode & KBUF_MODE_EXPAND && nr_avail > nr_iovs && arg->max_len) {
|
||||
iov = kmalloc_objs(struct iovec, nr_avail, GFP_KERNEL);
|
||||
iov = kmalloc_objs(struct iovec, nr_avail);
|
||||
if (unlikely(!iov))
|
||||
return -ENOMEM;
|
||||
if (arg->mode & KBUF_MODE_FREE)
|
||||
|
|
|
|||
|
|
@@ -115,7 +115,7 @@ static ssize_t io_mock_delay_rw(struct kiocb *iocb, size_t len)
|
|||
struct io_mock_file *mf = iocb->ki_filp->private_data;
|
||||
struct io_mock_iocb *mio;
|
||||
|
||||
mio = kzalloc_obj(*mio, GFP_KERNEL);
|
||||
mio = kzalloc_obj(*mio);
|
||||
if (!mio)
|
||||
return -ENOMEM;
|
||||
|
||||
|
|
|
|||
|
|
@@ -683,7 +683,7 @@ static bool io_coalesce_buffer(struct page ***pages, int *nr_pages,
|
|||
unsigned i, j;
|
||||
|
||||
/* Store head pages only*/
|
||||
new_array = kvmalloc_objs(struct page *, nr_folios, GFP_KERNEL);
|
||||
new_array = kvmalloc_objs(struct page *, nr_folios);
|
||||
if (!new_array)
|
||||
return false;
|
||||
|
||||
|
|
|
|||
|
|
@@ -153,7 +153,7 @@ static struct io_sq_data *io_get_sq_data(struct io_uring_params *p,
|
|||
return sqd;
|
||||
}
|
||||
|
||||
sqd = kzalloc_obj(*sqd, GFP_KERNEL);
|
||||
sqd = kzalloc_obj(*sqd);
|
||||
if (!sqd)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
|
|
|
|||
|
|
@@ -23,7 +23,7 @@ static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
|
|||
mutex_lock(&ctx->uring_lock);
|
||||
hash = ctx->hash_map;
|
||||
if (!hash) {
|
||||
hash = kzalloc_obj(*hash, GFP_KERNEL);
|
||||
hash = kzalloc_obj(*hash);
|
||||
if (!hash) {
|
||||
mutex_unlock(&ctx->uring_lock);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
|
@@ -80,7 +80,7 @@ __cold int io_uring_alloc_task_context(struct task_struct *task,
|
|||
struct io_uring_task *tctx;
|
||||
int ret;
|
||||
|
||||
tctx = kzalloc_obj(*tctx, GFP_KERNEL);
|
||||
tctx = kzalloc_obj(*tctx);
|
||||
if (unlikely(!tctx))
|
||||
return -ENOMEM;
|
||||
|
||||
|
|
@@ -139,7 +139,7 @@ int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
|
|||
if (tctx->io_wq)
|
||||
io_wq_set_exit_on_idle(tctx->io_wq, false);
|
||||
if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
|
||||
node = kmalloc_obj(*node, GFP_KERNEL);
|
||||
node = kmalloc_obj(*node);
|
||||
if (!node)
|
||||
return -ENOMEM;
|
||||
node->ctx = ctx;
|
||||
|
|
|
|||
|
|
@@ -56,7 +56,7 @@ static int __io_getxattr_prep(struct io_kiocb *req,
|
|||
if (ix->ctx.flags)
|
||||
return -EINVAL;
|
||||
|
||||
ix->ctx.kname = kmalloc_obj(*ix->ctx.kname, GFP_KERNEL);
|
||||
ix->ctx.kname = kmalloc_obj(*ix->ctx.kname);
|
||||
if (!ix->ctx.kname)
|
||||
return -ENOMEM;
|
||||
|
||||
|
|
@@ -133,7 +133,7 @@ static int __io_setxattr_prep(struct io_kiocb *req,
|
|||
ix->ctx.size = READ_ONCE(sqe->len);
|
||||
ix->ctx.flags = READ_ONCE(sqe->xattr_flags);
|
||||
|
||||
ix->ctx.kname = kmalloc_obj(*ix->ctx.kname, GFP_KERNEL);
|
||||
ix->ctx.kname = kmalloc_obj(*ix->ctx.kname);
|
||||
if (!ix->ctx.kname)
|
||||
return -ENOMEM;
|
||||
|
||||
|
|
|
|||
|
|
@@ -452,7 +452,7 @@ static int io_zcrx_create_area(struct io_zcrx_ifq *ifq,
|
|||
}
|
||||
|
||||
ret = -ENOMEM;
|
||||
area = kzalloc_obj(*area, GFP_KERNEL);
|
||||
area = kzalloc_obj(*area);
|
||||
if (!area)
|
||||
goto err;
|
||||
area->ifq = ifq;
|
||||
|
|
@@ -514,7 +514,7 @@ static struct io_zcrx_ifq *io_zcrx_ifq_alloc(struct io_ring_ctx *ctx)
|
|||
{
|
||||
struct io_zcrx_ifq *ifq;
|
||||
|
||||
ifq = kzalloc_obj(*ifq, GFP_KERNEL);
|
||||
ifq = kzalloc_obj(*ifq);
|
||||
if (!ifq)
|
||||
return NULL;
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue