mirror of
https://github.com/torvalds/linux.git
synced 2026-03-08 04:04:43 +01:00
This was done entirely with mindless brute force, using
git grep -l '\<k[vmz]*alloc_objs*(.*, GFP_KERNEL)' |
xargs sed -i 's/\(alloc_objs*(.*\), GFP_KERNEL)/\1)/'
to convert the new alloc_obj() users that had a simple GFP_KERNEL
argument to just drop that argument.
Note that due to the extreme simplicity of the scripting, any slightly
more complex cases spread over multiple lines would not be triggered:
they definitely exist, but this covers the vast bulk of the cases, and
the resulting diff is also then easier to check automatically.
For the same reason the 'flex' versions will be done as a separate
conversion.
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
197 lines · 4.5 KiB · C
// SPDX-License-Identifier: GPL-2.0
|
|
#include <linux/kernel.h>
|
|
#include <linux/errno.h>
|
|
#include <linux/fs.h>
|
|
#include <linux/file.h>
|
|
#include <linux/mm.h>
|
|
#include <linux/slab.h>
|
|
#include <linux/namei.h>
|
|
#include <linux/io_uring.h>
|
|
#include <linux/xattr.h>
|
|
|
|
#include <uapi/linux/io_uring.h>
|
|
|
|
#include "../fs/internal.h"
|
|
|
|
#include "io_uring.h"
|
|
#include "xattr.h"
|
|
|
|
/*
 * Per-request state for the io_uring xattr opcodes (get/set, path and
 * fd based variants).  Lives in the io_kiocb command area, retrieved
 * via io_kiocb_to_cmd().
 */
struct io_xattr {
	struct file *file;		/* target file for the f*xattr variants */
	struct kernel_xattr_ctx ctx;	/* kernel-side xattr name/value state */
	struct delayed_filename filename; /* path for the non-fd variants */
};
|
|
|
|
void io_xattr_cleanup(struct io_kiocb *req)
|
|
{
|
|
struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
|
|
|
|
dismiss_delayed_filename(&ix->filename);
|
|
kfree(ix->ctx.kname);
|
|
kvfree(ix->ctx.kvalue);
|
|
}
|
|
|
|
/*
 * Common completion path for all xattr opcodes: clear the cleanup flag,
 * release the request-held resources and post @ret as the CQE result.
 */
static void io_xattr_finish(struct io_kiocb *req, int ret)
{
	req->flags &= ~REQ_F_NEED_CLEANUP;

	io_xattr_cleanup(req);
	io_req_set_res(req, ret, 0);
}
|
|
|
|
static int __io_getxattr_prep(struct io_kiocb *req,
|
|
const struct io_uring_sqe *sqe)
|
|
{
|
|
struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
|
|
const char __user *name;
|
|
int ret;
|
|
|
|
INIT_DELAYED_FILENAME(&ix->filename);
|
|
ix->ctx.kvalue = NULL;
|
|
name = u64_to_user_ptr(READ_ONCE(sqe->addr));
|
|
ix->ctx.value = u64_to_user_ptr(READ_ONCE(sqe->addr2));
|
|
ix->ctx.size = READ_ONCE(sqe->len);
|
|
ix->ctx.flags = READ_ONCE(sqe->xattr_flags);
|
|
|
|
if (ix->ctx.flags)
|
|
return -EINVAL;
|
|
|
|
ix->ctx.kname = kmalloc_obj(*ix->ctx.kname);
|
|
if (!ix->ctx.kname)
|
|
return -ENOMEM;
|
|
|
|
ret = import_xattr_name(ix->ctx.kname, name);
|
|
if (ret) {
|
|
kfree(ix->ctx.kname);
|
|
return ret;
|
|
}
|
|
|
|
req->flags |= REQ_F_NEED_CLEANUP;
|
|
req->flags |= REQ_F_FORCE_ASYNC;
|
|
return 0;
|
|
}
|
|
|
|
/*
 * fgetxattr works on the already-resolved req->file, so nothing beyond
 * the shared SQE decoding is needed here.
 */
int io_fgetxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return __io_getxattr_prep(req, sqe);
}
|
|
|
|
int io_getxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
|
|
{
|
|
struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
|
|
const char __user *path;
|
|
int ret;
|
|
|
|
if (unlikely(req->flags & REQ_F_FIXED_FILE))
|
|
return -EBADF;
|
|
|
|
ret = __io_getxattr_prep(req, sqe);
|
|
if (ret)
|
|
return ret;
|
|
|
|
path = u64_to_user_ptr(READ_ONCE(sqe->addr3));
|
|
|
|
return delayed_getname(&ix->filename, path);
|
|
}
|
|
|
|
int io_fgetxattr(struct io_kiocb *req, unsigned int issue_flags)
|
|
{
|
|
struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
|
|
int ret;
|
|
|
|
WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
|
|
|
|
ret = file_getxattr(req->file, &ix->ctx);
|
|
io_xattr_finish(req, ret);
|
|
return IOU_COMPLETE;
|
|
}
|
|
|
|
int io_getxattr(struct io_kiocb *req, unsigned int issue_flags)
|
|
{
|
|
struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
|
|
CLASS(filename_complete_delayed, name)(&ix->filename);
|
|
int ret;
|
|
|
|
WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
|
|
|
|
ret = filename_getxattr(AT_FDCWD, name, LOOKUP_FOLLOW, &ix->ctx);
|
|
io_xattr_finish(req, ret);
|
|
return IOU_COMPLETE;
|
|
}
|
|
|
|
static int __io_setxattr_prep(struct io_kiocb *req,
|
|
const struct io_uring_sqe *sqe)
|
|
{
|
|
struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
|
|
const char __user *name;
|
|
int ret;
|
|
|
|
INIT_DELAYED_FILENAME(&ix->filename);
|
|
name = u64_to_user_ptr(READ_ONCE(sqe->addr));
|
|
ix->ctx.cvalue = u64_to_user_ptr(READ_ONCE(sqe->addr2));
|
|
ix->ctx.kvalue = NULL;
|
|
ix->ctx.size = READ_ONCE(sqe->len);
|
|
ix->ctx.flags = READ_ONCE(sqe->xattr_flags);
|
|
|
|
ix->ctx.kname = kmalloc_obj(*ix->ctx.kname);
|
|
if (!ix->ctx.kname)
|
|
return -ENOMEM;
|
|
|
|
ret = setxattr_copy(name, &ix->ctx);
|
|
if (ret) {
|
|
kfree(ix->ctx.kname);
|
|
return ret;
|
|
}
|
|
|
|
req->flags |= REQ_F_NEED_CLEANUP;
|
|
req->flags |= REQ_F_FORCE_ASYNC;
|
|
return 0;
|
|
}
|
|
|
|
int io_setxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
|
|
{
|
|
struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
|
|
const char __user *path;
|
|
int ret;
|
|
|
|
if (unlikely(req->flags & REQ_F_FIXED_FILE))
|
|
return -EBADF;
|
|
|
|
ret = __io_setxattr_prep(req, sqe);
|
|
if (ret)
|
|
return ret;
|
|
|
|
path = u64_to_user_ptr(READ_ONCE(sqe->addr3));
|
|
|
|
return delayed_getname(&ix->filename, path);
|
|
}
|
|
|
|
/*
 * fsetxattr works on the already-resolved req->file, so nothing beyond
 * the shared SQE decoding is needed here.
 */
int io_fsetxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return __io_setxattr_prep(req, sqe);
}
|
|
|
|
int io_fsetxattr(struct io_kiocb *req, unsigned int issue_flags)
|
|
{
|
|
struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
|
|
int ret;
|
|
|
|
WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
|
|
|
|
ret = file_setxattr(req->file, &ix->ctx);
|
|
io_xattr_finish(req, ret);
|
|
return IOU_COMPLETE;
|
|
}
|
|
|
|
int io_setxattr(struct io_kiocb *req, unsigned int issue_flags)
|
|
{
|
|
struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
|
|
CLASS(filename_complete_delayed, name)(&ix->filename);
|
|
int ret;
|
|
|
|
WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
|
|
|
|
ret = filename_setxattr(AT_FDCWD, name, LOOKUP_FOLLOW, &ix->ctx);
|
|
io_xattr_finish(req, ret);
|
|
return IOU_COMPLETE;
|
|
}
|