mirror of
https://github.com/torvalds/linux.git
synced 2026-03-08 01:24:47 +01:00
rseq: slice ext: Ensure rseq feature size differs from original rseq size
Before rseq became extensible, its original size was 32 bytes even though the active rseq area was only 20 bytes. This had the following impact in terms of userspace ecosystem evolution:

* The GNU libc between 2.35 and 2.39 exposes a __rseq_size symbol set to 32, even though the size of the active rseq area is really 20.
* The GNU libc 2.40 changes this __rseq_size to 20, thus making it express the active rseq area.
* Starting from glibc 2.41, __rseq_size corresponds to the AT_RSEQ_FEATURE_SIZE from getauxval(3).

This means that users of __rseq_size can always expect it to correspond to the active rseq area, except for the value 32, for which the active rseq area is 20 bytes. Exposing a 32-byte feature size would make life needlessly painful for userspace. Therefore, add a reserved field at the end of the rseq area to bump the feature size to 33 bytes. This reserved field is expected to be replaced with whatever field comes next, expecting that this field will be larger than 1 byte. The effect of this change is to increase the size from 32 to 64 bytes before we actually have fields using that memory.

Clarify the allocation size and alignment requirements in the struct rseq uapi comment.

Change the value returned by getauxval(AT_RSEQ_ALIGN) to return the value of the active rseq area size rounded up to the next power of 2, which guarantees that the rseq structure will always be aligned on the nearest power of two large enough to contain it, even as it grows. Change the alignment check in the rseq registration accordingly. This will minimize the amount of ABI corner cases we need to document and require userspace to play games with.
The rule stays simple when __rseq_size != 32: #define rseq_field_available(field) (__rseq_size >= offsetofend(struct rseq_abi, field)) Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Link: https://patch.msgid.link/20260220200642.1317826-3-mathieu.desnoyers@efficios.com
This commit is contained in:
parent
26d43a90be
commit
3b68df9781
4 changed files with 38 additions and 6 deletions
|
|
@ -47,6 +47,7 @@
|
||||||
#include <linux/dax.h>
|
#include <linux/dax.h>
|
||||||
#include <linux/uaccess.h>
|
#include <linux/uaccess.h>
|
||||||
#include <uapi/linux/rseq.h>
|
#include <uapi/linux/rseq.h>
|
||||||
|
#include <linux/rseq.h>
|
||||||
#include <asm/param.h>
|
#include <asm/param.h>
|
||||||
#include <asm/page.h>
|
#include <asm/page.h>
|
||||||
|
|
||||||
|
|
@ -286,7 +287,7 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
|
||||||
}
|
}
|
||||||
#ifdef CONFIG_RSEQ
|
#ifdef CONFIG_RSEQ
|
||||||
NEW_AUX_ENT(AT_RSEQ_FEATURE_SIZE, offsetof(struct rseq, end));
|
NEW_AUX_ENT(AT_RSEQ_FEATURE_SIZE, offsetof(struct rseq, end));
|
||||||
NEW_AUX_ENT(AT_RSEQ_ALIGN, __alignof__(struct rseq));
|
NEW_AUX_ENT(AT_RSEQ_ALIGN, rseq_alloc_align());
|
||||||
#endif
|
#endif
|
||||||
#undef NEW_AUX_ENT
|
#undef NEW_AUX_ENT
|
||||||
/* AT_NULL is zero; clear the rest too */
|
/* AT_NULL is zero; clear the rest too */
|
||||||
|
|
|
||||||
|
|
@ -146,6 +146,18 @@ static inline void rseq_fork(struct task_struct *t, u64 clone_flags)
|
||||||
t->rseq = current->rseq;
|
t->rseq = current->rseq;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Value returned by getauxval(AT_RSEQ_ALIGN) and expected by rseq
|
||||||
|
* registration. This is the active rseq area size rounded up to next
|
||||||
|
* power of 2, which guarantees that the rseq structure will always be
|
||||||
|
* aligned on the nearest power of two large enough to contain it, even
|
||||||
|
* as it grows.
|
||||||
|
*/
|
||||||
|
static inline unsigned int rseq_alloc_align(void)
|
||||||
|
{
|
||||||
|
return 1U << get_count_order(offsetof(struct rseq, end));
|
||||||
|
}
|
||||||
|
|
||||||
#else /* CONFIG_RSEQ */
|
#else /* CONFIG_RSEQ */
|
||||||
static inline void rseq_handle_slowpath(struct pt_regs *regs) { }
|
static inline void rseq_handle_slowpath(struct pt_regs *regs) { }
|
||||||
static inline void rseq_signal_deliver(struct ksignal *ksig, struct pt_regs *regs) { }
|
static inline void rseq_signal_deliver(struct ksignal *ksig, struct pt_regs *regs) { }
|
||||||
|
|
|
||||||
|
|
@ -87,10 +87,17 @@ struct rseq_slice_ctrl {
|
||||||
};
|
};
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* struct rseq is aligned on 4 * 8 bytes to ensure it is always
|
* The original size and alignment of the allocation for struct rseq is
|
||||||
* contained within a single cache-line.
|
* 32 bytes.
|
||||||
*
|
*
|
||||||
* A single struct rseq per thread is allowed.
|
* The allocation size needs to be greater or equal to
|
||||||
|
* max(getauxval(AT_RSEQ_FEATURE_SIZE), 32), and the allocation needs to
|
||||||
|
* be aligned on max(getauxval(AT_RSEQ_ALIGN), 32).
|
||||||
|
*
|
||||||
|
* As an alternative, userspace is allowed to use both the original size
|
||||||
|
* and alignment of 32 bytes for backward compatibility.
|
||||||
|
*
|
||||||
|
* A single active struct rseq registration per thread is allowed.
|
||||||
*/
|
*/
|
||||||
struct rseq {
|
struct rseq {
|
||||||
/*
|
/*
|
||||||
|
|
@ -180,10 +187,21 @@ struct rseq {
|
||||||
*/
|
*/
|
||||||
struct rseq_slice_ctrl slice_ctrl;
|
struct rseq_slice_ctrl slice_ctrl;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Before rseq became extensible, its original size was 32 bytes even
|
||||||
|
* though the active rseq area was only 20 bytes.
|
||||||
|
* Exposing a 32 bytes feature size would make life needlessly painful
|
||||||
|
* for userspace. Therefore, add a reserved byte after byte 32
|
||||||
|
* to bump the rseq feature size from 32 to 33.
|
||||||
|
* The next field to be added to the rseq area will be larger
|
||||||
|
* than one byte, and will replace this reserved byte.
|
||||||
|
*/
|
||||||
|
__u8 __reserved;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Flexible array member at end of structure, after last feature field.
|
* Flexible array member at end of structure, after last feature field.
|
||||||
*/
|
*/
|
||||||
char end[];
|
char end[];
|
||||||
} __attribute__((aligned(4 * sizeof(__u64))));
|
} __attribute__((aligned(32)));
|
||||||
|
|
||||||
#endif /* _UAPI_LINUX_RSEQ_H */
|
#endif /* _UAPI_LINUX_RSEQ_H */
|
||||||
|
|
|
||||||
|
|
@ -80,6 +80,7 @@
|
||||||
#include <linux/syscalls.h>
|
#include <linux/syscalls.h>
|
||||||
#include <linux/uaccess.h>
|
#include <linux/uaccess.h>
|
||||||
#include <linux/types.h>
|
#include <linux/types.h>
|
||||||
|
#include <linux/rseq.h>
|
||||||
#include <asm/ptrace.h>
|
#include <asm/ptrace.h>
|
||||||
|
|
||||||
#define CREATE_TRACE_POINTS
|
#define CREATE_TRACE_POINTS
|
||||||
|
|
@ -456,7 +457,7 @@ SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len, int, flags, u32
|
||||||
*/
|
*/
|
||||||
if (rseq_len < ORIG_RSEQ_SIZE ||
|
if (rseq_len < ORIG_RSEQ_SIZE ||
|
||||||
(rseq_len == ORIG_RSEQ_SIZE && !IS_ALIGNED((unsigned long)rseq, ORIG_RSEQ_SIZE)) ||
|
(rseq_len == ORIG_RSEQ_SIZE && !IS_ALIGNED((unsigned long)rseq, ORIG_RSEQ_SIZE)) ||
|
||||||
(rseq_len != ORIG_RSEQ_SIZE && (!IS_ALIGNED((unsigned long)rseq, __alignof__(*rseq)) ||
|
(rseq_len != ORIG_RSEQ_SIZE && (!IS_ALIGNED((unsigned long)rseq, rseq_alloc_align()) ||
|
||||||
rseq_len < offsetof(struct rseq, end))))
|
rseq_len < offsetof(struct rseq, end))))
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
if (!access_ok(rseq, rseq_len))
|
if (!access_ok(rseq, rseq_len))
|
||||||
|
|
|
||||||
Loading…
Add table
Add a link
Reference in a new issue