vdso: Remove remnants of architecture-specific time storage

All users of the time related parts of the vDSO are now using the generic
storage implementation. Remove the therefore unnecessary compatibility
accessor functions and symbols.

Co-developed-by: Nam Cao <namcao@linutronix.de>
Signed-off-by: Nam Cao <namcao@linutronix.de>
Signed-off-by: Thomas Weißschuh <thomas.weissschuh@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/all/20250204-vdso-store-rng-v3-18-13a4669dfc8c@linutronix.de
This commit is contained in:
Thomas Weißschuh 2025-02-04 13:05:50 +01:00 committed by Thomas Gleixner
parent 998a8a2608
commit ac1a42f4e4
8 changed files with 53 additions and 89 deletions

View file

@ -20,31 +20,19 @@ static __always_inline const struct vdso_rng_data *__arch_get_vdso_u_rng_data(vo
}
#endif
#else /* !CONFIG_GENERIC_VDSO_DATA_STORE */
#ifndef __arch_get_k_vdso_data
static __always_inline struct vdso_data *__arch_get_k_vdso_data(void)
{
return NULL;
}
#endif /* __arch_get_k_vdso_data */
#define vdso_k_time_data __arch_get_k_vdso_data()
#define __arch_get_vdso_u_time_data __arch_get_vdso_data
#endif /* CONFIG_GENERIC_VDSO_DATA_STORE */
#ifndef __arch_update_vsyscall
static __always_inline void __arch_update_vsyscall(struct vdso_data *vdata)
static __always_inline void __arch_update_vsyscall(struct vdso_time_data *vdata)
{
}
#endif /* __arch_update_vsyscall */
#ifndef __arch_sync_vdso_data
static __always_inline void __arch_sync_vdso_data(struct vdso_data *vdata)
#ifndef __arch_sync_vdso_time_data
static __always_inline void __arch_sync_vdso_time_data(struct vdso_time_data *vdata)
{
}
#endif /* __arch_sync_vdso_data */
#endif /* __arch_sync_vdso_time_data */
#endif /* !__ASSEMBLY__ */

View file

@ -8,7 +8,6 @@
#include <linux/ns_common.h>
#include <linux/err.h>
#include <linux/time64.h>
#include <vdso/datapage.h>
struct user_namespace;
extern struct user_namespace init_user_ns;
@ -166,6 +165,4 @@ static inline ktime_t timens_ktime_to_host(clockid_t clockid, ktime_t tim)
}
#endif
struct vdso_data *arch_get_vdso_data(void *vvar_page);
#endif /* _LINUX_TIMENS_H */

View file

@ -128,8 +128,6 @@ struct vdso_time_data {
struct arch_vdso_time_data arch_data;
};
#define vdso_data vdso_time_data
/**
* struct vdso_rng_data - vdso RNG state information
* @generation: counter representing the number of RNG reseeds
@ -149,10 +147,7 @@ struct vdso_rng_data {
* With the hidden visibility, the compiler simply generates a PC-relative
* relocation, and this is what we need.
*/
#ifndef CONFIG_GENERIC_VDSO_DATA_STORE
extern struct vdso_time_data _vdso_data[CS_BASES] __attribute__((visibility("hidden")));
extern struct vdso_time_data _timens_data[CS_BASES] __attribute__((visibility("hidden")));
#else
#ifdef CONFIG_GENERIC_VDSO_DATA_STORE
extern struct vdso_time_data vdso_u_time_data[CS_BASES] __attribute__((visibility("hidden")));
extern struct vdso_rng_data vdso_u_rng_data __attribute__((visibility("hidden")));
extern struct vdso_arch_data vdso_u_arch_data __attribute__((visibility("hidden")));
@ -160,17 +155,6 @@ extern struct vdso_arch_data vdso_u_arch_data __attribute__((visibility("hidden"
extern struct vdso_time_data *vdso_k_time_data;
extern struct vdso_rng_data *vdso_k_rng_data;
extern struct vdso_arch_data *vdso_k_arch_data;
#endif
/**
* union vdso_data_store - Generic vDSO data page
*/
union vdso_data_store {
struct vdso_time_data data[CS_BASES];
u8 page[1U << CONFIG_PAGE_SHIFT];
};
#ifdef CONFIG_GENERIC_VDSO_DATA_STORE
#define VDSO_ARCH_DATA_SIZE ALIGN(sizeof(struct vdso_arch_data), PAGE_SIZE)
#define VDSO_ARCH_DATA_PAGES (VDSO_ARCH_DATA_SIZE >> PAGE_SHIFT)
@ -189,7 +173,6 @@ enum vdso_pages {
/*
* The generic vDSO implementation requires that gettimeofday.h
* provides:
* - __arch_get_vdso_data(): to get the vdso datapage.
* - __arch_get_hw_counter(): to get the hw counter based on the
* clock_mode.
* - gettimeofday_fallback(): fallback for gettimeofday.

View file

@ -7,7 +7,7 @@
#include <asm/barrier.h>
#include <vdso/datapage.h>
static __always_inline u32 vdso_read_begin(const struct vdso_data *vd)
static __always_inline u32 vdso_read_begin(const struct vdso_time_data *vd)
{
u32 seq;
@ -18,7 +18,7 @@ static __always_inline u32 vdso_read_begin(const struct vdso_data *vd)
return seq;
}
static __always_inline u32 vdso_read_retry(const struct vdso_data *vd,
static __always_inline u32 vdso_read_retry(const struct vdso_time_data *vd,
u32 start)
{
u32 seq;
@ -28,7 +28,7 @@ static __always_inline u32 vdso_read_retry(const struct vdso_data *vd,
return seq != start;
}
static __always_inline void vdso_write_begin(struct vdso_data *vd)
static __always_inline void vdso_write_begin(struct vdso_time_data *vd)
{
/*
* WRITE_ONCE() is required otherwise the compiler can validly tear
@ -40,7 +40,7 @@ static __always_inline void vdso_write_begin(struct vdso_data *vd)
smp_wmb();
}
static __always_inline void vdso_write_end(struct vdso_data *vd)
static __always_inline void vdso_write_end(struct vdso_time_data *vd)
{
smp_wmb();
/*

View file

@ -165,18 +165,18 @@ static struct timens_offset offset_from_ts(struct timespec64 off)
* HVCLOCK
* VVAR
*
* The check for vdso_data->clock_mode is in the unlikely path of
* The check for vdso_time_data->clock_mode is in the unlikely path of
* the seq begin magic. So for the non-timens case most of the time
* 'seq' is even, so the branch is not taken.
*
* If 'seq' is odd, i.e. a concurrent update is in progress, the extra check
* for vdso_data->clock_mode is a non-issue. The task is spin waiting for the
* for vdso_time_data->clock_mode is a non-issue. The task is spin waiting for the
* update to finish and for 'seq' to become even anyway.
*
* Timens page has vdso_data->clock_mode set to VDSO_CLOCKMODE_TIMENS which
* Timens page has vdso_time_data->clock_mode set to VDSO_CLOCKMODE_TIMENS which
* enforces the time namespace handling path.
*/
static void timens_setup_vdso_data(struct vdso_data *vdata,
static void timens_setup_vdso_data(struct vdso_time_data *vdata,
struct time_namespace *ns)
{
struct timens_offset *offset = vdata->offset;
@ -219,7 +219,7 @@ static DEFINE_MUTEX(offset_lock);
static void timens_set_vvar_page(struct task_struct *task,
struct time_namespace *ns)
{
struct vdso_data *vdata;
struct vdso_time_data *vdata;
unsigned int i;
if (ns == &init_time_ns)
@ -235,7 +235,7 @@ static void timens_set_vvar_page(struct task_struct *task,
goto out;
ns->frozen_offsets = true;
vdata = arch_get_vdso_data(page_address(ns->vvar_page));
vdata = page_address(ns->vvar_page);
for (i = 0; i < CS_BASES; i++)
timens_setup_vdso_data(&vdata[i], ns);

View file

@ -15,8 +15,7 @@
#include "timekeeping_internal.h"
static inline void update_vdso_data(struct vdso_data *vdata,
struct timekeeper *tk)
static inline void update_vdso_time_data(struct vdso_time_data *vdata, struct timekeeper *tk)
{
struct vdso_timestamp *vdso_ts;
u64 nsec, sec;
@ -77,7 +76,7 @@ static inline void update_vdso_data(struct vdso_data *vdata,
void update_vsyscall(struct timekeeper *tk)
{
struct vdso_data *vdata = vdso_k_time_data;
struct vdso_time_data *vdata = vdso_k_time_data;
struct vdso_timestamp *vdso_ts;
s32 clock_mode;
u64 nsec;
@ -117,23 +116,23 @@ void update_vsyscall(struct timekeeper *tk)
* update of the high resolution parts.
*/
if (clock_mode != VDSO_CLOCKMODE_NONE)
update_vdso_data(vdata, tk);
update_vdso_time_data(vdata, tk);
__arch_update_vsyscall(vdata);
vdso_write_end(vdata);
__arch_sync_vdso_data(vdata);
__arch_sync_vdso_time_data(vdata);
}
void update_vsyscall_tz(void)
{
struct vdso_data *vdata = vdso_k_time_data;
struct vdso_time_data *vdata = vdso_k_time_data;
vdata[CS_HRES_COARSE].tz_minuteswest = sys_tz.tz_minuteswest;
vdata[CS_HRES_COARSE].tz_dsttime = sys_tz.tz_dsttime;
__arch_sync_vdso_data(vdata);
__arch_sync_vdso_time_data(vdata);
}
/**
@ -150,7 +149,7 @@ void update_vsyscall_tz(void)
*/
unsigned long vdso_update_begin(void)
{
struct vdso_data *vdata = vdso_k_time_data;
struct vdso_time_data *vdata = vdso_k_time_data;
unsigned long flags = timekeeper_lock_irqsave();
vdso_write_begin(vdata);
@ -167,9 +166,9 @@ unsigned long vdso_update_begin(void)
*/
void vdso_update_end(unsigned long flags)
{
struct vdso_data *vdata = vdso_k_time_data;
struct vdso_time_data *vdata = vdso_k_time_data;
vdso_write_end(vdata);
__arch_sync_vdso_data(vdata);
__arch_sync_vdso_time_data(vdata);
timekeeper_unlock_irqrestore(flags);
}

View file

@ -12,7 +12,10 @@
* The vDSO data page.
*/
#ifdef CONFIG_HAVE_GENERIC_VDSO
static union vdso_data_store vdso_time_data_store __page_aligned_data;
static union {
struct vdso_time_data data[CS_BASES];
u8 page[PAGE_SIZE];
} vdso_time_data_store __page_aligned_data;
struct vdso_time_data *vdso_k_time_data = vdso_time_data_store.data;
static_assert(sizeof(vdso_time_data_store) == PAGE_SIZE);
#endif /* CONFIG_HAVE_GENERIC_VDSO */
@ -123,9 +126,4 @@ int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
return 0;
}
struct vdso_time_data *arch_get_vdso_data(void *vvar_page)
{
return (struct vdso_time_data *)vvar_page;
}
#endif

View file

@ -17,12 +17,12 @@
#endif
#ifdef CONFIG_GENERIC_VDSO_OVERFLOW_PROTECT
static __always_inline bool vdso_delta_ok(const struct vdso_data *vd, u64 delta)
static __always_inline bool vdso_delta_ok(const struct vdso_time_data *vd, u64 delta)
{
return delta < vd->max_cycles;
}
#else
static __always_inline bool vdso_delta_ok(const struct vdso_data *vd, u64 delta)
static __always_inline bool vdso_delta_ok(const struct vdso_time_data *vd, u64 delta)
{
return true;
}
@ -39,7 +39,7 @@ static __always_inline u64 vdso_shift_ns(u64 ns, u32 shift)
* Default implementation which works for all sane clocksources. That
* obviously excludes x86/TSC.
*/
static __always_inline u64 vdso_calc_ns(const struct vdso_data *vd, u64 cycles, u64 base)
static __always_inline u64 vdso_calc_ns(const struct vdso_time_data *vd, u64 cycles, u64 base)
{
u64 delta = (cycles - vd->cycle_last) & VDSO_DELTA_MASK(vd);
@ -58,7 +58,7 @@ static inline bool __arch_vdso_hres_capable(void)
#endif
#ifndef vdso_clocksource_ok
static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
static inline bool vdso_clocksource_ok(const struct vdso_time_data *vd)
{
return vd->clock_mode != VDSO_CLOCKMODE_NONE;
}
@ -79,21 +79,20 @@ const struct vdso_time_data *__arch_get_vdso_u_timens_data(const struct vdso_tim
{
return (void *)vd + PAGE_SIZE;
}
#define __arch_get_timens_vdso_data(vd) __arch_get_vdso_u_timens_data(vd)
#endif /* CONFIG_GENERIC_VDSO_DATA_STORE */
static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
static __always_inline int do_hres_timens(const struct vdso_time_data *vdns, clockid_t clk,
struct __kernel_timespec *ts)
{
const struct timens_offset *offs = &vdns->offset[clk];
const struct vdso_timestamp *vdso_ts;
const struct vdso_data *vd;
const struct vdso_time_data *vd;
u64 cycles, ns;
u32 seq;
s64 sec;
vd = vdns - (clk == CLOCK_MONOTONIC_RAW ? CS_RAW : CS_HRES_COARSE);
vd = __arch_get_timens_vdso_data(vd);
vd = __arch_get_vdso_u_timens_data(vd);
if (clk != CLOCK_MONOTONIC_RAW)
vd = &vd[CS_HRES_COARSE];
else
@ -128,19 +127,19 @@ static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_
}
#else
static __always_inline
const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
const struct vdso_time_data *__arch_get_vdso_u_timens_data(const struct vdso_time_data *vd)
{
return NULL;
}
static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
static __always_inline int do_hres_timens(const struct vdso_time_data *vdns, clockid_t clk,
struct __kernel_timespec *ts)
{
return -EINVAL;
}
#endif
static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
static __always_inline int do_hres(const struct vdso_time_data *vd, clockid_t clk,
struct __kernel_timespec *ts)
{
const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
@ -192,10 +191,10 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
}
#ifdef CONFIG_TIME_NS
static __always_inline int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
static __always_inline int do_coarse_timens(const struct vdso_time_data *vdns, clockid_t clk,
struct __kernel_timespec *ts)
{
const struct vdso_data *vd = __arch_get_timens_vdso_data(vdns);
const struct vdso_time_data *vd = __arch_get_vdso_u_timens_data(vdns);
const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
const struct timens_offset *offs = &vdns->offset[clk];
u64 nsec;
@ -221,14 +220,14 @@ static __always_inline int do_coarse_timens(const struct vdso_data *vdns, clocki
return 0;
}
#else
static __always_inline int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
static __always_inline int do_coarse_timens(const struct vdso_time_data *vdns, clockid_t clk,
struct __kernel_timespec *ts)
{
return -1;
}
#endif
static __always_inline int do_coarse(const struct vdso_data *vd, clockid_t clk,
static __always_inline int do_coarse(const struct vdso_time_data *vd, clockid_t clk,
struct __kernel_timespec *ts)
{
const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
@ -255,7 +254,7 @@ static __always_inline int do_coarse(const struct vdso_data *vd, clockid_t clk,
}
static __always_inline int
__cvdso_clock_gettime_common(const struct vdso_data *vd, clockid_t clock,
__cvdso_clock_gettime_common(const struct vdso_time_data *vd, clockid_t clock,
struct __kernel_timespec *ts)
{
u32 msk;
@ -282,7 +281,7 @@ __cvdso_clock_gettime_common(const struct vdso_data *vd, clockid_t clock,
}
static __maybe_unused int
__cvdso_clock_gettime_data(const struct vdso_data *vd, clockid_t clock,
__cvdso_clock_gettime_data(const struct vdso_time_data *vd, clockid_t clock,
struct __kernel_timespec *ts)
{
int ret = __cvdso_clock_gettime_common(vd, clock, ts);
@ -300,7 +299,7 @@ __cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
#ifdef BUILD_VDSO32
static __maybe_unused int
__cvdso_clock_gettime32_data(const struct vdso_data *vd, clockid_t clock,
__cvdso_clock_gettime32_data(const struct vdso_time_data *vd, clockid_t clock,
struct old_timespec32 *res)
{
struct __kernel_timespec ts;
@ -326,7 +325,7 @@ __cvdso_clock_gettime32(clockid_t clock, struct old_timespec32 *res)
#endif /* BUILD_VDSO32 */
static __maybe_unused int
__cvdso_gettimeofday_data(const struct vdso_data *vd,
__cvdso_gettimeofday_data(const struct vdso_time_data *vd,
struct __kernel_old_timeval *tv, struct timezone *tz)
{
@ -343,7 +342,7 @@ __cvdso_gettimeofday_data(const struct vdso_data *vd,
if (unlikely(tz != NULL)) {
if (IS_ENABLED(CONFIG_TIME_NS) &&
vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
vd = __arch_get_timens_vdso_data(vd);
vd = __arch_get_vdso_u_timens_data(vd);
tz->tz_minuteswest = vd[CS_HRES_COARSE].tz_minuteswest;
tz->tz_dsttime = vd[CS_HRES_COARSE].tz_dsttime;
@ -360,13 +359,13 @@ __cvdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
#ifdef VDSO_HAS_TIME
static __maybe_unused __kernel_old_time_t
__cvdso_time_data(const struct vdso_data *vd, __kernel_old_time_t *time)
__cvdso_time_data(const struct vdso_time_data *vd, __kernel_old_time_t *time)
{
__kernel_old_time_t t;
if (IS_ENABLED(CONFIG_TIME_NS) &&
vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
vd = __arch_get_timens_vdso_data(vd);
vd = __arch_get_vdso_u_timens_data(vd);
t = READ_ONCE(vd[CS_HRES_COARSE].basetime[CLOCK_REALTIME].sec);
@ -384,7 +383,7 @@ static __maybe_unused __kernel_old_time_t __cvdso_time(__kernel_old_time_t *time
#ifdef VDSO_HAS_CLOCK_GETRES
static __maybe_unused
int __cvdso_clock_getres_common(const struct vdso_data *vd, clockid_t clock,
int __cvdso_clock_getres_common(const struct vdso_time_data *vd, clockid_t clock,
struct __kernel_timespec *res)
{
u32 msk;
@ -396,7 +395,7 @@ int __cvdso_clock_getres_common(const struct vdso_data *vd, clockid_t clock,
if (IS_ENABLED(CONFIG_TIME_NS) &&
vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
vd = __arch_get_timens_vdso_data(vd);
vd = __arch_get_vdso_u_timens_data(vd);
/*
* Convert the clockid to a bitmask and use it to check which
@ -425,7 +424,7 @@ int __cvdso_clock_getres_common(const struct vdso_data *vd, clockid_t clock,
}
static __maybe_unused
int __cvdso_clock_getres_data(const struct vdso_data *vd, clockid_t clock,
int __cvdso_clock_getres_data(const struct vdso_time_data *vd, clockid_t clock,
struct __kernel_timespec *res)
{
int ret = __cvdso_clock_getres_common(vd, clock, res);
@ -443,7 +442,7 @@ int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
#ifdef BUILD_VDSO32
static __maybe_unused int
__cvdso_clock_getres_time32_data(const struct vdso_data *vd, clockid_t clock,
__cvdso_clock_getres_time32_data(const struct vdso_time_data *vd, clockid_t clock,
struct old_timespec32 *res)
{
struct __kernel_timespec ts;