From 11fece49e956ef97318177f5af15a84317594244 Mon Sep 17 00:00:00 2001 From: David Carlier Date: Wed, 11 Feb 2026 21:52:13 +0000 Subject: [PATCH 01/10] tools/sched_ext: scx_flatcg: zero-initialize stats counter array The local cnts array in fcg_read_stats() is not initialized before being accumulated into per-CPU stats, which may lead to reading garbage values. Zero it out with memset alongside the existing stats array initialization. Signed-off-by: David Carlier Signed-off-by: Tejun Heo --- tools/sched_ext/scx_flatcg.c | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/sched_ext/scx_flatcg.c b/tools/sched_ext/scx_flatcg.c index cd85eb401179..bea76d060201 100644 --- a/tools/sched_ext/scx_flatcg.c +++ b/tools/sched_ext/scx_flatcg.c @@ -106,6 +106,7 @@ static void fcg_read_stats(struct scx_flatcg *skel, __u64 *stats) __u32 idx; memset(stats, 0, sizeof(stats[0]) * FCG_NR_STATS); + memset(cnts, 0, sizeof(cnts)); for (idx = 0; idx < FCG_NR_STATS; idx++) { int ret, cpu; From 988369d236e46e6bc68d2616fbc008aa6b06a454 Mon Sep 17 00:00:00 2001 From: David Carlier Date: Wed, 11 Feb 2026 21:30:27 +0000 Subject: [PATCH 02/10] tools/sched_ext: scx_central: fix sched_setaffinity() call with the set size The cpu set is dynamically allocated for nr_cpu_ids using CPU_ALLOC(), so the size passed to sched_setaffinity() should be CPU_ALLOC_SIZE() rather than sizeof(cpu_set_t). Valgrind flagged this as accessing unaddressable bytes past the allocation. 
Signed-off-by: David Carlier Signed-off-by: Tejun Heo --- tools/sched_ext/scx_central.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tools/sched_ext/scx_central.c b/tools/sched_ext/scx_central.c index 55931a4cd71c..a6dfd45de70c 100644 --- a/tools/sched_ext/scx_central.c +++ b/tools/sched_ext/scx_central.c @@ -50,6 +50,7 @@ int main(int argc, char **argv) __u64 seq = 0, ecode; __s32 opt; cpu_set_t *cpuset; + size_t cpuset_size; libbpf_set_print(libbpf_print_fn); signal(SIGINT, sigint_handler); @@ -106,9 +107,10 @@ restart: */ cpuset = CPU_ALLOC(skel->rodata->nr_cpu_ids); SCX_BUG_ON(!cpuset, "Failed to allocate cpuset"); - CPU_ZERO_S(CPU_ALLOC_SIZE(skel->rodata->nr_cpu_ids), cpuset); + cpuset_size = CPU_ALLOC_SIZE(skel->rodata->nr_cpu_ids); + CPU_ZERO_S(cpuset_size, cpuset); CPU_SET(skel->rodata->central_cpu, cpuset); - SCX_BUG_ON(sched_setaffinity(0, sizeof(*cpuset), cpuset), + SCX_BUG_ON(sched_setaffinity(0, cpuset_size, cpuset), "Failed to affinitize to central CPU %d (max %d)", skel->rodata->central_cpu, skel->rodata->nr_cpu_ids - 1); CPU_FREE(cpuset); From 048714d9df73a724d3f84b587f1110963e32f9b3 Mon Sep 17 00:00:00 2001 From: David Carlier Date: Thu, 12 Feb 2026 20:35:19 +0000 Subject: [PATCH 03/10] tools/sched_ext: scx_userland: fix restart and stats thread lifecycle bugs Fix three issues in scx_userland's restart path: - exit_req is not reset on restart, causing sched_main_loop() to exit immediately without doing any scheduling work. - stats_printer thread handle is local to spawn_stats_thread(), making it impossible to join from main(). Promote it to file scope. - The stats thread continues reading skel->bss after the skeleton is destroyed on restart, causing a use-after-free. Join the stats thread before destroying the skeleton to ensure it has exited. 
Signed-off-by: David Carlier Signed-off-by: Tejun Heo --- tools/sched_ext/scx_userland.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tools/sched_ext/scx_userland.c b/tools/sched_ext/scx_userland.c index 10b31020f44f..63f89b35d999 100644 --- a/tools/sched_ext/scx_userland.c +++ b/tools/sched_ext/scx_userland.c @@ -54,6 +54,7 @@ static bool verbose; static volatile int exit_req; static int enqueued_fd, dispatched_fd; +static pthread_t stats_printer; static struct scx_userland *skel; static struct bpf_link *ops_link; @@ -319,8 +320,6 @@ static void *run_stats_printer(void *arg) static int spawn_stats_thread(void) { - pthread_t stats_printer; - return pthread_create(&stats_printer, NULL, run_stats_printer, NULL); } @@ -375,6 +374,7 @@ static void pre_bootstrap(int argc, char **argv) static void bootstrap(char *comm) { + exit_req = 0; skel = SCX_OPS_OPEN(userland_ops, scx_userland); skel->rodata->num_possible_cpus = libbpf_num_possible_cpus(); @@ -428,6 +428,7 @@ restart: exit_req = 1; bpf_link__destroy(ops_link); + pthread_join(stats_printer, NULL); ecode = UEI_REPORT(skel, uei); scx_userland__destroy(skel); From 0b82cc331d2e23537670878c62c19ee3f4147a93 Mon Sep 17 00:00:00 2001 From: Ihor Solodrai Date: Fri, 13 Feb 2026 10:21:36 -0800 Subject: [PATCH 04/10] selftests/sched_ext: Fix rt_stall flaky failure MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The rt_stall test measures the runtime ratio between an EXT and an RT task pinned to the same CPU, verifying that the deadline server prevents RT tasks from starving SCHED_EXT tasks. It expects the EXT task to get at least 4% of CPU time. The test is flaky because sched_stress_test() calls sleep(RUN_TIME) immediately after fork(), without waiting for the RT child to complete its setup (set_affinity + set_sched). 
If the RT child experiences scheduling latency before completing setup, that delay eats into the measurement window: the RT child runs for less than RUN_TIME seconds, and the EXT task's measured ratio drops below the 4% threshold. For example, in the failing CI run [1]: EXT=0.140s RT=4.750s total=4.890s (expected ~5.0s) ratio=2.86% < 4% → FAIL The 110ms gap (5.0 - 4.89) corresponds to the RT child's setup time being counted inside the measurement window, during which fewer deadline server ticks fire for the EXT task. Fix by using pipes to synchronize: each child signals the parent after completing its setup, and the parent waits for both signals before starting sleep(RUN_TIME). This ensures the measurement window only counts time when both tasks are fully configured and competing. [1] https://github.com/kernel-patches/bpf/actions/runs/21961895809/job/63442490449 Fixes: be621a76341c ("selftests/sched_ext: Add test for sched_ext dl_server") Assisted-by: claude-opus-4-6-v1 Signed-off-by: Ihor Solodrai Reviewed-by: Andrea Righi Signed-off-by: Tejun Heo --- tools/testing/selftests/sched_ext/rt_stall.c | 49 ++++++++++++++++++++ 1 file changed, 49 insertions(+) diff --git a/tools/testing/selftests/sched_ext/rt_stall.c b/tools/testing/selftests/sched_ext/rt_stall.c index 015200f80f6e..ab772e336f86 100644 --- a/tools/testing/selftests/sched_ext/rt_stall.c +++ b/tools/testing/selftests/sched_ext/rt_stall.c @@ -23,6 +23,30 @@ #define CORE_ID 0 /* CPU to pin tasks to */ #define RUN_TIME 5 /* How long to run the test in seconds */ +/* Signal the parent that setup is complete by writing to a pipe */ +static void signal_ready(int fd) +{ + char c = 1; + + if (write(fd, &c, 1) != 1) { + perror("write to ready pipe"); + exit(EXIT_FAILURE); + } + close(fd); +} + +/* Wait for a child to signal readiness via a pipe */ +static void wait_ready(int fd) +{ + char c; + + if (read(fd, &c, 1) != 1) { + perror("read from ready pipe"); + exit(EXIT_FAILURE); + } + close(fd); +} + /* Simple 
busy-wait function for test tasks */ static void process_func(void) { @@ -122,14 +146,24 @@ static bool sched_stress_test(bool is_ext) float ext_runtime, rt_runtime, actual_ratio; int ext_pid, rt_pid; + int ext_ready[2], rt_ready[2]; ksft_print_header(); ksft_set_plan(1); + if (pipe(ext_ready) || pipe(rt_ready)) { + perror("pipe"); + ksft_exit_fail(); + } + /* Create and set up a EXT task */ ext_pid = fork(); if (ext_pid == 0) { + close(ext_ready[0]); + close(rt_ready[0]); + close(rt_ready[1]); set_affinity(CORE_ID); + signal_ready(ext_ready[1]); process_func(); exit(0); } else if (ext_pid < 0) { @@ -140,8 +174,12 @@ static bool sched_stress_test(bool is_ext) /* Create an RT task */ rt_pid = fork(); if (rt_pid == 0) { + close(ext_ready[0]); + close(ext_ready[1]); + close(rt_ready[0]); set_affinity(CORE_ID); set_sched(SCHED_FIFO, 50); + signal_ready(rt_ready[1]); process_func(); exit(0); } else if (rt_pid < 0) { @@ -149,6 +187,17 @@ static bool sched_stress_test(bool is_ext) ksft_exit_fail(); } + /* + * Wait for both children to complete their setup (affinity and + * scheduling policy) before starting the measurement window. + * This prevents flaky failures caused by the RT child's setup + * time eating into the measurement period. + */ + close(ext_ready[1]); + close(rt_ready[1]); + wait_ready(ext_ready[0]); + wait_ready(rt_ready[0]); + /* Let the processes run for the specified time */ sleep(RUN_TIME); From cabd76bbc03617e55c25f0b06167aa5e0b911a36 Mon Sep 17 00:00:00 2001 From: David Carlier Date: Sat, 14 Feb 2026 07:32:05 +0000 Subject: [PATCH 05/10] tools/sched_ext: scx_flatcg: fix potential stack overflow from VLA in fcg_read_stats fcg_read_stats() had a VLA allocating 21 * nr_cpus * 8 bytes on the stack, risking stack overflow on large CPU counts (nr_cpus can be up to 512). Fix by using a single heap allocation with the correct size, reusing it across all stat indices, and freeing it at the end. 
Signed-off-by: David Carlier Signed-off-by: Tejun Heo --- tools/sched_ext/scx_flatcg.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/tools/sched_ext/scx_flatcg.c b/tools/sched_ext/scx_flatcg.c index bea76d060201..a8446509949e 100644 --- a/tools/sched_ext/scx_flatcg.c +++ b/tools/sched_ext/scx_flatcg.c @@ -102,22 +102,27 @@ static float read_cpu_util(__u64 *last_sum, __u64 *last_idle) static void fcg_read_stats(struct scx_flatcg *skel, __u64 *stats) { - __u64 cnts[FCG_NR_STATS][skel->rodata->nr_cpus]; + __u64 *cnts; __u32 idx; + cnts = calloc(skel->rodata->nr_cpus, sizeof(__u64)); + if (!cnts) + return; + memset(stats, 0, sizeof(stats[0]) * FCG_NR_STATS); - memset(cnts, 0, sizeof(cnts)); for (idx = 0; idx < FCG_NR_STATS; idx++) { int ret, cpu; ret = bpf_map_lookup_elem(bpf_map__fd(skel->maps.stats), - &idx, cnts[idx]); + &idx, cnts); if (ret < 0) continue; for (cpu = 0; cpu < skel->rodata->nr_cpus; cpu++) - stats[idx] += cnts[idx][cpu]; + stats[idx] += cnts[cpu]; } + + free(cnts); } int main(int argc, char **argv) From 07676846132340c7d0f50eca189a24cea4ae3cd8 Mon Sep 17 00:00:00 2001 From: David Carlier Date: Sat, 14 Feb 2026 08:00:33 +0000 Subject: [PATCH 06/10] tools/sched_ext: scx_userland: fix stale data on restart Reset all counters, tasks and vruntime_head list on restart. 
Signed-off-by: David Carlier Signed-off-by: Tejun Heo --- tools/sched_ext/scx_userland.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tools/sched_ext/scx_userland.c b/tools/sched_ext/scx_userland.c index 63f89b35d999..504a80824f5c 100644 --- a/tools/sched_ext/scx_userland.c +++ b/tools/sched_ext/scx_userland.c @@ -375,6 +375,14 @@ static void pre_bootstrap(int argc, char **argv) static void bootstrap(char *comm) { exit_req = 0; + min_vruntime = 0.0; + nr_vruntime_enqueues = 0; + nr_vruntime_dispatches = 0; + nr_vruntime_failed = 0; + nr_curr_enqueued = 0; + memset(tasks, 0, pid_max * sizeof(*tasks)); + LIST_INIT(&vruntime_head); + skel = SCX_OPS_OPEN(userland_ops, scx_userland); skel->rodata->num_possible_cpus = libbpf_num_possible_cpus(); From 55a24d9203979d1cd0196ba1d189860e8b828c2e Mon Sep 17 00:00:00 2001 From: David Carlier Date: Tue, 17 Feb 2026 19:48:00 +0000 Subject: [PATCH 07/10] tools/sched_ext: scx_central: fix CPU_SET and skeleton leak on early exit Use CPU_SET_S() instead of CPU_SET() on the dynamically allocated cpuset to avoid a potential out-of-bounds write when nr_cpu_ids exceeds CPU_SETSIZE. Also destroy the skeleton before returning on invalid central CPU ID to prevent a resource leak. 
Signed-off-by: David Carlier Signed-off-by: Tejun Heo --- tools/sched_ext/scx_central.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/sched_ext/scx_central.c b/tools/sched_ext/scx_central.c index a6dfd45de70c..39f21b00a208 100644 --- a/tools/sched_ext/scx_central.c +++ b/tools/sched_ext/scx_central.c @@ -74,6 +74,7 @@ restart: u32 central_cpu = strtoul(optarg, NULL, 0); if (central_cpu >= skel->rodata->nr_cpu_ids) { fprintf(stderr, "invalid central CPU id value, %u given (%u max)\n", central_cpu, skel->rodata->nr_cpu_ids); + scx_central__destroy(skel); return -1; } skel->rodata->central_cpu = (s32)central_cpu; @@ -109,7 +110,7 @@ restart: SCX_BUG_ON(!cpuset, "Failed to allocate cpuset"); cpuset_size = CPU_ALLOC_SIZE(skel->rodata->nr_cpu_ids); CPU_ZERO_S(cpuset_size, cpuset); - CPU_SET(skel->rodata->central_cpu, cpuset); + CPU_SET_S(skel->rodata->central_cpu, cpuset_size, cpuset); SCX_BUG_ON(sched_setaffinity(0, cpuset_size, cpuset), "Failed to affinitize to central CPU %d (max %d)", skel->rodata->central_cpu, skel->rodata->nr_cpu_ids - 1); From 625be3456b3ced6e2dca6166962c0cf6cc2e546d Mon Sep 17 00:00:00 2001 From: David Carlier Date: Tue, 17 Feb 2026 20:08:36 +0000 Subject: [PATCH 08/10] tools/sched_ext: scx_pair: fix stride == 0 crash on single-CPU systems nr_cpu_ids / 2 produces stride 0 on a single-CPU system, which later causes SCX_BUG_ON(i == j) to fire. Validate stride after option parsing to also catch invalid user-supplied values via -S. 
Signed-off-by: David Carlier Signed-off-by: Tejun Heo --- tools/sched_ext/scx_pair.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tools/sched_ext/scx_pair.c b/tools/sched_ext/scx_pair.c index d3e97faa6334..2a82d8a8a0aa 100644 --- a/tools/sched_ext/scx_pair.c +++ b/tools/sched_ext/scx_pair.c @@ -56,7 +56,6 @@ restart: skel = SCX_OPS_OPEN(pair_ops, scx_pair); skel->rodata->nr_cpu_ids = libbpf_num_possible_cpus(); - assert(skel->rodata->nr_cpu_ids > 0); skel->rodata->pair_batch_dur_ns = __COMPAT_ENUM_OR_ZERO("scx_public_consts", "SCX_SLICE_DFL"); /* pair up the earlier half to the latter by default, override with -s */ @@ -76,6 +75,12 @@ restart: } } + /* Stride must be positive to pair distinct CPUs. */ + if (stride <= 0) { + fprintf(stderr, "Invalid stride %d, must be positive\n", stride); + scx_pair__destroy(skel); + return -1; + } bpf_map__set_max_entries(skel->maps.pair_ctx, skel->rodata->nr_cpu_ids / 2); /* Resize arrays so their element count is equal to cpu count. */ From f892f9f99464bead942a75d2b00dda6be07de97f Mon Sep 17 00:00:00 2001 From: David Carlier Date: Wed, 18 Feb 2026 19:22:23 +0000 Subject: [PATCH 09/10] tools/sched_ext: scx_userland: fix data races on shared counters The stats thread reads nr_vruntime_enqueues, nr_vruntime_dispatches, nr_vruntime_failed, and nr_curr_enqueued concurrently with the main thread writing them, with no synchronization. Use __atomic builtins with relaxed ordering for all accesses to these counters to eliminate the data races. Only display accuracy is affected, not scheduling correctness. 
Signed-off-by: David Carlier Signed-off-by: Tejun Heo --- tools/sched_ext/scx_userland.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/tools/sched_ext/scx_userland.c b/tools/sched_ext/scx_userland.c index 504a80824f5c..3f2aba658b4a 100644 --- a/tools/sched_ext/scx_userland.c +++ b/tools/sched_ext/scx_userland.c @@ -157,9 +157,9 @@ static int dispatch_task(__s32 pid) err = bpf_map_update_elem(dispatched_fd, NULL, &pid, 0); if (err) { - nr_vruntime_failed++; + __atomic_add_fetch(&nr_vruntime_failed, 1, __ATOMIC_RELAXED); } else { - nr_vruntime_dispatches++; + __atomic_add_fetch(&nr_vruntime_dispatches, 1, __ATOMIC_RELAXED); } return err; @@ -202,8 +202,8 @@ static int vruntime_enqueue(const struct scx_userland_enqueued_task *bpf_task) return ENOENT; update_enqueued(curr, bpf_task); - nr_vruntime_enqueues++; - nr_curr_enqueued++; + __atomic_add_fetch(&nr_vruntime_enqueues, 1, __ATOMIC_RELAXED); + __atomic_add_fetch(&nr_curr_enqueued, 1, __ATOMIC_RELAXED); /* * Enqueue the task in a vruntime-sorted list. 
A more optimal data @@ -279,9 +279,9 @@ static void dispatch_batch(void) LIST_INSERT_HEAD(&vruntime_head, task, entries); break; } - nr_curr_enqueued--; + __atomic_sub_fetch(&nr_curr_enqueued, 1, __ATOMIC_RELAXED); } - skel->bss->nr_scheduled = nr_curr_enqueued; + skel->bss->nr_scheduled = __atomic_load_n(&nr_curr_enqueued, __ATOMIC_RELAXED); } static void *run_stats_printer(void *arg) @@ -306,9 +306,9 @@ static void *run_stats_printer(void *arg) printf("|-----------------------|\n"); printf("| VRUNTIME / USER |\n"); printf("|-----------------------|\n"); - printf("| enq: %10llu |\n", nr_vruntime_enqueues); - printf("| disp: %10llu |\n", nr_vruntime_dispatches); - printf("| failed: %10llu |\n", nr_vruntime_failed); + printf("| enq: %10llu |\n", __atomic_load_n(&nr_vruntime_enqueues, __ATOMIC_RELAXED)); + printf("| disp: %10llu |\n", __atomic_load_n(&nr_vruntime_dispatches, __ATOMIC_RELAXED)); + printf("| failed: %10llu |\n", __atomic_load_n(&nr_vruntime_failed, __ATOMIC_RELAXED)); printf("o-----------------------o\n"); printf("\n\n"); fflush(stdout); @@ -376,10 +376,10 @@ static void bootstrap(char *comm) { exit_req = 0; min_vruntime = 0.0; - nr_vruntime_enqueues = 0; - nr_vruntime_dispatches = 0; - nr_vruntime_failed = 0; - nr_curr_enqueued = 0; + __atomic_store_n(&nr_vruntime_enqueues, 0, __ATOMIC_RELAXED); + __atomic_store_n(&nr_vruntime_dispatches, 0, __ATOMIC_RELAXED); + __atomic_store_n(&nr_vruntime_failed, 0, __ATOMIC_RELAXED); + __atomic_store_n(&nr_curr_enqueued, 0, __ATOMIC_RELAXED); memset(tasks, 0, pid_max * sizeof(*tasks)); LIST_INIT(&vruntime_head); From 640c9dc72f21f325700a4b0f839ad568ff21c697 Mon Sep 17 00:00:00 2001 From: David Carlier Date: Wed, 18 Feb 2026 19:22:35 +0000 Subject: [PATCH 10/10] tools/sched_ext: fix getopt not re-parsed on restart After goto restart, optind retains its advanced position from the previous getopt loop, causing getopt() to immediately return -1. This silently drops all command-line options on the restarted skeleton. 
Reset optind to 1 at the restart label so options are re-parsed. Affected schedulers: scx_simple, scx_central, scx_flatcg, scx_pair, scx_sdt, scx_cpu0. Signed-off-by: David Carlier Signed-off-by: Tejun Heo --- tools/sched_ext/scx_central.c | 1 + tools/sched_ext/scx_cpu0.c | 1 + tools/sched_ext/scx_flatcg.c | 1 + tools/sched_ext/scx_pair.c | 1 + tools/sched_ext/scx_sdt.c | 1 + tools/sched_ext/scx_simple.c | 1 + 6 files changed, 6 insertions(+) diff --git a/tools/sched_ext/scx_central.c b/tools/sched_ext/scx_central.c index 39f21b00a208..2a805f1d6c8f 100644 --- a/tools/sched_ext/scx_central.c +++ b/tools/sched_ext/scx_central.c @@ -56,6 +56,7 @@ int main(int argc, char **argv) signal(SIGINT, sigint_handler); signal(SIGTERM, sigint_handler); restart: + optind = 1; skel = SCX_OPS_OPEN(central_ops, scx_central); skel->rodata->central_cpu = 0; diff --git a/tools/sched_ext/scx_cpu0.c b/tools/sched_ext/scx_cpu0.c index 1e4fa4ab8da9..a6fba9978b9c 100644 --- a/tools/sched_ext/scx_cpu0.c +++ b/tools/sched_ext/scx_cpu0.c @@ -69,6 +69,7 @@ int main(int argc, char **argv) signal(SIGINT, sigint_handler); signal(SIGTERM, sigint_handler); restart: + optind = 1; skel = SCX_OPS_OPEN(cpu0_ops, scx_cpu0); skel->rodata->nr_cpus = libbpf_num_possible_cpus(); diff --git a/tools/sched_ext/scx_flatcg.c b/tools/sched_ext/scx_flatcg.c index a8446509949e..d865c381589b 100644 --- a/tools/sched_ext/scx_flatcg.c +++ b/tools/sched_ext/scx_flatcg.c @@ -141,6 +141,7 @@ int main(int argc, char **argv) signal(SIGINT, sigint_handler); signal(SIGTERM, sigint_handler); restart: + optind = 1; skel = SCX_OPS_OPEN(flatcg_ops, scx_flatcg); skel->rodata->nr_cpus = libbpf_num_possible_cpus(); diff --git a/tools/sched_ext/scx_pair.c b/tools/sched_ext/scx_pair.c index 2a82d8a8a0aa..2e509391f3da 100644 --- a/tools/sched_ext/scx_pair.c +++ b/tools/sched_ext/scx_pair.c @@ -53,6 +53,7 @@ int main(int argc, char **argv) signal(SIGINT, sigint_handler); signal(SIGTERM, sigint_handler); restart: + optind = 1; skel = 
SCX_OPS_OPEN(pair_ops, scx_pair); skel->rodata->nr_cpu_ids = libbpf_num_possible_cpus(); diff --git a/tools/sched_ext/scx_sdt.c b/tools/sched_ext/scx_sdt.c index b0363363476d..d8ca9aa316a5 100644 --- a/tools/sched_ext/scx_sdt.c +++ b/tools/sched_ext/scx_sdt.c @@ -51,6 +51,7 @@ int main(int argc, char **argv) signal(SIGINT, sigint_handler); signal(SIGTERM, sigint_handler); restart: + optind = 1; skel = SCX_OPS_OPEN(sdt_ops, scx_sdt); while ((opt = getopt(argc, argv, "fvh")) != -1) { diff --git a/tools/sched_ext/scx_simple.c b/tools/sched_ext/scx_simple.c index 06d4b13bf76b..c3b48611712b 100644 --- a/tools/sched_ext/scx_simple.c +++ b/tools/sched_ext/scx_simple.c @@ -71,6 +71,7 @@ int main(int argc, char **argv) signal(SIGINT, sigint_handler); signal(SIGTERM, sigint_handler); restart: + optind = 1; skel = SCX_OPS_OPEN(simple_ops, scx_simple); while ((opt = getopt(argc, argv, "fvh")) != -1) {