diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 4700a183e6..f4e244ecf1 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -3,8 +3,7 @@ on: pull_request: push: branches: - - master - - llvm19 + - 0.14.x concurrency: # Cancels pending runs when a PR gets updated. group: ${{ github.head_ref || github.run_id }}-${{ github.actor }} diff --git a/CMakeLists.txt b/CMakeLists.txt index 700b724b00..db0febee72 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -39,7 +39,7 @@ project(zig set(ZIG_VERSION_MAJOR 0) set(ZIG_VERSION_MINOR 14) -set(ZIG_VERSION_PATCH 0) +set(ZIG_VERSION_PATCH 1) set(ZIG_VERSION "" CACHE STRING "Override Zig version string. Default is to find out with git.") if("${ZIG_VERSION}" STREQUAL "") @@ -90,6 +90,7 @@ set(ZIG_STATIC_LLVM ${ZIG_STATIC} CACHE BOOL "Prefer linking against static LLVM set(ZIG_STATIC_ZLIB ${ZIG_STATIC} CACHE BOOL "Prefer linking against static zlib") set(ZIG_STATIC_ZSTD ${ZIG_STATIC} CACHE BOOL "Prefer linking against static zstd") set(ZIG_STATIC_CURSES OFF CACHE BOOL "Enable static linking against curses") +set(ZIG_STATIC_LIBXML2 OFF CACHE BOOL "Enable static linking against libxml2") if (ZIG_SHARED_LLVM AND ZIG_STATIC_LLVM) message(SEND_ERROR "-DZIG_SHARED_LLVM and -DZIG_STATIC_LLVM cannot both be enabled simultaneously") @@ -167,6 +168,12 @@ if(ZIG_STATIC_CURSES) list(APPEND LLVM_LIBRARIES "${CURSES}") endif() +if(ZIG_STATIC_LIBXML2) + list(REMOVE_ITEM LLVM_LIBRARIES "-lxml2") + find_library(LIBXML2 NAMES libxml2.a NAMES_PER_DIR) + list(APPEND LLVM_LIBRARIES "${LIBXML2}") +endif() + find_package(Threads) set(ZIG_CONFIG_H_OUT "${PROJECT_BINARY_DIR}/config.h") diff --git a/build.zig b/build.zig index 2d196041b3..8967906d7b 100644 --- a/build.zig +++ b/build.zig @@ -11,7 +11,7 @@ const assert = std.debug.assert; const DevEnv = @import("src/dev.zig").Env; const ValueInterpretMode = enum { direct, by_name }; -const zig_version: std.SemanticVersion = .{ .major = 0, .minor = 14, .patch = 
0 }; +const zig_version: std.SemanticVersion = .{ .major = 0, .minor = 14, .patch = 1 }; const stack_size = 46 * 1024 * 1024; pub fn build(b: *std.Build) !void { @@ -214,11 +214,6 @@ pub fn build(b: *std.Build) !void { test_step.dependOn(&exe.step); - if (target.result.os.tag == .windows and target.result.abi == .gnu) { - // LTO is currently broken on mingw, this can be removed when it's fixed. - exe.want_lto = false; - } - const use_llvm = b.option(bool, "use-llvm", "Use the llvm backend"); exe.use_llvm = use_llvm; exe.use_lld = use_llvm; @@ -257,13 +252,10 @@ pub fn build(b: *std.Build) !void { var code: u8 = undefined; const git_describe_untrimmed = b.runAllowFail(&[_][]const u8{ "git", - "-C", - b.build_root.path orelse ".", - "describe", - "--match", - "*.*.*", - "--tags", - "--abbrev=9", + "-C", b.build_root.path orelse ".", // affects the --git-dir argument + "--git-dir", ".git", // affected by the -C argument + "describe", "--match", "*.*.*", // + "--tags", "--abbrev=9", }, &code, .Ignore) catch { break :v version_string; }; @@ -334,7 +326,12 @@ pub fn build(b: *std.Build) !void { try addCmakeCfgOptionsToExe(b, cfg, exe, use_zig_libcxx); } else { // Here we are -Denable-llvm but no cmake integration. - try addStaticLlvmOptionsToModule(exe.root_module); + try addStaticLlvmOptionsToModule(exe.root_module, .{ + .llvm_has_m68k = llvm_has_m68k, + .llvm_has_csky = llvm_has_csky, + .llvm_has_arc = llvm_has_arc, + .llvm_has_xtensa = llvm_has_xtensa, + }); } if (target.result.os.tag == .windows) { // LLVM depends on networking as of version 18. @@ -362,11 +359,7 @@ pub fn build(b: *std.Build) !void { &[_][]const u8{ tracy_path, "public", "TracyClient.cpp" }, ); - // On mingw, we need to opt into windows 7+ to get some features required by tracy. 
- const tracy_c_flags: []const []const u8 = if (target.result.os.tag == .windows and target.result.abi == .gnu) - &[_][]const u8{ "-DTRACY_ENABLE=1", "-fno-sanitize=undefined", "-D_WIN32_WINNT=0x601" } - else - &[_][]const u8{ "-DTRACY_ENABLE=1", "-fno-sanitize=undefined" }; + const tracy_c_flags: []const []const u8 = &.{ "-DTRACY_ENABLE=1", "-fno-sanitize=undefined" }; exe.root_module.addIncludePath(.{ .cwd_relative = tracy_path }); exe.root_module.addCSourceFile(.{ .file = .{ .cwd_relative = client_cpp }, .flags = tracy_c_flags }); @@ -513,8 +506,8 @@ pub fn build(b: *std.Build) !void { .skip_non_native = skip_non_native, .skip_libc = skip_libc, .use_llvm = use_llvm, - // I observed a value of 5136793600 on the M2 CI. - .max_rss = 5368709120, + // I observed a value of 5605064704 on the M2 CI. + .max_rss = 6165571174, })); const unit_tests_step = b.step("test-unit", "Run the compiler source unit tests"); @@ -821,7 +814,12 @@ fn addCmakeCfgOptionsToExe( } } -fn addStaticLlvmOptionsToModule(mod: *std.Build.Module) !void { +fn addStaticLlvmOptionsToModule(mod: *std.Build.Module, options: struct { + llvm_has_m68k: bool, + llvm_has_csky: bool, + llvm_has_arc: bool, + llvm_has_xtensa: bool, +}) !void { // Adds the Zig C++ sources which both stage1 and stage2 need. 
// // We need this because otherwise zig_clang_cc1_main.cpp ends up pulling @@ -845,6 +843,22 @@ fn addStaticLlvmOptionsToModule(mod: *std.Build.Module) !void { mod.linkSystemLibrary(lib_name, .{}); } + if (options.llvm_has_m68k) for (llvm_libs_m68k) |lib_name| { + mod.linkSystemLibrary(lib_name, .{}); + }; + + if (options.llvm_has_csky) for (llvm_libs_csky) |lib_name| { + mod.linkSystemLibrary(lib_name, .{}); + }; + + if (options.llvm_has_arc) for (llvm_libs_arc) |lib_name| { + mod.linkSystemLibrary(lib_name, .{}); + }; + + if (options.llvm_has_xtensa) for (llvm_libs_xtensa) |lib_name| { + mod.linkSystemLibrary(lib_name, .{}); + }; + mod.linkSystemLibrary("z", .{}); mod.linkSystemLibrary("zstd", .{}); @@ -1333,6 +1347,33 @@ const llvm_libs = [_][]const u8{ "LLVMSupport", "LLVMDemangle", }; +const llvm_libs_m68k = [_][]const u8{ + "LLVMM68kDisassembler", + "LLVMM68kAsmParser", + "LLVMM68kCodeGen", + "LLVMM68kDesc", + "LLVMM68kInfo", +}; +const llvm_libs_csky = [_][]const u8{ + "LLVMCSKYDisassembler", + "LLVMCSKYAsmParser", + "LLVMCSKYCodeGen", + "LLVMCSKYDesc", + "LLVMCSKYInfo", +}; +const llvm_libs_arc = [_][]const u8{ + "LLVMARCDisassembler", + "LLVMARCCodeGen", + "LLVMARCDesc", + "LLVMARCInfo", +}; +const llvm_libs_xtensa = [_][]const u8{ + "LLVMXtensaDisassembler", + "LLVMXtensaAsmParser", + "LLVMXtensaCodeGen", + "LLVMXtensaDesc", + "LLVMXtensaInfo", +}; fn generateLangRef(b: *std.Build) std.Build.LazyPath { const doctest_exe = b.addExecutable(.{ diff --git a/ci/aarch64-linux-debug.sh b/ci/aarch64-linux-debug.sh old mode 100644 new mode 100755 index 220aaf12d9..143dd7c1c9 --- a/ci/aarch64-linux-debug.sh +++ b/ci/aarch64-linux-debug.sh @@ -48,11 +48,6 @@ unset CXX ninja install -# simultaneously test building self-hosted without LLVM and with 32-bit arm -stage3-debug/bin/zig build \ - -Dtarget=arm-linux-musleabihf \ - -Dno-lib - # No -fqemu and -fwasmtime here as they're covered by the x86_64-linux scripts. 
stage3-debug/bin/zig build test docs \ --maxrss 24696061952 \ @@ -62,34 +57,12 @@ stage3-debug/bin/zig build test docs \ --zig-lib-dir "$PWD/../lib" \ -Denable-superhtml -# Ensure that updating the wasm binary from this commit will result in a viable build. -stage3-debug/bin/zig build update-zig1 - -mkdir ../build-new -cd ../build-new - -export CC="$ZIG cc -target $TARGET -mcpu=$MCPU" -export CXX="$ZIG c++ -target $TARGET -mcpu=$MCPU" - -cmake .. \ - -DCMAKE_PREFIX_PATH="$PREFIX" \ - -DCMAKE_BUILD_TYPE=Debug \ - -DZIG_TARGET_TRIPLE="$TARGET" \ - -DZIG_TARGET_MCPU="$MCPU" \ - -DZIG_STATIC=ON \ - -DZIG_NO_LIB=ON \ - -GNinja - -unset CC -unset CXX - -ninja install - -stage3/bin/zig test ../test/behavior.zig -stage3/bin/zig build -p stage4 \ - -Dstatic-llvm \ - -Dtarget=native-native-musl \ +stage3-debug/bin/zig build \ + --prefix stage4-debug \ + -Denable-llvm \ -Dno-lib \ - --search-prefix "$PREFIX" \ - --zig-lib-dir "$PWD/../lib" -stage4/bin/zig test ../test/behavior.zig + -Dtarget=$TARGET \ + -Duse-zig-libcxx \ + -Dversion-string="$(stage3-debug/bin/zig version)" + +stage4-debug/bin/zig test ../test/behavior.zig diff --git a/ci/aarch64-linux-release.sh b/ci/aarch64-linux-release.sh old mode 100644 new mode 100755 index 69eed679e3..f6602850a9 --- a/ci/aarch64-linux-release.sh +++ b/ci/aarch64-linux-release.sh @@ -48,11 +48,6 @@ unset CXX ninja install -# simultaneously test building self-hosted without LLVM and with 32-bit arm -stage3-release/bin/zig build \ - -Dtarget=arm-linux-musleabihf \ - -Dno-lib - # No -fqemu and -fwasmtime here as they're covered by the x86_64-linux scripts. stage3-release/bin/zig build test docs \ --maxrss 24696061952 \ @@ -77,35 +72,3 @@ stage3-release/bin/zig build \ echo "If the following command fails, it means nondeterminism has been" echo "introduced, making stage3 and stage4 no longer byte-for-byte identical." 
diff stage3-release/bin/zig stage4-release/bin/zig - -# Ensure that updating the wasm binary from this commit will result in a viable build. -stage3-release/bin/zig build update-zig1 - -mkdir ../build-new -cd ../build-new - -export CC="$ZIG cc -target $TARGET -mcpu=$MCPU" -export CXX="$ZIG c++ -target $TARGET -mcpu=$MCPU" - -cmake .. \ - -DCMAKE_PREFIX_PATH="$PREFIX" \ - -DCMAKE_BUILD_TYPE=Release \ - -DZIG_TARGET_TRIPLE="$TARGET" \ - -DZIG_TARGET_MCPU="$MCPU" \ - -DZIG_STATIC=ON \ - -DZIG_NO_LIB=ON \ - -GNinja - -unset CC -unset CXX - -ninja install - -stage3/bin/zig test ../test/behavior.zig -stage3/bin/zig build -p stage4 \ - -Dstatic-llvm \ - -Dtarget=native-native-musl \ - -Dno-lib \ - --search-prefix "$PREFIX" \ - --zig-lib-dir "$PWD/../lib" -stage4/bin/zig test ../test/behavior.zig diff --git a/cmake/Findlld.cmake b/cmake/Findlld.cmake index 7c86aaed07..e71838d03d 100644 --- a/cmake/Findlld.cmake +++ b/cmake/Findlld.cmake @@ -12,8 +12,9 @@ find_path(LLD_INCLUDE_DIRS NAMES lld/Common/Driver.h /usr/lib/llvm-19/include /usr/local/llvm190/include /usr/local/llvm19/include - /usr/local/opt/llvm@19/include - /opt/homebrew/opt/llvm@19/include + /usr/local/opt/lld@19/include + /opt/homebrew/opt/lld@19/include + /home/linuxbrew/.linuxbrew/opt/lld@19/include /mingw64/include) find_library(LLD_LIBRARY NAMES lld-19.0 lld190 lld NAMES_PER_DIR @@ -22,8 +23,9 @@ find_library(LLD_LIBRARY NAMES lld-19.0 lld190 lld NAMES_PER_DIR /usr/lib/llvm-19/lib /usr/local/llvm190/lib /usr/local/llvm19/lib - /usr/local/opt/llvm@19/lib - /opt/homebrew/opt/llvm@19/lib + /usr/local/opt/lld@19/lib + /opt/homebrew/opt/lld@19/lib + /home/linuxbrew/.linuxbrew/opt/lld@19/lib ) if(EXISTS ${LLD_LIBRARY}) set(LLD_LIBRARIES ${LLD_LIBRARY}) @@ -37,8 +39,9 @@ else() /usr/lib/llvm-19/lib /usr/local/llvm190/lib /usr/local/llvm19/lib - /usr/local/opt/llvm@19/lib - /opt/homebrew/opt/llvm@19/lib + /usr/local/opt/lld@19/lib + /opt/homebrew/opt/lld@19/lib + /home/linuxbrew/.linuxbrew/opt/lld@19/lib 
/mingw64/lib /c/msys64/mingw64/lib c:/msys64/mingw64/lib) diff --git a/doc/langref.html.in b/doc/langref.html.in index 5ab601ab99..b30c01dfe6 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -316,7 +316,7 @@ 0.11.0 | 0.12.0 | 0.13.0 | - 0.14.0 | + 0.14.1 | master @@ -3679,22 +3679,22 @@ void do_a_thing(struct Foo *foo) { {#syntax#}.{x}{#endsyntax#} {#syntax#}T{#endsyntax#} - {#syntax#}x{#endsyntax#} is a {#syntax#}std.meta.FieldType(T, .@"0"){#endsyntax#} + {#syntax#}x{#endsyntax#} is a {#syntax#}@FieldType(T, "0"){#endsyntax#} {#syntax#}.{ .a = x }{#endsyntax#} {#syntax#}T{#endsyntax#} - {#syntax#}x{#endsyntax#} is a {#syntax#}std.meta.FieldType(T, .a){#endsyntax#} + {#syntax#}x{#endsyntax#} is a {#syntax#}@FieldType(T, "a"){#endsyntax#} {#syntax#}T{x}{#endsyntax#} - - {#syntax#}x{#endsyntax#} is a {#syntax#}std.meta.FieldType(T, .@"0"){#endsyntax#} + {#syntax#}x{#endsyntax#} is a {#syntax#}@FieldType(T, "0"){#endsyntax#} {#syntax#}T{ .a = x }{#endsyntax#} - - {#syntax#}x{#endsyntax#} is a {#syntax#}std.meta.FieldType(T, .a){#endsyntax#} + {#syntax#}x{#endsyntax#} is a {#syntax#}@FieldType(T, "a"){#endsyntax#} {#syntax#}@Type(x){#endsyntax#} diff --git a/doc/langref/test_multidimensional_arrays.zig b/doc/langref/test_multidimensional_arrays.zig index 0a3535ca6e..c88e8cb16a 100644 --- a/doc/langref/test_multidimensional_arrays.zig +++ b/doc/langref/test_multidimensional_arrays.zig @@ -1,18 +1,22 @@ const std = @import("std"); const expect = std.testing.expect; +const expectEqual = std.testing.expectEqual; -const mat4x4 = [4][4]f32{ - [_]f32{ 1.0, 0.0, 0.0, 0.0 }, - [_]f32{ 0.0, 1.0, 0.0, 1.0 }, - [_]f32{ 0.0, 0.0, 1.0, 0.0 }, - [_]f32{ 0.0, 0.0, 0.0, 1.0 }, +const mat4x5 = [4][5]f32{ + [_]f32{ 1.0, 0.0, 0.0, 0.0, 0.0 }, + [_]f32{ 0.0, 1.0, 0.0, 1.0, 0.0 }, + [_]f32{ 0.0, 0.0, 1.0, 0.0, 0.0 }, + [_]f32{ 0.0, 0.0, 0.0, 1.0, 9.9 }, }; test "multidimensional arrays" { + // mat4x5 itself is a one-dimensional array of arrays. 
+ try expectEqual(mat4x5[1], [_]f32{ 0.0, 1.0, 0.0, 1.0, 0.0 }); + // Access the 2D array by indexing the outer array, and then the inner array. - try expect(mat4x4[1][1] == 1.0); + try expect(mat4x5[3][4] == 9.9); // Here we iterate with for loops. - for (mat4x4, 0..) |row, row_index| { + for (mat4x5, 0..) |row, row_index| { for (row, 0..) |cell, column_index| { if (row_index == column_index) { try expect(cell == 1.0); @@ -20,8 +24,8 @@ test "multidimensional arrays" { } } - // initialize a multidimensional array to zeros - const all_zero: [4][4]f32 = .{.{0} ** 4} ** 4; + // Initialize a multidimensional array to zeros. + const all_zero: [4][5]f32 = .{.{0} ** 5} ** 4; try expect(all_zero[0][0] == 0); } diff --git a/lib/compiler/aro_translate_c.zig b/lib/compiler/aro_translate_c.zig index ffb133dad5..9485bc79d2 100644 --- a/lib/compiler/aro_translate_c.zig +++ b/lib/compiler/aro_translate_c.zig @@ -1502,19 +1502,29 @@ pub fn ScopeExtra(comptime ScopeExtraContext: type, comptime ScopeExtraType: typ return scope.base.parent.?.getAlias(name); } - /// Finds the (potentially) mangled struct name for a locally scoped extern variable given the original declaration name. + /// Finds the (potentially) mangled struct name for a locally scoped extern variable or function given the original declaration name. 
/// /// Block scoped extern declarations translate to: /// const MangledStructName = struct {extern [qualifiers] original_extern_variable_name: [type]}; /// This finds MangledStructName given original_extern_variable_name for referencing correctly in transDeclRefExpr() pub fn getLocalExternAlias(scope: *Block, name: []const u8) ?[]const u8 { for (scope.statements.items) |node| { - if (node.tag() == .extern_local_var) { - const parent_node = node.castTag(.extern_local_var).?; - const init_node = parent_node.data.init.castTag(.var_decl).?; - if (std.mem.eql(u8, init_node.data.name, name)) { - return parent_node.data.name; - } + switch (node.tag()) { + .extern_local_var => { + const parent_node = node.castTag(.extern_local_var).?; + const init_node = parent_node.data.init.castTag(.var_decl).?; + if (std.mem.eql(u8, init_node.data.name, name)) { + return parent_node.data.name; + } + }, + .extern_local_fn => { + const parent_node = node.castTag(.extern_local_fn).?; + const init_node = parent_node.data.init.castTag(.func).?; + if (std.mem.eql(u8, init_node.data.name.?, name)) { + return parent_node.data.name; + } + }, + else => {}, } } return null; @@ -1620,7 +1630,11 @@ pub fn ScopeExtra(comptime ScopeExtraContext: type, comptime ScopeExtraType: typ .root => null, .block => ret: { const block = @as(*Block, @fieldParentPtr("base", scope)); - break :ret block.getLocalExternAlias(name); + const alias_name = block.getLocalExternAlias(name); + if (alias_name) |_alias_name| { + break :ret _alias_name; + } + break :ret scope.parent.?.getLocalExternAlias(name); }, .loop, .do_loop, .condition => scope.parent.?.getLocalExternAlias(name), }; diff --git a/lib/compiler/aro_translate_c/ast.zig b/lib/compiler/aro_translate_c/ast.zig index b92db7862f..b3b0e98a35 100644 --- a/lib/compiler/aro_translate_c/ast.zig +++ b/lib/compiler/aro_translate_c/ast.zig @@ -57,6 +57,8 @@ pub const Node = extern union { static_local_var, /// const ExternLocal_name = struct { init } extern_local_var, + 
/// const ExternLocal_name = struct { init } + extern_local_fn, /// var name = init.* mut_str, func, @@ -367,7 +369,13 @@ pub const Node = extern union { .c_pointer, .single_pointer => Payload.Pointer, .array_type, .null_sentinel_array_type => Payload.Array, .arg_redecl, .alias, .fail_decl => Payload.ArgRedecl, - .var_simple, .pub_var_simple, .static_local_var, .extern_local_var, .mut_str => Payload.SimpleVarDecl, + .var_simple, + .pub_var_simple, + .static_local_var, + .extern_local_var, + .extern_local_fn, + .mut_str, + => Payload.SimpleVarDecl, .enum_constant => Payload.EnumConstant, .array_filler => Payload.ArrayFiller, .pub_inline_fn => Payload.PubInlineFn, @@ -394,7 +402,7 @@ pub const Node = extern union { } pub fn Data(comptime t: Tag) type { - return std.meta.fieldInfo(t.Type(), .data).type; + return @FieldType(t.Type(), "data"); } }; @@ -1285,8 +1293,11 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex { }, }); }, - .extern_local_var => { - const payload = node.castTag(.extern_local_var).?.data; + .extern_local_var, .extern_local_fn => { + const payload = if (node.tag() == .extern_local_var) + node.castTag(.extern_local_var).?.data + else + node.castTag(.extern_local_fn).?.data; const const_tok = try c.addToken(.keyword_const, "const"); _ = try c.addIdentifier(payload.name); @@ -2338,7 +2349,7 @@ fn renderNullSentinelArrayType(c: *Context, len: usize, elem_type: Node) !NodeIn fn addSemicolonIfNeeded(c: *Context, node: Node) !void { switch (node.tag()) { .warning => unreachable, - .var_decl, .var_simple, .arg_redecl, .alias, .block, .empty_block, .block_single, .@"switch", .static_local_var, .extern_local_var, .mut_str => {}, + .var_decl, .var_simple, .arg_redecl, .alias, .block, .empty_block, .block_single, .@"switch", .static_local_var, .extern_local_var, .extern_local_fn, .mut_str => {}, .while_true => { const payload = node.castTag(.while_true).?.data; return addSemicolonIfNotBlock(c, payload); @@ -2435,6 +2446,7 @@ fn 
renderNodeGrouped(c: *Context, node: Node) !NodeIndex { .builtin_extern, .static_local_var, .extern_local_var, + .extern_local_fn, .mut_str, .macro_arithmetic, => { diff --git a/lib/compiler/build_runner.zig b/lib/compiler/build_runner.zig index 8702acb329..b649c0103b 100644 --- a/lib/compiler/build_runner.zig +++ b/lib/compiler/build_runner.zig @@ -740,7 +740,7 @@ fn runStepNames( if (run.prominent_compile_errors and total_compile_errors > 0) { for (step_stack.keys()) |s| { if (s.result_error_bundle.errorMessageCount() > 0) { - s.result_error_bundle.renderToStdErr(.{ .ttyconf = ttyconf, .include_reference_trace = (b.reference_trace orelse 0) > 0 }); + s.result_error_bundle.renderToStdErr(.{ .ttyconf = ttyconf }); } } @@ -1119,11 +1119,7 @@ fn workerMakeOneStep( defer std.debug.unlockStdErr(); const gpa = b.allocator; - const options: std.zig.ErrorBundle.RenderOptions = .{ - .ttyconf = run.ttyconf, - .include_reference_trace = (b.reference_trace orelse 0) > 0, - }; - printErrorMessages(gpa, s, options, run.stderr, run.prominent_compile_errors) catch {}; + printErrorMessages(gpa, s, .{ .ttyconf = run.ttyconf }, run.stderr, run.prominent_compile_errors) catch {}; } handle_result: { diff --git a/lib/compiler/resinator/cli.zig b/lib/compiler/resinator/cli.zig index 7b8ad93547..ea86752455 100644 --- a/lib/compiler/resinator/cli.zig +++ b/lib/compiler/resinator/cli.zig @@ -17,6 +17,7 @@ pub const usage_string_after_command_name = \\This is necessary when the input path begins with a forward slash. \\ \\Supported option prefixes are /, -, and --, so e.g. /h, -h, and --h all work. + \\Drop-in compatible with the Microsoft Resource Compiler. \\ \\Supported Win32 RC Options: \\ /?, /h Print this help and exit. 
diff --git a/lib/compiler/resinator/main.zig b/lib/compiler/resinator/main.zig index e4ad61c522..3266b7921f 100644 --- a/lib/compiler/resinator/main.zig +++ b/lib/compiler/resinator/main.zig @@ -81,7 +81,8 @@ pub fn main() !void { defer options.deinit(); if (options.print_help_and_exit) { - try cli.writeUsage(stderr.writer(), "zig rc"); + const stdout = std.io.getStdOut(); + try cli.writeUsage(stdout.writer(), "zig rc"); return; } diff --git a/lib/compiler_rt/int_from_float.zig b/lib/compiler_rt/int_from_float.zig index 7bbcd90893..0c2c73bb42 100644 --- a/lib/compiler_rt/int_from_float.zig +++ b/lib/compiler_rt/int_from_float.zig @@ -72,10 +72,12 @@ pub inline fn bigIntFromFloat(comptime signedness: std.builtin.Signedness, resul } }); const parts = math.frexp(a); - const exponent = @max(parts.exponent - significand_bits, 0); + const significand_bits_adjusted_to_handle_smin = @as(i32, significand_bits) + + @intFromBool(signedness == .signed and parts.exponent == 32 * result.len); + const exponent = @max(parts.exponent - significand_bits_adjusted_to_handle_smin, 0); const int: I = @intFromFloat(switch (exponent) { 0 => a, - else => math.ldexp(parts.significand, significand_bits), + else => math.ldexp(parts.significand, significand_bits_adjusted_to_handle_smin), }); switch (signedness) { .signed => { diff --git a/lib/compiler_rt/int_from_float_test.zig b/lib/compiler_rt/int_from_float_test.zig index e10ed1ec00..5305ecf2a0 100644 --- a/lib/compiler_rt/int_from_float_test.zig +++ b/lib/compiler_rt/int_from_float_test.zig @@ -24,6 +24,8 @@ const __fixdfdi = @import("fixdfdi.zig").__fixdfdi; const __fixunsdfdi = @import("fixunsdfdi.zig").__fixunsdfdi; const __fixdfti = @import("fixdfti.zig").__fixdfti; const __fixunsdfti = @import("fixunsdfti.zig").__fixunsdfti; +const __fixdfei = @import("fixdfei.zig").__fixdfei; +const __fixunsdfei = @import("fixunsdfei.zig").__fixunsdfei; // Conversion from f128 const __fixtfsi = @import("fixtfsi.zig").__fixtfsi; @@ -681,6 +683,44 @@ 
test "fixunsdfti" { try test__fixunsdfti(-0x1.FFFFFFFFFFFFEp+62, 0); } +fn test_fixdfei(comptime T: type, expected: T, a: f64) !void { + const int = @typeInfo(T).int; + var expected_buf: [@divExact(int.bits, 32)]u32 = undefined; + std.mem.writeInt(T, std.mem.asBytes(&expected_buf), expected, endian); + var actual_buf: [@divExact(int.bits, 32)]u32 = undefined; + _ = switch (int.signedness) { + .signed => __fixdfei, + .unsigned => __fixunsdfei, + }(&actual_buf, int.bits, a); + try testing.expect(std.mem.eql(u32, &expected_buf, &actual_buf)); +} + +test "fixdfei" { + try test_fixdfei(i256, -1 << 255, -0x1p255); + try test_fixdfei(i256, -1 << 127, -0x1p127); + try test_fixdfei(i256, -1 << 100, -0x1p100); + try test_fixdfei(i256, -1 << 50, -0x1p50); + try test_fixdfei(i256, -1 << 1, -0x1p1); + try test_fixdfei(i256, -1 << 0, -0x1p0); + try test_fixdfei(i256, 0, 0); + try test_fixdfei(i256, 1 << 0, 0x1p0); + try test_fixdfei(i256, 1 << 1, 0x1p1); + try test_fixdfei(i256, 1 << 50, 0x1p50); + try test_fixdfei(i256, 1 << 100, 0x1p100); + try test_fixdfei(i256, 1 << 127, 0x1p127); + try test_fixdfei(i256, 1 << 254, 0x1p254); +} + +test "fixundfei" { + try test_fixdfei(u256, 0, 0); + try test_fixdfei(u256, 1 << 0, 0x1p0); + try test_fixdfei(u256, 1 << 1, 0x1p1); + try test_fixdfei(u256, 1 << 50, 0x1p50); + try test_fixdfei(u256, 1 << 100, 0x1p100); + try test_fixdfei(u256, 1 << 127, 0x1p127); + try test_fixdfei(u256, 1 << 255, 0x1p255); +} + fn test__fixtfsi(a: f128, expected: i32) !void { const x = __fixtfsi(a); try testing.expect(x == expected); diff --git a/lib/compiler_rt/sqrt.zig b/lib/compiler_rt/sqrt.zig index b804ff9359..7c33eac374 100644 --- a/lib/compiler_rt/sqrt.zig +++ b/lib/compiler_rt/sqrt.zig @@ -13,6 +13,8 @@ comptime { @export(&__sqrtx, .{ .name = "__sqrtx", .linkage = common.linkage, .visibility = common.visibility }); if (common.want_ppc_abi) { @export(&sqrtq, .{ .name = "sqrtf128", .linkage = common.linkage, .visibility = common.visibility }); + } else if 
(common.want_sparc_abi) { + @export(&_Qp_sqrt, .{ .name = "_Qp_sqrt", .linkage = common.linkage, .visibility = common.visibility }); } @export(&sqrtq, .{ .name = "sqrtq", .linkage = common.linkage, .visibility = common.visibility }); @export(&sqrtl, .{ .name = "sqrtl", .linkage = common.linkage, .visibility = common.visibility }); @@ -242,6 +244,10 @@ pub fn sqrtq(x: f128) callconv(.C) f128 { return sqrt(@floatCast(x)); } +fn _Qp_sqrt(c: *f128, a: *f128) callconv(.c) void { + c.* = sqrt(@floatCast(a.*)); +} + pub fn sqrtl(x: c_longdouble) callconv(.C) c_longdouble { switch (@typeInfo(c_longdouble).float.bits) { 16 => return __sqrth(x), diff --git a/lib/fuzzer.zig b/lib/fuzzer.zig index 0c287c6afc..29932ffc76 100644 --- a/lib/fuzzer.zig +++ b/lib/fuzzer.zig @@ -468,27 +468,42 @@ export fn fuzzer_init(cache_dir_struct: Fuzzer.Slice) void { // Linkers are expected to automatically add `__start_` and // `__stop_` symbols when section names are valid C identifiers. - const pc_counters_start = @extern([*]u8, .{ - .name = "__start___sancov_cntrs", - .linkage = .weak, - }) orelse fatal("missing __start___sancov_cntrs symbol", .{}); + const ofmt = builtin.object_format; - const pc_counters_end = @extern([*]u8, .{ - .name = "__stop___sancov_cntrs", + const start_symbol_prefix: []const u8 = if (ofmt == .macho) + "\x01section$start$__DATA$__" + else + "__start___"; + const end_symbol_prefix: []const u8 = if (ofmt == .macho) + "\x01section$end$__DATA$__" + else + "__stop___"; + + const pc_counters_start_name = start_symbol_prefix ++ "sancov_cntrs"; + const pc_counters_start = @extern([*]u8, .{ + .name = pc_counters_start_name, .linkage = .weak, - }) orelse fatal("missing __stop___sancov_cntrs symbol", .{}); + }) orelse fatal("missing {s} symbol", .{pc_counters_start_name}); + + const pc_counters_end_name = end_symbol_prefix ++ "sancov_cntrs"; + const pc_counters_end = @extern([*]u8, .{ + .name = pc_counters_end_name, + .linkage = .weak, + }) orelse fatal("missing {s} symbol", 
.{pc_counters_end_name}); const pc_counters = pc_counters_start[0 .. pc_counters_end - pc_counters_start]; + const pcs_start_name = start_symbol_prefix ++ "sancov_pcs1"; const pcs_start = @extern([*]usize, .{ - .name = "__start___sancov_pcs1", + .name = pcs_start_name, .linkage = .weak, - }) orelse fatal("missing __start___sancov_pcs1 symbol", .{}); + }) orelse fatal("missing {s} symbol", .{pcs_start_name}); + const pcs_end_name = end_symbol_prefix ++ "sancov_pcs1"; const pcs_end = @extern([*]usize, .{ - .name = "__stop___sancov_pcs1", + .name = pcs_end_name, .linkage = .weak, - }) orelse fatal("missing __stop___sancov_pcs1 symbol", .{}); + }) orelse fatal("missing {s} symbol", .{pcs_end_name}); const pcs = pcs_start[0 .. pcs_end - pcs_start]; diff --git a/lib/libc/glibc/sysdeps/nptl/bits/thread-shared-types.h b/lib/libc/glibc/sysdeps/nptl/bits/thread-shared-types.h index 7c24c0a6be..e614c7f3c9 100644 --- a/lib/libc/glibc/sysdeps/nptl/bits/thread-shared-types.h +++ b/lib/libc/glibc/sysdeps/nptl/bits/thread-shared-types.h @@ -99,6 +99,8 @@ struct __pthread_cond_s unsigned int __g1_orig_size; unsigned int __wrefs; unsigned int __g_signals[2]; + unsigned int __unused_initialized_1; + unsigned int __unused_initialized_2; }; typedef unsigned int __tss_t; diff --git a/lib/libc/glibc/sysdeps/nptl/pthread.h b/lib/libc/glibc/sysdeps/nptl/pthread.h index 050b4ab8d1..9ad36cabe9 100644 --- a/lib/libc/glibc/sysdeps/nptl/pthread.h +++ b/lib/libc/glibc/sysdeps/nptl/pthread.h @@ -152,7 +152,7 @@ enum /* Conditional variable handling. 
*/ -#define PTHREAD_COND_INITIALIZER { { {0}, {0}, {0, 0}, 0, 0, {0, 0} } } +#define PTHREAD_COND_INITIALIZER { { {0}, {0}, {0, 0}, 0, 0, {0, 0}, 0, 0 } } /* Cleanup buffers */ diff --git a/lib/libc/include/generic-glibc/bits/thread-shared-types.h b/lib/libc/include/generic-glibc/bits/thread-shared-types.h index 0afb8c2c6b..119dd8ae55 100644 --- a/lib/libc/include/generic-glibc/bits/thread-shared-types.h +++ b/lib/libc/include/generic-glibc/bits/thread-shared-types.h @@ -99,6 +99,8 @@ struct __pthread_cond_s unsigned int __g1_orig_size; unsigned int __wrefs; unsigned int __g_signals[2]; + unsigned int __unused_initialized_1; + unsigned int __unused_initialized_2; }; typedef unsigned int __tss_t; diff --git a/lib/libc/include/generic-glibc/pthread.h b/lib/libc/include/generic-glibc/pthread.h index 5a9977262e..1c7bf92e8c 100644 --- a/lib/libc/include/generic-glibc/pthread.h +++ b/lib/libc/include/generic-glibc/pthread.h @@ -152,7 +152,7 @@ enum /* Conditional variable handling. */ -#define PTHREAD_COND_INITIALIZER { { {0}, {0}, {0, 0}, 0, 0, {0, 0} } } +#define PTHREAD_COND_INITIALIZER { { {0}, {0}, {0, 0}, 0, 0, {0, 0}, 0, 0 } } /* Cleanup buffers */ diff --git a/lib/libc/include/loongarch64-linux-gnusf/gnu/stubs-lp64s.h b/lib/libc/include/loongarch64-linux-gnusf/gnu/stubs-lp64s.h new file mode 100644 index 0000000000..b9af396166 --- /dev/null +++ b/lib/libc/include/loongarch64-linux-gnusf/gnu/stubs-lp64s.h @@ -0,0 +1,38 @@ +/* This file is automatically generated. + It defines a symbol `__stub_FUNCTION' for each function + in the C library which is a stub, meaning it will fail + every time called, usually setting errno to ENOSYS. 
*/ + +#ifdef _LIBC + #error Applications may not define the macro _LIBC +#endif + +#define __stub___compat_bdflush +#define __stub___compat_create_module +#define __stub___compat_get_kernel_syms +#define __stub___compat_query_module +#define __stub___compat_uselib +#define __stub_chflags +#define __stub_fchflags +#define __stub_feclearexcept +#define __stub_fedisableexcept +#define __stub_feenableexcept +#define __stub_fegetenv +#define __stub_fegetexcept +#define __stub_fegetexceptflag +#define __stub_fegetmode +#define __stub_fegetround +#define __stub_feholdexcept +#define __stub_feraiseexcept +#define __stub_fesetenv +#define __stub_fesetexcept +#define __stub_fesetexceptflag +#define __stub_fesetmode +#define __stub_fesetround +#define __stub_fetestexcept +#define __stub_feupdateenv +#define __stub_gtty +#define __stub_revoke +#define __stub_setlogin +#define __stub_sigreturn +#define __stub_stty diff --git a/lib/std/Build/Cache.zig b/lib/std/Build/Cache.zig index 24edee37a8..ecf67e9869 100644 --- a/lib/std/Build/Cache.zig +++ b/lib/std/Build/Cache.zig @@ -337,6 +337,7 @@ pub const Manifest = struct { manifest_create: fs.File.OpenError, manifest_read: fs.File.ReadError, manifest_lock: fs.File.LockError, + manifest_seek: fs.File.SeekError, file_open: FileOp, file_stat: FileOp, file_read: FileOp, @@ -488,7 +489,6 @@ pub const Manifest = struct { /// option, one may call `toOwnedLock` to obtain a smaller object which can represent /// the lock. `deinit` is safe to call whether or not `toOwnedLock` has been called. 
pub fn hit(self: *Manifest) HitError!bool { - const gpa = self.cache.gpa; assert(self.manifest_file == null); self.diagnostic = .none; @@ -501,12 +501,12 @@ pub const Manifest = struct { self.hex_digest = binToHex(bin_digest); - self.hash.hasher = hasher_init; - self.hash.hasher.update(&bin_digest); - @memcpy(manifest_file_path[0..self.hex_digest.len], &self.hex_digest); manifest_file_path[hex_digest_len..][0..ext.len].* = ext.*; + // We'll try to open the cache with an exclusive lock, but if that would block + // and `want_shared_lock` is set, a shared lock might be sufficient, so we'll + // open with a shared lock instead. while (true) { if (self.cache.manifest_dir.createFile(&manifest_file_path, .{ .read = true, @@ -575,26 +575,71 @@ pub const Manifest = struct { self.want_refresh_timestamp = true; const input_file_count = self.files.entries.len; - while (true) : (self.unhit(bin_digest, input_file_count)) { - const file_contents = self.manifest_file.?.reader().readAllAlloc(gpa, manifest_file_size_max) catch |err| switch (err) { - error.OutOfMemory => return error.OutOfMemory, - error.StreamTooLong => return error.OutOfMemory, - else => |e| { - self.diagnostic = .{ .manifest_read = e }; - return error.CacheCheckFailed; - }, - }; - defer gpa.free(file_contents); - var any_file_changed = false; - var line_iter = mem.tokenizeScalar(u8, file_contents, '\n'); - var idx: usize = 0; - if (if (line_iter.next()) |line| !std.mem.eql(u8, line, manifest_header) else true) { - if (try self.upgradeToExclusiveLock()) continue; - self.manifest_dirty = true; - while (idx < input_file_count) : (idx += 1) { - const ch_file = &self.files.keys()[idx]; - self.populateFileHash(ch_file) catch |err| { + // We're going to construct a second hash. Its input will begin with the digest we've + // already computed (`bin_digest`), and then it'll have the digests of each input file, + // including "post" files (see `addFilePost`). 
If this is a hit, we learn the set of "post" + // files from the manifest on disk. If this is a miss, we'll learn those from future calls + // to `addFilePost` etc. As such, the state of `self.hash.hasher` after this function + // depends on whether this is a hit or a miss. + // + // If we return `true` indicating a cache hit, then `self.hash.hasher` must already include + // the digests of the "post" files, so the caller can call `final`. Otherwise, on a cache + // miss, `self.hash.hasher` will include the digests of all non-"post" files -- that is, + // the ones we've already been told about. The rest will be discovered through calls to + // `addFilePost` etc, which will update the hasher. After all files are added, the user can + // use `final`, and will at some point `writeManifest` the file list to disk. + + self.hash.hasher = hasher_init; + self.hash.hasher.update(&bin_digest); + + hit: { + const file_digests_populated: usize = digests: { + switch (try self.hitWithCurrentLock()) { + .hit => break :hit, + .miss => |m| if (!try self.upgradeToExclusiveLock()) { + break :digests m.file_digests_populated; + }, + } + // We've just had a miss with the shared lock, and upgraded to an exclusive lock. Someone + // else might have modified the digest, so we need to check again before deciding to miss. + // Before trying again, we must reset `self.hash.hasher` and `self.files`. + // This is basically just the first half of `unhit`. + self.hash.hasher = hasher_init; + self.hash.hasher.update(&bin_digest); + while (self.files.count() != input_file_count) { + var file = self.files.pop().?; + file.key.deinit(self.cache.gpa); + } + // Also, seek the file back to the start. + self.manifest_file.?.seekTo(0) catch |err| { + self.diagnostic = .{ .manifest_seek = err }; + return error.CacheCheckFailed; + }; + + switch (try self.hitWithCurrentLock()) { + .hit => break :hit, + .miss => |m| break :digests m.file_digests_populated, + } + }; + + // This is a guaranteed cache miss. 
We're almost ready to return `false`, but there's a + // little bookkeeping to do first. The first `file_digests_populated` entries in `files` + // have their `bin_digest` populated; there may be some left in `input_file_count` which + // we'll need to populate ourselves. Other than that, this is basically `unhit`. + self.manifest_dirty = true; + self.hash.hasher = hasher_init; + self.hash.hasher.update(&bin_digest); + while (self.files.count() != input_file_count) { + var file = self.files.pop().?; + file.key.deinit(self.cache.gpa); + } + for (self.files.keys(), 0..) |*file, idx| { + if (idx < file_digests_populated) { + // `bin_digest` is already populated by `hitWithCurrentLock`, so we can use it directly. + self.hash.hasher.update(&file.bin_digest); + } else { + self.populateFileHash(file) catch |err| { self.diagnostic = .{ .file_hash = .{ .file_index = idx, .err = err, @@ -602,172 +647,195 @@ pub const Manifest = struct { return error.CacheCheckFailed; }; } - return false; } - while (line_iter.next()) |line| { - defer idx += 1; + return false; + } - var iter = mem.tokenizeScalar(u8, line, ' '); - const size = iter.next() orelse return error.InvalidFormat; - const inode = iter.next() orelse return error.InvalidFormat; - const mtime_nsec_str = iter.next() orelse return error.InvalidFormat; - const digest_str = iter.next() orelse return error.InvalidFormat; - const prefix_str = iter.next() orelse return error.InvalidFormat; - const file_path = iter.rest(); + if (self.want_shared_lock) { + self.downgradeToSharedLock() catch |err| { + self.diagnostic = .{ .manifest_lock = err }; + return error.CacheCheckFailed; + }; + } - const stat_size = fmt.parseInt(u64, size, 10) catch return error.InvalidFormat; - const stat_inode = fmt.parseInt(fs.File.INode, inode, 10) catch return error.InvalidFormat; - const stat_mtime = fmt.parseInt(i64, mtime_nsec_str, 10) catch return error.InvalidFormat; - const file_bin_digest = b: { - if (digest_str.len != hex_digest_len) return 
error.InvalidFormat; - var bd: BinDigest = undefined; - _ = fmt.hexToBytes(&bd, digest_str) catch return error.InvalidFormat; - break :b bd; + return true; + } + + /// Assumes that `self.hash.hasher` has been updated only with the original digest, that + /// `self.files` contains only the original input files, and that `self.manifest_file.?` is + /// seeked to the start of the file. + fn hitWithCurrentLock(self: *Manifest) HitError!union(enum) { + hit, + miss: struct { + file_digests_populated: usize, + }, + } { + const gpa = self.cache.gpa; + const input_file_count = self.files.entries.len; + + const file_contents = self.manifest_file.?.reader().readAllAlloc(gpa, manifest_file_size_max) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.StreamTooLong => return error.OutOfMemory, + else => |e| { + self.diagnostic = .{ .manifest_read = e }; + return error.CacheCheckFailed; + }, + }; + defer gpa.free(file_contents); + + var any_file_changed = false; + var line_iter = mem.tokenizeScalar(u8, file_contents, '\n'); + var idx: usize = 0; + const header_valid = valid: { + const line = line_iter.next() orelse break :valid false; + break :valid std.mem.eql(u8, line, manifest_header); + }; + if (!header_valid) { + return .{ .miss = .{ .file_digests_populated = 0 } }; + } + while (line_iter.next()) |line| { + defer idx += 1; + + var iter = mem.tokenizeScalar(u8, line, ' '); + const size = iter.next() orelse return error.InvalidFormat; + const inode = iter.next() orelse return error.InvalidFormat; + const mtime_nsec_str = iter.next() orelse return error.InvalidFormat; + const digest_str = iter.next() orelse return error.InvalidFormat; + const prefix_str = iter.next() orelse return error.InvalidFormat; + const file_path = iter.rest(); + + const stat_size = fmt.parseInt(u64, size, 10) catch return error.InvalidFormat; + const stat_inode = fmt.parseInt(fs.File.INode, inode, 10) catch return error.InvalidFormat; + const stat_mtime = 
fmt.parseInt(i64, mtime_nsec_str, 10) catch return error.InvalidFormat; + const file_bin_digest = b: { + if (digest_str.len != hex_digest_len) return error.InvalidFormat; + var bd: BinDigest = undefined; + _ = fmt.hexToBytes(&bd, digest_str) catch return error.InvalidFormat; + break :b bd; + }; + + const prefix = fmt.parseInt(u8, prefix_str, 10) catch return error.InvalidFormat; + if (prefix >= self.cache.prefixes_len) return error.InvalidFormat; + + if (file_path.len == 0) return error.InvalidFormat; + + const cache_hash_file = f: { + const prefixed_path: PrefixedPath = .{ + .prefix = prefix, + .sub_path = file_path, // expires with file_contents }; + if (idx < input_file_count) { + const file = &self.files.keys()[idx]; + if (!file.prefixed_path.eql(prefixed_path)) + return error.InvalidFormat; - const prefix = fmt.parseInt(u8, prefix_str, 10) catch return error.InvalidFormat; - if (prefix >= self.cache.prefixes_len) return error.InvalidFormat; - - if (file_path.len == 0) return error.InvalidFormat; - - const cache_hash_file = f: { - const prefixed_path: PrefixedPath = .{ - .prefix = prefix, - .sub_path = file_path, // expires with file_contents + file.stat = .{ + .size = stat_size, + .inode = stat_inode, + .mtime = stat_mtime, }; - if (idx < input_file_count) { - const file = &self.files.keys()[idx]; - if (!file.prefixed_path.eql(prefixed_path)) - return error.InvalidFormat; - - file.stat = .{ + file.bin_digest = file_bin_digest; + break :f file; + } + const gop = try self.files.getOrPutAdapted(gpa, prefixed_path, FilesAdapter{}); + errdefer _ = self.files.pop(); + if (!gop.found_existing) { + gop.key_ptr.* = .{ + .prefixed_path = .{ + .prefix = prefix, + .sub_path = try gpa.dupe(u8, file_path), + }, + .contents = null, + .max_file_size = null, + .handle = null, + .stat = .{ .size = stat_size, .inode = stat_inode, .mtime = stat_mtime, - }; - file.bin_digest = file_bin_digest; - break :f file; - } - const gop = try self.files.getOrPutAdapted(gpa, prefixed_path, 
FilesAdapter{}); - errdefer _ = self.files.pop(); - if (!gop.found_existing) { - gop.key_ptr.* = .{ - .prefixed_path = .{ - .prefix = prefix, - .sub_path = try gpa.dupe(u8, file_path), - }, - .contents = null, - .max_file_size = null, - .handle = null, - .stat = .{ - .size = stat_size, - .inode = stat_inode, - .mtime = stat_mtime, - }, - .bin_digest = file_bin_digest, - }; - } - break :f gop.key_ptr; + }, + .bin_digest = file_bin_digest, + }; + } + break :f gop.key_ptr; + }; + + const pp = cache_hash_file.prefixed_path; + const dir = self.cache.prefixes()[pp.prefix].handle; + const this_file = dir.openFile(pp.sub_path, .{ .mode = .read_only }) catch |err| switch (err) { + error.FileNotFound => { + // Every digest before this one has been populated successfully. + return .{ .miss = .{ .file_digests_populated = idx } }; + }, + else => |e| { + self.diagnostic = .{ .file_open = .{ + .file_index = idx, + .err = e, + } }; + return error.CacheCheckFailed; + }, + }; + defer this_file.close(); + + const actual_stat = this_file.stat() catch |err| { + self.diagnostic = .{ .file_stat = .{ + .file_index = idx, + .err = err, + } }; + return error.CacheCheckFailed; + }; + const size_match = actual_stat.size == cache_hash_file.stat.size; + const mtime_match = actual_stat.mtime == cache_hash_file.stat.mtime; + const inode_match = actual_stat.inode == cache_hash_file.stat.inode; + + if (!size_match or !mtime_match or !inode_match) { + cache_hash_file.stat = .{ + .size = actual_stat.size, + .mtime = actual_stat.mtime, + .inode = actual_stat.inode, }; - const pp = cache_hash_file.prefixed_path; - const dir = self.cache.prefixes()[pp.prefix].handle; - const this_file = dir.openFile(pp.sub_path, .{ .mode = .read_only }) catch |err| switch (err) { - error.FileNotFound => { - if (try self.upgradeToExclusiveLock()) continue; - return false; - }, - else => |e| { - self.diagnostic = .{ .file_open = .{ - .file_index = idx, - .err = e, - } }; - return error.CacheCheckFailed; - }, - }; - defer 
this_file.close(); + if (self.isProblematicTimestamp(cache_hash_file.stat.mtime)) { + // The actual file has an unreliable timestamp, force it to be hashed + cache_hash_file.stat.mtime = 0; + cache_hash_file.stat.inode = 0; + } - const actual_stat = this_file.stat() catch |err| { - self.diagnostic = .{ .file_stat = .{ + var actual_digest: BinDigest = undefined; + hashFile(this_file, &actual_digest) catch |err| { + self.diagnostic = .{ .file_read = .{ .file_index = idx, .err = err, } }; return error.CacheCheckFailed; }; - const size_match = actual_stat.size == cache_hash_file.stat.size; - const mtime_match = actual_stat.mtime == cache_hash_file.stat.mtime; - const inode_match = actual_stat.inode == cache_hash_file.stat.inode; - if (!size_match or !mtime_match or !inode_match) { - self.manifest_dirty = true; - - cache_hash_file.stat = .{ - .size = actual_stat.size, - .mtime = actual_stat.mtime, - .inode = actual_stat.inode, - }; - - if (self.isProblematicTimestamp(cache_hash_file.stat.mtime)) { - // The actual file has an unreliable timestamp, force it to be hashed - cache_hash_file.stat.mtime = 0; - cache_hash_file.stat.inode = 0; - } - - var actual_digest: BinDigest = undefined; - hashFile(this_file, &actual_digest) catch |err| { - self.diagnostic = .{ .file_read = .{ - .file_index = idx, - .err = err, - } }; - return error.CacheCheckFailed; - }; - - if (!mem.eql(u8, &cache_hash_file.bin_digest, &actual_digest)) { - cache_hash_file.bin_digest = actual_digest; - // keep going until we have the input file digests - any_file_changed = true; - } - } - - if (!any_file_changed) { - self.hash.hasher.update(&cache_hash_file.bin_digest); + if (!mem.eql(u8, &cache_hash_file.bin_digest, &actual_digest)) { + cache_hash_file.bin_digest = actual_digest; + // keep going until we have the input file digests + any_file_changed = true; } } - if (any_file_changed) { - if (try self.upgradeToExclusiveLock()) continue; - // cache miss - // keep the manifest file open - 
self.unhit(bin_digest, input_file_count); - return false; + if (!any_file_changed) { + self.hash.hasher.update(&cache_hash_file.bin_digest); } - - if (idx < input_file_count) { - if (try self.upgradeToExclusiveLock()) continue; - self.manifest_dirty = true; - while (idx < input_file_count) : (idx += 1) { - self.populateFileHash(&self.files.keys()[idx]) catch |err| { - self.diagnostic = .{ .file_hash = .{ - .file_index = idx, - .err = err, - } }; - return error.CacheCheckFailed; - }; - } - return false; - } - - if (self.want_shared_lock) { - self.downgradeToSharedLock() catch |err| { - self.diagnostic = .{ .manifest_lock = err }; - return error.CacheCheckFailed; - }; - } - - return true; } + + // If the manifest was somehow missing one of our input files, or if any file hash has changed, + // then this is a cache miss. However, we have successfully populated some or all of the file + // digests. + if (any_file_changed or idx < input_file_count) { + return .{ .miss = .{ .file_digests_populated = idx } }; + } + + return .hit; } + /// Reset `self.hash.hasher` to the state it should be in after `hit` returns `false`. + /// The hasher contains the original input digest, and all original input file digests (i.e. + /// not including post files). + /// Assumes that `bin_digest` is populated for all files up to `input_file_count`. As such, + /// this is not necessarily safe to call within `hit`. pub fn unhit(self: *Manifest, bin_digest: BinDigest, input_file_count: usize) void { // Reset the hash. 
self.hash.hasher = hasher_init; diff --git a/lib/std/Build/Cache/DepTokenizer.zig b/lib/std/Build/Cache/DepTokenizer.zig index ccd7f82fdf..a1e64c006d 100644 --- a/lib/std/Build/Cache/DepTokenizer.zig +++ b/lib/std/Build/Cache/DepTokenizer.zig @@ -25,7 +25,7 @@ pub fn next(self: *Tokenizer) ?Token { }, }, .target => switch (char) { - '\t', '\n', '\r', ' ' => { + '\n', '\r' => { return errorIllegalChar(.invalid_target, self.index, char); }, '$' => { @@ -40,6 +40,15 @@ pub fn next(self: *Tokenizer) ?Token { self.state = .target_colon; self.index += 1; }, + '\t', ' ' => { + self.state = .target_space; + + const bytes = self.bytes[start..self.index]; + std.debug.assert(bytes.len != 0); + self.index += 1; + + return finishTarget(must_resolve, bytes); + }, else => { self.index += 1; }, @@ -110,6 +119,19 @@ pub fn next(self: *Tokenizer) ?Token { self.state = .target; }, }, + .target_space => switch (char) { + '\t', ' ' => { + // silently ignore additional horizontal whitespace + self.index += 1; + }, + ':' => { + self.state = .rhs; + self.index += 1; + }, + else => { + return errorIllegalChar(.expected_colon, self.index, char); + }, + }, .rhs => switch (char) { '\t', ' ' => { // silently ignore horizontal whitespace @@ -256,6 +278,10 @@ pub fn next(self: *Tokenizer) ?Token { self.state = .lhs; return null; }, + .target_space => { + const idx = self.index - 1; + return errorIllegalChar(.expected_colon, idx, self.bytes[idx]); + }, .prereq_quote => { return errorPosition(.incomplete_quoted_prerequisite, start, self.bytes[start..]); }, @@ -299,6 +325,7 @@ const State = enum { target_dollar_sign, target_colon, target_colon_reverse_solidus, + target_space, rhs, rhs_continuation, rhs_continuation_linefeed, @@ -322,6 +349,7 @@ pub const Token = union(enum) { expected_dollar_sign: IndexAndChar, continuation_eol: IndexAndChar, incomplete_escape: IndexAndChar, + expected_colon: IndexAndChar, pub const IndexAndChar = struct { index: usize, @@ -420,6 +448,7 @@ pub const Token = 
union(enum) { .expected_dollar_sign, .continuation_eol, .incomplete_escape, + .expected_colon, => |index_and_char| { try writer.writeAll("illegal char "); try printUnderstandableChar(writer, index_and_char.char); @@ -438,6 +467,7 @@ pub const Token = union(enum) { .expected_dollar_sign => "expecting '$'", .continuation_eol => "continuation expecting end-of-line", .incomplete_escape => "incomplete escape", + .expected_colon => "expecting ':'", }; } }; @@ -545,6 +575,16 @@ test "empty target linefeeds + hspace + continuations" { , expect); } +test "empty target + hspace + colon" { + const expect = "target = {foo.o}"; + + try depTokenizer("foo.o :", expect); + try depTokenizer("foo.o\t\t\t:", expect); + try depTokenizer("foo.o \t \t :", expect); + try depTokenizer("\r\nfoo.o :", expect); + try depTokenizer(" foo.o :", expect); +} + test "prereq" { const expect = \\target = {foo.o} @@ -923,9 +963,6 @@ test "error illegal char at position - expecting dollar_sign" { } test "error illegal char at position - invalid target" { - try depTokenizer("foo\t.o", - \\ERROR: illegal char \x09 at position 3: invalid target - ); try depTokenizer("foo\n.o", \\ERROR: illegal char \x0A at position 3: invalid target ); @@ -963,6 +1000,25 @@ test "error prereq - continuation expecting end-of-line" { ); } +test "error illegal char at position - expecting colon" { + try depTokenizer("foo\t.o:", + \\target = {foo} + \\ERROR: illegal char '.' at position 4: expecting ':' + ); + try depTokenizer("foo .o:", + \\target = {foo} + \\ERROR: illegal char '.' 
at position 4: expecting ':' + ); + try depTokenizer("foo \n.o:", + \\target = {foo} + \\ERROR: illegal char \x0A at position 4: expecting ':' + ); + try depTokenizer("foo.o\t\n:", + \\target = {foo.o} + \\ERROR: illegal char \x0A at position 6: expecting ':' + ); +} + // - tokenize input, emit textual representation, and compare to expect fn depTokenizer(input: []const u8, expect: []const u8) !void { var arena_allocator = std.heap.ArenaAllocator.init(std.testing.allocator); diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig index cedf398d98..125b3e7947 100644 --- a/lib/std/Build/Step.zig +++ b/lib/std/Build/Step.zig @@ -202,6 +202,7 @@ pub fn init(options: StepOptions) Step { .state = .precheck_unstarted, .max_rss = options.max_rss, .debug_stack_trace = blk: { + if (!std.debug.sys_can_stack_trace) break :blk &.{}; const addresses = arena.alloc(usize, options.owner.debug_stack_frames_count) catch @panic("OOM"); @memset(addresses, 0); const first_ret_addr = options.first_ret_addr orelse @returnAddress(); @@ -758,7 +759,7 @@ fn failWithCacheError(s: *Step, man: *const Build.Cache.Manifest, err: Build.Cac switch (err) { error.CacheCheckFailed => switch (man.diagnostic) { .none => unreachable, - .manifest_create, .manifest_read, .manifest_lock => |e| return s.fail("failed to check cache: {s} {s}", .{ + .manifest_create, .manifest_read, .manifest_lock, .manifest_seek => |e| return s.fail("failed to check cache: {s} {s}", .{ @tagName(man.diagnostic), @errorName(e), }), .file_open, .file_stat, .file_read, .file_hash => |op| { diff --git a/lib/std/Build/Step/Compile.zig b/lib/std/Build/Step/Compile.zig index 616e8f76b4..c401a840ba 100644 --- a/lib/std/Build/Step/Compile.zig +++ b/lib/std/Build/Step/Compile.zig @@ -693,6 +693,8 @@ const PkgConfigResult = struct { /// Run pkg-config for the given library name and parse the output, returning the arguments /// that should be passed to zig to link the given library. 
fn runPkgConfig(compile: *Compile, lib_name: []const u8) !PkgConfigResult { + const wl_rpath_prefix = "-Wl,-rpath,"; + const b = compile.step.owner; const pkg_name = match: { // First we have to map the library name to pkg config name. Unfortunately, @@ -783,6 +785,8 @@ fn runPkgConfig(compile: *Compile, lib_name: []const u8) !PkgConfigResult { try zig_cflags.appendSlice(&[_][]const u8{ "-D", macro }); } else if (mem.startsWith(u8, arg, "-D")) { try zig_cflags.append(arg); + } else if (mem.startsWith(u8, arg, wl_rpath_prefix)) { + try zig_cflags.appendSlice(&[_][]const u8{ "-rpath", arg[wl_rpath_prefix.len..] }); } else if (b.debug_pkg_config) { return compile.step.fail("unknown pkg-config flag '{s}'", .{arg}); } diff --git a/lib/std/Build/Step/InstallArtifact.zig b/lib/std/Build/Step/InstallArtifact.zig index 3d404eb8ca..acf392f49f 100644 --- a/lib/std/Build/Step/InstallArtifact.zig +++ b/lib/std/Build/Step/InstallArtifact.zig @@ -189,9 +189,9 @@ fn make(step: *Step, options: Step.MakeOptions) !void { const src_dir_path = dir.source.getPath3(b, step); const full_h_prefix = b.getInstallPath(h_dir, dir.dest_rel_path); - var src_dir = src_dir_path.root_dir.handle.openDir(src_dir_path.sub_path, .{ .iterate = true }) catch |err| { - return step.fail("unable to open source directory '{s}': {s}", .{ - src_dir_path.sub_path, @errorName(err), + var src_dir = src_dir_path.root_dir.handle.openDir(src_dir_path.subPathOrDot(), .{ .iterate = true }) catch |err| { + return step.fail("unable to open source directory '{}': {s}", .{ + src_dir_path, @errorName(err), }); }; defer src_dir.close(); diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig index a9d4808bc0..3d0b0a7068 100644 --- a/lib/std/Build/Step/Run.zig +++ b/lib/std/Build/Step/Run.zig @@ -620,6 +620,35 @@ fn make(step: *Step, options: Step.MakeOptions) !void { var man = b.graph.cache.obtain(); defer man.deinit(); + if (run.env_map) |env_map| { + const KV = struct { []const u8, []const u8 }; + var 
kv_pairs = try std.ArrayList(KV).initCapacity(arena, env_map.count()); + var iter = env_map.iterator(); + while (iter.next()) |entry| { + kv_pairs.appendAssumeCapacity(.{ entry.key_ptr.*, entry.value_ptr.* }); + } + + std.mem.sortUnstable(KV, kv_pairs.items, {}, struct { + fn lessThan(_: void, kv1: KV, kv2: KV) bool { + const k1 = kv1[0]; + const k2 = kv2[0]; + + if (k1.len != k2.len) return k1.len < k2.len; + + for (k1, k2) |c1, c2| { + if (c1 == c2) continue; + return c1 < c2; + } + unreachable; // two keys cannot be equal + } + }.lessThan); + + for (kv_pairs.items) |kv| { + man.hash.addBytes(kv[0]); + man.hash.addBytes(kv[1]); + } + } + for (run.argv.items) |arg| { switch (arg) { .bytes => |bytes| { diff --git a/lib/std/Build/Watch.zig b/lib/std/Build/Watch.zig index 2ddb3ca4c2..068a50769f 100644 --- a/lib/std/Build/Watch.zig +++ b/lib/std/Build/Watch.zig @@ -612,8 +612,6 @@ const Os = switch (builtin.os.tag) { /// -1. Otherwise, it needs to be opened in update(), and will be /// stored here. dir_fd: i32, - /// Number of files being watched by this directory handle. - ref_count: u32, }), const dir_open_flags: posix.O = f: { @@ -673,11 +671,9 @@ const Os = switch (builtin.os.tag) { try handles.append(gpa, .{ .rs = .{}, .dir_fd = if (skip_open_dir) -1 else dir_fd, - .ref_count = 1, }); - } else { - handles.items(.ref_count)[gop.index] += 1; } + break :rs &handles.items(.rs)[gop.index]; }; for (files.items) |basename| { @@ -718,10 +714,6 @@ const Os = switch (builtin.os.tag) { } } - const ref_count_ptr = &handles.items(.ref_count)[i]; - ref_count_ptr.* -= 1; - if (ref_count_ptr.* > 0) continue; - // If the sub_path == "" then this patch has already the // dir fd that we need to use as the ident to remove the // event. If it was opened above with openat() then we need @@ -738,10 +730,10 @@ const Os = switch (builtin.os.tag) { // index in the udata field. 
const last_dir_fd = fd: { const last_path = w.dir_table.keys()[handles.len - 1]; - const last_dir_fd = if (last_path.sub_path.len != 0) + const last_dir_fd = if (last_path.sub_path.len == 0) last_path.root_dir.handle.fd else - handles.items(.dir_fd)[i]; + handles.items(.dir_fd)[handles.len - 1]; assert(last_dir_fd != -1); break :fd last_dir_fd; }; diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index 24f58e1c68..d7a19315b2 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -39,10 +39,20 @@ draw_buffer: []u8, /// CPU cache. node_parents: []Node.Parent, node_storage: []Node.Storage, -node_freelist: []Node.OptionalIndex, -node_freelist_first: Node.OptionalIndex, +node_freelist_next: []Node.OptionalIndex, +node_freelist: Freelist, +/// This is the number of elements in node arrays which have been used so far. Nodes before this +/// index are either active, or on the freelist. The remaining nodes are implicitly free. This +/// value may at times temporarily exceed the node count. node_end_index: u32, +const Freelist = packed struct(u32) { + head: Node.OptionalIndex, + /// Whenever `node_freelist` is added to, this generation is incremented + /// to avoid ABA bugs when acquiring nodes. Wrapping arithmetic is used. + generation: u24, +}; + pub const TerminalMode = union(enum) { off, ansi_escape_codes, @@ -112,7 +122,7 @@ pub const Node = struct { // causes `completed_count` to be treated as a file descriptor, so // the order here matters. @atomicStore(u32, &s.completed_count, integer, .monotonic); - @atomicStore(u32, &s.estimated_total_count, std.math.maxInt(u32), .release); + @atomicStore(u32, &s.estimated_total_count, std.math.maxInt(u32), .release); // synchronizes with acquire in `serialize` } /// Not thread-safe. 
@@ -184,12 +194,24 @@ pub const Node = struct { const node_index = node.index.unwrap() orelse return Node.none; const parent = node_index.toParent(); - const freelist_head = &global_progress.node_freelist_first; - var opt_free_index = @atomicLoad(Node.OptionalIndex, freelist_head, .seq_cst); - while (opt_free_index.unwrap()) |free_index| { - const freelist_ptr = freelistByIndex(free_index); - const next = @atomicLoad(Node.OptionalIndex, freelist_ptr, .seq_cst); - opt_free_index = @cmpxchgWeak(Node.OptionalIndex, freelist_head, opt_free_index, next, .seq_cst, .seq_cst) orelse { + const freelist = &global_progress.node_freelist; + var old_freelist = @atomicLoad(Freelist, freelist, .acquire); // acquire to ensure we have the correct "next" entry + while (old_freelist.head.unwrap()) |free_index| { + const next_ptr = freelistNextByIndex(free_index); + const new_freelist: Freelist = .{ + .head = @atomicLoad(Node.OptionalIndex, next_ptr, .monotonic), + // We don't need to increment the generation when removing nodes from the free list, + // only when adding them. (This choice is arbitrary; the opposite would also work.) + .generation = old_freelist.generation, + }; + old_freelist = @cmpxchgWeak( + Freelist, + freelist, + old_freelist, + new_freelist, + .acquire, // not theoretically necessary, but not allowed to be weaker than the failure order + .acquire, // ensure we have the correct `node_freelist_next` entry on the next iteration + ) orelse { // We won the allocation race. 
return init(free_index, parent, name, estimated_total_items); }; @@ -243,18 +265,28 @@ pub const Node = struct { } const index = n.index.unwrap() orelse return; const parent_ptr = parentByIndex(index); - if (parent_ptr.unwrap()) |parent_index| { + if (@atomicLoad(Node.Parent, parent_ptr, .monotonic).unwrap()) |parent_index| { _ = @atomicRmw(u32, &storageByIndex(parent_index).completed_count, .Add, 1, .monotonic); - @atomicStore(Node.Parent, parent_ptr, .unused, .seq_cst); + @atomicStore(Node.Parent, parent_ptr, .unused, .monotonic); - const freelist_head = &global_progress.node_freelist_first; - var first = @atomicLoad(Node.OptionalIndex, freelist_head, .seq_cst); + const freelist = &global_progress.node_freelist; + var old_freelist = @atomicLoad(Freelist, freelist, .monotonic); while (true) { - @atomicStore(Node.OptionalIndex, freelistByIndex(index), first, .seq_cst); - first = @cmpxchgWeak(Node.OptionalIndex, freelist_head, first, index.toOptional(), .seq_cst, .seq_cst) orelse break; + @atomicStore(Node.OptionalIndex, freelistNextByIndex(index), old_freelist.head, .monotonic); + old_freelist = @cmpxchgWeak( + Freelist, + freelist, + old_freelist, + .{ .head = index.toOptional(), .generation = old_freelist.generation +% 1 }, + .release, // ensure a matching `start` sees the freelist link written above + .monotonic, // our write above is irrelevant if we need to retry + ) orelse { + // We won the race. 
+ return; + }; } } else { - @atomicStore(bool, &global_progress.done, true, .seq_cst); + @atomicStore(bool, &global_progress.done, true, .monotonic); global_progress.redraw_event.set(); if (global_progress.update_thread) |thread| thread.join(); } @@ -291,8 +323,8 @@ pub const Node = struct { return &global_progress.node_parents[@intFromEnum(index)]; } - fn freelistByIndex(index: Node.Index) *Node.OptionalIndex { - return &global_progress.node_freelist[@intFromEnum(index)]; + fn freelistNextByIndex(index: Node.Index) *Node.OptionalIndex { + return &global_progress.node_freelist_next[@intFromEnum(index)]; } fn init(free_index: Index, parent: Parent, name: []const u8, estimated_total_items: usize) Node { @@ -307,8 +339,10 @@ pub const Node = struct { @atomicStore(u8, &storage.name[name_len], 0, .monotonic); const parent_ptr = parentByIndex(free_index); - assert(parent_ptr.* == .unused); - @atomicStore(Node.Parent, parent_ptr, parent, .release); + if (std.debug.runtime_safety) { + assert(@atomicLoad(Node.Parent, parent_ptr, .monotonic) == .unused); + } + @atomicStore(Node.Parent, parent_ptr, parent, .monotonic); return .{ .index = free_index.toOptional() }; } @@ -329,15 +363,15 @@ var global_progress: Progress = .{ .node_parents = &node_parents_buffer, .node_storage = &node_storage_buffer, - .node_freelist = &node_freelist_buffer, - .node_freelist_first = .none, + .node_freelist_next = &node_freelist_next_buffer, + .node_freelist = .{ .head = .none, .generation = 0 }, .node_end_index = 0, }; const node_storage_buffer_len = 83; var node_parents_buffer: [node_storage_buffer_len]Node.Parent = undefined; var node_storage_buffer: [node_storage_buffer_len]Node.Storage = undefined; -var node_freelist_buffer: [node_storage_buffer_len]Node.OptionalIndex = undefined; +var node_freelist_next_buffer: [node_storage_buffer_len]Node.OptionalIndex = undefined; var default_draw_buffer: [4096]u8 = undefined; @@ -456,7 +490,7 @@ fn updateThreadRun() void { { const resize_flag = 
wait(global_progress.initial_delay_ns); - if (@atomicLoad(bool, &global_progress.done, .seq_cst)) return; + if (@atomicLoad(bool, &global_progress.done, .monotonic)) return; maybeUpdateSize(resize_flag); const buffer, _ = computeRedraw(&serialized_buffer); @@ -470,7 +504,7 @@ fn updateThreadRun() void { while (true) { const resize_flag = wait(global_progress.refresh_rate_ns); - if (@atomicLoad(bool, &global_progress.done, .seq_cst)) { + if (@atomicLoad(bool, &global_progress.done, .monotonic)) { stderr_mutex.lock(); defer stderr_mutex.unlock(); return clearWrittenWithEscapeCodes() catch {}; @@ -500,7 +534,7 @@ fn windowsApiUpdateThreadRun() void { { const resize_flag = wait(global_progress.initial_delay_ns); - if (@atomicLoad(bool, &global_progress.done, .seq_cst)) return; + if (@atomicLoad(bool, &global_progress.done, .monotonic)) return; maybeUpdateSize(resize_flag); const buffer, const nl_n = computeRedraw(&serialized_buffer); @@ -516,7 +550,7 @@ fn windowsApiUpdateThreadRun() void { while (true) { const resize_flag = wait(global_progress.refresh_rate_ns); - if (@atomicLoad(bool, &global_progress.done, .seq_cst)) { + if (@atomicLoad(bool, &global_progress.done, .monotonic)) { stderr_mutex.lock(); defer stderr_mutex.unlock(); return clearWrittenWindowsApi() catch {}; @@ -558,7 +592,7 @@ fn ipcThreadRun(fd: posix.fd_t) anyerror!void { { _ = wait(global_progress.initial_delay_ns); - if (@atomicLoad(bool, &global_progress.done, .seq_cst)) + if (@atomicLoad(bool, &global_progress.done, .monotonic)) return; const serialized = serialize(&serialized_buffer); @@ -570,7 +604,7 @@ fn ipcThreadRun(fd: posix.fd_t) anyerror!void { while (true) { _ = wait(global_progress.refresh_rate_ns); - if (@atomicLoad(bool, &global_progress.done, .seq_cst)) + if (@atomicLoad(bool, &global_progress.done, .monotonic)) return; const serialized = serialize(&serialized_buffer); @@ -765,37 +799,39 @@ fn serialize(serialized_buffer: *Serialized.Buffer) Serialized { var any_ipc = false; // 
Iterate all of the nodes and construct a serializable copy of the state that can be examined - // without atomics. - const end_index = @atomicLoad(u32, &global_progress.node_end_index, .monotonic); + // without atomics. The `@min` call is here because `node_end_index` might briefly exceed the + // node count sometimes. + const end_index = @min(@atomicLoad(u32, &global_progress.node_end_index, .monotonic), global_progress.node_storage.len); for ( global_progress.node_parents[0..end_index], global_progress.node_storage[0..end_index], serialized_buffer.map[0..end_index], ) |*parent_ptr, *storage_ptr, *map| { - var begin_parent = @atomicLoad(Node.Parent, parent_ptr, .acquire); - while (begin_parent != .unused) { - const dest_storage = &serialized_buffer.storage[serialized_len]; - copyAtomicLoad(&dest_storage.name, &storage_ptr.name); - dest_storage.estimated_total_count = @atomicLoad(u32, &storage_ptr.estimated_total_count, .acquire); - dest_storage.completed_count = @atomicLoad(u32, &storage_ptr.completed_count, .monotonic); - const end_parent = @atomicLoad(Node.Parent, parent_ptr, .acquire); - if (begin_parent == end_parent) { - any_ipc = any_ipc or (dest_storage.getIpcFd() != null); - serialized_buffer.parents[serialized_len] = begin_parent; - map.* = @enumFromInt(serialized_len); - serialized_len += 1; - break; - } - - begin_parent = end_parent; - } else { - // A node may be freed during the execution of this loop, causing - // there to be a parent reference to a nonexistent node. Without - // this assignment, this would lead to the map entry containing - // stale data. By assigning none, the child node with the bad - // parent pointer will be harmlessly omitted from the tree. + const parent = @atomicLoad(Node.Parent, parent_ptr, .monotonic); + if (parent == .unused) { + // We might read "mixed" node data in this loop, due to weird atomic things + // or just a node actually being freed while this loop runs. 
That could cause + // there to be a parent reference to a nonexistent node. Without this assignment, + // this would lead to the map entry containing stale data. By assigning none, the + // child node with the bad parent pointer will be harmlessly omitted from the tree. + // + // Note that there's no concern of potentially creating "looping" data if we read + // "mixed" node data like this, because if a node is (directly or indirectly) its own + // parent, it will just not be printed at all. The general idea here is that performance + // is more important than 100% correct output every frame, given that this API is likely + // to be used in hot paths! map.* = .none; + continue; } + const dest_storage = &serialized_buffer.storage[serialized_len]; + copyAtomicLoad(&dest_storage.name, &storage_ptr.name); + dest_storage.estimated_total_count = @atomicLoad(u32, &storage_ptr.estimated_total_count, .acquire); // synchronizes with release in `setIpcFd` + dest_storage.completed_count = @atomicLoad(u32, &storage_ptr.completed_count, .monotonic); + + any_ipc = any_ipc or (dest_storage.getIpcFd() != null); + serialized_buffer.parents[serialized_len] = parent; + map.* = @enumFromInt(serialized_len); + serialized_len += 1; } // Remap parents to point inside serialized arrays. 
diff --git a/lib/std/Target/Query.zig b/lib/std/Target/Query.zig index 2d5c734108..cf53a8175b 100644 --- a/lib/std/Target/Query.zig +++ b/lib/std/Target/Query.zig @@ -102,7 +102,7 @@ pub fn fromTarget(target: Target) Query { .os_version_min = undefined, .os_version_max = undefined, .abi = target.abi, - .glibc_version = target.os.versionRange().gnuLibCVersion(), + .glibc_version = if (target.abi.isGnu()) target.os.versionRange().gnuLibCVersion() else null, .android_api_level = if (target.abi.isAndroid()) target.os.version_range.linux.android else null, }; result.updateOsVersionRange(target.os); diff --git a/lib/std/array_hash_map.zig b/lib/std/array_hash_map.zig index 0a13cec03f..f2a4ae069f 100644 --- a/lib/std/array_hash_map.zig +++ b/lib/std/array_hash_map.zig @@ -2486,13 +2486,13 @@ test "reIndex" { test "auto store_hash" { const HasCheapEql = AutoArrayHashMap(i32, i32); const HasExpensiveEql = AutoArrayHashMap([32]i32, i32); - try testing.expect(std.meta.fieldInfo(HasCheapEql.Data, .hash).type == void); - try testing.expect(std.meta.fieldInfo(HasExpensiveEql.Data, .hash).type != void); + try testing.expect(@FieldType(HasCheapEql.Data, "hash") == void); + try testing.expect(@FieldType(HasExpensiveEql.Data, "hash") != void); const HasCheapEqlUn = AutoArrayHashMapUnmanaged(i32, i32); const HasExpensiveEqlUn = AutoArrayHashMapUnmanaged([32]i32, i32); - try testing.expect(std.meta.fieldInfo(HasCheapEqlUn.Data, .hash).type == void); - try testing.expect(std.meta.fieldInfo(HasExpensiveEqlUn.Data, .hash).type != void); + try testing.expect(@FieldType(HasCheapEqlUn.Data, "hash") == void); + try testing.expect(@FieldType(HasExpensiveEqlUn.Data, "hash") != void); } test "sort" { diff --git a/lib/std/ascii.zig b/lib/std/ascii.zig index 6f466e0a6a..a88b637ec0 100644 --- a/lib/std/ascii.zig +++ b/lib/std/ascii.zig @@ -177,7 +177,7 @@ pub fn isAscii(c: u8) bool { return c < 128; } -/// /// Deprecated: use `isAscii` +/// Deprecated: use `isAscii` pub const isASCII = isAscii; 
/// Uppercases the character and returns it as-is if already uppercase or not a letter. diff --git a/lib/std/base64.zig b/lib/std/base64.zig index 243f206445..e88b723439 100644 --- a/lib/std/base64.zig +++ b/lib/std/base64.zig @@ -1,4 +1,5 @@ -//! Base64 encoding/decoding. +//! Base64 encoding/decoding as specified by +//! [RFC 4648](https://datatracker.ietf.org/doc/html/rfc4648). const std = @import("std.zig"); const assert = std.debug.assert; @@ -24,12 +25,15 @@ pub const Codecs = struct { Decoder: Base64Decoder, }; +/// The Base64 alphabet defined in +/// [RFC 4648 section 4](https://datatracker.ietf.org/doc/html/rfc4648#section-4). pub const standard_alphabet_chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".*; fn standardBase64DecoderWithIgnore(ignore: []const u8) Base64DecoderWithIgnore { return Base64DecoderWithIgnore.init(standard_alphabet_chars, '=', ignore); } -/// Standard Base64 codecs, with padding +/// Standard Base64 codecs, with padding, as defined in +/// [RFC 4648 section 4](https://datatracker.ietf.org/doc/html/rfc4648#section-4). pub const standard = Codecs{ .alphabet_chars = standard_alphabet_chars, .pad_char = '=', @@ -38,7 +42,8 @@ pub const standard = Codecs{ .Decoder = Base64Decoder.init(standard_alphabet_chars, '='), }; -/// Standard Base64 codecs, without padding +/// Standard Base64 codecs, without padding, as defined in +/// [RFC 4648 section 3.2](https://datatracker.ietf.org/doc/html/rfc4648#section-3.2). pub const standard_no_pad = Codecs{ .alphabet_chars = standard_alphabet_chars, .pad_char = null, @@ -47,12 +52,15 @@ pub const standard_no_pad = Codecs{ .Decoder = Base64Decoder.init(standard_alphabet_chars, null), }; +/// The URL-safe Base64 alphabet defined in +/// [RFC 4648 section 5](https://datatracker.ietf.org/doc/html/rfc4648#section-5). 
pub const url_safe_alphabet_chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_".*; fn urlSafeBase64DecoderWithIgnore(ignore: []const u8) Base64DecoderWithIgnore { return Base64DecoderWithIgnore.init(url_safe_alphabet_chars, null, ignore); } -/// URL-safe Base64 codecs, with padding +/// URL-safe Base64 codecs, with padding, as defined in +/// [RFC 4648 section 5](https://datatracker.ietf.org/doc/html/rfc4648#section-5). pub const url_safe = Codecs{ .alphabet_chars = url_safe_alphabet_chars, .pad_char = '=', @@ -61,7 +69,8 @@ pub const url_safe = Codecs{ .Decoder = Base64Decoder.init(url_safe_alphabet_chars, '='), }; -/// URL-safe Base64 codecs, without padding +/// URL-safe Base64 codecs, without padding, as defined in +/// [RFC 4648 section 3.2](https://datatracker.ietf.org/doc/html/rfc4648#section-3.2). pub const url_safe_no_pad = Codecs{ .alphabet_chars = url_safe_alphabet_chars, .pad_char = null, diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig index 3a8764eed9..5e95474569 100644 --- a/lib/std/builtin.zig +++ b/lib/std/builtin.zig @@ -141,11 +141,16 @@ pub const AtomicRmwOp = enum { /// therefore must be kept in sync with the compiler implementation. 
pub const CodeModel = enum { default, - tiny, - small, + extreme, kernel, - medium, large, + medany, + medium, + medlow, + medmid, + normal, + small, + tiny, }; /// This data structure is used by the Zig language code generation and diff --git a/lib/std/c.zig b/lib/std/c.zig index 9b7d36cb32..550679c93c 100644 --- a/lib/std/c.zig +++ b/lib/std/c.zig @@ -2269,7 +2269,10 @@ pub const SC = switch (native_os) { else => void, }; -pub const _SC = switch (native_os) { +pub const _SC = if (builtin.abi.isAndroid()) enum(c_int) { + PAGESIZE = 39, + NPROCESSORS_ONLN = 97, +} else switch (native_os) { .driverkit, .ios, .macos, .tvos, .visionos, .watchos => enum(c_int) { PAGESIZE = 29, }, @@ -9328,9 +9331,11 @@ pub extern "c" fn setrlimit64(resource: rlimit_resource, rlim: *const rlimit) c_ pub const arc4random_buf = switch (native_os) { .dragonfly, .netbsd, .freebsd, .solaris, .openbsd, .macos, .ios, .tvos, .watchos, .visionos => private.arc4random_buf, + .linux => if (builtin.abi.isAndroid()) private.arc4random_buf else {}, else => {}, }; pub const getentropy = switch (native_os) { + .linux => if (builtin.abi.isAndroid() and versionCheck(.{ .major = 28, .minor = 0, .patch = 0 })) private.getentropy else {}, .emscripten => private.getentropy, else => {}, }; diff --git a/lib/std/c/darwin.zig b/lib/std/c/darwin.zig index 561a4e7ce4..ac083ec9f2 100644 --- a/lib/std/c/darwin.zig +++ b/lib/std/c/darwin.zig @@ -1165,6 +1165,8 @@ pub const CPUFAMILY = enum(u32) { ARM_PALMA = 0x72015832, ARM_DONAN = 0x6f5129ac, ARM_BRAVA = 0x17d5b93a, + ARM_TAHITI = 0x75d4acb9, + ARM_TUPAI = 0x204526d0, _, }; diff --git a/lib/std/compress/zstandard.zig b/lib/std/compress/zstandard.zig index 9092a2d130..7b41e1fe3e 100644 --- a/lib/std/compress/zstandard.zig +++ b/lib/std/compress/zstandard.zig @@ -289,3 +289,22 @@ test "zero sized block" { try expectEqualDecodedStreaming("", input_raw); try expectEqualDecodedStreaming("", input_rle); } + +test "declared raw literals size too large" { + const input_raw 
= + "\x28\xb5\x2f\xfd" ++ // zstandard frame magic number + "\x00\x00" ++ // frame header: everything unset, window descriptor zero + "\x95\x00\x00" ++ // block header with: last_block set, block_type compressed, block_size 18 + "\xbc\xf3\xae" ++ // literals section header with: type raw, size_format 3, regenerated_size 716603 + "\xa5\x9f\xe3"; // some bytes of literal content - the content is shorter than regenerated_size + + // Note that the regenerated_size in the above input is larger than block maximum size, so the + // block can't be valid as it is a raw literals block. + + var fbs = std.io.fixedBufferStream(input_raw); + var window: [1024]u8 = undefined; + var stream = decompressor(fbs.reader(), .{ .window_buffer = &window }); + + var buf: [1024]u8 = undefined; + try std.testing.expectError(error.MalformedBlock, stream.read(&buf)); +} diff --git a/lib/std/compress/zstandard/decode/block.zig b/lib/std/compress/zstandard/decode/block.zig index 101abfc9b7..49c6e7dc36 100644 --- a/lib/std/compress/zstandard/decode/block.zig +++ b/lib/std/compress/zstandard/decode/block.zig @@ -989,6 +989,7 @@ pub fn decodeLiteralsSection( const header = try decodeLiteralsHeader(source); switch (header.block_type) { .raw => { + if (buffer.len < header.regenerated_size) return error.LiteralsBufferTooSmall; try source.readNoEof(buffer[0..header.regenerated_size]); return LiteralsSection{ .header = header, diff --git a/lib/std/compress/zstandard/decompress.zig b/lib/std/compress/zstandard/decompress.zig index 86be16268f..adc7b89749 100644 --- a/lib/std/compress/zstandard/decompress.zig +++ b/lib/std/compress/zstandard/decompress.zig @@ -380,7 +380,7 @@ pub const FrameContext = struct { /// - `error.WindowSizeUnknown` if the frame does not have a valid window /// size /// - `error.WindowTooLarge` if the window size is larger than - /// `window_size_max` + /// `window_size_max` or `std.math.maxInt(usize)` /// - `error.ContentSizeTooLarge` if the frame header indicates a content /// 
size larger than `std.math.maxInt(usize)` pub fn init( @@ -395,7 +395,7 @@ pub const FrameContext = struct { const window_size = if (window_size_raw > window_size_max) return error.WindowTooLarge else - @as(usize, @intCast(window_size_raw)); + std.math.cast(usize, window_size_raw) orelse return error.WindowTooLarge; const should_compute_checksum = frame_header.descriptor.content_checksum_flag and verify_checksum; diff --git a/lib/std/crypto/pcurves/p256.zig b/lib/std/crypto/pcurves/p256.zig index ec176f78c5..aa9226ae0d 100644 --- a/lib/std/crypto/pcurves/p256.zig +++ b/lib/std/crypto/pcurves/p256.zig @@ -471,6 +471,10 @@ pub const AffineCoordinates = struct { /// Identity element in affine coordinates. pub const identityElement = AffineCoordinates{ .x = P256.identityElement.x, .y = P256.identityElement.y }; + pub fn neg(p: AffineCoordinates) AffineCoordinates { + return .{ .x = p.x, .y = p.y.neg() }; + } + fn cMov(p: *AffineCoordinates, a: AffineCoordinates, c: u1) void { p.x.cMov(a.x, c); p.y.cMov(a.y, c); diff --git a/lib/std/crypto/pcurves/p384.zig b/lib/std/crypto/pcurves/p384.zig index 3ab95da5e8..61632f61ed 100644 --- a/lib/std/crypto/pcurves/p384.zig +++ b/lib/std/crypto/pcurves/p384.zig @@ -471,6 +471,10 @@ pub const AffineCoordinates = struct { /// Identity element in affine coordinates. pub const identityElement = AffineCoordinates{ .x = P384.identityElement.x, .y = P384.identityElement.y }; + pub fn neg(p: AffineCoordinates) AffineCoordinates { + return .{ .x = p.x, .y = p.y.neg() }; + } + fn cMov(p: *AffineCoordinates, a: AffineCoordinates, c: u1) void { p.x.cMov(a.x, c); p.y.cMov(a.y, c); diff --git a/lib/std/crypto/pcurves/secp256k1.zig b/lib/std/crypto/pcurves/secp256k1.zig index 945abea931..c891f414f5 100644 --- a/lib/std/crypto/pcurves/secp256k1.zig +++ b/lib/std/crypto/pcurves/secp256k1.zig @@ -549,6 +549,10 @@ pub const AffineCoordinates = struct { /// Identity element in affine coordinates. 
pub const identityElement = AffineCoordinates{ .x = Secp256k1.identityElement.x, .y = Secp256k1.identityElement.y }; + pub fn neg(p: AffineCoordinates) AffineCoordinates { + return .{ .x = p.x, .y = p.y.neg() }; + } + fn cMov(p: *AffineCoordinates, a: AffineCoordinates, c: u1) void { p.x.cMov(a.x, c); p.y.cMov(a.y, c); diff --git a/lib/std/crypto/tls/Client.zig b/lib/std/crypto/tls/Client.zig index 738fe122dd..4c67f15ea6 100644 --- a/lib/std/crypto/tls/Client.zig +++ b/lib/std/crypto/tls/Client.zig @@ -112,7 +112,7 @@ pub const Options = struct { /// No host verification is performed, which prevents a trusted connection from /// being established. no_verification, - /// Verify that the server certificate was issues for a given host. + /// Verify that the server certificate was issued for a given host. explicit: []const u8, }, /// How to verify the authenticity of server certificates. diff --git a/lib/std/debug.zig b/lib/std/debug.zig index 77b619a056..3b30e9c151 100644 --- a/lib/std/debug.zig +++ b/lib/std/debug.zig @@ -244,10 +244,14 @@ pub fn dumpHexFallible(bytes: []const u8) !void { const stderr = std.io.getStdErr(); const ttyconf = std.io.tty.detectConfig(stderr); const writer = stderr.writer(); + try dumpHexInternal(bytes, ttyconf, writer); +} + +fn dumpHexInternal(bytes: []const u8, ttyconf: std.io.tty.Config, writer: anytype) !void { var chunks = mem.window(u8, bytes, 16, 16); while (chunks.next()) |window| { // 1. Print the address. - const address = (@intFromPtr(bytes.ptr) + 0x10 * (chunks.index orelse 0) / 16) - 0x10; + const address = (@intFromPtr(bytes.ptr) + 0x10 * (std.math.divCeil(usize, chunks.index orelse bytes.len, 16) catch unreachable)) - 0x10; try ttyconf.setColor(writer, .dim); // We print the address in lowercase and the bytes in uppercase hexadecimal to distinguish them more. // Also, make sure all lines are aligned by padding the address. 
@@ -292,6 +296,24 @@ pub fn dumpHexFallible(bytes: []const u8) !void { } } +test dumpHexInternal { + const bytes: []const u8 = &.{ 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff, 0x01, 0x12, 0x13 }; + var output = std.ArrayList(u8).init(std.testing.allocator); + defer output.deinit(); + try dumpHexInternal(bytes, .no_color, output.writer()); + const expected = try std.fmt.allocPrint(std.testing.allocator, + \\{x:0>[2]} 00 11 22 33 44 55 66 77 88 99 AA BB CC DD EE FF .."3DUfw........ + \\{x:0>[2]} 01 12 13 ... + \\ + , .{ + @intFromPtr(bytes.ptr), + @intFromPtr(bytes.ptr) + 16, + @sizeOf(usize) * 2, + }); + defer std.testing.allocator.free(expected); + try std.testing.expectEqualStrings(expected, output.items); +} + /// Tries to print the current stack trace to stderr, unbuffered, and ignores any error returned. /// TODO multithreaded awareness pub fn dumpCurrentStackTrace(start_addr: ?usize) void { @@ -415,7 +437,13 @@ pub fn dumpStackTraceFromBase(context: *ThreadContext) void { var it = StackIterator.initWithContext(null, debug_info, context) catch return; defer it.deinit(); - printSourceAtAddress(debug_info, stderr, it.unwind_state.?.dwarf_context.pc, tty_config) catch return; + + // DWARF unwinding on aarch64-macos is not complete so we need to get pc address from mcontext + const pc_addr = if (builtin.target.os.tag.isDarwin() and native_arch == .aarch64) + context.mcontext.ss.pc + else + it.unwind_state.?.dwarf_context.pc; + printSourceAtAddress(debug_info, stderr, pc_addr, tty_config) catch return; while (it.next()) |return_address| { printLastUnwindError(&it, debug_info, stderr, tty_config); @@ -1458,8 +1486,26 @@ fn dumpSegfaultInfoPosix(sig: i32, code: i32, addr: usize, ctx_ptr: ?*anyopaque) .aarch64, .aarch64_be, => { - const ctx: *posix.ucontext_t = @ptrCast(@alignCast(ctx_ptr)); - dumpStackTraceFromBase(ctx); + // Some kernels don't align `ctx_ptr` properly. Handle this defensively. 
+ const ctx: *align(1) posix.ucontext_t = @ptrCast(ctx_ptr); + var new_ctx: posix.ucontext_t = ctx.*; + if (builtin.os.tag.isDarwin() and builtin.cpu.arch == .aarch64) { + // The kernel incorrectly writes the contents of `__mcontext_data` right after `mcontext`, + // rather than after the 8 bytes of padding that are supposed to sit between the two. Copy the + // contents to the right place so that the `mcontext` pointer will be correct after the + // `relocateContext` call below. + new_ctx.__mcontext_data = @as(*align(1) extern struct { + onstack: c_int, + sigmask: std.c.sigset_t, + stack: std.c.stack_t, + link: ?*std.c.ucontext_t, + mcsize: u64, + mcontext: *std.c.mcontext_t, + __mcontext_data: std.c.mcontext_t align(@sizeOf(usize)), // Disable padding after `mcontext`. + }, @ptrCast(ctx)).__mcontext_data; + } + relocateContext(&new_ctx); + dumpStackTraceFromBase(&new_ctx); }, else => {}, } diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig index 99a47ec8fd..8588a37caf 100644 --- a/lib/std/fmt.zig +++ b/lib/std/fmt.zig @@ -1380,13 +1380,8 @@ test fmtDuration { } fn formatDurationSigned(ns: i64, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { - if (ns < 0) { - const data = FormatDurationData{ .ns = @as(u64, @intCast(-ns)), .negative = true }; - try formatDuration(data, fmt, options, writer); - } else { - const data = FormatDurationData{ .ns = @as(u64, @intCast(ns)) }; - try formatDuration(data, fmt, options, writer); - } + const data = FormatDurationData{ .ns = @abs(ns), .negative = ns < 0 }; + try formatDuration(data, fmt, options, writer); } /// Return a Formatter for number of nanoseconds according to its signed magnitude: @@ -1457,6 +1452,7 @@ test fmtDurationSigned { .{ .s = "-1y1m999ns", .d = -(365 * std.time.ns_per_day + std.time.ns_per_min + 999) }, .{ .s = "292y24w3d23h47m16.854s", .d = math.maxInt(i64) }, .{ .s = "-292y24w3d23h47m16.854s", .d = math.minInt(i64) + 1 }, + .{ .s = "-292y24w3d23h47m16.854s", .d = 
math.minInt(i64) }, }) |tc| { const slice = try bufPrint(&buf, "{}", .{fmtDurationSigned(tc.d)}); try std.testing.expectEqualStrings(tc.s, slice); diff --git a/lib/std/fs/Dir.zig b/lib/std/fs/Dir.zig index 4ebec1ce14..e3c5568681 100644 --- a/lib/std/fs/Dir.zig +++ b/lib/std/fs/Dir.zig @@ -1347,13 +1347,11 @@ pub fn realpathW(self: Dir, pathname: []const u16, out_buffer: []u8) RealPathErr var wide_buf: [w.PATH_MAX_WIDE]u16 = undefined; const wide_slice = try w.GetFinalPathNameByHandle(h_file, .{}, &wide_buf); - var big_out_buf: [fs.max_path_bytes]u8 = undefined; - const end_index = std.unicode.wtf16LeToWtf8(&big_out_buf, wide_slice); - if (end_index > out_buffer.len) + const len = std.unicode.calcWtf8Len(wide_slice); + if (len > out_buffer.len) return error.NameTooLong; - const result = out_buffer[0..end_index]; - @memcpy(result, big_out_buf[0..end_index]); - return result; + const end_index = std.unicode.wtf16LeToWtf8(out_buffer, wide_slice); + return out_buffer[0..end_index]; } pub const RealPathAllocError = RealPathError || Allocator.Error; diff --git a/lib/std/hash/wyhash.zig b/lib/std/hash/wyhash.zig index d658ed4118..33701c2267 100644 --- a/lib/std/hash/wyhash.zig +++ b/lib/std/hash/wyhash.zig @@ -73,8 +73,8 @@ pub const Wyhash = struct { newSelf.smallKey(input); } else { var offset: usize = 0; + var scratch: [16]u8 = undefined; if (self.buf_len < 16) { - var scratch: [16]u8 = undefined; const rem = 16 - self.buf_len; @memcpy(scratch[0..rem], self.buf[self.buf.len - rem ..][0..rem]); @memcpy(scratch[rem..][0..self.buf_len], self.buf[0..self.buf_len]); diff --git a/lib/std/heap.zig b/lib/std/heap.zig index f0ea8d0d46..3e7647abb6 100644 --- a/lib/std/heap.zig +++ b/lib/std/heap.zig @@ -593,6 +593,8 @@ pub fn testAllocator(base_allocator: mem.Allocator) !void { const zero_bit_ptr = try allocator.create(u0); zero_bit_ptr.* = 0; allocator.destroy(zero_bit_ptr); + const zero_len_array = try allocator.create([0]u64); + allocator.destroy(zero_len_array); const 
oversize = try allocator.alignedAlloc(u32, null, 5); try testing.expect(oversize.len >= 5); diff --git a/lib/std/heap/arena_allocator.zig b/lib/std/heap/arena_allocator.zig index 15d9044479..f88bb7de16 100644 --- a/lib/std/heap/arena_allocator.zig +++ b/lib/std/heap/arena_allocator.zig @@ -3,8 +3,10 @@ const assert = std.debug.assert; const mem = std.mem; const Allocator = std.mem.Allocator; -/// This allocator takes an existing allocator, wraps it, and provides an interface -/// where you can allocate without freeing, and then free it all together. +/// This allocator takes an existing allocator, wraps it, and provides an interface where +/// you can allocate and then free it all together. Calls to free an individual item only +/// free the item if it was the most recent allocation, otherwise calls to free do +/// nothing. pub const ArenaAllocator = struct { child_allocator: Allocator, state: State, diff --git a/lib/std/heap/debug_allocator.zig b/lib/std/heap/debug_allocator.zig index ffdc143467..a6b2676b1d 100644 --- a/lib/std/heap/debug_allocator.zig +++ b/lib/std/heap/debug_allocator.zig @@ -281,6 +281,7 @@ pub fn DebugAllocator(comptime config: Config) type { allocated_count: SlotIndex, freed_count: SlotIndex, prev: ?*BucketHeader, + next: ?*BucketHeader, canary: usize = config.canary, fn fromPage(page_addr: usize, slot_count: usize) *BucketHeader { @@ -782,7 +783,11 @@ pub fn DebugAllocator(comptime config: Config) type { .allocated_count = 1, .freed_count = 0, .prev = self.buckets[size_class_index], + .next = null, }; + if (self.buckets[size_class_index]) |old_head| { + old_head.next = bucket; + } self.buckets[size_class_index] = bucket; if (!config.backing_allocator_zeroes) { @@ -935,9 +940,18 @@ pub fn DebugAllocator(comptime config: Config) type { } bucket.freed_count += 1; if (bucket.freed_count == bucket.allocated_count) { - if (self.buckets[size_class_index] == bucket) { - self.buckets[size_class_index] = null; + if (bucket.prev) |prev| { + prev.next = 
bucket.next; } + + if (bucket.next) |next| { + assert(self.buckets[size_class_index] != bucket); + next.prev = bucket.prev; + } else { + assert(self.buckets[size_class_index] == bucket); + self.buckets[size_class_index] = bucket.prev; + } + if (!config.never_unmap) { const page: [*]align(page_size) u8 = @ptrFromInt(page_addr); self.backing_allocator.rawFree(page[0..page_size], page_align, @returnAddress()); diff --git a/lib/std/http/Client.zig b/lib/std/http/Client.zig index 86f29b4a08..ecc1893194 100644 --- a/lib/std/http/Client.zig +++ b/lib/std/http/Client.zig @@ -1241,10 +1241,14 @@ pub fn initDefaultProxies(client: *Client, arena: Allocator) !void { fn createProxyFromEnvVar(arena: Allocator, env_var_names: []const []const u8) !?*Proxy { const content = for (env_var_names) |name| { - break std.process.getEnvVarOwned(arena, name) catch |err| switch (err) { + const content = std.process.getEnvVarOwned(arena, name) catch |err| switch (err) { error.EnvironmentVariableNotFound => continue, else => |e| return e, }; + + if (content.len == 0) continue; + + break content; } else return null; const uri = Uri.parse(content) catch try Uri.parseAfterScheme("http", content); diff --git a/lib/std/json/static_test.zig b/lib/std/json/static_test.zig index 3375ae0572..3a1919e40c 100644 --- a/lib/std/json/static_test.zig +++ b/lib/std/json/static_test.zig @@ -925,3 +925,19 @@ test "parse at comptime" { }; comptime testing.expectEqual(@as(u64, 9999), config.uptime) catch unreachable; } + +test "parse with zero-bit field" { + const str = + \\{ + \\ "a": ["a", "a"], + \\ "b": "a" + \\} + ; + const ZeroSizedEnum = enum { a }; + try testing.expectEqual(0, @sizeOf(ZeroSizedEnum)); + + const Inner = struct { a: []const ZeroSizedEnum, b: ZeroSizedEnum }; + const expected: Inner = .{ .a = &.{ .a, .a }, .b = .a }; + + try testAllParseFunctions(Inner, expected, str); +} diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig index a4ab9b2f48..f6de62550e 100644 --- 
a/lib/std/math/big/int.zig +++ b/lib/std/math/big/int.zig @@ -76,9 +76,18 @@ pub fn calcSqrtLimbsBufferLen(a_bit_count: usize) usize { return a_limb_count + 3 * u_s_rem_limb_count + calcDivLimbsBufferLen(a_limb_count, u_s_rem_limb_count); } -// Compute the number of limbs required to store a 2s-complement number of `bit_count` bits. +/// Compute the number of limbs required to store a 2s-complement number of `bit_count` bits. +pub fn calcNonZeroTwosCompLimbCount(bit_count: usize) usize { + assert(bit_count != 0); + return calcTwosCompLimbCount(bit_count); +} + +/// Compute the number of limbs required to store a 2s-complement number of `bit_count` bits. +/// +/// Special cases `bit_count == 0` to return 1. Zero-bit integers can only store the value zero +/// and this big integer implementation stores zero using one limb. pub fn calcTwosCompLimbCount(bit_count: usize) usize { - return std.math.divCeil(usize, bit_count, @bitSizeOf(Limb)) catch unreachable; + return @max(std.math.divCeil(usize, bit_count, @bitSizeOf(Limb)) catch unreachable, 1); } /// a + b * c + *carry, sets carry to the overflow bits @@ -188,8 +197,10 @@ pub const Mutable = struct { if (self.limbs.ptr != other.limbs.ptr) { @memcpy(self.limbs[0..other.limbs.len], other.limbs[0..other.limbs.len]); } - self.positive = other.positive; - self.len = other.limbs.len; + // Normalize before setting `positive` so the `eqlZero` doesn't need to iterate + // over the extra zero limbs. + self.normalize(other.limbs.len); + self.positive = other.positive or other.eqlZero(); } /// Efficiently swap an Mutable with another. This swaps the limb pointers and a full copy is not @@ -1096,7 +1107,7 @@ pub const Mutable = struct { /// Asserts there is enough memory to fit the result. The upper bound Limb count is /// `a.limbs.len + (shift / (@sizeOf(Limb) * 8))`. 
pub fn shiftLeft(r: *Mutable, a: Const, shift: usize) void { - llshl(r.limbs[0..], a.limbs[0..a.limbs.len], shift); + llshl(r.limbs, a.limbs, shift); r.normalize(a.limbs.len + (shift / limb_bits) + 1); r.positive = a.positive; } @@ -1165,7 +1176,7 @@ pub const Mutable = struct { // This shift should not be able to overflow, so invoke llshl and normalize manually // to avoid the extra required limb. - llshl(r.limbs[0..], a.limbs[0..a.limbs.len], shift); + llshl(r.limbs, a.limbs, shift); r.normalize(a.limbs.len + (shift / limb_bits)); r.positive = a.positive; } @@ -1202,17 +1213,11 @@ pub const Mutable = struct { break :nonzero a.limbs[full_limbs_shifted_out] << not_covered != 0; }; - llshr(r.limbs[0..], a.limbs[0..a.limbs.len], shift); + llshr(r.limbs, a.limbs, shift); r.len = a.limbs.len - full_limbs_shifted_out; r.positive = a.positive; - if (nonzero_negative_shiftout) { - if (full_limbs_shifted_out > 0) { - r.limbs[a.limbs.len - full_limbs_shifted_out] = 0; - r.len += 1; - } - r.addScalar(r.toConst(), -1); - } + if (nonzero_negative_shiftout) r.addScalar(r.toConst(), -1); r.normalize(r.len); } @@ -1755,119 +1760,60 @@ pub const Mutable = struct { y.shiftRight(y.toConst(), norm_shift); } - /// If a is positive, this passes through to truncate. - /// If a is negative, then r is set to positive with the bit pattern ~(a - 1). - /// r may alias a. - /// - /// Asserts `r` has enough storage to store the result. - /// The upper bound is `calcTwosCompLimbCount(a.len)`. 
- pub fn convertToTwosComplement(r: *Mutable, a: Const, signedness: Signedness, bit_count: usize) void { - if (a.positive) { - r.truncate(a, signedness, bit_count); - return; - } - - const req_limbs = calcTwosCompLimbCount(bit_count); - if (req_limbs == 0 or a.eqlZero()) { - r.set(0); - return; - } - - const bit = @as(Log2Limb, @truncate(bit_count - 1)); - const signmask = @as(Limb, 1) << bit; - const mask = (signmask << 1) -% 1; - - r.addScalar(a.abs(), -1); - if (req_limbs > r.len) { - @memset(r.limbs[r.len..req_limbs], 0); - } - - assert(r.limbs.len >= req_limbs); - r.len = req_limbs; - - llnot(r.limbs[0..r.len]); - r.limbs[r.len - 1] &= mask; - r.normalize(r.len); - } - /// Truncate an integer to a number of bits, following 2s-complement semantics. - /// r may alias a. + /// `r` may alias `a`. /// - /// Asserts `r` has enough storage to store the result. + /// Asserts `r` has enough storage to compute the result. /// The upper bound is `calcTwosCompLimbCount(a.len)`. pub fn truncate(r: *Mutable, a: Const, signedness: Signedness, bit_count: usize) void { - const req_limbs = calcTwosCompLimbCount(bit_count); - const abs_trunc_a: Const = .{ - .positive = true, - .limbs = a.limbs[0..@min(a.limbs.len, req_limbs)], - }; - // Handle 0-bit integers. - if (req_limbs == 0 or abs_trunc_a.eqlZero()) { + if (bit_count == 0) { + @branchHint(.unlikely); r.set(0); return; } - const bit = @as(Log2Limb, @truncate(bit_count - 1)); - const signmask = @as(Limb, 1) << bit; // 0b0..010...0 where 1 is the sign bit. - const mask = (signmask << 1) -% 1; // 0b0..01..1 where the leftmost 1 is the sign bit. + const max_limbs = calcTwosCompLimbCount(bit_count); + const sign_bit = @as(Limb, 1) << @truncate(bit_count - 1); + const mask = @as(Limb, maxInt(Limb)) >> @truncate(-%bit_count); - if (!a.positive) { - // Convert the integer from sign-magnitude into twos-complement. - // -x = ~(x - 1) - // Note, we simply take req_limbs * @bitSizeOf(Limb) as the - // target bit count. 
+ // Guess whether the result will have the same sign as `a`. + // * If the result will be signed zero, the guess is `true`. + // * If the result will be the minimum signed integer, the guess is `false`. + // * If the result will be unsigned zero, the guess is `a.positive`. + // * Otherwise the guess is correct. + const same_sign_guess = switch (signedness) { + .signed => max_limbs > a.limbs.len or a.limbs[max_limbs - 1] & sign_bit == 0, + .unsigned => a.positive, + }; - r.addScalar(abs_trunc_a, -1); - - // Zero-extend the result - @memset(r.limbs[r.len..req_limbs], 0); - r.len = req_limbs; - - // Without truncating, we can already peek at the sign bit of the result here. - // Note that it will be 0 if the result is negative, as we did not apply the flip here. - // If the result is negative, we have - // -(-x & mask) - // = ~(~(x - 1) & mask) + 1 - // = ~(~((x - 1) | ~mask)) + 1 - // = ((x - 1) | ~mask)) + 1 - // Note, this is only valid for the target bits and not the upper bits - // of the most significant limb. Those still need to be cleared. - // Also note that `mask` is zero for all other bits, reducing to the identity. - // This means that we still need to use & mask to clear off the upper bits. - - if (signedness == .signed and r.limbs[r.len - 1] & signmask == 0) { - // Re-add the one and negate to get the result. - r.limbs[r.len - 1] &= mask; - // Note, addition cannot require extra limbs here as we did a subtraction before. - r.addScalar(r.toConst(), 1); - r.normalize(r.len); - r.positive = false; - } else { - llnot(r.limbs[0..r.len]); - r.limbs[r.len - 1] &= mask; - r.normalize(r.len); - } - } else { + const abs_trunc_a: Const = .{ + .positive = true, + .limbs = a.limbs[0..llnormalize(a.limbs[0..@min(a.limbs.len, max_limbs)])], + }; + if (same_sign_guess or abs_trunc_a.eqlZero()) { + // One of the following is true: + // * The result is zero. + // * The result is non-zero and has the same sign as `a`. 
r.copy(abs_trunc_a); - // If the integer fits within target bits, no wrapping is required. - if (r.len < req_limbs) return; - - r.limbs[r.len - 1] &= mask; + if (max_limbs <= r.len) r.limbs[max_limbs - 1] &= mask; r.normalize(r.len); - - if (signedness == .signed and r.limbs[r.len - 1] & signmask != 0) { - // Convert 2s-complement back to sign-magnitude. - // Sign-extend the upper bits so that they are inverted correctly. - r.limbs[r.len - 1] |= ~mask; - llnot(r.limbs[0..r.len]); - - // Note, can only overflow if r holds 0xFFF...F which can only happen if - // a holds 0. - r.addScalar(r.toConst(), 1); - - r.positive = false; - } + r.positive = a.positive or r.eqlZero(); + } else { + // One of the following is true: + // * The result is the minimum signed integer. + // * The result is unsigned zero. + // * The result is non-zero and has the opposite sign as `a`. + r.addScalar(abs_trunc_a, -1); + llnot(r.limbs[0..r.len]); + @memset(r.limbs[r.len..max_limbs], maxInt(Limb)); + r.limbs[max_limbs - 1] &= mask; + r.normalize(max_limbs); + r.positive = switch (signedness) { + // The only value with the sign bit still set is the minimum signed integer. 
+ .signed => !a.positive and r.limbs[max_limbs - 1] & sign_bit == 0, + .unsigned => !a.positive or r.eqlZero(), + }; } } diff --git a/lib/std/math/big/int_test.zig b/lib/std/math/big/int_test.zig index 6ff9861fc1..489adc12fb 100644 --- a/lib/std/math/big/int_test.zig +++ b/lib/std/math/big/int_test.zig @@ -726,6 +726,34 @@ test "subWrap single-multi, signed, limb aligned" { try testing.expect((try a.toInt(SignedDoubleLimb)) == maxInt(SignedDoubleLimb)); } +test "addWrap returns normalized result" { + var x = try Managed.initSet(testing.allocator, 0); + defer x.deinit(); + var y = try Managed.initSet(testing.allocator, 0); + defer y.deinit(); + + // make them both non normalized "-0" + x.setMetadata(false, 1); + y.setMetadata(false, 1); + + var r = try Managed.init(testing.allocator); + defer r.deinit(); + try testing.expect(!(try r.addWrap(&x, &y, .unsigned, 64))); + try testing.expect(r.isPositive() and r.len() == 1 and r.limbs[0] == 0); +} + +test "subWrap returns normalized result" { + var x = try Managed.initSet(testing.allocator, 0); + defer x.deinit(); + var y = try Managed.initSet(testing.allocator, 0); + defer y.deinit(); + + var r = try Managed.init(testing.allocator); + defer r.deinit(); + try testing.expect(!(try r.subWrap(&x, &y, .unsigned, 64))); + try testing.expect(r.isPositive() and r.len() == 1 and r.limbs[0] == 0); +} + test "addSat single-single, unsigned" { var a = try Managed.initSet(testing.allocator, maxInt(u17) - 5); defer a.deinit(); @@ -1020,7 +1048,7 @@ test "mul large" { // Generate a number that's large enough to cross the thresholds for the use // of subquadratic algorithms for (a.limbs) |*p| { - p.* = std.math.maxInt(Limb); + p.* = maxInt(Limb); } a.setMetadata(true, 50); @@ -1104,7 +1132,7 @@ test "mulWrap large" { // Generate a number that's large enough to cross the thresholds for the use // of subquadratic algorithms for (a.limbs) |*p| { - p.* = std.math.maxInt(Limb); + p.* = maxInt(Limb); } a.setMetadata(true, 50); @@ -1961,23 
+1989,78 @@ test "truncate to mutable with fewer limbs" { .positive = undefined, }; res.truncate(.{ .positive = true, .limbs = &.{ 0, 1 } }, .unsigned, @bitSizeOf(Limb)); - try testing.expect(res.eqlZero()); + try testing.expect(res.positive and res.len == 1 and res.limbs[0] == 0); res.truncate(.{ .positive = true, .limbs = &.{ 0, 1 } }, .signed, @bitSizeOf(Limb)); - try testing.expect(res.eqlZero()); + try testing.expect(res.positive and res.len == 1 and res.limbs[0] == 0); res.truncate(.{ .positive = false, .limbs = &.{ 0, 1 } }, .unsigned, @bitSizeOf(Limb)); - try testing.expect(res.eqlZero()); + try testing.expect(res.positive and res.len == 1 and res.limbs[0] == 0); res.truncate(.{ .positive = false, .limbs = &.{ 0, 1 } }, .signed, @bitSizeOf(Limb)); - try testing.expect(res.eqlZero()); - res.truncate(.{ .positive = true, .limbs = &.{ std.math.maxInt(Limb), 1 } }, .unsigned, @bitSizeOf(Limb)); - try testing.expect(res.toConst().orderAgainstScalar(std.math.maxInt(Limb)).compare(.eq)); - res.truncate(.{ .positive = true, .limbs = &.{ std.math.maxInt(Limb), 1 } }, .signed, @bitSizeOf(Limb)); + try testing.expect(res.positive and res.len == 1 and res.limbs[0] == 0); + res.truncate(.{ .positive = true, .limbs = &.{ maxInt(Limb), 1 } }, .unsigned, @bitSizeOf(Limb)); + try testing.expect(res.toConst().orderAgainstScalar(maxInt(Limb)).compare(.eq)); + res.truncate(.{ .positive = true, .limbs = &.{ maxInt(Limb), 1 } }, .signed, @bitSizeOf(Limb)); try testing.expect(res.toConst().orderAgainstScalar(-1).compare(.eq)); - res.truncate(.{ .positive = false, .limbs = &.{ std.math.maxInt(Limb), 1 } }, .unsigned, @bitSizeOf(Limb)); + res.truncate(.{ .positive = false, .limbs = &.{ maxInt(Limb), 1 } }, .unsigned, @bitSizeOf(Limb)); try testing.expect(res.toConst().orderAgainstScalar(1).compare(.eq)); - res.truncate(.{ .positive = false, .limbs = &.{ std.math.maxInt(Limb), 1 } }, .signed, @bitSizeOf(Limb)); + res.truncate(.{ .positive = false, .limbs = &.{ maxInt(Limb), 1 } }, 
.signed, @bitSizeOf(Limb)); try testing.expect(res.toConst().orderAgainstScalar(1).compare(.eq)); } +test "truncate value that normalizes after being masked" { + var res_limbs: [2]Limb = undefined; + var res: Mutable = .{ + .limbs = &res_limbs, + .len = undefined, + .positive = undefined, + }; + res.truncate(.{ .positive = true, .limbs = &.{ 0, 2 } }, .signed, 1 + @bitSizeOf(Limb)); + try testing.expect(res.positive and res.len == 1 and res.limbs[0] == 0); + res.truncate(.{ .positive = true, .limbs = &.{ 1, 2 } }, .signed, 1 + @bitSizeOf(Limb)); + try testing.expect(res.toConst().orderAgainstScalar(1).compare(.eq)); +} + +test "truncate to zero" { + var res_limbs: [1]Limb = undefined; + var res: Mutable = .{ + .limbs = &res_limbs, + .len = undefined, + .positive = undefined, + }; + res.truncate(.{ .positive = true, .limbs = &.{0} }, .signed, @bitSizeOf(Limb)); + try testing.expect(res.positive and res.len == 1 and res.limbs[0] == 0); + res.truncate(.{ .positive = false, .limbs = &.{0} }, .signed, @bitSizeOf(Limb)); + try testing.expect(res.positive and res.len == 1 and res.limbs[0] == 0); + res.truncate(.{ .positive = true, .limbs = &.{0} }, .unsigned, @bitSizeOf(Limb)); + try testing.expect(res.positive and res.len == 1 and res.limbs[0] == 0); + res.truncate(.{ .positive = false, .limbs = &.{0} }, .unsigned, @bitSizeOf(Limb)); + try testing.expect(res.positive and res.len == 1 and res.limbs[0] == 0); + res.truncate(.{ .positive = true, .limbs = &.{ 0, 1 } }, .signed, @bitSizeOf(Limb)); + try testing.expect(res.positive and res.len == 1 and res.limbs[0] == 0); + res.truncate(.{ .positive = false, .limbs = &.{ 0, 1 } }, .signed, @bitSizeOf(Limb)); + try testing.expect(res.positive and res.len == 1 and res.limbs[0] == 0); + res.truncate(.{ .positive = true, .limbs = &.{ 0, 1 } }, .unsigned, @bitSizeOf(Limb)); + try testing.expect(res.positive and res.len == 1 and res.limbs[0] == 0); + res.truncate(.{ .positive = false, .limbs = &.{ 0, 1 } }, .unsigned, 
@bitSizeOf(Limb)); + try testing.expect(res.positive and res.len == 1 and res.limbs[0] == 0); +} + +test "truncate to minimum signed integer" { + var res_limbs: [1]Limb = undefined; + var res: Mutable = .{ + .limbs = &res_limbs, + .len = undefined, + .positive = undefined, + }; + res.truncate(.{ .positive = true, .limbs = &.{1 << @bitSizeOf(Limb) - 1} }, .signed, @bitSizeOf(Limb)); + try testing.expect(res.toConst().orderAgainstScalar(-1 << @bitSizeOf(Limb) - 1).compare(.eq)); + res.truncate(.{ .positive = false, .limbs = &.{1 << @bitSizeOf(Limb) - 1} }, .signed, @bitSizeOf(Limb)); + try testing.expect(res.toConst().orderAgainstScalar(-1 << @bitSizeOf(Limb) - 1).compare(.eq)); + res.truncate(.{ .positive = true, .limbs = &.{1 << @bitSizeOf(Limb) - 1} }, .unsigned, @bitSizeOf(Limb)); + try testing.expect(res.toConst().orderAgainstScalar(1 << @bitSizeOf(Limb) - 1).compare(.eq)); + res.truncate(.{ .positive = false, .limbs = &.{1 << @bitSizeOf(Limb) - 1} }, .unsigned, @bitSizeOf(Limb)); + try testing.expect(res.toConst().orderAgainstScalar(1 << @bitSizeOf(Limb) - 1).compare(.eq)); +} + test "saturate single signed positive" { var a = try Managed.initSet(testing.allocator, 0xBBBB_BBBB); defer a.deinit(); @@ -2136,6 +2219,15 @@ test "shift-right negative" { a.setSign(true); try a.shiftRight(&arg7, 4); try testing.expect(try a.toInt(i16) == -2048); + + var arg8_limbs: [1]Limb = undefined; + var arg8: Mutable = .{ + .limbs = &arg8_limbs, + .len = undefined, + .positive = undefined, + }; + arg8.shiftRight(.{ .limbs = &.{ 1, 1 }, .positive = false }, @bitSizeOf(Limb)); + try testing.expect(arg8.toConst().orderAgainstScalar(-2).compare(.eq)); } test "sat shift-left simple unsigned" { diff --git a/lib/std/mem.zig b/lib/std/mem.zig index 016f3ab9da..2363fe125e 100644 --- a/lib/std/mem.zig +++ b/lib/std/mem.zig @@ -228,6 +228,18 @@ test "Allocator.resize" { } } +test "Allocator alloc and remap with zero-bit type" { + var values = try testing.allocator.alloc(void, 10); + defer 
testing.allocator.free(values); + + try testing.expectEqual(10, values.len); + const remaped = testing.allocator.remap(values, 200); + try testing.expect(remaped != null); + + values = remaped.?; + try testing.expectEqual(200, values.len); +} + /// Copy all of source into dest at position 0. /// dest.len must be >= source.len. /// If the slices overlap, dest.ptr must be <= src.ptr. @@ -4207,10 +4219,11 @@ fn BytesAsSliceReturnType(comptime T: type, comptime bytesType: type) type { /// Given a slice of bytes, returns a slice of the specified type /// backed by those bytes, preserving pointer attributes. +/// If `T` is zero-bytes sized, the returned slice has a len of zero. pub fn bytesAsSlice(comptime T: type, bytes: anytype) BytesAsSliceReturnType(T, @TypeOf(bytes)) { // let's not give an undefined pointer to @ptrCast // it may be equal to zero and fail a null check - if (bytes.len == 0) { + if (bytes.len == 0 or @sizeOf(T) == 0) { return &[0]T{}; } @@ -4288,6 +4301,19 @@ test "bytesAsSlice preserves pointer attributes" { try testing.expectEqual(in.alignment, out.alignment); } +test "bytesAsSlice with zero-bit element type" { + { + const bytes = [_]u8{}; + const slice = bytesAsSlice(void, &bytes); + try testing.expectEqual(0, slice.len); + } + { + const bytes = [_]u8{ 0x01, 0x02, 0x03, 0x04 }; + const slice = bytesAsSlice(u0, &bytes); + try testing.expectEqual(0, slice.len); + } +} + fn SliceAsBytesReturnType(comptime Slice: type) type { return CopyPtrAttrs(Slice, .slice, u8); } diff --git a/lib/std/mem/Allocator.zig b/lib/std/mem/Allocator.zig index 1ad9533116..c2f73096e8 100644 --- a/lib/std/mem/Allocator.zig +++ b/lib/std/mem/Allocator.zig @@ -150,7 +150,10 @@ pub inline fn rawFree(a: Allocator, memory: []u8, alignment: Alignment, ret_addr /// Returns a pointer to undefined memory. /// Call `destroy` with the result to free the memory. 
pub fn create(a: Allocator, comptime T: type) Error!*T { - if (@sizeOf(T) == 0) return @as(*T, @ptrFromInt(math.maxInt(usize))); + if (@sizeOf(T) == 0) { + const ptr = comptime std.mem.alignBackward(usize, math.maxInt(usize), @alignOf(T)); + return @as(*T, @ptrFromInt(ptr)); + } const ptr: *T = @ptrCast(try a.allocBytesWithAlignment(@alignOf(T), @sizeOf(T), @returnAddress())); return ptr; } @@ -308,15 +311,19 @@ pub fn resize(self: Allocator, allocation: anytype, new_len: usize) bool { /// In such case, it is more efficient for the caller to perform those /// operations. /// -/// `allocation` may be an empty slice, in which case a new allocation is made. +/// `allocation` may be an empty slice, in which case `null` is returned, +/// unless `new_len` is also 0, in which case `allocation` is returned. /// /// `new_len` may be zero, in which case the allocation is freed. +/// +/// If the allocation's elements' type is zero bytes sized, `allocation.len` is set to `new_len`. pub fn remap(self: Allocator, allocation: anytype, new_len: usize) t: { const Slice = @typeInfo(@TypeOf(allocation)).pointer; break :t ?[]align(Slice.alignment) Slice.child; } { const Slice = @typeInfo(@TypeOf(allocation)).pointer; const T = Slice.child; + const alignment = Slice.alignment; if (new_len == 0) { self.free(allocation); @@ -325,6 +332,11 @@ pub fn remap(self: Allocator, allocation: anytype, new_len: usize) t: { if (allocation.len == 0) { return null; } + if (@sizeOf(T) == 0) { + var new_memory = allocation; + new_memory.len = new_len; + return new_memory; + } const old_memory = mem.sliceAsBytes(allocation); // I would like to use saturating multiplication here, but LLVM cannot lower it // on WebAssembly: https://github.com/ziglang/zig/issues/9660 diff --git a/lib/std/multi_array_list.zig b/lib/std/multi_array_list.zig index 7d68322c90..2290c61e2d 100644 --- a/lib/std/multi_array_list.zig +++ b/lib/std/multi_array_list.zig @@ -170,6 +170,7 @@ pub fn MultiArrayList(comptime T: type) type 
{ return lhs.alignment > rhs.alignment; } }; + @setEvalBranchQuota(3 * fields.len * std.math.log2(fields.len)); mem.sort(Data, &data, {}, Sort.lessThan); var sizes_bytes: [fields.len]usize = undefined; var field_indexes: [fields.len]usize = undefined; @@ -565,7 +566,7 @@ pub fn MultiArrayList(comptime T: type) type { } fn FieldType(comptime field: Field) type { - return meta.fieldInfo(Elem, field).type; + return @FieldType(Elem, @tagName(field)); } const Entry = entry: { @@ -978,3 +979,40 @@ test "0 sized struct" { list.swapRemove(list.len - 1); try testing.expectEqualSlices(u0, &[_]u0{0}, list.items(.a)); } + +test "struct with many fields" { + const ManyFields = struct { + fn Type(count: comptime_int) type { + var fields: [count]std.builtin.Type.StructField = undefined; + for (0..count) |i| { + fields[i] = .{ + .name = std.fmt.comptimePrint("a{}", .{i}), + .type = u32, + .default_value_ptr = null, + .is_comptime = false, + .alignment = @alignOf(u32), + }; + } + const info: std.builtin.Type = .{ .@"struct" = .{ + .layout = .auto, + .fields = &fields, + .decls = &.{}, + .is_tuple = false, + } }; + return @Type(info); + } + + fn doTest(ally: std.mem.Allocator, count: comptime_int) !void { + var list: MultiArrayList(Type(count)) = .empty; + defer list.deinit(ally); + + try list.resize(ally, 1); + list.items(.a0)[0] = 42; + } + }; + + try ManyFields.doTest(testing.allocator, 25); + try ManyFields.doTest(testing.allocator, 50); + try ManyFields.doTest(testing.allocator, 100); + try ManyFields.doTest(testing.allocator, 200); +} diff --git a/lib/std/net.zig b/lib/std/net.zig index 46c8e216d7..9d821c4399 100644 --- a/lib/std/net.zig +++ b/lib/std/net.zig @@ -785,6 +785,19 @@ fn if_nametoindex(name: []const u8) IPv6InterfaceError!u32 { return @as(u32, @bitCast(index)); } + if (native_os == .windows) { + if (name.len >= posix.IFNAMESIZE) + return error.NameTooLong; + + var interface_name: [posix.IFNAMESIZE:0]u8 = undefined; + @memcpy(interface_name[0..name.len], name); + 
interface_name[name.len] = 0; + const index = std.os.windows.ws2_32.if_nametoindex(@as([*:0]const u8, &interface_name)); + if (index == 0) + return error.InterfaceNotFound; + return index; + } + @compileError("std.net.if_nametoindex unimplemented for this OS"); } diff --git a/lib/std/net/test.zig b/lib/std/net/test.zig index b322a262bb..88a18094ac 100644 --- a/lib/std/net/test.zig +++ b/lib/std/net/test.zig @@ -129,15 +129,16 @@ test "parse and render IPv6 addresses" { try testing.expectError(error.InvalidIpv4Mapping, net.Address.parseIp6("::123.123.123.123", 0)); try testing.expectError(error.Incomplete, net.Address.parseIp6("1", 0)); // TODO Make this test pass on other operating systems. - if (builtin.os.tag == .linux or comptime builtin.os.tag.isDarwin()) { + if (builtin.os.tag == .linux or comptime builtin.os.tag.isDarwin() or builtin.os.tag == .windows) { try testing.expectError(error.Incomplete, net.Address.resolveIp6("ff01::fb%", 0)); - try testing.expectError(error.Overflow, net.Address.resolveIp6("ff01::fb%wlp3s0s0s0s0s0s0s0s0", 0)); + // Assumes IFNAMESIZE will always be a multiple of 2 + try testing.expectError(error.Overflow, net.Address.resolveIp6("ff01::fb%wlp3" ++ "s0" ** @divExact(std.posix.IFNAMESIZE - 4, 2), 0)); try testing.expectError(error.Overflow, net.Address.resolveIp6("ff01::fb%12345678901234", 0)); } } test "invalid but parseable IPv6 scope ids" { - if (builtin.os.tag != .linux and comptime !builtin.os.tag.isDarwin()) { + if (builtin.os.tag != .linux and comptime !builtin.os.tag.isDarwin() and builtin.os.tag != .windows) { // Currently, resolveIp6 with alphanumerical scope IDs only works on Linux. // TODO Make this test pass on other operating systems. 
return error.SkipZigTest; @@ -261,7 +262,7 @@ test "listen on a port, send bytes, receive bytes" { } test "listen on an in use port" { - if (builtin.os.tag != .linux and comptime !builtin.os.tag.isDarwin()) { + if (builtin.os.tag != .linux and comptime !builtin.os.tag.isDarwin() and builtin.os.tag != .windows) { // TODO build abstractions for other operating systems return error.SkipZigTest; } diff --git a/lib/std/os.zig b/lib/std/os.zig index 80f45dd59d..a435f243bd 100644 --- a/lib/std/os.zig +++ b/lib/std/os.zig @@ -120,6 +120,7 @@ pub fn getFdPath(fd: std.posix.fd_t, out_buffer: *[max_path_bytes]u8) std.posix. .SUCCESS => {}, .BADF => return error.FileNotFound, .NOSPC => return error.NameTooLong, + .NOENT => return error.FileNotFound, // TODO man pages for fcntl on macOS don't really tell you what // errno values to expect when command is F.GETPATH... else => |err| return posix.unexpectedErrno(err), diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig index d224a45a8f..40ec44ef1b 100644 --- a/lib/std/os/linux.zig +++ b/lib/std/os/linux.zig @@ -482,26 +482,27 @@ pub const O = switch (native_arch) { /// Set by startup code, used by `getauxval`. pub var elf_aux_maybe: ?[*]std.elf.Auxv = null; +/// Whether an external or internal getauxval implementation is used. 
const extern_getauxval = switch (builtin.zig_backend) { // Calling extern functions is not yet supported with these backends .stage2_aarch64, .stage2_arm, .stage2_riscv64, .stage2_sparc64 => false, else => !builtin.link_libc, }; -comptime { - const root = @import("root"); - // Export this only when building executable, otherwise it is overriding - // the libc implementation - if (extern_getauxval and (builtin.output_mode == .Exe or @hasDecl(root, "main"))) { - @export(&getauxvalImpl, .{ .name = "getauxval", .linkage = .weak }); - } -} - pub const getauxval = if (extern_getauxval) struct { + comptime { + const root = @import("root"); + // Export this only when building an executable, otherwise it is overriding + // the libc implementation + if (builtin.output_mode == .Exe or @hasDecl(root, "main")) { + @export(&getauxvalImpl, .{ .name = "getauxval", .linkage = .weak }); + } + } extern fn getauxval(index: usize) usize; }.getauxval else getauxvalImpl; fn getauxvalImpl(index: usize) callconv(.c) usize { + @disableInstrumentation(); const auxv = elf_aux_maybe orelse return 0; var i: usize = 0; while (auxv[i].a_type != std.elf.AT_NULL) : (i += 1) { @@ -1979,7 +1980,7 @@ pub fn socketpair(domain: i32, socket_type: i32, protocol: i32, fd: *[2]i32) usi pub fn accept(fd: i32, noalias addr: ?*sockaddr, noalias len: ?*socklen_t) usize { if (native_arch == .x86) { - return socketcall(SC.accept, &[4]usize{ fd, addr, len, 0 }); + return socketcall(SC.accept, &[4]usize{ @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(addr), @intFromPtr(len), 0 }); } return accept4(fd, addr, len, 0); } @@ -2221,7 +2222,7 @@ pub fn epoll_pwait(epoll_fd: i32, events: [*]epoll_event, maxevents: u32, timeou @as(usize, @intCast(maxevents)), @as(usize, @bitCast(@as(isize, timeout))), @intFromPtr(sigmask), - @sizeOf(sigset_t), + NSIG / 8, ); } @@ -3385,6 +3386,7 @@ pub const SIG = if (is_mips) struct { pub const UNBLOCK = 2; pub const SETMASK = 3; + // 
https://github.com/torvalds/linux/blob/ca91b9500108d4cf083a635c2e11c884d5dd20ea/arch/mips/include/uapi/asm/signal.h#L25 pub const HUP = 1; pub const INT = 2; pub const QUIT = 3; @@ -3392,33 +3394,32 @@ pub const SIG = if (is_mips) struct { pub const TRAP = 5; pub const ABRT = 6; pub const IOT = ABRT; - pub const BUS = 7; + pub const EMT = 7; pub const FPE = 8; pub const KILL = 9; - pub const USR1 = 10; + pub const BUS = 10; pub const SEGV = 11; - pub const USR2 = 12; + pub const SYS = 12; pub const PIPE = 13; pub const ALRM = 14; pub const TERM = 15; - pub const STKFLT = 16; - pub const CHLD = 17; - pub const CONT = 18; - pub const STOP = 19; - pub const TSTP = 20; - pub const TTIN = 21; - pub const TTOU = 22; - pub const URG = 23; - pub const XCPU = 24; - pub const XFSZ = 25; - pub const VTALRM = 26; - pub const PROF = 27; - pub const WINCH = 28; - pub const IO = 29; - pub const POLL = 29; - pub const PWR = 30; - pub const SYS = 31; - pub const UNUSED = SIG.SYS; + pub const USR1 = 16; + pub const USR2 = 17; + pub const CHLD = 18; + pub const PWR = 19; + pub const WINCH = 20; + pub const URG = 21; + pub const IO = 22; + pub const POLL = IO; + pub const STOP = 23; + pub const TSTP = 24; + pub const CONT = 25; + pub const TTIN = 26; + pub const TTOU = 27; + pub const VTALRM = 28; + pub const PROF = 29; + pub const XCPU = 30; + pub const XFSZ = 31; pub const ERR: ?Sigaction.handler_fn = @ptrFromInt(maxInt(usize)); pub const DFL: ?Sigaction.handler_fn = @ptrFromInt(0); diff --git a/lib/std/os/linux/sparc64.zig b/lib/std/os/linux/sparc64.zig index 90cb799e02..cb454c7691 100644 --- a/lib/std/os/linux/sparc64.zig +++ b/lib/std/os/linux/sparc64.zig @@ -304,6 +304,7 @@ pub const msghdr_const = extern struct { pub const off_t = i64; pub const ino_t = u64; +pub const time_t = isize; pub const mode_t = u32; pub const dev_t = usize; pub const nlink_t = u32; diff --git a/lib/std/os/linux/tls.zig b/lib/std/os/linux/tls.zig index 3180b04d2b..fdaac29e5d 100644 ---
a/lib/std/os/linux/tls.zig +++ b/lib/std/os/linux/tls.zig @@ -516,7 +516,7 @@ pub fn initStatic(phdrs: []elf.Phdr) void { -1, 0, ); - if (@as(isize, @bitCast(begin_addr)) < 0) @trap(); + if (@call(.always_inline, linux.E.init, .{begin_addr}) != .SUCCESS) @trap(); const area_ptr: [*]align(page_size_min) u8 = @ptrFromInt(begin_addr); diff --git a/lib/std/os/windows.zig b/lib/std/os/windows.zig index 563b24cf83..8e186c7468 100644 --- a/lib/std/os/windows.zig +++ b/lib/std/os/windows.zig @@ -687,6 +687,7 @@ pub fn WriteFile( .INVALID_HANDLE => return error.NotOpenForWriting, .LOCK_VIOLATION => return error.LockViolation, .NETNAME_DELETED => return error.ConnectionResetByPeer, + .WORKING_SET_QUOTA => return error.SystemResources, else => |err| return unexpectedError(err), } } @@ -1913,6 +1914,7 @@ pub fn CreateProcessW( switch (GetLastError()) { .FILE_NOT_FOUND => return error.FileNotFound, .PATH_NOT_FOUND => return error.FileNotFound, + .DIRECTORY => return error.FileNotFound, .ACCESS_DENIED => return error.AccessDenied, .INVALID_PARAMETER => unreachable, .INVALID_NAME => return error.InvalidName, @@ -5246,6 +5248,9 @@ pub const PF = enum(DWORD) { /// This ARM processor implements the ARM v8.3 JavaScript conversion (JSCVT) instructions. ARM_V83_JSCVT_INSTRUCTIONS_AVAILABLE = 44, + + /// This Arm processor implements the Arm v8.3 LRCPC instructions (for example, LDAPR). Note that certain Arm v8.2 CPUs may optionally support the LRCPC instructions. 
+ ARM_V83_LRCPC_INSTRUCTIONS_AVAILABLE, }; pub const MAX_WOW64_SHARED_ENTRIES = 16; diff --git a/lib/std/posix.zig b/lib/std/posix.zig index c07d6e6f0d..f6252acaa1 100644 --- a/lib/std/posix.zig +++ b/lib/std/posix.zig @@ -722,9 +722,12 @@ pub fn raise(sig: u8) RaiseError!void { } if (native_os == .linux) { + // https://git.musl-libc.org/cgit/musl/commit/?id=0bed7e0acfd34e3fb63ca0e4d99b7592571355a9 + // + // Unlike musl, libc-less Zig std does not have any internal signals for implementation purposes, so we + // need to block all signals on the assumption that any of them could potentially fork() in a handler. var set: sigset_t = undefined; - // block application signals - sigprocmask(SIG.BLOCK, &linux.app_mask, &set); + sigprocmask(SIG.BLOCK, &linux.all_mask, &set); const tid = linux.gettid(); const rc = linux.tkill(tid, sig); @@ -7474,7 +7477,7 @@ pub fn ioctl_SIOCGIFINDEX(fd: fd_t, ifr: *ifreq) IoCtl_SIOCGIFINDEX_Error!void { } } -const lfs64_abi = native_os == .linux and builtin.link_libc and builtin.abi.isGnu(); +const lfs64_abi = native_os == .linux and builtin.link_libc and (builtin.abi.isGnu() or builtin.abi.isAndroid()); /// Whether or not `error.Unexpected` will print its value and a stack trace. 
/// diff --git a/lib/std/process.zig b/lib/std/process.zig index dd08e88af2..ebc8de28b7 100644 --- a/lib/std/process.zig +++ b/lib/std/process.zig @@ -1651,14 +1651,15 @@ pub fn posixGetUserInfo(name: []const u8) !UserInfo { pub fn getBaseAddress() usize { switch (native_os) { .linux => { - const base = std.os.linux.getauxval(std.elf.AT_BASE); + const getauxval = if (builtin.link_libc) std.c.getauxval else std.os.linux.getauxval; + const base = getauxval(std.elf.AT_BASE); if (base != 0) { return base; } - const phdr = std.os.linux.getauxval(std.elf.AT_PHDR); + const phdr = getauxval(std.elf.AT_PHDR); return phdr - @sizeOf(std.elf.Ehdr); }, - .macos, .freebsd, .netbsd => { + .driverkit, .ios, .macos, .tvos, .visionos, .watchos => { return @intFromPtr(&std.c._mh_execute_header); }, .windows => return @intFromPtr(windows.kernel32.GetModuleHandleW(null)), diff --git a/lib/std/start.zig b/lib/std/start.zig index a91df35700..e1510b3ecb 100644 --- a/lib/std/start.zig +++ b/lib/std/start.zig @@ -239,7 +239,7 @@ fn _start() callconv(.naked) noreturn { .csky => ".cfi_undefined lr", .hexagon => ".cfi_undefined r31", .loongarch32, .loongarch64 => ".cfi_undefined 1", - .m68k => ".cfi_undefined pc", + .m68k => ".cfi_undefined %%pc", .mips, .mipsel, .mips64, .mips64el => ".cfi_undefined $ra", .powerpc, .powerpcle, .powerpc64, .powerpc64le => ".cfi_undefined lr", .riscv32, .riscv64 => if (builtin.zig_backend == .stage2_riscv64) @@ -355,7 +355,11 @@ fn _start() callconv(.naked) noreturn { // Note that the - 8 is needed because pc in the jsr instruction points into the middle // of the jsr instruction. (The lea is 6 bytes, the jsr is 4 bytes.) \\ suba.l %%fp, %%fp - \\ move.l %%sp, -(%%sp) + \\ move.l %%sp, %%a0 + \\ move.l %%a0, %%d0 + \\ and.l #-4, %%d0 + \\ move.l %%d0, %%sp + \\ move.l %%a0, -(%%sp) \\ lea %[posixCallMainAndExit] - . 
- 8, %%a0 \\ jsr (%%pc, %%a0) , diff --git a/lib/std/testing/FailingAllocator.zig b/lib/std/testing/FailingAllocator.zig index c7767ae02f..c1f9791e39 100644 --- a/lib/std/testing/FailingAllocator.zig +++ b/lib/std/testing/FailingAllocator.zig @@ -1,13 +1,5 @@ //! Allocator that fails after N allocations, useful for making sure out of //! memory conditions are handled correctly. -//! -//! To use this, first initialize it and get an allocator with -//! -//! `const failing_allocator = &FailingAllocator.init(, -//! ).allocator;` -//! -//! Then use `failing_allocator` anywhere you would have used a -//! different allocator. const std = @import("../std.zig"); const mem = std.mem; const FailingAllocator = @This(); @@ -28,12 +20,7 @@ const num_stack_frames = if (std.debug.sys_can_stack_trace) 16 else 0; pub const Config = struct { /// The number of successful allocations you can expect from this allocator. - /// The next allocation will fail. For example, with `fail_index` equal to - /// 2, the following test will pass: - /// - /// var a = try failing_alloc.create(i32); - /// var b = try failing_alloc.create(i32); - /// testing.expectError(error.OutOfMemory, failing_alloc.create(i32)); + /// The next allocation will fail. fail_index: usize = std.math.maxInt(usize), /// Number of successful resizes to expect from this allocator. The next resize will fail. 
@@ -159,3 +146,40 @@ pub fn getStackTrace(self: *FailingAllocator) std.builtin.StackTrace { .index = len, }; } + +test FailingAllocator { + // Fail on allocation + { + var failing_allocator_state = FailingAllocator.init(std.testing.allocator, .{ + .fail_index = 2, + }); + const failing_alloc = failing_allocator_state.allocator(); + + const a = try failing_alloc.create(i32); + defer failing_alloc.destroy(a); + const b = try failing_alloc.create(i32); + defer failing_alloc.destroy(b); + try std.testing.expectError(error.OutOfMemory, failing_alloc.create(i32)); + } + // Fail on resize + { + var failing_allocator_state = FailingAllocator.init(std.testing.allocator, .{ + .resize_fail_index = 1, + }); + const failing_alloc = failing_allocator_state.allocator(); + + const resized_slice = blk: { + const slice = try failing_alloc.alloc(u8, 8); + errdefer failing_alloc.free(slice); + + break :blk failing_alloc.remap(slice, 6) orelse return error.UnexpectedRemapFailure; + }; + defer failing_alloc.free(resized_slice); + + // Remap and resize should fail from here on out + try std.testing.expectEqual(null, failing_alloc.remap(resized_slice, 4)); + try std.testing.expectEqual(false, failing_alloc.resize(resized_slice, 4)); + + // Note: realloc could succeed because it falls back to free+alloc + } +} diff --git a/lib/std/zig/Ast.zig b/lib/std/zig/Ast.zig index d4503b95ca..af7b6ab6a2 100644 --- a/lib/std/zig/Ast.zig +++ b/lib/std/zig/Ast.zig @@ -100,14 +100,19 @@ pub fn parse(gpa: Allocator, source: [:0]const u8, mode: Mode) Allocator.Error!A .zon => try parser.parseZon(), } + const extra_data = try parser.extra_data.toOwnedSlice(gpa); + errdefer gpa.free(extra_data); + const errors = try parser.errors.toOwnedSlice(gpa); + errdefer gpa.free(errors); + // TODO experiment with compacting the MultiArrayList slices here return Ast{ .source = source, .mode = mode, .tokens = tokens.toOwnedSlice(), .nodes = parser.nodes.toOwnedSlice(), - .extra_data = try 
parser.extra_data.toOwnedSlice(gpa), - .errors = try parser.errors.toOwnedSlice(gpa), + .extra_data = extra_data, + .errors = errors, }; } diff --git a/lib/std/zig/c_translation.zig b/lib/std/zig/c_translation.zig index bc3e893596..813b409c90 100644 --- a/lib/std/zig/c_translation.zig +++ b/lib/std/zig/c_translation.zig @@ -88,6 +88,9 @@ fn castToPtr(comptime DestType: type, comptime SourceType: type, target: anytype .pointer => { return castPtr(DestType, target); }, + .@"fn" => { + return castPtr(DestType, &target); + }, .optional => |target_opt| { if (@typeInfo(target_opt.child) == .pointer) { return castPtr(DestType, target); @@ -686,3 +689,14 @@ test "Extended C ABI casting" { try testing.expect(@TypeOf(Macros.L_SUFFIX(math.maxInt(c_long) + 1)) == c_longlong); // comptime_int -> c_longlong } } + +// Function with complex signature for testing the SDL case +fn complexFunction(_: ?*anyopaque, _: c_uint, _: ?*const fn (?*anyopaque) callconv(.c) c_uint, _: ?*anyopaque, _: c_uint, _: [*c]c_uint) callconv(.c) usize { + return 0; +} + +test "function pointer casting" { + const SDL_FunctionPointer = ?*const fn () callconv(.c) void; + const fn_ptr = cast(SDL_FunctionPointer, complexFunction); + try testing.expect(fn_ptr != null); +} diff --git a/lib/std/zig/llvm/Builder.zig b/lib/std/zig/llvm/Builder.zig index 6d16a33602..a713830161 100644 --- a/lib/std/zig/llvm/Builder.zig +++ b/lib/std/zig/llvm/Builder.zig @@ -7526,9 +7526,9 @@ pub const Constant = enum(u32) { }; } }; - const Mantissa64 = std.meta.FieldType(Float.Repr(f64), .mantissa); - const Exponent32 = std.meta.FieldType(Float.Repr(f32), .exponent); - const Exponent64 = std.meta.FieldType(Float.Repr(f64), .exponent); + const Mantissa64 = @FieldType(Float.Repr(f64), "mantissa"); + const Exponent32 = @FieldType(Float.Repr(f32), "exponent"); + const Exponent64 = @FieldType(Float.Repr(f64), "exponent"); const repr: Float.Repr(f32) = @bitCast(item.data); const denormal_shift = switch (repr.exponent) { diff --git 
a/lib/std/zig/system.zig b/lib/std/zig/system.zig index 84f9cf7330..301ae16eca 100644 --- a/lib/std/zig/system.zig +++ b/lib/std/zig/system.zig @@ -125,40 +125,33 @@ pub fn getExternalExecutor( }; } + if (options.allow_wasmtime and candidate.cpu.arch.isWasm()) { + return Executor{ .wasmtime = "wasmtime" }; + } + switch (candidate.os.tag) { .windows => { if (options.allow_wine) { - // x86_64 wine does not support emulating aarch64-windows and - // vice versa. - if (candidate.cpu.arch != builtin.cpu.arch and - !(candidate.cpu.arch == .thumb and builtin.cpu.arch == .aarch64) and - !(candidate.cpu.arch == .x86 and builtin.cpu.arch == .x86_64)) - { - return bad_result; - } - switch (candidate.ptrBitWidth()) { - 32 => return Executor{ .wine = "wine" }, - 64 => return Executor{ .wine = "wine64" }, - else => return bad_result, - } + const wine_supported = switch (candidate.cpu.arch) { + .thumb => switch (host.cpu.arch) { + .arm, .thumb, .aarch64 => true, + else => false, + }, + .aarch64 => host.cpu.arch == .aarch64, + .x86 => host.cpu.arch.isX86(), + .x86_64 => host.cpu.arch == .x86_64, + else => false, + }; + return if (wine_supported) Executor{ .wine = "wine" } else bad_result; } return bad_result; }, - .wasi => { - if (options.allow_wasmtime) { - switch (candidate.ptrBitWidth()) { - 32 => return Executor{ .wasmtime = "wasmtime" }, - else => return bad_result, - } - } - return bad_result; - }, - .macos => { + .driverkit, .macos => { if (options.allow_darling) { // This check can be loosened once darling adds a QEMU-based emulation // layer for non-host architectures: // https://github.com/darlinghq/darling/issues/863 - if (candidate.cpu.arch != builtin.cpu.arch) { + if (candidate.cpu.arch != host.cpu.arch) { return bad_result; } return Executor{ .darling = "darling" }; diff --git a/lib/std/zig/system/arm.zig b/lib/std/zig/system/arm.zig index c8c3f6a851..c3e27f65cf 100644 --- a/lib/std/zig/system/arm.zig +++ b/lib/std/zig/system/arm.zig @@ -22,32 +22,34 @@ pub const 
cpu_models = struct { // implementer = 0x41 const ARM = [_]E{ - E{ .part = 0x926, .m32 = &A32.arm926ej_s, .m64 = null }, - E{ .part = 0xb02, .m32 = &A32.mpcore, .m64 = null }, - E{ .part = 0xb36, .m32 = &A32.arm1136j_s, .m64 = null }, - E{ .part = 0xb56, .m32 = &A32.arm1156t2_s, .m64 = null }, - E{ .part = 0xb76, .m32 = &A32.arm1176jz_s, .m64 = null }, - E{ .part = 0xc05, .m32 = &A32.cortex_a5, .m64 = null }, - E{ .part = 0xc07, .m32 = &A32.cortex_a7, .m64 = null }, - E{ .part = 0xc08, .m32 = &A32.cortex_a8, .m64 = null }, - E{ .part = 0xc09, .m32 = &A32.cortex_a9, .m64 = null }, - E{ .part = 0xc0d, .m32 = &A32.cortex_a17, .m64 = null }, - E{ .part = 0xc0f, .m32 = &A32.cortex_a15, .m64 = null }, - E{ .part = 0xc0e, .m32 = &A32.cortex_a17, .m64 = null }, - E{ .part = 0xc14, .m32 = &A32.cortex_r4, .m64 = null }, - E{ .part = 0xc15, .m32 = &A32.cortex_r5, .m64 = null }, - E{ .part = 0xc17, .m32 = &A32.cortex_r7, .m64 = null }, - E{ .part = 0xc18, .m32 = &A32.cortex_r8, .m64 = null }, - E{ .part = 0xc20, .m32 = &A32.cortex_m0, .m64 = null }, - E{ .part = 0xc21, .m32 = &A32.cortex_m1, .m64 = null }, - E{ .part = 0xc23, .m32 = &A32.cortex_m3, .m64 = null }, - E{ .part = 0xc24, .m32 = &A32.cortex_m4, .m64 = null }, - E{ .part = 0xc27, .m32 = &A32.cortex_m7, .m64 = null }, - E{ .part = 0xc60, .m32 = &A32.cortex_m0plus, .m64 = null }, - E{ .part = 0xd01, .m32 = &A32.cortex_a32, .m64 = null }, + E{ .part = 0x926, .m32 = &A32.arm926ej_s }, + E{ .part = 0xb02, .m32 = &A32.mpcore }, + E{ .part = 0xb36, .m32 = &A32.arm1136j_s }, + E{ .part = 0xb56, .m32 = &A32.arm1156t2_s }, + E{ .part = 0xb76, .m32 = &A32.arm1176jz_s }, + E{ .part = 0xc05, .m32 = &A32.cortex_a5 }, + E{ .part = 0xc07, .m32 = &A32.cortex_a7 }, + E{ .part = 0xc08, .m32 = &A32.cortex_a8 }, + E{ .part = 0xc09, .m32 = &A32.cortex_a9 }, + E{ .part = 0xc0d, .m32 = &A32.cortex_a17 }, + E{ .part = 0xc0e, .m32 = &A32.cortex_a17 }, + E{ .part = 0xc0f, .m32 = &A32.cortex_a15 }, + E{ .part = 0xc14, .m32 = &A32.cortex_r4 }, + 
E{ .part = 0xc15, .m32 = &A32.cortex_r5 }, + E{ .part = 0xc17, .m32 = &A32.cortex_r7 }, + E{ .part = 0xc18, .m32 = &A32.cortex_r8 }, + E{ .part = 0xc20, .m32 = &A32.cortex_m0 }, + E{ .part = 0xc21, .m32 = &A32.cortex_m1 }, + E{ .part = 0xc23, .m32 = &A32.cortex_m3 }, + E{ .part = 0xc24, .m32 = &A32.cortex_m4 }, + E{ .part = 0xc27, .m32 = &A32.cortex_m7 }, + E{ .part = 0xc60, .m32 = &A32.cortex_m0plus }, + E{ .part = 0xd01, .m32 = &A32.cortex_a32 }, + E{ .part = 0xd02, .m64 = &A64.cortex_a34 }, E{ .part = 0xd03, .m32 = &A32.cortex_a53, .m64 = &A64.cortex_a53 }, E{ .part = 0xd04, .m32 = &A32.cortex_a35, .m64 = &A64.cortex_a35 }, E{ .part = 0xd05, .m32 = &A32.cortex_a55, .m64 = &A64.cortex_a55 }, + E{ .part = 0xd06, .m64 = &A64.cortex_a65 }, E{ .part = 0xd07, .m32 = &A32.cortex_a57, .m64 = &A64.cortex_a57 }, E{ .part = 0xd08, .m32 = &A32.cortex_a72, .m64 = &A64.cortex_a72 }, E{ .part = 0xd09, .m32 = &A32.cortex_a73, .m64 = &A64.cortex_a73 }, @@ -55,16 +57,38 @@ pub const cpu_models = struct { E{ .part = 0xd0b, .m32 = &A32.cortex_a76, .m64 = &A64.cortex_a76 }, E{ .part = 0xd0c, .m32 = &A32.neoverse_n1, .m64 = &A64.neoverse_n1 }, E{ .part = 0xd0d, .m32 = &A32.cortex_a77, .m64 = &A64.cortex_a77 }, - E{ .part = 0xd13, .m32 = &A32.cortex_r52, .m64 = null }, - E{ .part = 0xd20, .m32 = &A32.cortex_m23, .m64 = null }, - E{ .part = 0xd21, .m32 = &A32.cortex_m33, .m64 = null }, + E{ .part = 0xd0e, .m32 = &A32.cortex_a76ae, .m64 = &A64.cortex_a76ae }, + E{ .part = 0xd13, .m32 = &A32.cortex_r52 }, + E{ .part = 0xd14, .m64 = &A64.cortex_r82ae }, + E{ .part = 0xd15, .m64 = &A64.cortex_r82 }, + E{ .part = 0xd16, .m32 = &A32.cortex_r52plus }, + E{ .part = 0xd20, .m32 = &A32.cortex_m23 }, + E{ .part = 0xd21, .m32 = &A32.cortex_m33 }, + E{ .part = 0xd40, .m32 = &A32.neoverse_v1, .m64 = &A64.neoverse_v1 }, E{ .part = 0xd41, .m32 = &A32.cortex_a78, .m64 = &A64.cortex_a78 }, + E{ .part = 0xd42, .m32 = &A32.cortex_a78ae, .m64 = &A64.cortex_a78ae }, + E{ .part = 0xd43, .m64 = 
&A64.cortex_a65ae }, + E{ .part = 0xd44, .m32 = &A32.cortex_x1, .m64 = &A64.cortex_x1 }, + E{ .part = 0xd46, .m64 = &A64.cortex_a510 }, + E{ .part = 0xd47, .m32 = &A32.cortex_a710, .m64 = &A64.cortex_a710 }, + E{ .part = 0xd48, .m64 = &A64.cortex_x2 }, + E{ .part = 0xd49, .m32 = &A32.neoverse_n2, .m64 = &A64.neoverse_n2 }, + E{ .part = 0xd4a, .m64 = &A64.neoverse_e1 }, E{ .part = 0xd4b, .m32 = &A32.cortex_a78c, .m64 = &A64.cortex_a78c }, E{ .part = 0xd4c, .m32 = &A32.cortex_x1c, .m64 = &A64.cortex_x1c }, - E{ .part = 0xd44, .m32 = &A32.cortex_x1, .m64 = &A64.cortex_x1 }, - E{ .part = 0xd02, .m64 = &A64.cortex_a34 }, - E{ .part = 0xd06, .m64 = &A64.cortex_a65 }, - E{ .part = 0xd43, .m64 = &A64.cortex_a65ae }, + E{ .part = 0xd4d, .m64 = &A64.cortex_a715 }, + E{ .part = 0xd4e, .m64 = &A64.cortex_x3 }, + E{ .part = 0xd4f, .m64 = &A64.neoverse_v2 }, + E{ .part = 0xd80, .m64 = &A64.cortex_a520 }, + E{ .part = 0xd81, .m64 = &A64.cortex_a720 }, + E{ .part = 0xd82, .m64 = &A64.cortex_x4 }, + E{ .part = 0xd83, .m64 = &A64.neoverse_v3ae }, + E{ .part = 0xd84, .m64 = &A64.neoverse_v3 }, + E{ .part = 0xd85, .m64 = &A64.cortex_x925 }, + E{ .part = 0xd87, .m64 = &A64.cortex_a725 }, + E{ .part = 0xd88, .m64 = &A64.cortex_a520ae }, + E{ .part = 0xd89, .m64 = &A64.cortex_a720ae }, + E{ .part = 0xd8e, .m64 = &A64.neoverse_n3 }, }; // implementer = 0x42 const Broadcom = [_]E{ @@ -97,6 +121,7 @@ pub const cpu_models = struct { }; // implementer = 0x51 const Qualcomm = [_]E{ + E{ .part = 0x001, .m64 = &A64.oryon_1 }, E{ .part = 0x06f, .m32 = &A32.krait }, E{ .part = 0x201, .m64 = &A64.kryo, .m32 = &A64.kryo }, E{ .part = 0x205, .m64 = &A64.kryo, .m32 = &A64.kryo }, @@ -110,7 +135,7 @@ pub const cpu_models = struct { E{ .part = 0xc00, .m64 = &A64.falkor }, E{ .part = 0xc01, .m64 = &A64.saphira }, }; - + // implementer = 0x61 const Apple = [_]E{ E{ .part = 0x022, .m64 = &A64.apple_m1 }, E{ .part = 0x023, .m64 = &A64.apple_m1 }, @@ -133,6 +158,7 @@ pub const cpu_models = struct { 0x43 => 
&Cavium, 0x46 => &Fujitsu, 0x48 => &HiSilicon, + 0x4e => &Nvidia, 0x50 => &Ampere, 0x51 => &Qualcomm, 0x61 => &Apple, diff --git a/lib/std/zig/system/darwin/macos.zig b/lib/std/zig/system/darwin/macos.zig index 8ae3d470c6..eba837fb49 100644 --- a/lib/std/zig/system/darwin/macos.zig +++ b/lib/std/zig/system/darwin/macos.zig @@ -408,22 +408,24 @@ pub fn detectNativeCpuAndFeatures() ?Target.Cpu { switch (current_arch) { .aarch64, .aarch64_be => { const model = switch (cpu_family) { - .ARM_EVEREST_SAWTOOTH => &Target.aarch64.cpu.apple_a16, - .ARM_BLIZZARD_AVALANCHE => &Target.aarch64.cpu.apple_a15, - .ARM_FIRESTORM_ICESTORM => &Target.aarch64.cpu.apple_a14, - .ARM_LIGHTNING_THUNDER => &Target.aarch64.cpu.apple_a13, - .ARM_VORTEX_TEMPEST => &Target.aarch64.cpu.apple_a12, - .ARM_MONSOON_MISTRAL => &Target.aarch64.cpu.apple_a11, - .ARM_HURRICANE => &Target.aarch64.cpu.apple_a10, - .ARM_TWISTER => &Target.aarch64.cpu.apple_a9, + .ARM_CYCLONE => &Target.aarch64.cpu.apple_a7, .ARM_TYPHOON => &Target.aarch64.cpu.apple_a8, - .ARM_CYCLONE => &Target.aarch64.cpu.cyclone, - .ARM_COLL => &Target.aarch64.cpu.apple_a17, + .ARM_TWISTER => &Target.aarch64.cpu.apple_a9, + .ARM_HURRICANE => &Target.aarch64.cpu.apple_a10, + .ARM_MONSOON_MISTRAL => &Target.aarch64.cpu.apple_a11, + .ARM_VORTEX_TEMPEST => &Target.aarch64.cpu.apple_a12, + .ARM_LIGHTNING_THUNDER => &Target.aarch64.cpu.apple_a13, + .ARM_FIRESTORM_ICESTORM => &Target.aarch64.cpu.apple_m1, // a14 + .ARM_BLIZZARD_AVALANCHE => &Target.aarch64.cpu.apple_m2, // a15 + .ARM_EVEREST_SAWTOOTH => &Target.aarch64.cpu.apple_m3, // a16 .ARM_IBIZA => &Target.aarch64.cpu.apple_m3, // base - .ARM_LOBOS => &Target.aarch64.cpu.apple_m3, // pro .ARM_PALMA => &Target.aarch64.cpu.apple_m3, // max + .ARM_LOBOS => &Target.aarch64.cpu.apple_m3, // pro + .ARM_COLL => &Target.aarch64.cpu.apple_a17, // a17 pro .ARM_DONAN => &Target.aarch64.cpu.apple_m4, // base .ARM_BRAVA => &Target.aarch64.cpu.apple_m4, // pro/max + .ARM_TAHITI => 
&Target.aarch64.cpu.apple_m4, // a18 pro + .ARM_TUPAI => &Target.aarch64.cpu.apple_m4, // a18 else => return null, }; diff --git a/lib/std/zig/system/x86.zig b/lib/std/zig/system/x86.zig index 428561c371..febd677402 100644 --- a/lib/std/zig/system/x86.zig +++ b/lib/std/zig/system/x86.zig @@ -2,11 +2,30 @@ const std = @import("std"); const builtin = @import("builtin"); const Target = std.Target; -const XCR0_XMM = 0x02; -const XCR0_YMM = 0x04; -const XCR0_MASKREG = 0x20; -const XCR0_ZMM0_15 = 0x40; -const XCR0_ZMM16_31 = 0x80; +/// Only covers EAX for now. +const Xcr0 = packed struct(u32) { + x87: bool, + sse: bool, + avx: bool, + bndreg: bool, + bndcsr: bool, + opmask: bool, + zmm_hi256: bool, + hi16_zmm: bool, + pt: bool, + pkru: bool, + pasid: bool, + cet_u: bool, + cet_s: bool, + hdc: bool, + uintr: bool, + lbr: bool, + hwp: bool, + xtilecfg: bool, + xtiledata: bool, + apx: bool, + _reserved: u12, +}; fn setFeature(cpu: *Target.Cpu, feature: Target.x86.Feature, enabled: bool) void { const idx = @as(Target.Cpu.Feature.Set.Index, @intFromEnum(feature)); @@ -339,12 +358,6 @@ fn detectNativeFeatures(cpu: *Target.Cpu, os_tag: Target.Os.Tag) void { leaf = cpuid(1, 0); - setFeature(cpu, .cx8, bit(leaf.edx, 8)); - setFeature(cpu, .cmov, bit(leaf.edx, 15)); - setFeature(cpu, .mmx, bit(leaf.edx, 23)); - setFeature(cpu, .fxsr, bit(leaf.edx, 24)); - setFeature(cpu, .sse, bit(leaf.edx, 25)); - setFeature(cpu, .sse2, bit(leaf.edx, 26)); setFeature(cpu, .sse3, bit(leaf.ecx, 0)); setFeature(cpu, .pclmul, bit(leaf.ecx, 1)); setFeature(cpu, .ssse3, bit(leaf.ecx, 9)); @@ -356,13 +369,20 @@ fn detectNativeFeatures(cpu: *Target.Cpu, os_tag: Target.Os.Tag) void { setFeature(cpu, .aes, bit(leaf.ecx, 25)); setFeature(cpu, .rdrnd, bit(leaf.ecx, 30)); + setFeature(cpu, .cx8, bit(leaf.edx, 8)); + setFeature(cpu, .cmov, bit(leaf.edx, 15)); + setFeature(cpu, .mmx, bit(leaf.edx, 23)); + setFeature(cpu, .fxsr, bit(leaf.edx, 24)); + setFeature(cpu, .sse, bit(leaf.edx, 25)); + setFeature(cpu, 
.sse2, bit(leaf.edx, 26)); + const has_xsave = bit(leaf.ecx, 27); const has_avx = bit(leaf.ecx, 28); // Make sure not to call xgetbv if xsave is not supported - const xcr0_eax = if (has_xsave and has_avx) getXCR0() else 0; + const xcr0: Xcr0 = if (has_xsave and has_avx) @bitCast(getXCR0()) else @bitCast(@as(u32, 0)); - const has_avx_save = hasMask(xcr0_eax, XCR0_XMM | XCR0_YMM); + const has_avx_save = xcr0.sse and xcr0.avx; // LLVM approaches avx512_save by hardcoding it to true on Darwin, // because the kernel saves the context even if the bit is not set. @@ -384,22 +404,26 @@ fn detectNativeFeatures(cpu: *Target.Cpu, os_tag: Target.Os.Tag) void { // Darwin lazily saves the AVX512 context on first use: trust that the OS will // save the AVX512 context if we use AVX512 instructions, even if the bit is not // set right now. - const has_avx512_save = switch (os_tag.isDarwin()) { - true => true, - false => hasMask(xcr0_eax, XCR0_MASKREG | XCR0_ZMM0_15 | XCR0_ZMM16_31), - }; + const has_avx512_save = if (os_tag.isDarwin()) + true + else + xcr0.zmm_hi256 and xcr0.hi16_zmm; + + // AMX requires additional context to be saved by the OS. + const has_amx_save = xcr0.xtilecfg and xcr0.xtiledata; setFeature(cpu, .avx, has_avx_save); - setFeature(cpu, .fma, has_avx_save and bit(leaf.ecx, 12)); + setFeature(cpu, .fma, bit(leaf.ecx, 12) and has_avx_save); // Only enable XSAVE if OS has enabled support for saving YMM state. 
- setFeature(cpu, .xsave, has_avx_save and bit(leaf.ecx, 26)); - setFeature(cpu, .f16c, has_avx_save and bit(leaf.ecx, 29)); + setFeature(cpu, .xsave, bit(leaf.ecx, 26) and has_avx_save); + setFeature(cpu, .f16c, bit(leaf.ecx, 29) and has_avx_save); leaf = cpuid(0x80000000, 0); const max_ext_level = leaf.eax; if (max_ext_level >= 0x80000001) { leaf = cpuid(0x80000001, 0); + setFeature(cpu, .sahf, bit(leaf.ecx, 0)); setFeature(cpu, .lzcnt, bit(leaf.ecx, 5)); setFeature(cpu, .sse4a, bit(leaf.ecx, 6)); @@ -409,11 +433,21 @@ fn detectNativeFeatures(cpu: *Target.Cpu, os_tag: Target.Os.Tag) void { setFeature(cpu, .fma4, bit(leaf.ecx, 16) and has_avx_save); setFeature(cpu, .tbm, bit(leaf.ecx, 21)); setFeature(cpu, .mwaitx, bit(leaf.ecx, 29)); + setFeature(cpu, .@"64bit", bit(leaf.edx, 29)); } else { for ([_]Target.x86.Feature{ - .sahf, .lzcnt, .sse4a, .prfchw, .xop, - .lwp, .fma4, .tbm, .mwaitx, .@"64bit", + .sahf, + .lzcnt, + .sse4a, + .prfchw, + .xop, + .lwp, + .fma4, + .tbm, + .mwaitx, + + .@"64bit", }) |feat| { setFeature(cpu, feat, false); } @@ -422,10 +456,16 @@ fn detectNativeFeatures(cpu: *Target.Cpu, os_tag: Target.Os.Tag) void { // Misc. memory-related features. if (max_ext_level >= 0x80000008) { leaf = cpuid(0x80000008, 0); + setFeature(cpu, .clzero, bit(leaf.ebx, 0)); + setFeature(cpu, .rdpru, bit(leaf.ebx, 4)); setFeature(cpu, .wbnoinvd, bit(leaf.ebx, 9)); } else { - for ([_]Target.x86.Feature{ .clzero, .wbnoinvd }) |feat| { + for ([_]Target.x86.Feature{ + .clzero, + .rdpru, + .wbnoinvd, + }) |feat| { setFeature(cpu, feat, false); } } @@ -444,6 +484,7 @@ fn detectNativeFeatures(cpu: *Target.Cpu, os_tag: Target.Os.Tag) void { setFeature(cpu, .rtm, bit(leaf.ebx, 11)); // AVX512 is only supported if the OS supports the context save for it. 
setFeature(cpu, .avx512f, bit(leaf.ebx, 16) and has_avx512_save); + setFeature(cpu, .evex512, bit(leaf.ebx, 16) and has_avx512_save); setFeature(cpu, .avx512dq, bit(leaf.ebx, 17) and has_avx512_save); setFeature(cpu, .rdseed, bit(leaf.ebx, 18)); setFeature(cpu, .adx, bit(leaf.ebx, 19)); @@ -470,8 +511,8 @@ fn detectNativeFeatures(cpu: *Target.Cpu, os_tag: Target.Os.Tag) void { setFeature(cpu, .avx512vnni, bit(leaf.ecx, 11) and has_avx512_save); setFeature(cpu, .avx512bitalg, bit(leaf.ecx, 12) and has_avx512_save); setFeature(cpu, .avx512vpopcntdq, bit(leaf.ecx, 14) and has_avx512_save); - setFeature(cpu, .avx512vp2intersect, bit(leaf.edx, 8) and has_avx512_save); setFeature(cpu, .rdpid, bit(leaf.ecx, 22)); + setFeature(cpu, .kl, bit(leaf.ecx, 23)); setFeature(cpu, .cldemote, bit(leaf.ecx, 25)); setFeature(cpu, .movdiri, bit(leaf.ecx, 27)); setFeature(cpu, .movdir64b, bit(leaf.ecx, 28)); @@ -487,32 +528,153 @@ fn detectNativeFeatures(cpu: *Target.Cpu, os_tag: Target.Os.Tag) void { // leaves using cpuid, since that information is ignored while // detecting features using the "-march=native" flag. // For more info, see X86 ISA docs. - setFeature(cpu, .pconfig, bit(leaf.edx, 18)); setFeature(cpu, .uintr, bit(leaf.edx, 5)); + setFeature(cpu, .avx512vp2intersect, bit(leaf.edx, 8) and has_avx512_save); + setFeature(cpu, .serialize, bit(leaf.edx, 14)); + setFeature(cpu, .tsxldtrk, bit(leaf.edx, 16)); + setFeature(cpu, .pconfig, bit(leaf.edx, 18)); + setFeature(cpu, .amx_bf16, bit(leaf.edx, 22) and has_amx_save); + setFeature(cpu, .avx512fp16, bit(leaf.edx, 23) and has_avx512_save); + setFeature(cpu, .amx_tile, bit(leaf.edx, 24) and has_amx_save); + setFeature(cpu, .amx_int8, bit(leaf.edx, 25) and has_amx_save); - // TODO I feel unsure about this check. - // It doesn't really seem to check for 7.1, just for 7. - // Is this a sound assumption to make? - // Note that this is what other implementations do, so I kind of trust it. 
- const has_leaf_7_1 = max_level >= 7; - if (has_leaf_7_1) { + if (leaf.eax >= 1) { leaf = cpuid(0x7, 0x1); + + setFeature(cpu, .sha512, bit(leaf.eax, 0)); + setFeature(cpu, .sm3, bit(leaf.eax, 1)); + setFeature(cpu, .sm4, bit(leaf.eax, 2)); + setFeature(cpu, .raoint, bit(leaf.eax, 3)); + setFeature(cpu, .avxvnni, bit(leaf.eax, 4) and has_avx_save); setFeature(cpu, .avx512bf16, bit(leaf.eax, 5) and has_avx512_save); + setFeature(cpu, .cmpccxadd, bit(leaf.eax, 7)); + setFeature(cpu, .amx_fp16, bit(leaf.eax, 21) and has_amx_save); + setFeature(cpu, .hreset, bit(leaf.eax, 22)); + setFeature(cpu, .avxifma, bit(leaf.eax, 23) and has_avx_save); + + setFeature(cpu, .avxvnniint8, bit(leaf.edx, 4) and has_avx_save); + setFeature(cpu, .avxneconvert, bit(leaf.edx, 5) and has_avx_save); + setFeature(cpu, .amx_complex, bit(leaf.edx, 8) and has_amx_save); + setFeature(cpu, .avxvnniint16, bit(leaf.edx, 10) and has_avx_save); + setFeature(cpu, .prefetchi, bit(leaf.edx, 14)); + setFeature(cpu, .usermsr, bit(leaf.edx, 15)); + setFeature(cpu, .avx10_1_256, bit(leaf.edx, 19)); + // APX + setFeature(cpu, .egpr, bit(leaf.edx, 21)); + setFeature(cpu, .push2pop2, bit(leaf.edx, 21)); + setFeature(cpu, .ppx, bit(leaf.edx, 21)); + setFeature(cpu, .ndd, bit(leaf.edx, 21)); + setFeature(cpu, .ccmp, bit(leaf.edx, 21)); + setFeature(cpu, .cf, bit(leaf.edx, 21)); } else { - setFeature(cpu, .avx512bf16, false); + for ([_]Target.x86.Feature{ + .sha512, + .sm3, + .sm4, + .raoint, + .avxvnni, + .avx512bf16, + .cmpccxadd, + .amx_fp16, + .hreset, + .avxifma, + + .avxvnniint8, + .avxneconvert, + .amx_complex, + .avxvnniint16, + .prefetchi, + .usermsr, + .avx10_1_256, + .egpr, + .push2pop2, + .ppx, + .ndd, + .ccmp, + .cf, + }) |feat| { + setFeature(cpu, feat, false); + } } } else { for ([_]Target.x86.Feature{ - .fsgsbase, .sgx, .bmi, .avx2, - .bmi2, .invpcid, .rtm, .avx512f, - .avx512dq, .rdseed, .adx, .avx512ifma, - .clflushopt, .clwb, .avx512pf, .avx512er, - .avx512cd, .sha, .avx512bw, .avx512vl, - 
.prefetchwt1, .avx512vbmi, .pku, .waitpkg, - .avx512vbmi2, .shstk, .gfni, .vaes, - .vpclmulqdq, .avx512vnni, .avx512bitalg, .avx512vpopcntdq, - .avx512vp2intersect, .rdpid, .cldemote, .movdiri, - .movdir64b, .enqcmd, .pconfig, .avx512bf16, + .fsgsbase, + .sgx, + .bmi, + .avx2, + .smep, + .bmi2, + .invpcid, + .rtm, + .avx512f, + .evex512, + .avx512dq, + .rdseed, + .adx, + .smap, + .avx512ifma, + .clflushopt, + .clwb, + .avx512pf, + .avx512er, + .avx512cd, + .sha, + .avx512bw, + .avx512vl, + + .prefetchwt1, + .avx512vbmi, + .pku, + .waitpkg, + .avx512vbmi2, + .shstk, + .gfni, + .vaes, + .vpclmulqdq, + .avx512vnni, + .avx512bitalg, + .avx512vpopcntdq, + .rdpid, + .kl, + .cldemote, + .movdiri, + .movdir64b, + .enqcmd, + + .uintr, + .avx512vp2intersect, + .serialize, + .tsxldtrk, + .pconfig, + .amx_bf16, + .avx512fp16, + .amx_tile, + .amx_int8, + + .sha512, + .sm3, + .sm4, + .raoint, + .avxvnni, + .avx512bf16, + .cmpccxadd, + .amx_fp16, + .hreset, + .avxifma, + + .avxvnniint8, + .avxneconvert, + .amx_complex, + .avxvnniint16, + .prefetchi, + .usermsr, + .avx10_1_256, + .egpr, + .push2pop2, + .ppx, + .ndd, + .ccmp, + .cf, }) |feat| { setFeature(cpu, feat, false); } @@ -520,21 +682,55 @@ fn detectNativeFeatures(cpu: *Target.Cpu, os_tag: Target.Os.Tag) void { if (max_level >= 0xD and has_avx_save) { leaf = cpuid(0xD, 0x1); + // Only enable XSAVE if OS has enabled support for saving YMM state. 
setFeature(cpu, .xsaveopt, bit(leaf.eax, 0)); setFeature(cpu, .xsavec, bit(leaf.eax, 1)); setFeature(cpu, .xsaves, bit(leaf.eax, 3)); } else { - for ([_]Target.x86.Feature{ .xsaveopt, .xsavec, .xsaves }) |feat| { + for ([_]Target.x86.Feature{ + .xsaveopt, + .xsavec, + .xsaves, + }) |feat| { setFeature(cpu, feat, false); } } if (max_level >= 0x14) { leaf = cpuid(0x14, 0); + setFeature(cpu, .ptwrite, bit(leaf.ebx, 4)); } else { - setFeature(cpu, .ptwrite, false); + for ([_]Target.x86.Feature{ + .ptwrite, + }) |feat| { + setFeature(cpu, feat, false); + } + } + + if (max_level >= 0x19) { + leaf = cpuid(0x19, 0); + + setFeature(cpu, .widekl, bit(leaf.ebx, 2)); + } else { + for ([_]Target.x86.Feature{ + .widekl, + }) |feat| { + setFeature(cpu, feat, false); + } + } + + if (max_level >= 0x24) { + leaf = cpuid(0x24, 0); + + setFeature(cpu, .avx10_1_512, bit(leaf.ebx, 18)); + } else { + for ([_]Target.x86.Feature{ + .avx10_1_512, + }) |feat| { + setFeature(cpu, feat, false); + } } } diff --git a/lib/std/zon/stringify.zig b/lib/std/zon/stringify.zig index 26ca751319..8682fdc5f4 100644 --- a/lib/std/zon/stringify.zig +++ b/lib/std/zon/stringify.zig @@ -627,10 +627,16 @@ pub fn Serializer(Writer: type) type { return self.writer.writeAll("inf"); } else if (std.math.isNegativeInf(val)) { return self.writer.writeAll("-inf"); + } else if (std.math.isNegativeZero(val)) { + return self.writer.writeAll("-0.0"); + } else { + try std.fmt.format(self.writer, "{d}", .{val}); + }, + .comptime_float => if (val == 0) { + return self.writer.writeAll("0"); } else { try std.fmt.format(self.writer, "{d}", .{val}); }, - .comptime_float => try std.fmt.format(self.writer, "{d}", .{val}), else => comptime unreachable, } } @@ -2103,10 +2109,11 @@ test "std.zon stringify primitives" { \\ .b = 0.3333333333333333333333333333333333, \\ .c = 3.1415926535897932384626433832795028, \\ .d = 0, - \\ .e = -0, - \\ .f = inf, - \\ .g = -inf, - \\ .h = nan, + \\ .e = 0, + \\ .f = -0.0, + \\ .g = inf, + \\ .h = 
-inf, + \\ .i = nan, \\} , .{ @@ -2115,9 +2122,10 @@ test "std.zon stringify primitives" { .c = std.math.pi, .d = 0.0, .e = -0.0, - .f = std.math.inf(f32), - .g = -std.math.inf(f32), - .h = std.math.nan(f32), + .f = @as(f128, -0.0), + .g = std.math.inf(f32), + .h = -std.math.inf(f32), + .i = std.math.nan(f32), }, .{}, ); diff --git a/src/Air.zig b/src/Air.zig index e323259cbe..39f9bd783d 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -1656,6 +1656,7 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: *const InternPool) bool { const data = air.instructions.items(.data)[@intFromEnum(inst)]; return switch (air.instructions.items(.tag)[@intFromEnum(inst)]) { .arg, + .assembly, .block, .loop, .repeat, @@ -1798,12 +1799,8 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: *const InternPool) bool { .cmp_vector_optimized, .is_null, .is_non_null, - .is_null_ptr, - .is_non_null_ptr, .is_err, .is_non_err, - .is_err_ptr, - .is_non_err_ptr, .bool_and, .bool_or, .fptrunc, @@ -1816,7 +1813,6 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: *const InternPool) bool { .unwrap_errunion_payload, .unwrap_errunion_err, .unwrap_errunion_payload_ptr, - .unwrap_errunion_err_ptr, .wrap_errunion_payload, .wrap_errunion_err, .struct_field_ptr, @@ -1861,17 +1857,13 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: *const InternPool) bool { .work_group_id, => false, - .assembly => { - const extra = air.extraData(Air.Asm, data.ty_pl.payload); - const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0; - return is_volatile or if (extra.data.outputs_len == 1) - @as(Air.Inst.Ref, @enumFromInt(air.extra[extra.end])) != .none - else - extra.data.outputs_len > 1; - }, - .load => air.typeOf(data.ty_op.operand, ip).isVolatilePtrIp(ip), + .is_non_null_ptr, .is_null_ptr, .is_non_err_ptr, .is_err_ptr => air.typeOf(data.un_op, ip).isVolatilePtrIp(ip), + .load, .unwrap_errunion_err_ptr => air.typeOf(data.ty_op.operand, ip).isVolatilePtrIp(ip), .slice_elem_val, .ptr_elem_val => 
air.typeOf(data.bin_op.lhs, ip).isVolatilePtrIp(ip), - .atomic_load => air.typeOf(data.atomic_load.ptr, ip).isVolatilePtrIp(ip), + .atomic_load => switch (data.atomic_load.order) { + .unordered, .monotonic => air.typeOf(data.atomic_load.ptr, ip).isVolatilePtrIp(ip), + else => true, // Stronger memory orderings have inter-thread side effects. + }, }; } diff --git a/src/Air/types_resolved.zig b/src/Air/types_resolved.zig index 79760ea24d..2144ce2316 100644 --- a/src/Air/types_resolved.zig +++ b/src/Air/types_resolved.zig @@ -439,6 +439,7 @@ fn checkRef(ref: Air.Inst.Ref, zcu: *Zcu) bool { pub fn checkVal(val: Value, zcu: *Zcu) bool { const ty = val.typeOf(zcu); if (!checkType(ty, zcu)) return false; + if (val.isUndef(zcu)) return true; if (ty.toIntern() == .type_type and !checkType(val.toType(), zcu)) return false; // Check for lazy values switch (zcu.intern_pool.indexToKey(val.toIntern())) { diff --git a/src/Compilation.zig b/src/Compilation.zig index 12221ba3dc..74ddc30a5a 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -259,8 +259,6 @@ crt_files: std.StringHashMapUnmanaged(CrtFile) = .empty, /// Null means only show snippet on first error. reference_trace: ?u32 = null, -libcxx_abi_version: libcxx.AbiVersion = libcxx.AbiVersion.default, - /// This mutex guards all `Compilation` mutable state. /// Disabled in single-threaded mode because the thread pool spawns in the same thread. mutex: if (builtin.single_threaded) struct { @@ -827,7 +825,7 @@ pub const MiscTask = enum { @"mingw-w64 crt2.o", @"mingw-w64 dllcrt2.o", - @"mingw-w64 mingw32.lib", + @"mingw-w64 libmingw32.lib", }; pub const MiscError = struct { @@ -1172,7 +1170,6 @@ pub const CreateOptions = struct { force_load_objc: bool = false, /// Whether local symbols should be discarded from the symbol table. 
discard_local_symbols: bool = false, - libcxx_abi_version: libcxx.AbiVersion = libcxx.AbiVersion.default, /// (Windows) PDB source path prefix to instruct the linker how to resolve relative /// paths when consolidating CodeView streams into a single PDB file. pdb_source_path: ?[]const u8 = null, @@ -1512,7 +1509,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil .emit_asm = options.emit_asm, .emit_llvm_ir = options.emit_llvm_ir, .emit_llvm_bc = options.emit_llvm_bc, - .work_queues = .{std.fifo.LinearFifo(Job, .Dynamic).init(gpa)} ** @typeInfo(std.meta.FieldType(Compilation, .work_queues)).array.len, + .work_queues = @splat(.init(gpa)), .c_object_work_queue = std.fifo.LinearFifo(*CObject, .Dynamic).init(gpa), .win32_resource_work_queue = if (dev.env.supports(.win32_resource)) std.fifo.LinearFifo(*Win32Resource, .Dynamic).init(gpa) else .{}, .astgen_work_queue = std.fifo.LinearFifo(Zcu.File.Index, .Dynamic).init(gpa), @@ -1545,7 +1542,6 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil .debug_compiler_runtime_libs = options.debug_compiler_runtime_libs, .debug_compile_errors = options.debug_compile_errors, .incremental = options.incremental, - .libcxx_abi_version = options.libcxx_abi_version, .root_name = root_name, .sysroot = sysroot, .windows_libs = windows_libs, @@ -1886,7 +1882,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil const main_crt_file: mingw.CrtFile = if (is_dyn_lib) .dllcrt2_o else .crt2_o; comp.queued_jobs.mingw_crt_file[@intFromEnum(main_crt_file)] = true; - comp.queued_jobs.mingw_crt_file[@intFromEnum(mingw.CrtFile.mingw32_lib)] = true; + comp.queued_jobs.mingw_crt_file[@intFromEnum(mingw.CrtFile.libmingw32_lib)] = true; comp.remaining_prelink_tasks += 2; // When linking mingw-w64 there are some import libs we always need. 
@@ -2129,7 +2125,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { const is_hit = man.hit() catch |err| switch (err) { error.CacheCheckFailed => switch (man.diagnostic) { .none => unreachable, - .manifest_create, .manifest_read, .manifest_lock => |e| return comp.setMiscFailure( + .manifest_create, .manifest_read, .manifest_lock, .manifest_seek => |e| return comp.setMiscFailure( .check_whole_cache, "failed to check cache: {s} {s}", .{ @tagName(man.diagnostic), @errorName(e) }, @@ -2261,7 +2257,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { zcu.compile_log_text.shrinkAndFree(gpa, 0); - zcu.skip_analysis_errors = false; + zcu.skip_analysis_this_update = false; // Make sure std.zig is inside the import_table. We unconditionally need // it for start.zig. @@ -2336,6 +2332,17 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { const pt: Zcu.PerThread = .activate(zcu, .main); defer pt.deactivate(); + if (!zcu.skip_analysis_this_update) { + if (comp.config.is_test) { + // The `test_functions` decl has been intentionally postponed until now, + // at which point we must populate it with the list of test functions that + // have been discovered and not filtered out. + try pt.populateTestFunctions(main_progress_node); + } + + try pt.processExports(); + } + if (build_options.enable_debug_extensions and comp.verbose_intern_pool) { std.debug.print("intern pool stats for '{s}':\n", .{ comp.root_name, @@ -2350,15 +2357,6 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { }); zcu.intern_pool.dumpGenericInstances(gpa); } - - if (comp.config.is_test) { - // The `test_functions` decl has been intentionally postponed until now, - // at which point we must populate it with the list of test functions that - // have been discovered and not filtered out. 
- try pt.populateTestFunctions(main_progress_node); - } - - try pt.processExports(); } if (anyErrors(comp)) { @@ -3310,7 +3308,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { } } } - if (zcu.skip_analysis_errors) break :zcu_errors; + if (zcu.skip_analysis_this_update) break :zcu_errors; var sorted_failed_analysis: std.AutoArrayHashMapUnmanaged(InternPool.AnalUnit, *Zcu.ErrorMsg).DataList.Slice = s: { const SortOrder = struct { zcu: *Zcu, @@ -3446,7 +3444,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { try comp.link_diags.addMessagesToBundle(&bundle, comp.bin_file); if (comp.zcu) |zcu| { - if (!zcu.skip_analysis_errors and bundle.root_list.items.len == 0 and zcu.compile_log_sources.count() != 0) { + if (!zcu.skip_analysis_this_update and bundle.root_list.items.len == 0 and zcu.compile_log_sources.count() != 0) { const values = zcu.compile_log_sources.values(); // First one will be the error; subsequent ones will be notes. const src_loc = values[0].src(); @@ -3957,7 +3955,7 @@ fn performAllTheWorkInner( // However, this means our analysis data is invalid, so we want to omit all analysis errors. assert(zcu.failed_files.count() > 0); // we will get an error - zcu.skip_analysis_errors = true; + zcu.skip_analysis_this_update = true; return; } @@ -5115,11 +5113,13 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr } // Just to save disk space, we delete the files that are never needed again. 
- defer if (out_diag_path) |diag_file_path| zig_cache_tmp_dir.deleteFile(std.fs.path.basename(diag_file_path)) catch |err| { - log.warn("failed to delete '{s}': {s}", .{ diag_file_path, @errorName(err) }); + defer if (out_diag_path) |diag_file_path| zig_cache_tmp_dir.deleteFile(std.fs.path.basename(diag_file_path)) catch |err| switch (err) { + error.FileNotFound => {}, // the file wasn't created due to an error we reported + else => log.warn("failed to delete '{s}': {s}", .{ diag_file_path, @errorName(err) }), }; - defer if (out_dep_path) |dep_file_path| zig_cache_tmp_dir.deleteFile(std.fs.path.basename(dep_file_path)) catch |err| { - log.warn("failed to delete '{s}': {s}", .{ dep_file_path, @errorName(err) }); + defer if (out_dep_path) |dep_file_path| zig_cache_tmp_dir.deleteFile(std.fs.path.basename(dep_file_path)) catch |err| switch (err) { + error.FileNotFound => {}, // the file wasn't created due to an error we reported + else => log.warn("failed to delete '{s}': {s}", .{ dep_file_path, @errorName(err) }), }; if (std.process.can_spawn) { var child = std.process.Child.init(argv.items, arena); @@ -5626,12 +5626,40 @@ pub fn addCCArgs( const llvm_triple = try @import("codegen/llvm.zig").targetTriple(arena, target); try argv.appendSlice(&[_][]const u8{ "-target", llvm_triple }); + switch (target.os.tag) { + .ios, .macos, .tvos, .watchos => |os| { + try argv.ensureUnusedCapacity(2); + // Pass the proper -m-version-min argument for darwin. + const ver = target.os.version_range.semver.min; + argv.appendAssumeCapacity(try std.fmt.allocPrint(arena, "-m{s}{s}-version-min={d}.{d}.{d}", .{ + @tagName(os), + switch (target.abi) { + .simulator => "-simulator", + else => "", + }, + ver.major, + ver.minor, + ver.patch, + })); + // This avoids a warning that sometimes occurs when + // providing both a -target argument that contains a + // version as well as the -mmacosx-version-min argument. 
+ // Zig provides the correct value in both places, so it + // doesn't matter which one gets overridden. + argv.appendAssumeCapacity("-Wno-overriding-option"); + }, + else => {}, + } + if (target.cpu.arch.isArm()) { try argv.append(if (target.cpu.arch.isThumb()) "-mthumb" else "-mno-thumb"); } if (target_util.llvmMachineAbi(target)) |mabi| { - try argv.append(try std.fmt.allocPrint(arena, "-mabi={s}", .{mabi})); + // Clang's integrated Arm assembler doesn't support `-mabi` yet... + if (!(target.cpu.arch.isArm() and (ext == .assembly or ext == .assembly_with_cpp))) { + try argv.append(try std.fmt.allocPrint(arena, "-mabi={s}", .{mabi})); + } } // We might want to support -mfloat-abi=softfp for Arm and CSKY here in the future. @@ -5743,6 +5771,19 @@ pub fn addCCArgs( try argv.append("-D_SOFT_DOUBLE"); } + switch (mod.optimize_mode) { + .Debug => { + // windows c runtime requires -D_DEBUG if using debug libraries + try argv.append("-D_DEBUG"); + }, + .ReleaseSafe => { + try argv.append("-D_FORTIFY_SOURCE=2"); + }, + .ReleaseFast, .ReleaseSmall => { + try argv.append("-DNDEBUG"); + }, + } + if (comp.config.link_libc) { if (target.isGnuLibC()) { const target_version = target.os.versionRange().gnuLibCVersion().?; @@ -5786,11 +5827,12 @@ pub fn addCCArgs( // See the comment in libcxx.zig for more details about this. try argv.append("-D_LIBCPP_PSTL_BACKEND_SERIAL"); + const abi_version: u2 = if (target.os.tag == .emscripten) 2 else 1; try argv.append(try std.fmt.allocPrint(arena, "-D_LIBCPP_ABI_VERSION={d}", .{ - @intFromEnum(comp.libcxx_abi_version), + abi_version, })); try argv.append(try std.fmt.allocPrint(arena, "-D_LIBCPP_ABI_NAMESPACE=__{d}", .{ - @intFromEnum(comp.libcxx_abi_version), + abi_version, })); try argv.append(libcxx.hardeningModeFlag(mod.optimize_mode)); @@ -5835,6 +5877,32 @@ pub fn addCCArgs( } } + // Only C-family files support these flags. 
+ switch (ext) { + .c, + .h, + .cpp, + .hpp, + .m, + .hm, + .mm, + .hmm, + => { + try argv.append("-fno-spell-checking"); + + if (target.os.tag == .windows and target.abi.isGnu()) { + // windows.h has files such as pshpack1.h which do #pragma packing, + // triggering a clang warning. So for this target, we disable this warning. + try argv.append("-Wno-pragma-pack"); + } + + if (mod.optimize_mode != .Debug) { + try argv.append("-Werror=date-time"); + } + }, + else => {}, + } + // Only assembly files support these flags. switch (ext) { .assembly, @@ -5909,7 +5977,7 @@ pub fn addCCArgs( else => {}, } - // Only C-family files support these flags. + // Only compiled files support these flags. switch (ext) { .c, .h, @@ -5919,9 +5987,9 @@ pub fn addCCArgs( .hm, .mm, .hmm, + .ll, + .bc, => { - try argv.append("-fno-spell-checking"); - if (target_util.clangSupportsTargetCpuArg(target)) { if (target.cpu.model.llvm_name) |llvm_name| { try argv.appendSlice(&[_][]const u8{ @@ -5941,6 +6009,10 @@ pub fn addCCArgs( // We communicate float ABI to Clang through the dedicated options further down. if (std.mem.eql(u8, llvm_name, "soft-float")) continue; + // Ignore these until we figure out how to handle the concept of omitting features. + // See https://github.com/ziglang/zig/issues/23539 + if (target_util.isDynamicAMDGCNFeature(target, feature)) continue; + argv.appendSliceAssumeCapacity(&[_][]const u8{ "-Xclang", "-target-feature", "-Xclang" }); const plus_or_minus = "-+"[@intFromBool(is_enabled)]; const arg = try std.fmt.allocPrint(arena, "{c}{s}", .{ plus_or_minus, llvm_name }); @@ -5948,48 +6020,6 @@ pub fn addCCArgs( } } - switch (target.os.tag) { - .windows => { - // windows.h has files such as pshpack1.h which do #pragma packing, - // triggering a clang warning. So for this target, we disable this warning. 
- if (target.abi.isGnu()) { - try argv.append("-Wno-pragma-pack"); - } - }, - .macos => { - try argv.ensureUnusedCapacity(2); - // Pass the proper -m-version-min argument for darwin. - const ver = target.os.version_range.semver.min; - argv.appendAssumeCapacity(try std.fmt.allocPrint(arena, "-mmacos-version-min={d}.{d}.{d}", .{ - ver.major, ver.minor, ver.patch, - })); - // This avoids a warning that sometimes occurs when - // providing both a -target argument that contains a - // version as well as the -mmacosx-version-min argument. - // Zig provides the correct value in both places, so it - // doesn't matter which one gets overridden. - argv.appendAssumeCapacity("-Wno-overriding-option"); - }, - .ios => switch (target.cpu.arch) { - // Pass the proper -m-version-min argument for darwin. - .x86, .x86_64 => { - const ver = target.os.version_range.semver.min; - try argv.append(try std.fmt.allocPrint( - arena, - "-m{s}-simulator-version-min={d}.{d}.{d}", - .{ @tagName(target.os.tag), ver.major, ver.minor, ver.patch }, - )); - }, - else => { - const ver = target.os.version_range.semver.min; - try argv.append(try std.fmt.allocPrint(arena, "-m{s}-version-min={d}.{d}.{d}", .{ - @tagName(target.os.tag), ver.major, ver.minor, ver.patch, - })); - }, - }, - else => {}, - } - { var san_arg: std.ArrayListUnmanaged(u8) = .empty; const prefix = "-fsanitize="; @@ -6026,17 +6056,21 @@ pub fn addCCArgs( // function was called. try argv.append("-fno-sanitize=function"); - // It's recommended to use the minimal runtime in production environments - // due to the security implications of the full runtime. The minimal runtime - // doesn't provide much benefit over simply trapping. - if (mod.optimize_mode == .ReleaseSafe) { + // If we want to sanitize C, but the ubsan runtime has been turned off, + // we'll switch to just trapping. 
+ if (comp.ubsan_rt_strat == .none or mod.optimize_mode == .ReleaseSafe) { + // It's recommended to use the minimal runtime in production + // environments due to the security implications of the full runtime. + // The minimal runtime doesn't provide much benefit over simply + // trapping, however, so we do that instead. try argv.append("-fsanitize-trap=undefined"); - } - - // This is necessary because, by default, Clang instructs LLVM to embed a COFF link - // dependency on `libclang_rt.ubsan_standalone.a` when the UBSan runtime is used. - if (target.os.tag == .windows) { - try argv.append("-fno-rtlib-defaultlib"); + } else { + // This is necessary because, by default, Clang instructs LLVM to embed + // a COFF link dependency on `libclang_rt.ubsan_standalone.a` when the + // UBSan runtime is used. + if (target.os.tag == .windows) { + try argv.append("-fno-rtlib-defaultlib"); + } } } } @@ -6048,8 +6082,6 @@ pub fn addCCArgs( switch (mod.optimize_mode) { .Debug => { - // windows c runtime requires -D_DEBUG if using debug libraries - try argv.append("-D_DEBUG"); // Clang has -Og for compatibility with GCC, but currently it is just equivalent // to -O1. Besides potentially impairing debugging, -O1/-Og significantly // increases compile times. @@ -6059,10 +6091,8 @@ pub fn addCCArgs( // See the comment in the BuildModeFastRelease case for why we pass -O2 rather // than -O3 here. 
try argv.append("-O2"); - try argv.append("-D_FORTIFY_SOURCE=2"); }, .ReleaseFast => { - try argv.append("-DNDEBUG"); // Here we pass -O2 rather than -O3 because, although we do the equivalent of // -O3 in Zig code, the justification for the difference here is that Zig // has better detection and prevention of undefined behavior, so -O3 is safer for @@ -6071,14 +6101,9 @@ pub fn addCCArgs( try argv.append("-O2"); }, .ReleaseSmall => { - try argv.append("-DNDEBUG"); try argv.append("-Os"); }, } - - if (mod.optimize_mode != .Debug) { - try argv.append("-Werror=date-time"); - } }, else => {}, } diff --git a/src/InternPool.zig b/src/InternPool.zig index ab264d0e86..799d200c81 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -9443,7 +9443,7 @@ pub fn getFuncInstanceIes( try items.ensureUnusedCapacity(4); const generic_owner = unwrapCoercedFunc(ip, arg.generic_owner); - const generic_owner_ty = ip.indexToKey(ip.funcDeclInfo(arg.generic_owner).ty).func_type; + const generic_owner_ty = ip.indexToKey(ip.funcDeclInfo(generic_owner).ty).func_type; // The strategy here is to add the function decl unconditionally, then to // ask if it already exists, and if so, revert the lengths of the mutated diff --git a/src/Package/Fetch.zig b/src/Package/Fetch.zig index 0b23d5d5bd..95961a893a 100644 --- a/src/Package/Fetch.zig +++ b/src/Package/Fetch.zig @@ -1850,7 +1850,11 @@ const FileHeader = struct { return magic_number == std.macho.MH_MAGIC or magic_number == std.macho.MH_MAGIC_64 or magic_number == std.macho.FAT_MAGIC or - magic_number == std.macho.FAT_MAGIC_64; + magic_number == std.macho.FAT_MAGIC_64 or + magic_number == std.macho.MH_CIGAM or + magic_number == std.macho.MH_CIGAM_64 or + magic_number == std.macho.FAT_CIGAM or + magic_number == std.macho.FAT_CIGAM_64; } pub fn isExecutable(self: *FileHeader) bool { @@ -1875,6 +1879,11 @@ test FileHeader { h.bytes_read = 0; h.update(&macho64_magic_bytes); try std.testing.expect(h.isExecutable()); + + const 
macho64_cigam_bytes = [_]u8{ 0xFE, 0xED, 0xFA, 0xCF }; + h.bytes_read = 0; + h.update(&macho64_cigam_bytes); + try std.testing.expect(h.isExecutable()); } // Result of the `unpackResource` operation. Enables collecting errors from diff --git a/src/Package/Module.zig b/src/Package/Module.zig index 0dec7bde76..508880d8b2 100644 --- a/src/Package/Module.zig +++ b/src/Package/Module.zig @@ -331,6 +331,10 @@ pub fn create(arena: Allocator, options: CreateOptions) !*Package.Module { // Append disabled features after enabled ones, so that their effects aren't overwritten. for (target.cpu.arch.allFeaturesList()) |feature| { if (feature.llvm_name) |llvm_name| { + // Ignore these until we figure out how to handle the concept of omitting features. + // See https://github.com/ziglang/zig/issues/23539 + if (target_util.isDynamicAMDGCNFeature(target, feature)) continue; + const is_enabled = target.cpu.features.isEnabled(feature.index); if (is_enabled) { diff --git a/src/Sema.zig b/src/Sema.zig index b30f42c2d7..d4284aa441 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -3669,7 +3669,10 @@ fn indexablePtrLenOrNone( const zcu = pt.zcu; const operand_ty = sema.typeOf(operand); try checkMemOperand(sema, block, src, operand_ty); - if (operand_ty.ptrSize(zcu) == .many) return .none; + switch (operand_ty.ptrSize(zcu)) { + .many, .c => return .none, + .one, .slice => {}, + } const field_name = try zcu.intern_pool.getOrPutString(sema.gpa, pt.tid, "len", .no_embedded_nulls); return sema.fieldVal(block, src, operand, field_name, src); } @@ -13524,7 +13527,7 @@ fn validateErrSetSwitch( seen_errors: *SwitchErrorSet, case_vals: *std.ArrayListUnmanaged(Air.Inst.Ref), operand_ty: Type, - inst_data: std.meta.FieldType(Zir.Inst.Data, .pl_node), + inst_data: @FieldType(Zir.Inst.Data, "pl_node"), scalar_cases_len: u32, multi_cases_len: u32, else_case: struct { body: []const Zir.Inst.Index, end: usize, src: LazySrcLoc }, @@ -17841,11 +17844,18 @@ fn zirThis( const zcu = pt.zcu; const namespace = 
pt.zcu.namespacePtr(block.namespace); - const new_ty = try pt.ensureTypeUpToDate(namespace.owner_type); - - switch (pt.zcu.intern_pool.indexToKey(new_ty)) { - .struct_type, .union_type => try sema.declareDependency(.{ .interned = new_ty }), + switch (pt.zcu.intern_pool.indexToKey(namespace.owner_type)) { + .opaque_type => { + // Opaque types are never outdated since they don't undergo type resolution, so nothing to do! + return Air.internedToRef(namespace.owner_type); + }, + .struct_type, .union_type => { + const new_ty = try pt.ensureTypeUpToDate(namespace.owner_type); + try sema.declareDependency(.{ .interned = new_ty }); + return Air.internedToRef(new_ty); + }, .enum_type => { + const new_ty = try pt.ensureTypeUpToDate(namespace.owner_type); try sema.declareDependency(.{ .interned = new_ty }); // Since this is an enum, it has to be resolved immediately. // `ensureTypeUpToDate` has resolved the new type if necessary. @@ -17854,11 +17864,10 @@ fn zirThis( if (zcu.failed_analysis.contains(ty_unit) or zcu.transitive_failed_analysis.contains(ty_unit)) { return error.AnalysisFail; } + return Air.internedToRef(new_ty); }, - .opaque_type => {}, else => unreachable, } - return Air.internedToRef(new_ty); } fn zirClosureGet(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { @@ -20284,11 +20293,41 @@ fn zirStructInitEmptyResult(sema: *Sema, block: *Block, inst: Zir.Inst.Index, is const zcu = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); + // Generic poison means this is an untyped anonymous empty struct/array init - const ty_operand = try sema.resolveTypeOrPoison(block, src, inst_data.operand) orelse return .empty_tuple; + const ty_operand = try sema.resolveTypeOrPoison(block, src, inst_data.operand) orelse { + if (is_byref) { + return sema.uavRef(.empty_tuple); + } else { + return .empty_tuple; + } + }; + const init_ty = if (is_byref) 
ty: { const ptr_ty = ty_operand.optEuBaseType(zcu); assert(ptr_ty.zigTypeTag(zcu) == .pointer); // validated by a previous instruction + switch (ptr_ty.ptrSize(zcu)) { + // Use a zero-length array for a slice or many-ptr result + .slice, .many => break :ty try pt.arrayType(.{ + .len = 0, + .child = ptr_ty.childType(zcu).toIntern(), + .sentinel = if (ptr_ty.sentinel(zcu)) |s| s.toIntern() else .none, + }), + // Just use the child type for a single-pointer or C-pointer result + .one, .c => { + const child = ptr_ty.childType(zcu); + if (child.toIntern() == .anyopaque_type) { + // ...unless that child is anyopaque, in which case this is equivalent to an untyped init. + // `.{}` is an empty tuple. + if (is_byref) { + return sema.uavRef(.empty_tuple); + } else { + return .empty_tuple; + } + } + break :ty child; + }, + } if (!ptr_ty.isSlice(zcu)) { break :ty ptr_ty.childType(zcu); } @@ -23199,8 +23238,16 @@ fn ptrFromIntVal( const addr = try operand_val.toUnsignedIntSema(pt); if (!ptr_ty.isAllowzeroPtr(zcu) and addr == 0) return sema.fail(block, operand_src, "pointer type '{}' does not allow address zero", .{ptr_ty.fmt(pt)}); - if (addr != 0 and ptr_align != .none and !ptr_align.check(addr)) - return sema.fail(block, operand_src, "pointer type '{}' requires aligned address", .{ptr_ty.fmt(pt)}); + if (addr != 0 and ptr_align != .none) { + const masked_addr = if (ptr_ty.childType(zcu).fnPtrMaskOrNull(zcu)) |mask| + addr & mask + else + addr; + + if (!ptr_align.check(masked_addr)) { + return sema.fail(block, operand_src, "pointer type '{}' requires aligned address", .{ptr_ty.fmt(pt)}); + } + } return switch (ptr_ty.zigTypeTag(zcu)) { .optional => Value.fromInterned(try pt.intern(.{ .opt = .{ @@ -23452,11 +23499,14 @@ fn ptrCastFull( if (src_slice_like_elem.comptimeOnly(zcu) or dest_elem.comptimeOnly(zcu)) { return sema.fail(block, src, "cannot infer length of slice of '{}' from slice of '{}'", .{ dest_elem.fmt(pt), src_slice_like_elem.fmt(pt) }); } - const src_elem_size = 
src_slice_like_elem.abiSize(zcu); + // It's okay for `src_slice_like_elem` to be 0-bit; the resulting slice will just always have 0 elements. + // However, `dest_elem` can't be 0-bit. If it were, then either the source slice has 0 bits and we don't + // know how what `result.len` should be, or the source has >0 bits and there is no valid `result.len`. const dest_elem_size = dest_elem.abiSize(zcu); - if (src_elem_size == 0 or dest_elem_size == 0) { + if (dest_elem_size == 0) { return sema.fail(block, src, "cannot infer length of slice of '{}' from slice of '{}'", .{ dest_elem.fmt(pt), src_slice_like_elem.fmt(pt) }); } + const src_elem_size = src_slice_like_elem.abiSize(zcu); break :need_len_change src_elem_size != dest_elem_size; } else false; @@ -23713,7 +23763,12 @@ fn ptrCastFull( if (dest_align.compare(.gt, src_align)) { if (try ptr_val.getUnsignedIntSema(pt)) |addr| { - if (!dest_align.check(addr)) { + const masked_addr = if (Type.fromInterned(dest_info.child).fnPtrMaskOrNull(zcu)) |mask| + addr & mask + else + addr; + + if (!dest_align.check(masked_addr)) { return sema.fail(block, operand_src, "pointer address 0x{X} is not aligned to {d} bytes", .{ addr, dest_align.toByteUnits().?, @@ -26314,12 +26369,14 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void var info = dest_ty.ptrInfo(zcu); info.flags.size = .one; info.child = array_ty.toIntern(); + info.sentinel = .none; break :info info; }); const src_array_ptr_ty = try pt.ptrType(info: { var info = src_ty.ptrInfo(zcu); info.flags.size = .one; info.child = array_ty.toIntern(); + info.sentinel = .none; break :info info; }); @@ -26600,13 +26657,12 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A break :cc .auto; }; - const ret_ty: Type = if (extra.data.bits.ret_ty_is_generic) - .generic_poison - else if (extra.data.bits.has_ret_ty_body) blk: { + const ret_ty: Type = if (extra.data.bits.has_ret_ty_body) blk: { const body_len = 
sema.code.extra[extra_index]; extra_index += 1; const body = sema.code.bodySlice(extra_index, body_len); extra_index += body.len; + if (extra.data.bits.ret_ty_is_generic) break :blk .generic_poison; const val = try sema.resolveGenericBody(block, ret_src, body, inst, Type.type, .{ .simple = .function_ret_ty }); const ty = val.toType(); @@ -26614,6 +26670,8 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A } else if (extra.data.bits.has_ret_ty_ref) blk: { const ret_ty_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]); extra_index += 1; + if (extra.data.bits.ret_ty_is_generic) break :blk .generic_poison; + const ret_ty_air_ref = try sema.resolveInst(ret_ty_ref); const ret_ty_val = try sema.resolveConstDefinedValue(block, ret_src, ret_ty_air_ref, .{ .simple = .function_ret_ty }); break :blk ret_ty_val.toType(); @@ -28524,12 +28582,17 @@ fn structFieldPtrByIndex( const zcu = pt.zcu; const ip = &zcu.intern_pool; - if (try sema.resolveDefinedValue(block, src, struct_ptr)) |struct_ptr_val| { - const val = try struct_ptr_val.ptrField(field_index, pt); - return Air.internedToRef(val.toIntern()); + const struct_type = zcu.typeToStruct(struct_ty).?; + const field_is_comptime = struct_type.fieldIsComptime(ip, field_index); + + // Comptime fields are handled later + if (!field_is_comptime) { + if (try sema.resolveDefinedValue(block, src, struct_ptr)) |struct_ptr_val| { + const val = try struct_ptr_val.ptrField(field_index, pt); + return Air.internedToRef(val.toIntern()); + } } - const struct_type = zcu.typeToStruct(struct_ty).?; const field_ty = struct_type.field_types.get(ip)[field_index]; const struct_ptr_ty = sema.typeOf(struct_ptr); const struct_ptr_ty_info = struct_ptr_ty.ptrInfo(zcu); @@ -28549,6 +28612,7 @@ fn structFieldPtrByIndex( try Type.fromInterned(struct_ptr_ty_info.child).abiAlignmentSema(pt); if (struct_type.layout == .@"packed") { + assert(!field_is_comptime); switch 
(struct_ty.packedStructFieldPtrInfo(struct_ptr_ty, field_index, pt)) { .bit_ptr => |packed_offset| { ptr_ty_data.flags.alignment = parent_align; @@ -28559,6 +28623,7 @@ fn structFieldPtrByIndex( }, } } else if (struct_type.layout == .@"extern") { + assert(!field_is_comptime); // For extern structs, field alignment might be bigger than type's // natural alignment. Eg, in `extern struct { x: u32, y: u16 }` the // second field is aligned as u32. @@ -28582,7 +28647,7 @@ fn structFieldPtrByIndex( const ptr_field_ty = try pt.ptrTypeSema(ptr_ty_data); - if (struct_type.fieldIsComptime(ip, field_index)) { + if (field_is_comptime) { try struct_ty.resolveStructFieldInits(pt); const val = try pt.intern(.{ .ptr = .{ .ty = ptr_field_ty.toIntern(), @@ -28979,6 +29044,14 @@ fn elemPtrOneLayerOnly( } const result_ty = try indexable_ty.elemPtrType(null, pt); + try sema.validateRuntimeElemAccess(block, elem_index_src, result_ty, indexable_ty, indexable_src); + try sema.validateRuntimeValue(block, indexable_src, indexable); + + if (!try result_ty.childType(zcu).hasRuntimeBitsIgnoreComptimeSema(pt)) { + // zero-bit child type; just bitcast the pointer + return block.addBitCast(result_ty, indexable); + } + return block.addPtrElemPtr(indexable, elem_index, result_ty); }, .one => { @@ -29107,7 +29180,8 @@ fn tupleFieldPtr( const pt = sema.pt; const zcu = pt.zcu; const tuple_ptr_ty = sema.typeOf(tuple_ptr); - const tuple_ty = tuple_ptr_ty.childType(zcu); + const tuple_ptr_info = tuple_ptr_ty.ptrInfo(zcu); + const tuple_ty: Type = .fromInterned(tuple_ptr_info.child); try tuple_ty.resolveFields(pt); const field_count = tuple_ty.structFieldCount(zcu); @@ -29125,9 +29199,16 @@ fn tupleFieldPtr( const ptr_field_ty = try pt.ptrTypeSema(.{ .child = field_ty.toIntern(), .flags = .{ - .is_const = !tuple_ptr_ty.ptrIsMutable(zcu), - .is_volatile = tuple_ptr_ty.isVolatilePtr(zcu), - .address_space = tuple_ptr_ty.ptrAddressSpace(zcu), + .is_const = tuple_ptr_info.flags.is_const, + .is_volatile = 
tuple_ptr_info.flags.is_volatile, + .address_space = tuple_ptr_info.flags.address_space, + .alignment = a: { + if (tuple_ptr_info.flags.alignment == .none) break :a .none; + // The tuple pointer isn't naturally aligned, so the field pointer might be underaligned. + const tuple_align = tuple_ptr_info.flags.alignment; + const field_align = try field_ty.abiAlignmentSema(pt); + break :a tuple_align.min(field_align); + }, }, }); @@ -29439,6 +29520,11 @@ fn elemPtrSlice( const cmp_op: Air.Inst.Tag = if (slice_sent) .cmp_lte else .cmp_lt; try sema.addSafetyCheckIndexOob(block, src, elem_index, len_inst, cmp_op); } + if (!try slice_ty.childType(zcu).hasRuntimeBitsIgnoreComptimeSema(pt)) { + // zero-bit child type; just extract the pointer and bitcast it + const slice_ptr = try block.addTyOp(.slice_ptr, slice_ty.slicePtrFieldType(zcu), slice); + return block.addBitCast(elem_ptr_ty, slice_ptr); + } return block.addSliceElemPtr(slice, elem_index, elem_ptr_ty); } @@ -30995,20 +31081,17 @@ fn coerceInMemoryAllowedFns( } }; } - switch (src_param_ty.toIntern()) { - .generic_poison_type => {}, - else => { - // Note: Cast direction is reversed here. - const param = try sema.coerceInMemoryAllowed(block, src_param_ty, dest_param_ty, dest_is_mut, target, dest_src, src_src, null); - if (param != .ok) { - return .{ .fn_param = .{ - .child = try param.dupe(sema.arena), - .actual = src_param_ty, - .wanted = dest_param_ty, - .index = param_i, - } }; - } - }, + if (!src_param_ty.isGenericPoison() and !dest_param_ty.isGenericPoison()) { + // Note: Cast direction is reversed here. 
+ const param = try sema.coerceInMemoryAllowed(block, src_param_ty, dest_param_ty, dest_is_mut, target, dest_src, src_src, null); + if (param != .ok) { + return .{ .fn_param = .{ + .child = try param.dupe(sema.arena), + .actual = src_param_ty, + .wanted = dest_param_ty, + .index = param_i, + } }; + } } } @@ -35485,7 +35568,15 @@ pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void { offsets[i] = @intCast(aligns[i].forward(offset)); offset = offsets[i] + sizes[i]; } - struct_type.setLayoutResolved(ip, @intCast(big_align.forward(offset)), big_align); + const size = std.math.cast(u32, big_align.forward(offset)) orelse { + const msg = try sema.errMsg( + ty.srcLoc(zcu), + "struct layout requires size {d}, this compiler implementation supports up to {d}", + .{ big_align.forward(offset), std.math.maxInt(u32) }, + ); + return sema.failWithOwnedErrorMsg(null, msg); + }; + struct_type.setLayoutResolved(ip, size, big_align); _ = try ty.comptimeOnlySema(pt); } @@ -35760,7 +35851,15 @@ pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void { break :layout .{ size, max_align.max(tag_align), padding }; } else .{ max_align.forward(max_size), max_align, 0 }; - union_type.setHaveLayout(ip, @intCast(size), padding, alignment); + const casted_size = std.math.cast(u32, size) orelse { + const msg = try sema.errMsg( + ty.srcLoc(pt.zcu), + "union layout requires size {d}, this compiler implementation supports up to {d}", + .{ size, std.math.maxInt(u32) }, + ); + return sema.failWithOwnedErrorMsg(null, msg); + }; + union_type.setHaveLayout(ip, casted_size, padding, alignment); if (union_type.flagsUnordered(ip).assumed_runtime_bits and !(try ty.hasRuntimeBitsSema(pt))) { const msg = try sema.errMsg( @@ -36719,7 +36818,7 @@ fn unionFields( if (enum_index != field_i) { const msg = msg: { const enum_field_src: LazySrcLoc = .{ - .base_node_inst = tag_info.zir_index.unwrap().?, + .base_node_inst = Type.fromInterned(tag_ty).typeDeclInstAllowGeneratedTag(zcu).?, .offset = .{ 
.container_field_name = enum_index }, }; const msg = try sema.errMsg(name_src, "union field '{}' ordered differently than corresponding enum field", .{ @@ -38029,6 +38128,11 @@ fn compareScalar( const pt = sema.pt; const coerced_lhs = try pt.getCoerced(lhs, ty); const coerced_rhs = try pt.getCoerced(rhs, ty); + + // Equality comparisons of signed zero and NaN need to use floating point semantics + if (coerced_lhs.isFloat(pt.zcu) or coerced_rhs.isFloat(pt.zcu)) + return Value.compareHeteroSema(coerced_lhs, op, coerced_rhs, pt); + switch (op) { .eq => return sema.valuesEqual(coerced_lhs, coerced_rhs, ty), .neq => return !(try sema.valuesEqual(coerced_lhs, coerced_rhs, ty)), diff --git a/src/Sema/comptime_ptr_access.zig b/src/Sema/comptime_ptr_access.zig index ceddb9457d..2e21c31f2b 100644 --- a/src/Sema/comptime_ptr_access.zig +++ b/src/Sema/comptime_ptr_access.zig @@ -65,6 +65,15 @@ pub fn storeComptimePtr( const zcu = pt.zcu; const ptr_info = ptr.typeOf(zcu).ptrInfo(zcu); assert(store_val.typeOf(zcu).toIntern() == ptr_info.child); + + { + const store_ty: Type = .fromInterned(ptr_info.child); + if (!try store_ty.comptimeOnlySema(pt) and !try store_ty.hasRuntimeBitsIgnoreComptimeSema(pt)) { + // zero-bit store; nothing to do + return .success; + } + } + // TODO: host size for vectors is terrible const host_bits = switch (ptr_info.flags.vector_index) { .none => ptr_info.packed_offset.host_size * 8, diff --git a/src/Type.zig b/src/Type.zig index 3208cf522d..321730067d 100644 --- a/src/Type.zig +++ b/src/Type.zig @@ -1740,10 +1740,7 @@ pub fn bitSizeInner( const len = array_type.lenIncludingSentinel(); if (len == 0) return 0; const elem_ty = Type.fromInterned(array_type.child); - const elem_size = @max( - (try elem_ty.abiAlignmentInner(strat_lazy, zcu, tid)).scalar.toByteUnits() orelse 0, - (try elem_ty.abiSizeInner(strat_lazy, zcu, tid)).scalar, - ); + const elem_size = (try elem_ty.abiSizeInner(strat_lazy, zcu, tid)).scalar; if (elem_size == 0) return 0; const 
elem_bit_size = try elem_ty.bitSizeInner(strat, zcu, tid); return (len - 1) * 8 * elem_size + elem_bit_size; diff --git a/src/Value.zig b/src/Value.zig index be2c73c3e9..36ea0cfddb 100644 --- a/src/Value.zig +++ b/src/Value.zig @@ -1132,6 +1132,8 @@ pub fn compareHeteroAdvanced( else => {}, } } + + if (lhs.isNan(zcu) or rhs.isNan(zcu)) return op == .neq; return (try orderAdvanced(lhs, rhs, strat, zcu, tid)).compare(op); } @@ -2675,7 +2677,7 @@ pub fn shlSatScalar( const shift: usize = @intCast(rhs.toUnsignedInt(zcu)); const limbs = try arena.alloc( std.math.big.Limb, - std.math.big.int.calcTwosCompLimbCount(info.bits) + 1, + std.math.big.int.calcTwosCompLimbCount(info.bits), ); var result_bigint = BigIntMutable{ .limbs = limbs, @@ -3777,6 +3779,7 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, pt: Zcu.PerThread) !Value { .auto => break :field .{ field_ty, try aggregate_ty.fieldAlignmentSema(field_idx, pt) }, .@"extern" => { // Well-defined layout, so just offset the pointer appropriately. + try aggregate_ty.resolveLayout(pt); const byte_off = aggregate_ty.structFieldOffset(field_idx, zcu); const field_align = a: { const parent_align = if (parent_ptr_info.flags.alignment == .none) pa: { diff --git a/src/Zcu.zig b/src/Zcu.zig index dbefec47ca..6cc30a9593 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -181,7 +181,10 @@ analysis_roots: std.BoundedArray(*Package.Module, 4) = .{}, /// Allocated into `gpa`. resolved_references: ?std.AutoHashMapUnmanaged(AnalUnit, ?ResolvedReference) = null, -skip_analysis_errors: bool = false, +/// If `true`, then semantic analysis must not occur on this update due to AstGen errors. +/// Essentially the entire pipeline after AstGen, including Sema, codegen, and link, is skipped. +/// Reset to `false` at the start of each update in `Compilation.update`. 
+skip_analysis_this_update: bool = false, stage1_flags: packed struct { have_winmain: bool = false, @@ -2748,7 +2751,7 @@ pub fn saveZoirCache(cache_file: std.fs.File, stat: std.fs.File.Stat, zoir: Zoir }, .{ .base = @ptrCast(zoir.limbs), - .len = zoir.limbs.len * 4, + .len = zoir.limbs.len * @sizeOf(std.math.big.Limb), }, .{ .base = zoir.string_bytes.ptr, @@ -3869,9 +3872,11 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolv .unnamed_test => true, .@"test", .decltest => a: { const fqn_slice = nav.fqn.toSlice(ip); - for (comp.test_filters) |test_filter| { - if (std.mem.indexOf(u8, fqn_slice, test_filter) != null) break; - } else break :a false; + if (comp.test_filters.len > 0) { + for (comp.test_filters) |test_filter| { + if (std.mem.indexOf(u8, fqn_slice, test_filter) != null) break; + } else break :a false; + } break :a true; }, }; @@ -3881,7 +3886,10 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolv @intFromEnum(inst_info.inst), }); try unit_queue.put(gpa, .wrap(.{ .nav_val = nav_id }), referencer); - try unit_queue.put(gpa, .wrap(.{ .func = nav.status.fully_resolved.val }), referencer); + // Non-fatal AstGen errors could mean this test decl failed + if (nav.status == .fully_resolved) { + try unit_queue.put(gpa, .wrap(.{ .func = nav.status.fully_resolved.val }), referencer); + } } } for (zcu.namespacePtr(ns).pub_decls.keys()) |nav| { diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index 699315835a..27eba39647 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -234,6 +234,7 @@ pub fn updateFile( error.FileTooBig => unreachable, // 0 is not too big else => |e| return e, }; + try cache_file.seekTo(0); if (stat.size > std.math.maxInt(u32)) return error.FileTooBig; @@ -1444,6 +1445,8 @@ fn analyzeNavType(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu.CompileEr break :ty .fromInterned(type_ref.toInterned().?); }; + try resolved_ty.resolveLayout(pt); + // In the case 
where the type is specified, this function is also responsible for resolving // the pointer modifiers, i.e. alignment, linksection, addrspace. const modifiers = try sema.resolveNavPtrModifiers(&block, zir_decl, inst_resolved.inst, resolved_ty); @@ -1705,7 +1708,7 @@ pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: Ai lf.updateFunc(pt, func_index, air, liveness) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.CodegenFail => assert(zcu.failed_codegen.contains(nav_index)), - error.Overflow => { + error.Overflow, error.RelocationNotByteAligned => { try zcu.failed_codegen.putNoClobber(gpa, nav_index, try Zcu.ErrorMsg.create( gpa, zcu.navSrcLoc(nav_index), @@ -3131,7 +3134,7 @@ pub fn linkerUpdateNav(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) error lf.updateNav(pt, nav_index) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.CodegenFail => assert(zcu.failed_codegen.contains(nav_index)), - error.Overflow => { + error.Overflow, error.RelocationNotByteAligned => { try zcu.failed_codegen.putNoClobber(gpa, nav_index, try Zcu.ErrorMsg.create( gpa, zcu.navSrcLoc(nav_index), diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index c66de9dd30..fd5c019b68 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -1768,8 +1768,15 @@ fn finishAirBookkeeping(func: *Func) void { fn finishAirResult(func: *Func, inst: Air.Inst.Index, result: MCValue) void { if (func.liveness.isUnused(inst)) switch (result) { .none, .dead, .unreach => {}, - else => unreachable, // Why didn't the result die? + // Why didn't the result die? + .register => |r| if (r != .zero) unreachable, + else => unreachable, } else { + switch (result) { + .register => |r| if (r == .zero) unreachable, // Why did we discard a used result? 
+ else => {}, + } + tracking_log.debug("%{d} => {} (birth)", .{ inst, result }); func.inst_tracking.putAssumeCapacityNoClobber(inst, InstTracking.init(result)); // In some cases, an operand may be reused as the result. @@ -7728,9 +7735,12 @@ fn airAtomicLoad(func: *Func, inst: Air.Inst.Index) !void { const ptr_mcv = try func.resolveInst(atomic_load.ptr); const bit_size = elem_ty.bitSize(zcu); - if (bit_size > 64) return func.fail("TODO: airAtomicStore > 64 bits", .{}); + if (bit_size > 64) return func.fail("TODO: airAtomicLoad > 64 bits", .{}); - const result_mcv = try func.allocRegOrMem(elem_ty, inst, true); + const result_mcv: MCValue = if (func.liveness.isUnused(inst)) + .{ .register = .zero } + else + try func.allocRegOrMem(elem_ty, inst, true); assert(result_mcv == .register); // should be less than 8 bytes if (order == .seq_cst) { @@ -7746,11 +7756,10 @@ fn airAtomicLoad(func: *Func, inst: Air.Inst.Index) !void { try func.load(result_mcv, ptr_mcv, ptr_ty); switch (order) { - // Don't guarnetee other memory operations to be ordered after the load. - .unordered => {}, - .monotonic => {}, - // Make sure all previous reads happen before any reading or writing accurs. - .seq_cst, .acquire => { + // Don't guarantee other memory operations to be ordered after the load. + .unordered, .monotonic => {}, + // Make sure all previous reads happen before any reading or writing occurs. 
+ .acquire, .seq_cst => { _ = try func.addInst(.{ .tag = .fence, .data = .{ .fence = .{ @@ -7792,6 +7801,17 @@ fn airAtomicStore(func: *Func, inst: Air.Inst.Index, order: std.builtin.AtomicOr } try func.store(ptr_mcv, val_mcv, ptr_ty); + + if (order == .seq_cst) { + _ = try func.addInst(.{ + .tag = .fence, + .data = .{ .fence = .{ + .pred = .rw, + .succ = .rw, + } }, + }); + } + return func.finishAir(inst, .unreach, .{ bin_op.lhs, bin_op.rhs, .none }); } diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 641347bee1..c48673d526 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -1398,11 +1398,22 @@ fn resolveCallingConventionValues( }, .wasm_mvp => { for (fn_info.param_types.get(ip)) |ty| { - const ty_classes = abi.classifyType(Type.fromInterned(ty), zcu); - for (ty_classes) |class| { - if (class == .none) continue; - try args.append(.{ .local = .{ .value = result.local_index, .references = 1 } }); - result.local_index += 1; + if (!Type.fromInterned(ty).hasRuntimeBitsIgnoreComptime(zcu)) { + continue; + } + switch (abi.classifyType(.fromInterned(ty), zcu)) { + .direct => |scalar_ty| if (!abi.lowerAsDoubleI64(scalar_ty, zcu)) { + try args.append(.{ .local = .{ .value = result.local_index, .references = 1 } }); + result.local_index += 1; + } else { + try args.append(.{ .local = .{ .value = result.local_index, .references = 1 } }); + try args.append(.{ .local = .{ .value = result.local_index + 1, .references = 1 } }); + result.local_index += 2; + }, + .indirect => { + try args.append(.{ .local = .{ .value = result.local_index, .references = 1 } }); + result.local_index += 1; + }, } } }, @@ -1418,14 +1429,13 @@ pub fn firstParamSRet( zcu: *const Zcu, target: *const std.Target, ) bool { + if (!return_type.hasRuntimeBitsIgnoreComptime(zcu)) return false; switch (cc) { .@"inline" => unreachable, .auto => return isByRef(return_type, zcu, target), - .wasm_mvp => { - const ty_classes = abi.classifyType(return_type, zcu); - if 
(ty_classes[0] == .indirect) return true; - if (ty_classes[0] == .direct and ty_classes[1] == .direct) return true; - return false; + .wasm_mvp => switch (abi.classifyType(return_type, zcu)) { + .direct => |scalar_ty| return abi.lowerAsDoubleI64(scalar_ty, zcu), + .indirect => return true, }, else => return false, } @@ -1439,26 +1449,19 @@ fn lowerArg(cg: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value: WV } const zcu = cg.pt.zcu; - const ty_classes = abi.classifyType(ty, zcu); - assert(ty_classes[0] != .none); - switch (ty.zigTypeTag(zcu)) { - .@"struct", .@"union" => { - if (ty_classes[0] == .indirect) { + + switch (abi.classifyType(ty, zcu)) { + .direct => |scalar_type| if (!abi.lowerAsDoubleI64(scalar_type, zcu)) { + if (!isByRef(ty, zcu, cg.target)) { return cg.lowerToStack(value); + } else { + switch (value) { + .nav_ref, .stack_offset => _ = try cg.load(value, scalar_type, 0), + .dead => unreachable, + else => try cg.emitWValue(value), + } } - assert(ty_classes[0] == .direct); - const scalar_type = abi.scalarType(ty, zcu); - switch (value) { - .nav_ref, .stack_offset => _ = try cg.load(value, scalar_type, 0), - .dead => unreachable, - else => try cg.emitWValue(value), - } - }, - .int, .float => { - if (ty_classes[1] == .none) { - return cg.lowerToStack(value); - } - assert(ty_classes[0] == .direct and ty_classes[1] == .direct); + } else { assert(ty.abiSize(zcu) == 16); // in this case we have an integer or float that must be lowered as 2 i64's. 
try cg.emitWValue(value); @@ -1466,7 +1469,7 @@ fn lowerArg(cg: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value: WV try cg.emitWValue(value); try cg.addMemArg(.i64_load, .{ .offset = value.offset() + 8, .alignment = 8 }); }, - else => return cg.lowerToStack(value), + .indirect => return cg.lowerToStack(value), } } @@ -2125,23 +2128,16 @@ fn airRet(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { if (cg.return_value != .none) { try cg.store(cg.return_value, operand, ret_ty, 0); } else if (fn_info.cc == .wasm_mvp and ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) { - switch (ret_ty.zigTypeTag(zcu)) { - // Aggregate types can be lowered as a singular value - .@"struct", .@"union" => { - const scalar_type = abi.scalarType(ret_ty, zcu); - try cg.emitWValue(operand); - const opcode = buildOpcode(.{ - .op = .load, - .width = @as(u8, @intCast(scalar_type.abiSize(zcu) * 8)), - .signedness = if (scalar_type.isSignedInt(zcu)) .signed else .unsigned, - .valtype1 = typeToValtype(scalar_type, zcu, cg.target), - }); - try cg.addMemArg(Mir.Inst.Tag.fromOpcode(opcode), .{ - .offset = operand.offset(), - .alignment = @intCast(scalar_type.abiAlignment(zcu).toByteUnits().?), - }); + switch (abi.classifyType(ret_ty, zcu)) { + .direct => |scalar_type| { + assert(!abi.lowerAsDoubleI64(scalar_type, zcu)); + if (!isByRef(ret_ty, zcu, cg.target)) { + try cg.emitWValue(operand); + } else { + _ = try cg.load(operand, scalar_type, 0); + } }, - else => try cg.emitWValue(operand), + .indirect => unreachable, } } else { if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu) and ret_ty.isError(zcu)) { @@ -2267,14 +2263,24 @@ fn airCall(cg: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModifie break :result_value .none; } else if (first_param_sret) { break :result_value sret; - // TODO: Make this less fragile and optimize - } else if (zcu.typeToFunc(fn_ty).?.cc == .wasm_mvp and ret_ty.zigTypeTag(zcu) == .@"struct" or ret_ty.zigTypeTag(zcu) == .@"union") { - const result_local = try 
cg.allocLocal(ret_ty); - try cg.addLocal(.local_set, result_local.local.value); - const scalar_type = abi.scalarType(ret_ty, zcu); - const result = try cg.allocStack(scalar_type); - try cg.store(result, result_local, scalar_type, 0); - break :result_value result; + } else if (zcu.typeToFunc(fn_ty).?.cc == .wasm_mvp) { + switch (abi.classifyType(ret_ty, zcu)) { + .direct => |scalar_type| { + assert(!abi.lowerAsDoubleI64(scalar_type, zcu)); + if (!isByRef(ret_ty, zcu, cg.target)) { + const result_local = try cg.allocLocal(ret_ty); + try cg.addLocal(.local_set, result_local.local.value); + break :result_value result_local; + } else { + const result_local = try cg.allocLocal(ret_ty); + try cg.addLocal(.local_set, result_local.local.value); + const result = try cg.allocStack(ret_ty); + try cg.store(result, result_local, scalar_type, 0); + break :result_value result; + } + }, + .indirect => unreachable, + } } else { const result_local = try cg.allocLocal(ret_ty); try cg.addLocal(.local_set, result_local.local.value); @@ -2547,26 +2553,17 @@ fn airArg(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { const cc = zcu.typeToFunc(zcu.navValue(cg.owner_nav).typeOf(zcu)).?.cc; const arg_ty = cg.typeOfIndex(inst); if (cc == .wasm_mvp) { - const arg_classes = abi.classifyType(arg_ty, zcu); - for (arg_classes) |class| { - if (class != .none) { + switch (abi.classifyType(arg_ty, zcu)) { + .direct => |scalar_ty| if (!abi.lowerAsDoubleI64(scalar_ty, zcu)) { cg.arg_index += 1; - } - } - - // When we have an argument that's passed using more than a single parameter, - // we combine them into a single stack value - if (arg_classes[0] == .direct and arg_classes[1] == .direct) { - if (arg_ty.zigTypeTag(zcu) != .int and arg_ty.zigTypeTag(zcu) != .float) { - return cg.fail( - "TODO: Implement C-ABI argument for type '{}'", - .{arg_ty.fmt(pt)}, - ); - } - const result = try cg.allocStack(arg_ty); - try cg.store(result, arg, Type.u64, 0); - try cg.store(result, cg.args[arg_index + 1], 
Type.u64, 8); - return cg.finishAir(inst, result, &.{}); + } else { + cg.arg_index += 2; + const result = try cg.allocStack(arg_ty); + try cg.store(result, arg, Type.u64, 0); + try cg.store(result, cg.args[arg_index + 1], Type.u64, 8); + return cg.finishAir(inst, result, &.{}); + }, + .indirect => cg.arg_index += 1, } } else { cg.arg_index += 1; diff --git a/src/arch/wasm/abi.zig b/src/arch/wasm/abi.zig index d7ca4cf715..a1fa812649 100644 --- a/src/arch/wasm/abi.zig +++ b/src/arch/wasm/abi.zig @@ -13,70 +13,55 @@ const Zcu = @import("../../Zcu.zig"); /// Defines how to pass a type as part of a function signature, /// both for parameters as well as return values. -pub const Class = enum { direct, indirect, none }; - -const none: [2]Class = .{ .none, .none }; -const memory: [2]Class = .{ .indirect, .none }; -const direct: [2]Class = .{ .direct, .none }; +pub const Class = union(enum) { + direct: Type, + indirect, +}; /// Classifies a given Zig type to determine how they must be passed /// or returned as value within a wasm function. -/// When all elements result in `.none`, no value must be passed in or returned. 
-pub fn classifyType(ty: Type, zcu: *const Zcu) [2]Class { +pub fn classifyType(ty: Type, zcu: *const Zcu) Class { const ip = &zcu.intern_pool; - const target = zcu.getTarget(); - if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) return none; + assert(ty.hasRuntimeBitsIgnoreComptime(zcu)); switch (ty.zigTypeTag(zcu)) { + .int, .@"enum", .error_set => return .{ .direct = ty }, + .float => return .{ .direct = ty }, + .bool => return .{ .direct = ty }, + .vector => return .{ .direct = ty }, + .array => return .indirect, + .optional => { + assert(ty.isPtrLikeOptional(zcu)); + return .{ .direct = ty }; + }, + .pointer => { + assert(!ty.isSlice(zcu)); + return .{ .direct = ty }; + }, .@"struct" => { const struct_type = zcu.typeToStruct(ty).?; if (struct_type.layout == .@"packed") { - if (ty.bitSize(zcu) <= 64) return direct; - return .{ .direct, .direct }; + return .{ .direct = ty }; } if (struct_type.field_types.len > 1) { // The struct type is non-scalar. - return memory; + return .indirect; } const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[0]); const explicit_align = struct_type.fieldAlign(ip, 0); if (explicit_align != .none) { if (explicit_align.compareStrict(.gt, field_ty.abiAlignment(zcu))) - return memory; + return .indirect; } return classifyType(field_ty, zcu); }, - .int, .@"enum", .error_set => { - const int_bits = ty.intInfo(zcu).bits; - if (int_bits <= 64) return direct; - if (int_bits <= 128) return .{ .direct, .direct }; - return memory; - }, - .float => { - const float_bits = ty.floatBits(target); - if (float_bits <= 64) return direct; - if (float_bits <= 128) return .{ .direct, .direct }; - return memory; - }, - .bool => return direct, - .vector => return direct, - .array => return memory, - .optional => { - assert(ty.isPtrLikeOptional(zcu)); - return direct; - }, - .pointer => { - assert(!ty.isSlice(zcu)); - return direct; - }, .@"union" => { const union_obj = zcu.typeToUnion(ty).?; if (union_obj.flagsUnordered(ip).layout == .@"packed") { - 
if (ty.bitSize(zcu) <= 64) return direct; - return .{ .direct, .direct }; + return .{ .direct = ty }; } const layout = ty.unionGetLayout(zcu); assert(layout.tag_size == 0); - if (union_obj.field_types.len > 1) return memory; + if (union_obj.field_types.len > 1) return .indirect; const first_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[0]); return classifyType(first_field_ty, zcu); }, @@ -97,32 +82,6 @@ pub fn classifyType(ty: Type, zcu: *const Zcu) [2]Class { } } -/// Returns the scalar type a given type can represent. -/// Asserts given type can be represented as scalar, such as -/// a struct with a single scalar field. -pub fn scalarType(ty: Type, zcu: *Zcu) Type { - const ip = &zcu.intern_pool; - switch (ty.zigTypeTag(zcu)) { - .@"struct" => { - if (zcu.typeToPackedStruct(ty)) |packed_struct| { - return scalarType(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)), zcu); - } else { - assert(ty.structFieldCount(zcu) == 1); - return scalarType(ty.fieldType(0, zcu), zcu); - } - }, - .@"union" => { - const union_obj = zcu.typeToUnion(ty).?; - if (union_obj.flagsUnordered(ip).layout != .@"packed") { - const layout = Type.getUnionLayout(union_obj, zcu); - if (layout.payload_size == 0 and layout.tag_size != 0) { - return scalarType(ty.unionTagTypeSafety(zcu).?, zcu); - } - assert(union_obj.field_types.len == 1); - } - const first_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[0]); - return scalarType(first_field_ty, zcu); - }, - else => return ty, - } +pub fn lowerAsDoubleI64(scalar_ty: Type, zcu: *const Zcu) bool { + return scalar_ty.bitSize(zcu) > 64; } diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 4af9b01257..9f8dd70b39 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -88110,12 +88110,15 @@ fn airStore(self: *CodeGen, inst: Air.Inst.Index, safety: bool) !void { const reg_locks = self.register_manager.lockRegsAssumeUnused(3, .{ .rdi, .rsi, .rcx }); defer for (reg_locks) 
|lock| self.register_manager.unlockReg(lock); + const ptr_ty = self.typeOf(bin_op.lhs); + const ptr_info = ptr_ty.ptrInfo(zcu); + const is_packed = ptr_info.flags.vector_index != .none or ptr_info.packed_offset.host_size > 0; + if (is_packed) try self.spillEflagsIfOccupied(); + const src_mcv = try self.resolveInst(bin_op.rhs); const ptr_mcv = try self.resolveInst(bin_op.lhs); - const ptr_ty = self.typeOf(bin_op.lhs); - const ptr_info = ptr_ty.ptrInfo(zcu); - if (ptr_info.flags.vector_index != .none or ptr_info.packed_offset.host_size > 0) { + if (is_packed) { try self.packedStore(ptr_ty, ptr_mcv, src_mcv); } else { try self.store(ptr_ty, ptr_mcv, src_mcv, .{ .safety = safety }); @@ -97114,23 +97117,29 @@ fn airAtomicRmw(self: *CodeGen, inst: Air.Inst.Index) !void { fn airAtomicLoad(self: *CodeGen, inst: Air.Inst.Index) !void { const atomic_load = self.air.instructions.items(.data)[@intFromEnum(inst)].atomic_load; + const result: MCValue = result: { + const ptr_ty = self.typeOf(atomic_load.ptr); + const ptr_mcv = try self.resolveInst(atomic_load.ptr); + const ptr_lock = switch (ptr_mcv) { + .register => |reg| self.register_manager.lockRegAssumeUnused(reg), + else => null, + }; + defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock); - const ptr_ty = self.typeOf(atomic_load.ptr); - const ptr_mcv = try self.resolveInst(atomic_load.ptr); - const ptr_lock = switch (ptr_mcv) { - .register => |reg| self.register_manager.lockRegAssumeUnused(reg), - else => null, - }; - defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock); + const unused = self.liveness.isUnused(inst); - const dst_mcv = - if (self.reuseOperand(inst, atomic_load.ptr, 0, ptr_mcv)) + const dst_mcv: MCValue = if (unused) + .{ .register = try self.register_manager.allocReg(null, self.regSetForType(ptr_ty.childType(self.pt.zcu))) } + else if (self.reuseOperand(inst, atomic_load.ptr, 0, ptr_mcv)) ptr_mcv else try self.allocRegOrMem(inst, true); - try self.load(dst_mcv, ptr_ty, ptr_mcv); - 
return self.finishAir(inst, dst_mcv, .{ atomic_load.ptr, .none, .none }); + try self.load(dst_mcv, ptr_ty, ptr_mcv); + + break :result if (unused) .unreach else dst_mcv; + }; + return self.finishAir(inst, result, .{ atomic_load.ptr, .none, .none }); } fn airAtomicStore(self: *CodeGen, inst: Air.Inst.Index, order: std.builtin.AtomicOrder) !void { @@ -97909,16 +97918,150 @@ fn airSelect(self: *CodeGen, inst: Air.Inst.Index) !void { switch (pred_mcv) { .register => |pred_reg| switch (pred_reg.class()) { .general_purpose => {}, - .sse => if (need_xmm0 and pred_reg.id() != comptime Register.xmm0.id()) { - try self.register_manager.getKnownReg(.xmm0, null); - try self.genSetReg(.xmm0, pred_ty, pred_mcv, .{}); - break :mask .xmm0; - } else break :mask if (has_blend) - pred_reg + .sse => if (elem_ty.toIntern() == .bool_type) + if (need_xmm0 and pred_reg.id() != comptime Register.xmm0.id()) { + try self.register_manager.getKnownReg(.xmm0, null); + try self.genSetReg(.xmm0, pred_ty, pred_mcv, .{}); + break :mask .xmm0; + } else break :mask if (has_blend) + pred_reg + else + try self.copyToTmpRegister(pred_ty, pred_mcv) else - try self.copyToTmpRegister(pred_ty, pred_mcv), + return self.fail("TODO implement airSelect for {}", .{ty.fmt(pt)}), else => unreachable, }, + .register_mask => |pred_reg_mask| { + if (pred_reg_mask.info.scalar.bitSize(self.target) != 8 * elem_abi_size) + return self.fail("TODO implement airSelect for {}", .{ty.fmt(pt)}); + + const mask_reg: Register = if (need_xmm0 and pred_reg_mask.reg.id() != comptime Register.xmm0.id()) mask_reg: { + try self.register_manager.getKnownReg(.xmm0, null); + try self.genSetReg(.xmm0, ty, .{ .register = pred_reg_mask.reg }, .{}); + break :mask_reg .xmm0; + } else pred_reg_mask.reg; + const mask_alias = registerAlias(mask_reg, abi_size); + const mask_lock = self.register_manager.lockRegAssumeUnused(mask_reg); + defer self.register_manager.unlockReg(mask_lock); + + const lhs_mcv = try self.resolveInst(extra.lhs); + const 
lhs_lock = switch (lhs_mcv) { + .register => |lhs_reg| self.register_manager.lockRegAssumeUnused(lhs_reg), + else => null, + }; + defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock); + + const rhs_mcv = try self.resolveInst(extra.rhs); + const rhs_lock = switch (rhs_mcv) { + .register => |rhs_reg| self.register_manager.lockReg(rhs_reg), + else => null, + }; + defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock); + + const order = has_blend != pred_reg_mask.info.inverted; + const reuse_mcv, const other_mcv = if (order) + .{ rhs_mcv, lhs_mcv } + else + .{ lhs_mcv, rhs_mcv }; + const dst_mcv: MCValue = if (reuse_mcv.isRegister() and self.reuseOperand( + inst, + if (order) extra.rhs else extra.lhs, + @intFromBool(order), + reuse_mcv, + )) reuse_mcv else if (has_avx) + .{ .register = try self.register_manager.allocReg(inst, abi.RegisterClass.sse) } + else + try self.copyToRegisterWithInstTracking(inst, ty, reuse_mcv); + const dst_reg = dst_mcv.getReg().?; + const dst_alias = registerAlias(dst_reg, abi_size); + const dst_lock = self.register_manager.lockReg(dst_reg); + defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); + + const mir_tag = @as(?Mir.Inst.FixedTag, if ((pred_reg_mask.info.kind == .all and + elem_ty.toIntern() != .f32_type and elem_ty.toIntern() != .f64_type) or pred_reg_mask.info.scalar == .byte) + if (has_avx) + .{ .vp_b, .blendv } + else if (has_blend) + .{ .p_b, .blendv } + else if (pred_reg_mask.info.kind == .all) + .{ .p_, undefined } + else + null + else if ((pred_reg_mask.info.kind == .all and (elem_ty.toIntern() != .f64_type or !self.hasFeature(.sse2))) or + pred_reg_mask.info.scalar == .dword) + if (has_avx) + .{ .v_ps, .blendv } + else if (has_blend) + .{ ._ps, .blendv } + else if (pred_reg_mask.info.kind == .all) + .{ ._ps, undefined } + else + null + else if (pred_reg_mask.info.kind == .all or pred_reg_mask.info.scalar == .qword) + if (has_avx) + .{ .v_pd, .blendv } + else if (has_blend) + .{ ._pd, .blendv 
} + else if (pred_reg_mask.info.kind == .all) + .{ ._pd, undefined } + else + null + else + null) orelse return self.fail("TODO implement airSelect for {}", .{ty.fmt(pt)}); + if (has_avx) { + const rhs_alias = if (reuse_mcv.isRegister()) + registerAlias(reuse_mcv.getReg().?, abi_size) + else rhs: { + try self.genSetReg(dst_reg, ty, reuse_mcv, .{}); + break :rhs dst_alias; + }; + if (other_mcv.isBase()) try self.asmRegisterRegisterMemoryRegister( + mir_tag, + dst_alias, + rhs_alias, + try other_mcv.mem(self, .{ .size = self.memSize(ty) }), + mask_alias, + ) else try self.asmRegisterRegisterRegisterRegister( + mir_tag, + dst_alias, + rhs_alias, + registerAlias(if (other_mcv.isRegister()) + other_mcv.getReg().? + else + try self.copyToTmpRegister(ty, other_mcv), abi_size), + mask_alias, + ); + } else if (has_blend) if (other_mcv.isBase()) try self.asmRegisterMemoryRegister( + mir_tag, + dst_alias, + try other_mcv.mem(self, .{ .size = self.memSize(ty) }), + mask_alias, + ) else try self.asmRegisterRegisterRegister( + mir_tag, + dst_alias, + registerAlias(if (other_mcv.isRegister()) + other_mcv.getReg().? + else + try self.copyToTmpRegister(ty, other_mcv), abi_size), + mask_alias, + ) else { + try self.asmRegisterRegister(.{ mir_tag[0], .@"and" }, dst_alias, mask_alias); + if (other_mcv.isBase()) try self.asmRegisterMemory( + .{ mir_tag[0], .andn }, + mask_alias, + try other_mcv.mem(self, .{ .size = .fromSize(abi_size) }), + ) else try self.asmRegisterRegister( + .{ mir_tag[0], .andn }, + mask_alias, + if (other_mcv.isRegister()) + other_mcv.getReg().? 
+ else + try self.copyToTmpRegister(ty, other_mcv), + ); + try self.asmRegisterRegister(.{ mir_tag[0], .@"or" }, dst_alias, mask_alias); + } + break :result dst_mcv; + }, else => {}, } const mask_reg: Register = if (need_xmm0) mask_reg: { @@ -98121,7 +98264,7 @@ fn airSelect(self: *CodeGen, inst: Air.Inst.Index) !void { const dst_lock = self.register_manager.lockReg(dst_reg); defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); - const mir_tag = @as(?Mir.Inst.FixedTag, switch (ty.childType(zcu).zigTypeTag(zcu)) { + const mir_tag = @as(?Mir.Inst.FixedTag, switch (elem_ty.zigTypeTag(zcu)) { else => null, .int => switch (abi_size) { 0 => unreachable, @@ -98137,7 +98280,7 @@ fn airSelect(self: *CodeGen, inst: Air.Inst.Index) !void { null, else => null, }, - .float => switch (ty.childType(zcu).floatBits(self.target.*)) { + .float => switch (elem_ty.floatBits(self.target.*)) { else => unreachable, 16, 80, 128 => null, 32 => switch (vec_len) { @@ -98191,30 +98334,20 @@ fn airSelect(self: *CodeGen, inst: Air.Inst.Index) !void { try self.copyToTmpRegister(ty, lhs_mcv), abi_size), mask_alias, ) else { - const mir_fixes = @as(?Mir.Inst.Fixes, switch (elem_ty.zigTypeTag(zcu)) { - else => null, - .int => .p_, - .float => switch (elem_ty.floatBits(self.target.*)) { - 32 => ._ps, - 64 => ._pd, - 16, 80, 128 => null, - else => unreachable, - }, - }) orelse return self.fail("TODO implement airSelect for {}", .{ty.fmt(pt)}); - try self.asmRegisterRegister(.{ mir_fixes, .@"and" }, dst_alias, mask_alias); + try self.asmRegisterRegister(.{ mir_tag[0], .@"and" }, dst_alias, mask_alias); if (rhs_mcv.isBase()) try self.asmRegisterMemory( - .{ mir_fixes, .andn }, + .{ mir_tag[0], .andn }, mask_alias, try rhs_mcv.mem(self, .{ .size = .fromSize(abi_size) }), ) else try self.asmRegisterRegister( - .{ mir_fixes, .andn }, + .{ mir_tag[0], .andn }, mask_alias, if (rhs_mcv.isRegister()) rhs_mcv.getReg().? 
else try self.copyToTmpRegister(ty, rhs_mcv), ); - try self.asmRegisterRegister(.{ mir_fixes, .@"or" }, dst_alias, mask_alias); + try self.asmRegisterRegister(.{ mir_tag[0], .@"or" }, dst_alias, mask_alias); } break :result dst_mcv; }; @@ -100753,11 +100886,11 @@ const Temp = struct { const new_temp_index = cg.next_temp_index; cg.temp_type[@intFromEnum(new_temp_index)] = .usize; cg.next_temp_index = @enumFromInt(@intFromEnum(new_temp_index) + 1); - switch (temp.tracking(cg).short) { - else => |mcv| std.debug.panic("{s}: {}\n", .{ @src().fn_name, mcv }), + const mcv = temp.tracking(cg).short; + switch (mcv) { + else => std.debug.panic("{s}: {}\n", .{ @src().fn_name, mcv }), .register => |reg| { - const new_reg = - try cg.register_manager.allocReg(new_temp_index.toIndex(), abi.RegisterClass.gp); + const new_reg = try cg.register_manager.allocReg(new_temp_index.toIndex(), abi.RegisterClass.gp); new_temp_index.tracking(cg).* = .init(.{ .register = new_reg }); try cg.asmRegisterMemory(.{ ._, .lea }, new_reg.to64(), .{ .base = .{ .reg = reg.to64() }, @@ -100765,33 +100898,22 @@ const Temp = struct { }); }, .register_offset => |reg_off| { - const new_reg = - try cg.register_manager.allocReg(new_temp_index.toIndex(), abi.RegisterClass.gp); + const new_reg = try cg.register_manager.allocReg(new_temp_index.toIndex(), abi.RegisterClass.gp); new_temp_index.tracking(cg).* = .init(.{ .register = new_reg }); try cg.asmRegisterMemory(.{ ._, .lea }, new_reg.to64(), .{ .base = .{ .reg = reg_off.reg.to64() }, .mod = .{ .rm = .{ .disp = reg_off.off + off } }, }); }, + .load_symbol, .load_frame => { + const new_reg = try cg.register_manager.allocReg(new_temp_index.toIndex(), abi.RegisterClass.gp); + new_temp_index.tracking(cg).* = .init(.{ .register_offset = .{ .reg = new_reg, .off = off } }); + try cg.genSetReg(new_reg, .usize, mcv, .{}); + }, .lea_symbol => |sym_off| new_temp_index.tracking(cg).* = .init(.{ .lea_symbol = .{ .sym_index = sym_off.sym_index, .off = sym_off.off + off, } 
}), - .load_frame => |frame_addr| { - const new_reg = - try cg.register_manager.allocReg(new_temp_index.toIndex(), abi.RegisterClass.gp); - new_temp_index.tracking(cg).* = .init(.{ .register_offset = .{ - .reg = new_reg, - .off = off, - } }); - try cg.asmRegisterMemory(.{ ._, .mov }, new_reg.to64(), .{ - .base = .{ .frame = frame_addr.index }, - .mod = .{ .rm = .{ - .size = .qword, - .disp = frame_addr.off, - } }, - }); - }, .lea_frame => |frame_addr| new_temp_index.tracking(cg).* = .init(.{ .lea_frame = .{ .index = frame_addr.index, .off = frame_addr.off + off, @@ -101061,8 +101183,9 @@ const Temp = struct { const result_temp: Temp = .{ .index = result_temp_index.toIndex() }; assert(cg.reuseTemp(result_temp.index, first_temp.index, first_temp_tracking)); assert(cg.reuseTemp(result_temp.index, second_temp.index, second_temp_tracking)); - cg.temp_type[@intFromEnum(result_temp_index)] = .slice_const_u8; result_temp_index.tracking(cg).* = .init(result); + cg.temp_type[@intFromEnum(result_temp_index)] = .slice_const_u8; + cg.next_temp_index = @enumFromInt(@intFromEnum(result_temp_index) + 1); first_temp.* = result_temp; second_temp.* = result_temp; } @@ -101108,7 +101231,8 @@ const Temp = struct { => return temp.toRegClass(true, .general_purpose, cg), .lea_symbol => |sym_off| { const off = sym_off.off; - if (off == 0) return false; + // hack around linker relocation bugs + if (false and off == 0) return false; try temp.toOffset(-off, cg); while (try temp.toRegClass(true, .general_purpose, cg)) {} try temp.toOffset(off, cg); @@ -101586,10 +101710,19 @@ const Temp = struct { .dst_temps = .{ .{ .ref = .src0 }, .unused }, .each = .{ .once = &.{} }, }, .{ - .required_features = .{ .fast_imm16, null, null, null }, .src_constraints = .{ .{ .unsigned_int = .word }, .any, .any }, .patterns = &.{ .{ .src = .{ .mut_mem, .none, .none } }, + }, + .dst_temps = .{ .{ .ref = .src0 }, .unused }, + .clobbers = .{ .eflags = true }, + .each = .{ .once = &.{ + .{ ._, ._, .@"and", .dst0w, 
.ua(.src0, .add_umax), ._, ._ }, + } }, + }, .{ + .required_features = .{ .fast_imm16, null, null, null }, + .src_constraints = .{ .{ .unsigned_int = .word }, .any, .any }, + .patterns = &.{ .{ .src = .{ .to_mut_gpr, .none, .none } }, }, .dst_temps = .{ .{ .ref = .src0 }, .unused }, @@ -105711,7 +105844,8 @@ const Temp = struct { ) InnerError!void { const tomb_bits = cg.liveness.getTombBits(inst); for (0.., op_refs, op_temps) |op_index, op_ref, op_temp| { - if (op_temp.index != temp.index) try op_temp.die(cg); + if (op_temp.index == temp.index) continue; + if (op_temp.tracking(cg).short != .dead) try op_temp.die(cg); if (tomb_bits & @as(Liveness.Bpi, 1) << @intCast(op_index) == 0) continue; if (cg.reused_operands.isSet(op_index)) continue; try cg.processDeath(op_ref.toIndexAllowNone() orelse continue); @@ -105730,6 +105864,12 @@ const Temp = struct { assert(cg.reuseTemp(inst, temp_index.toIndex(), temp_tracking)); }, } + for (0.., op_refs, op_temps) |op_index, op_ref, op_temp| { + if (op_temp.index != temp.index) continue; + if (tomb_bits & @as(Liveness.Bpi, 1) << @intCast(op_index) == 0) continue; + if (cg.reused_operands.isSet(op_index)) continue; + try cg.processDeath(op_ref.toIndexAllowNone() orelse continue); + } } fn die(temp: Temp, cg: *CodeGen) InnerError!void { @@ -105755,7 +105895,8 @@ const Temp = struct { } fn isValid(index: Index, cg: *CodeGen) bool { - return index.tracking(cg).short != .dead; + return @intFromEnum(index) < @intFromEnum(cg.next_temp_index) and + index.tracking(cg).short != .dead; } fn typeOf(index: Index, cg: *CodeGen) Type { @@ -106887,10 +107028,17 @@ const Select = struct { }, .frame => |frame_index| .{ try cg.tempInit(spec.type, .{ .load_frame = .{ .index = frame_index } }), true }, .lazy_symbol => |lazy_symbol_spec| { + const ip = &pt.zcu.intern_pool; const ty = if (lazy_symbol_spec.ref == .none) spec.type else lazy_symbol_spec.ref.typeOf(s); const lazy_symbol: link.File.LazySymbol = .{ .kind = lazy_symbol_spec.kind, - .ty = 
ty.toIntern(), + .ty = switch (ip.indexToKey(ty.toIntern())) { + .inferred_error_set_type => |func_index| switch (ip.funcIesResolvedUnordered(func_index)) { + .none => unreachable, // unresolved inferred error set + else => |ty_index| ty_index, + }, + else => ty.toIntern(), + }, }; return .{ try cg.tempInit(.usize, .{ .lea_symbol = .{ .sym_index = if (cg.bin_file.cast(.elf)) |elf_file| diff --git a/src/arch/x86_64/Encoding.zig b/src/arch/x86_64/Encoding.zig index b34fb593f7..484296c79a 100644 --- a/src/arch/x86_64/Encoding.zig +++ b/src/arch/x86_64/Encoding.zig @@ -1029,8 +1029,8 @@ const mnemonic_to_encodings_map = init: { storage_i += value.len; } var mnemonic_i: [mnemonic_count]usize = @splat(0); - const ops_len = @typeInfo(std.meta.FieldType(Data, .ops)).array.len; - const opc_len = @typeInfo(std.meta.FieldType(Data, .opc)).array.len; + const ops_len = @typeInfo(@FieldType(Data, "ops")).array.len; + const opc_len = @typeInfo(@FieldType(Data, "opc")).array.len; for (encodings) |entry| { const i = &mnemonic_i[@intFromEnum(entry[0])]; mnemonic_map[@intFromEnum(entry[0])][i.*] = .{ diff --git a/src/codegen.zig b/src/codegen.zig index ad241d047d..bad8a97f1c 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -23,10 +23,7 @@ const Zir = std.zig.Zir; const Alignment = InternPool.Alignment; const dev = @import("dev.zig"); -pub const CodeGenError = error{ - OutOfMemory, - /// Compiler was asked to operate on a number larger than supported. - Overflow, +pub const CodeGenError = GenerateSymbolError || error{ /// Indicates the error is already stored in Zcu `failed_codegen`. CodegenFail, }; @@ -177,6 +174,8 @@ pub const GenerateSymbolError = error{ OutOfMemory, /// Compiler was asked to operate on a number larger than supported. Overflow, + /// Compiler was asked to produce a non-byte-aligned relocation. 
+ RelocationNotByteAligned, }; pub fn generateSymbol( @@ -481,12 +480,18 @@ pub fn generateSymbol( // pointer may point to a decl which must be marked used // but can also result in a relocation. Therefore we handle those separately. if (Type.fromInterned(field_ty).zigTypeTag(zcu) == .pointer) { - const field_size = math.cast(usize, Type.fromInterned(field_ty).abiSize(zcu)) orelse - return error.Overflow; - var tmp_list = try std.ArrayListUnmanaged(u8).initCapacity(gpa, field_size); - defer tmp_list.deinit(gpa); - try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), &tmp_list, reloc_parent); - @memcpy(code.items[current_pos..][0..tmp_list.items.len], tmp_list.items); + const field_offset = std.math.divExact(u16, bits, 8) catch |err| switch (err) { + error.DivisionByZero => unreachable, + error.UnexpectedRemainder => return error.RelocationNotByteAligned, + }; + code.items.len = current_pos + field_offset; + // TODO: code.lockPointers(); + defer { + assert(code.items.len == current_pos + field_offset + @divExact(target.ptrBitWidth(), 8)); + // TODO: code.unlockPointers(); + code.items.len = current_pos + abi_size; + } + try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), code, reloc_parent); } else { Value.fromInterned(field_val).writeToPackedMemory(Type.fromInterned(field_ty), pt, code.items[current_pos..], bits) catch unreachable; } diff --git a/src/codegen/c.zig b/src/codegen/c.zig index cd4573375d..6e2e5c35af 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -611,7 +611,7 @@ pub const Function = struct { const a = try Assignment.start(f, writer, ctype); try f.writeCValue(writer, dst, .Other); try a.assign(f, writer); - try f.writeCValue(writer, src, .Initializer); + try f.writeCValue(writer, src, .Other); try a.end(f, writer); } @@ -2826,7 +2826,7 @@ pub fn genLazyFn(o: *Object, lazy_ctype_pool: *const CType.Pool, lazy_fn: LazyFn }); try o.dg.renderTypeAndName(w, name_ty, .{ .identifier = "name" }, Const, .none, 
.complete); try w.writeAll(" = "); - try o.dg.renderValue(w, Value.fromInterned(name_val), .Initializer); + try o.dg.renderValue(w, Value.fromInterned(name_val), .StaticInitializer); try w.writeAll(";\n return ("); try o.dg.renderType(w, name_slice_ty); try w.print("){{{}, {}}};\n", .{ @@ -4044,7 +4044,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { const new_local = try f.allocLocal(inst, src_ty); try f.writeCValue(writer, new_local, .Other); try writer.writeAll(" = "); - try f.writeCValue(writer, src_val, .Initializer); + try f.writeCValue(writer, src_val, .Other); try writer.writeAll(";\n"); break :blk new_local; @@ -4515,7 +4515,7 @@ fn airSlice(f: *Function, inst: Air.Inst.Index) !CValue { const a = try Assignment.start(f, writer, .usize); try f.writeCValueMember(writer, local, .{ .identifier = "len" }); try a.assign(f, writer); - try f.writeCValue(writer, len, .Initializer); + try f.writeCValue(writer, len, .Other); try a.end(f, writer); } return local; @@ -4933,7 +4933,7 @@ fn airSwitchDispatch(f: *Function, inst: Air.Inst.Index) !void { const cond_local = f.loop_switch_conds.get(br.block_inst).?; try f.writeCValue(writer, .{ .local = cond_local }, .Other); try writer.writeAll(" = "); - try f.writeCValue(writer, cond, .Initializer); + try f.writeCValue(writer, cond, .Other); try writer.writeAll(";\n"); try writer.print("goto zig_switch_{d}_loop;", .{@intFromEnum(br.block_inst)}); } @@ -4978,14 +4978,8 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !CVal const operand_lval = if (operand == .constant) blk: { const operand_local = try f.allocLocal(null, operand_ty); try f.writeCValue(writer, operand_local, .Other); - if (operand_ty.isAbiInt(zcu)) { - try writer.writeAll(" = "); - } else { - try writer.writeAll(" = ("); - try f.renderType(writer, operand_ty); - try writer.writeByte(')'); - } - try f.writeCValue(writer, operand, .Initializer); + try writer.writeAll(" = "); + try f.writeCValue(writer, 
operand, .Other); try writer.writeAll(";\n"); break :blk operand_local; } else operand; @@ -5697,7 +5691,7 @@ fn airOptionalPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { const a = try Assignment.start(f, writer, opt_ctype); try f.writeCValueDeref(writer, operand); try a.assign(f, writer); - try f.object.dg.renderValue(writer, Value.false, .Initializer); + try f.object.dg.renderValue(writer, Value.false, .Other); try a.end(f, writer); return .none; }, @@ -5717,7 +5711,7 @@ fn airOptionalPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { const a = try Assignment.start(f, writer, opt_ctype); try f.writeCValueDerefMember(writer, operand, .{ .identifier = "is_null" }); try a.assign(f, writer); - try f.object.dg.renderValue(writer, Value.false, .Initializer); + try f.object.dg.renderValue(writer, Value.false, .Other); try a.end(f, writer); } if (f.liveness.isUnused(inst)) return .none; @@ -5843,7 +5837,7 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeByte(')'); switch (fieldLocation(container_ptr_ty, field_ptr_ty, extra.field_index, pt)) { - .begin => try f.writeCValue(writer, field_ptr_val, .Initializer), + .begin => try f.writeCValue(writer, field_ptr_val, .Other), .field => |field| { const u8_ptr_ty = try pt.adjustPtrTypeChild(field_ptr_ty, .u8); @@ -5897,7 +5891,7 @@ fn fieldPtr( try writer.writeByte(')'); switch (fieldLocation(container_ptr_ty, field_ptr_ty, field_index, pt)) { - .begin => try f.writeCValue(writer, container_ptr_val, .Initializer), + .begin => try f.writeCValue(writer, container_ptr_val, .Other), .field => |field| { try writer.writeByte('&'); try f.writeCValueDerefMember(writer, container_ptr_val, field); @@ -6020,7 +6014,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { const operand_local = try f.allocLocal(inst, struct_ty); try f.writeCValue(writer, operand_local, .Other); try writer.writeAll(" = "); - try f.writeCValue(writer, struct_byval, .Initializer); + try 
f.writeCValue(writer, struct_byval, .Other); try writer.writeAll(";\n"); break :blk operand_local; } else struct_byval; @@ -6118,7 +6112,7 @@ fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValu try writer.writeAll(" = ("); try f.renderType(writer, inst_ty); try writer.writeByte(')'); - try f.writeCValue(writer, operand, .Initializer); + try f.writeCValue(writer, operand, .Other); try writer.writeAll(";\n"); return local; } @@ -6163,7 +6157,7 @@ fn airWrapOptional(f: *Function, inst: Air.Inst.Index) !CValue { const a = try Assignment.start(f, writer, operand_ctype); try f.writeCValueMember(writer, local, .{ .identifier = "payload" }); try a.assign(f, writer); - try f.writeCValue(writer, operand, .Initializer); + try f.writeCValue(writer, operand, .Other); try a.end(f, writer); } return local; @@ -6364,7 +6358,7 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue { try f.writeCValueMember(writer, local, .{ .identifier = "ptr" }); try a.assign(f, writer); if (operand == .undef) { - try f.writeCValue(writer, .{ .undef = inst_ty.slicePtrFieldType(zcu) }, .Initializer); + try f.writeCValue(writer, .{ .undef = inst_ty.slicePtrFieldType(zcu) }, .Other); } else { const ptr_ctype = try f.ctypeFromType(ptr_ty, .complete); const ptr_child_ctype = ptr_ctype.info(ctype_pool).pointer.elem_ctype; @@ -6381,7 +6375,7 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeByte('&'); try f.writeCValueDeref(writer, operand); try writer.print("[{}]", .{try f.fmtIntLiteral(try pt.intValue(.usize, 0))}); - } else try f.writeCValue(writer, operand, .Initializer); + } else try f.writeCValue(writer, operand, .Other); } try a.end(f, writer); } @@ -6911,7 +6905,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { try writer.writeAll("for ("); try f.writeCValue(writer, index, .Other); try writer.writeAll(" = "); - try f.object.dg.renderValue(writer, try pt.intValue(.usize, 0), .Initializer); + try 
f.object.dg.renderValue(writer, try pt.intValue(.usize, 0), .Other); try writer.writeAll("; "); try f.writeCValue(writer, index, .Other); try writer.writeAll(" != "); @@ -7281,7 +7275,7 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { .float => try pt.floatValue(scalar_ty, std.math.nan(f128)), else => unreachable, }, - }, .Initializer); + }, .Other); try writer.writeAll(";\n"); const v = try Vectorize.start(f, inst, writer, operand_ty); @@ -8175,7 +8169,7 @@ fn formatIntLiteral( try writer.writeAll(string); } else { try data.ctype.renderLiteralPrefix(writer, data.kind, ctype_pool); - wrap.convertToTwosComplement(int, data.int_info.signedness, c_bits); + wrap.truncate(int, .unsigned, c_bits); @memset(wrap.limbs[wrap.len..], 0); wrap.len = wrap.limbs.len; const limbs_per_c_limb = @divExact(wrap.len, c_limb_info.count); @@ -8207,7 +8201,6 @@ fn formatIntLiteral( c_limb_int_info.signedness = .signed; c_limb_ctype = c_limb_info.ctype.toSigned(); - c_limb_mut.positive = wrap.positive; c_limb_mut.truncate( c_limb_mut.toConst(), .signed, diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index e82d75311e..91c955b4bd 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -265,7 +265,12 @@ pub fn targetTriple(allocator: Allocator, target: std.Target) ![]const u8 { .eabihf => "eabihf", .android => "android", .androideabi => "androideabi", - .musl => "musl", + .musl => switch (target.os.tag) { + // For WASI/Emscripten, "musl" refers to the libc, not really the ABI. + // "unknown" provides better compatibility with LLVM-based tooling for these targets. + .wasi, .emscripten => "unknown", + else => "musl", + }, .muslabin32 => "musl", // Should be muslabin32 in LLVM 20. .muslabi64 => "musl", // Should be muslabi64 in LLVM 20. .musleabi => "musleabi", @@ -771,6 +776,30 @@ const DataLayoutBuilder = struct { } }; +// Avoid depending on `llvm.CodeModel` in the bitcode-only case. 
+const CodeModel = enum { + default, + tiny, + small, + kernel, + medium, + large, +}; + +fn codeModel(model: std.builtin.CodeModel, target: std.Target) CodeModel { + // Roughly match Clang's mapping of GCC code models to LLVM code models. + return switch (model) { + .default => .default, + .extreme, .large => .large, + .kernel => .kernel, + .medany => if (target.cpu.arch.isRISCV()) .medium else .large, + .medium => if (target.os.tag == .aix) .large else .medium, + .medmid => .medium, + .normal, .medlow, .small => .small, + .tiny => .tiny, + }; +} + pub const Object = struct { gpa: Allocator, builder: Builder, @@ -1135,14 +1164,17 @@ pub const Object = struct { module_flags.appendAssumeCapacity(try o.builder.metadataModuleFlag( behavior_error, try o.builder.metadataString("Code Model"), - try o.builder.metadataConstant(try o.builder.intConst(.i32, @as(i32, switch (comp.root_mod.code_model) { - .tiny => 0, - .small => 1, - .kernel => 2, - .medium => 3, - .large => 4, - else => unreachable, - }))), + try o.builder.metadataConstant(try o.builder.intConst(.i32, @as( + i32, + switch (codeModel(comp.root_mod.code_model, comp.root_mod.resolved_target.result)) { + .default => unreachable, + .tiny => 0, + .small => 1, + .kernel => 2, + .medium => 3, + .large => 4, + }, + ))), )); } @@ -1294,7 +1326,7 @@ pub const Object = struct { else .Static; - const code_model: llvm.CodeModel = switch (comp.root_mod.code_model) { + const code_model: llvm.CodeModel = switch (codeModel(comp.root_mod.code_model, comp.root_mod.resolved_target.result)) { .default => .Default, .tiny => .Tiny, .small => .Small, @@ -1440,8 +1472,10 @@ pub const Object = struct { _ = try attributes.removeFnAttr(.sanitize_thread); } const is_naked = fn_info.cc == .naked; - if (owner_mod.fuzz and !func_analysis.disable_instrumentation and !is_naked) { - try attributes.addFnAttr(.optforfuzzing, &o.builder); + if (!func_analysis.disable_instrumentation and !is_naked) { + if (owner_mod.fuzz) { + try 
attributes.addFnAttr(.optforfuzzing, &o.builder); + } _ = try attributes.removeFnAttr(.skipprofile); _ = try attributes.removeFnAttr(.nosanitize_coverage); } else { @@ -1735,7 +1769,12 @@ pub const Object = struct { try o.used.append(gpa, counters_variable.toConst(&o.builder)); counters_variable.setLinkage(.private, &o.builder); counters_variable.setAlignment(comptime Builder.Alignment.fromByteUnits(1), &o.builder); - counters_variable.setSection(try o.builder.string("__sancov_cntrs"), &o.builder); + + if (target.ofmt == .macho) { + counters_variable.setSection(try o.builder.string("__DATA,__sancov_cntrs"), &o.builder); + } else { + counters_variable.setSection(try o.builder.string("__sancov_cntrs"), &o.builder); + } break :f .{ .counters_variable = counters_variable, @@ -1797,7 +1836,11 @@ pub const Object = struct { pcs_variable.setLinkage(.private, &o.builder); pcs_variable.setMutability(.constant, &o.builder); pcs_variable.setAlignment(Type.usize.abiAlignment(zcu).toLlvm(), &o.builder); - pcs_variable.setSection(try o.builder.string("__sancov_pcs1"), &o.builder); + if (target.ofmt == .macho) { + pcs_variable.setSection(try o.builder.string("__DATA,__sancov_pcs1"), &o.builder); + } else { + pcs_variable.setSection(try o.builder.string("__sancov_pcs1"), &o.builder); + } try pcs_variable.setInitializer(init_val, &o.builder); } @@ -12051,7 +12094,7 @@ fn firstParamSRet(fn_info: InternPool.Key.FuncType, zcu: *Zcu, target: std.Targe .x86_64_win => x86_64_abi.classifyWindows(return_type, zcu) == .memory, .x86_sysv, .x86_win => isByRef(return_type, zcu), .x86_stdcall => !isScalar(zcu, return_type), - .wasm_mvp => wasm_c_abi.classifyType(return_type, zcu)[0] == .indirect, + .wasm_mvp => wasm_c_abi.classifyType(return_type, zcu) == .indirect, .aarch64_aapcs, .aarch64_aapcs_darwin, .aarch64_aapcs_win, @@ -12136,18 +12179,9 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Bu return o.builder.structType(.normal, types[0..types_len]); }, }, - 
.wasm_mvp => { - if (isScalar(zcu, return_type)) { - return o.lowerType(return_type); - } - const classes = wasm_c_abi.classifyType(return_type, zcu); - if (classes[0] == .indirect or classes[0] == .none) { - return .void; - } - - assert(classes[0] == .direct and classes[1] == .none); - const scalar_type = wasm_c_abi.scalarType(return_type, zcu); - return o.builder.intType(@intCast(scalar_type.abiSize(zcu) * 8)); + .wasm_mvp => switch (wasm_c_abi.classifyType(return_type, zcu)) { + .direct => |scalar_ty| return o.lowerType(scalar_ty), + .indirect => return .void, }, // TODO investigate other callconvs else => return o.lowerType(return_type), @@ -12401,17 +12435,28 @@ const ParamTypeIterator = struct { }, } }, - .wasm_mvp => { - it.zig_index += 1; - it.llvm_index += 1; - if (isScalar(zcu, ty)) { - return .byval; - } - const classes = wasm_c_abi.classifyType(ty, zcu); - if (classes[0] == .indirect) { + .wasm_mvp => switch (wasm_c_abi.classifyType(ty, zcu)) { + .direct => |scalar_ty| { + if (isScalar(zcu, ty)) { + it.zig_index += 1; + it.llvm_index += 1; + return .byval; + } else { + var types_buffer: [8]Builder.Type = undefined; + types_buffer[0] = try it.object.lowerType(scalar_ty); + it.types_buffer = types_buffer; + it.types_len = 1; + it.llvm_index += 1; + it.zig_index += 1; + return .multiple_llvm_types; + } + }, + .indirect => { + it.zig_index += 1; + it.llvm_index += 1; + it.byval_attr = true; return .byref; - } - return .abi_sized_int; + }, }, // TODO investigate other callconvs else => { diff --git a/src/glibc.zig b/src/glibc.zig index 9079f8d617..363d11bbfd 100644 --- a/src/glibc.zig +++ b/src/glibc.zig @@ -864,8 +864,8 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye // Example: // .balign 4 // .globl _Exit_2_2_5 - // .type _Exit_2_2_5, %function; - // .symver _Exit_2_2_5, _Exit@@GLIBC_2.2.5 + // .type _Exit_2_2_5, %function + // .symver _Exit_2_2_5, _Exit@@GLIBC_2.2.5, remove // _Exit_2_2_5: .long 0 const ver_index = 
versions_buffer[ver_buf_i]; const ver = metadata.all_versions[ver_index]; @@ -876,19 +876,16 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye const want_default = chosen_def_ver_index != 255 and ver_index == chosen_def_ver_index; const at_sign_str: []const u8 = if (want_default) "@@" else "@"; if (ver.patch == 0) { - const sym_plus_ver = if (want_default) - sym_name - else - try std.fmt.allocPrint( - arena, - "{s}_GLIBC_{d}_{d}", - .{ sym_name, ver.major, ver.minor }, - ); + const sym_plus_ver = try std.fmt.allocPrint( + arena, + "{s}_{d}_{d}", + .{ sym_name, ver.major, ver.minor }, + ); try stubs_asm.writer().print( \\.balign {d} \\.globl {s} - \\.type {s}, %function; - \\.symver {s}, {s}{s}GLIBC_{d}.{d} + \\.type {s}, %function + \\.symver {s}, {s}{s}GLIBC_{d}.{d}, remove \\{s}: {s} 0 \\ , .{ @@ -904,19 +901,16 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye wordDirective(target), }); } else { - const sym_plus_ver = if (want_default) - sym_name - else - try std.fmt.allocPrint( - arena, - "{s}_GLIBC_{d}_{d}_{d}", - .{ sym_name, ver.major, ver.minor, ver.patch }, - ); + const sym_plus_ver = try std.fmt.allocPrint( + arena, + "{s}_{d}_{d}_{d}", + .{ sym_name, ver.major, ver.minor, ver.patch }, + ); try stubs_asm.writer().print( \\.balign {d} \\.globl {s} - \\.type {s}, %function; - \\.symver {s}, {s}{s}GLIBC_{d}.{d}.{d} + \\.type {s}, %function + \\.symver {s}, {s}{s}GLIBC_{d}.{d}.{d}, remove \\{s}: {s} 0 \\ , .{ @@ -1041,9 +1035,9 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye // Example: // .balign 4 // .globl environ_2_2_5 - // .type environ_2_2_5, %object; - // .size environ_2_2_5, 4; - // .symver environ_2_2_5, environ@@GLIBC_2.2.5 + // .type environ_2_2_5, %object + // .size environ_2_2_5, 4 + // .symver environ_2_2_5, environ@@GLIBC_2.2.5, remove // environ_2_2_5: .fill 4, 1, 0 const ver_index = versions_buffer[ver_buf_i]; const ver = 
metadata.all_versions[ver_index]; @@ -1054,20 +1048,17 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye const want_default = chosen_def_ver_index != 255 and ver_index == chosen_def_ver_index; const at_sign_str: []const u8 = if (want_default) "@@" else "@"; if (ver.patch == 0) { - const sym_plus_ver = if (want_default) - sym_name - else - try std.fmt.allocPrint( - arena, - "{s}_GLIBC_{d}_{d}", - .{ sym_name, ver.major, ver.minor }, - ); + const sym_plus_ver = try std.fmt.allocPrint( + arena, + "{s}_{d}_{d}", + .{ sym_name, ver.major, ver.minor }, + ); try stubs_asm.writer().print( \\.balign {d} \\.globl {s} - \\.type {s}, %object; - \\.size {s}, {d}; - \\.symver {s}, {s}{s}GLIBC_{d}.{d} + \\.type {s}, %object + \\.size {s}, {d} + \\.symver {s}, {s}{s}GLIBC_{d}.{d}, remove \\{s}: .fill {d}, 1, 0 \\ , .{ @@ -1085,20 +1076,17 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye size, }); } else { - const sym_plus_ver = if (want_default) - sym_name - else - try std.fmt.allocPrint( - arena, - "{s}_GLIBC_{d}_{d}_{d}", - .{ sym_name, ver.major, ver.minor, ver.patch }, - ); + const sym_plus_ver = try std.fmt.allocPrint( + arena, + "{s}_{d}_{d}_{d}", + .{ sym_name, ver.major, ver.minor, ver.patch }, + ); try stubs_asm.writer().print( \\.balign {d} \\.globl {s} - \\.type {s}, %object; - \\.size {s}, {d}; - \\.symver {s}, {s}{s}GLIBC_{d}.{d}.{d} + \\.type {s}, %object + \\.size {s}, {d} + \\.symver {s}, {s}{s}GLIBC_{d}.{d}.{d}, remove \\{s}: .fill {d}, 1, 0 \\ , .{ diff --git a/src/libcxx.zig b/src/libcxx.zig index 5668e39166..84b6183bcd 100644 --- a/src/libcxx.zig +++ b/src/libcxx.zig @@ -8,13 +8,6 @@ const build_options = @import("build_options"); const trace = @import("tracy.zig").trace; const Module = @import("Package/Module.zig"); -pub const AbiVersion = enum(u2) { - @"1" = 1, - @"2" = 2, - - pub const default: AbiVersion = .@"1"; -}; - const libcxxabi_files = [_][]const u8{ "src/abort_message.cpp", 
"src/cxa_aux_runtime.cpp", @@ -145,11 +138,12 @@ pub fn buildLibCxx(comp: *Compilation, prog_node: std.Progress.Node) BuildError! const cxxabi_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxxabi", "include" }); const cxx_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxx", "include" }); const cxx_src_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxx", "src" }); + const abi_version: u2 = if (target.os.tag == .emscripten) 2 else 1; const abi_version_arg = try std.fmt.allocPrint(arena, "-D_LIBCPP_ABI_VERSION={d}", .{ - @intFromEnum(comp.libcxx_abi_version), + abi_version, }); const abi_namespace_arg = try std.fmt.allocPrint(arena, "-D_LIBCPP_ABI_NAMESPACE=__{d}", .{ - @intFromEnum(comp.libcxx_abi_version), + abi_version, }); const optimize_mode = comp.compilerRtOptMode(); @@ -389,11 +383,12 @@ pub fn buildLibCxxAbi(comp: *Compilation, prog_node: std.Progress.Node) BuildErr const cxxabi_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxxabi", "include" }); const cxx_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxx", "include" }); const cxx_src_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxx", "src" }); + const abi_version: u2 = if (target.os.tag == .emscripten) 2 else 1; const abi_version_arg = try std.fmt.allocPrint(arena, "-D_LIBCPP_ABI_VERSION={d}", .{ - @intFromEnum(comp.libcxx_abi_version), + abi_version, }); const abi_namespace_arg = try std.fmt.allocPrint(arena, "-D_LIBCPP_ABI_NAMESPACE=__{d}", .{ - @intFromEnum(comp.libcxx_abi_version), + abi_version, }); const optimize_mode = comp.compilerRtOptMode(); diff --git a/src/libunwind.zig b/src/libunwind.zig index 937501933f..83b7809284 100644 --- a/src/libunwind.zig +++ b/src/libunwind.zig @@ -119,7 +119,7 @@ pub fn buildStaticLib(comp: *Compilation, prog_node: std.Progress.Node) BuildErr } try cflags.append("-I"); try cflags.append(try 
comp.zig_lib_directory.join(arena, &[_][]const u8{ "libunwind", "include" })); - try cflags.append("-D_LIBUNWIND_DISABLE_VISIBILITY_ANNOTATIONS"); + try cflags.append("-D_LIBUNWIND_HIDE_SYMBOLS"); try cflags.append("-Wa,--noexecstack"); try cflags.append("-fvisibility=hidden"); try cflags.append("-fvisibility-inlines-hidden"); diff --git a/src/link.zig b/src/link.zig index f2387c393f..f436106aab 100644 --- a/src/link.zig +++ b/src/link.zig @@ -26,6 +26,7 @@ const Package = @import("Package.zig"); const dev = @import("dev.zig"); const ThreadSafeQueue = @import("ThreadSafeQueue.zig").ThreadSafeQueue; const target_util = @import("target.zig"); +const codegen = @import("codegen.zig"); pub const LdScript = @import("link/LdScript.zig"); @@ -683,13 +684,7 @@ pub const File = struct { /// Note that `LinkFailure` is not a member of this error set because the error message /// must be attached to `Zcu.failed_codegen` rather than `Compilation.link_diags`. - pub const UpdateNavError = error{ - Overflow, - OutOfMemory, - /// Indicates the error is already reported and stored in - /// `failed_codegen` on the Zcu. - CodegenFail, - }; + pub const UpdateNavError = codegen.CodeGenError; /// Called from within CodeGen to retrieve the symbol index of a global symbol. 
/// If no symbol exists yet with this name, a new undefined global symbol will @@ -920,7 +915,7 @@ pub const File = struct { decl_val: InternPool.Index, decl_align: InternPool.Alignment, src_loc: Zcu.LazySrcLoc, - ) !@import("codegen.zig").GenResult { + ) !codegen.GenResult { switch (base.tag) { .c => unreachable, .spirv => unreachable, @@ -1457,8 +1452,10 @@ pub const Task = union(enum) { pub fn doTask(comp: *Compilation, tid: usize, task: Task) void { const diags = &comp.link_diags; switch (task) { - .load_explicitly_provided => if (comp.bin_file) |base| { + .load_explicitly_provided => { comp.remaining_prelink_tasks -= 1; + const base = comp.bin_file orelse return; + const prog_node = comp.work_queue_progress_node.start("Parse Linker Inputs", comp.link_inputs.len); defer prog_node.end(); for (comp.link_inputs) |input| { @@ -1475,8 +1472,10 @@ pub fn doTask(comp: *Compilation, tid: usize, task: Task) void { prog_node.completeOne(); } }, - .load_host_libc => if (comp.bin_file) |base| { + .load_host_libc => { comp.remaining_prelink_tasks -= 1; + const base = comp.bin_file orelse return; + const prog_node = comp.work_queue_progress_node.start("Linker Parse Host libc", 0); defer prog_node.end(); @@ -1535,8 +1534,9 @@ pub fn doTask(comp: *Compilation, tid: usize, task: Task) void { } } }, - .load_object => |path| if (comp.bin_file) |base| { + .load_object => |path| { comp.remaining_prelink_tasks -= 1; + const base = comp.bin_file orelse return; const prog_node = comp.work_queue_progress_node.start("Linker Parse Object", 0); defer prog_node.end(); base.openLoadObject(path) catch |err| switch (err) { @@ -1544,8 +1544,9 @@ pub fn doTask(comp: *Compilation, tid: usize, task: Task) void { else => |e| diags.addParseError(path, "failed to parse object: {s}", .{@errorName(e)}), }; }, - .load_archive => |path| if (comp.bin_file) |base| { + .load_archive => |path| { comp.remaining_prelink_tasks -= 1; + const base = comp.bin_file orelse return; const prog_node = 
comp.work_queue_progress_node.start("Linker Parse Archive", 0); defer prog_node.end(); base.openLoadArchive(path, null) catch |err| switch (err) { @@ -1553,8 +1554,9 @@ pub fn doTask(comp: *Compilation, tid: usize, task: Task) void { else => |e| diags.addParseError(path, "failed to parse archive: {s}", .{@errorName(e)}), }; }, - .load_dso => |path| if (comp.bin_file) |base| { + .load_dso => |path| { comp.remaining_prelink_tasks -= 1; + const base = comp.bin_file orelse return; const prog_node = comp.work_queue_progress_node.start("Linker Parse Shared Library", 0); defer prog_node.end(); base.openLoadDso(path, .{ @@ -1565,8 +1567,9 @@ pub fn doTask(comp: *Compilation, tid: usize, task: Task) void { else => |e| diags.addParseError(path, "failed to parse shared library: {s}", .{@errorName(e)}), }; }, - .load_input => |input| if (comp.bin_file) |base| { + .load_input => |input| { comp.remaining_prelink_tasks -= 1; + const base = comp.bin_file orelse return; const prog_node = comp.work_queue_progress_node.start("Linker Parse Input", 0); defer prog_node.end(); base.loadInput(input) catch |err| switch (err) { diff --git a/src/link/Coff.zig b/src/link/Coff.zig index 9307c23669..dc899e2b56 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -1134,7 +1134,7 @@ pub fn updateFunc( ) catch |err| switch (err) { error.CodegenFail => return error.CodegenFail, error.OutOfMemory => return error.OutOfMemory, - error.Overflow => |e| { + error.Overflow, error.RelocationNotByteAligned => |e| { try zcu.failed_codegen.putNoClobber(gpa, nav_index, try Zcu.ErrorMsg.create( gpa, zcu.navSrcLoc(nav_index), @@ -1763,6 +1763,7 @@ fn linkWithLLD(coff: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: man.hash.addOptionalBytes(entry_name); man.hash.add(coff.base.stack_size); man.hash.add(coff.image_base); + man.hash.add(coff.base.build_id); { // TODO remove this, libraries must instead be resolved by the frontend. 
for (coff.lib_directories) |lib_directory| man.hash.addOptionalBytes(lib_directory.path); @@ -1895,6 +1896,12 @@ fn linkWithLLD(coff: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: } try argv.append(try allocPrint(arena, "-BASE:{d}", .{coff.image_base})); + switch (coff.base.build_id) { + .none => try argv.append("-BUILD-ID:NO"), + .fast => try argv.append("-BUILD-ID"), + .uuid, .sha1, .md5, .hexstring => {}, + } + if (target.cpu.arch == .x86) { try argv.append("-MACHINE:X86"); } else if (target.cpu.arch == .x86_64) { @@ -2104,7 +2111,7 @@ fn linkWithLLD(coff: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: try argv.append(try comp.crtFileAsString(arena, "crt2.obj")); } - try argv.append(try comp.crtFileAsString(arena, "mingw32.lib")); + try argv.append(try comp.crtFileAsString(arena, "libmingw32.lib")); } else { const lib_str = switch (comp.config.link_mode) { .dynamic => "", @@ -3497,9 +3504,9 @@ pub const Relocation = struct { const target_page = @as(i32, @intCast(ctx.target_vaddr >> 12)); const pages = @as(u21, @bitCast(@as(i21, @intCast(target_page - source_page)))); var inst = aarch64_util.Instruction{ - .pc_relative_address = mem.bytesToValue(std.meta.TagPayload( + .pc_relative_address = mem.bytesToValue(@FieldType( aarch64_util.Instruction, - aarch64_util.Instruction.pc_relative_address, + @tagName(aarch64_util.Instruction.pc_relative_address), ), buffer[0..4]), }; inst.pc_relative_address.immhi = @as(u19, @truncate(pages >> 2)); @@ -3512,18 +3519,18 @@ pub const Relocation = struct { const narrowed = @as(u12, @truncate(@as(u64, @intCast(ctx.target_vaddr)))); if (isArithmeticOp(buffer[0..4])) { var inst = aarch64_util.Instruction{ - .add_subtract_immediate = mem.bytesToValue(std.meta.TagPayload( + .add_subtract_immediate = mem.bytesToValue(@FieldType( aarch64_util.Instruction, - aarch64_util.Instruction.add_subtract_immediate, + @tagName(aarch64_util.Instruction.add_subtract_immediate), ), buffer[0..4]), }; 
inst.add_subtract_immediate.imm12 = narrowed; mem.writeInt(u32, buffer[0..4], inst.toU32(), .little); } else { var inst = aarch64_util.Instruction{ - .load_store_register = mem.bytesToValue(std.meta.TagPayload( + .load_store_register = mem.bytesToValue(@FieldType( aarch64_util.Instruction, - aarch64_util.Instruction.load_store_register, + @tagName(aarch64_util.Instruction.load_store_register), ), buffer[0..4]), }; const offset: u12 = blk: { diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 1b3dcfe8b5..63ef2bda01 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -23,12 +23,11 @@ debug_str: StringSection, pub const UpdateError = error{ ReinterpretDeclRef, Unimplemented, - OutOfMemory, EndOfStream, - Overflow, Underflow, UnexpectedEndOfFile, } || + codegen.GenerateSymbolError || std.fs.File.OpenError || std.fs.File.SetEndPosError || std.fs.File.CopyRangeError || @@ -1439,7 +1438,7 @@ pub const WipNav = struct { debug_info: std.ArrayListUnmanaged(u8), debug_line: std.ArrayListUnmanaged(u8), debug_loclists: std.ArrayListUnmanaged(u8), - pending_lazy: std.ArrayListUnmanaged(InternPool.Index), + pending_lazy: PendingLazy, pub fn deinit(wip_nav: *WipNav) void { const gpa = wip_nav.dwarf.gpa; @@ -1448,7 +1447,8 @@ pub const WipNav = struct { wip_nav.debug_info.deinit(gpa); wip_nav.debug_line.deinit(gpa); wip_nav.debug_loclists.deinit(gpa); - wip_nav.pending_lazy.deinit(gpa); + wip_nav.pending_lazy.types.deinit(gpa); + wip_nav.pending_lazy.values.deinit(gpa); } pub fn genDebugFrame(wip_nav: *WipNav, loc: u32, cfa: Cfa) UpdateError!void { @@ -1835,7 +1835,7 @@ pub const WipNav = struct { if (gop.found_existing) return .{ unit, gop.value_ptr.* }; const entry = try wip_nav.dwarf.addCommonEntry(unit); gop.value_ptr.* = entry; - if (maybe_inst_index == null) try wip_nav.pending_lazy.append(wip_nav.dwarf.gpa, ty.toIntern()); + if (maybe_inst_index == null) try wip_nav.pending_lazy.types.append(wip_nav.dwarf.gpa, ty.toIntern()); return .{ unit, entry }; } @@ 
-1849,14 +1849,16 @@ pub const WipNav = struct { const ip = &zcu.intern_pool; const ty = value.typeOf(zcu); if (std.debug.runtime_safety) assert(ty.comptimeOnly(zcu) and try ty.onePossibleValue(wip_nav.pt) == null); - if (ty.toIntern() == .type_type) return wip_nav.getTypeEntry(value.toType()); - if (ip.isFunctionType(ty.toIntern()) and !value.isUndef(zcu)) return wip_nav.getNavEntry(zcu.funcInfo(value.toIntern()).owner_nav); + if (!value.isUndef(zcu)) { + if (ty.toIntern() == .type_type) return wip_nav.getTypeEntry(value.toType()); + if (ip.isFunctionType(ty.toIntern())) return wip_nav.getNavEntry(zcu.funcInfo(value.toIntern()).owner_nav); + } const gop = try wip_nav.dwarf.values.getOrPut(wip_nav.dwarf.gpa, value.toIntern()); const unit: Unit.Index = .main; if (gop.found_existing) return .{ unit, gop.value_ptr.* }; const entry = try wip_nav.dwarf.addCommonEntry(unit); gop.value_ptr.* = entry; - try wip_nav.pending_lazy.append(wip_nav.dwarf.gpa, value.toIntern()); + try wip_nav.pending_lazy.values.append(wip_nav.dwarf.gpa, value.toIntern()); return .{ unit, entry }; } @@ -2008,9 +2010,9 @@ pub const WipNav = struct { .decl_const_runtime_bits, .decl_const_comptime_state, .decl_const_runtime_bits_comptime_state, - .decl_empty_func, + .decl_nullary_func, .decl_func, - .decl_empty_func_generic, + .decl_nullary_func_generic, .decl_func_generic, => false, .generic_decl_var, @@ -2052,12 +2054,20 @@ pub const WipNav = struct { try wip_nav.infoSectionOffset(.debug_info, wip_nav.unit, generic_decl_entry, 0); } + const PendingLazy = struct { + types: std.ArrayListUnmanaged(InternPool.Index), + values: std.ArrayListUnmanaged(InternPool.Index), + + const empty: PendingLazy = .{ .types = .empty, .values = .empty }; + }; + fn updateLazy(wip_nav: *WipNav, src_loc: Zcu.LazySrcLoc) UpdateError!void { - const ip = &wip_nav.pt.zcu.intern_pool; - while (wip_nav.pending_lazy.pop()) |val| switch (ip.typeOf(val)) { - .type_type => try wip_nav.dwarf.updateLazyType(wip_nav.pt, src_loc, val, 
&wip_nav.pending_lazy), - else => try wip_nav.dwarf.updateLazyValue(wip_nav.pt, src_loc, val, &wip_nav.pending_lazy), - }; + while (true) if (wip_nav.pending_lazy.types.pop()) |pending_ty| + try wip_nav.dwarf.updateLazyType(wip_nav.pt, src_loc, pending_ty, &wip_nav.pending_lazy) + else if (wip_nav.pending_lazy.values.pop()) |pending_val| + try wip_nav.dwarf.updateLazyValue(wip_nav.pt, src_loc, pending_val, &wip_nav.pending_lazy) + else + break; } }; @@ -2626,8 +2636,8 @@ pub fn finishWipNavFunc( abbrev_code_buf, try dwarf.refAbbrevCode(switch (abbrev_code) { else => unreachable, - .decl_func => .decl_empty_func, - .decl_instance_func => .decl_instance_empty_func, + .decl_func => .decl_nullary_func, + .decl_instance_func => .decl_instance_nullary_func, }), ); } @@ -3012,29 +3022,34 @@ fn updateComptimeNavInner(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPoo if (nav_gop.found_existing) switch (try dwarf.debug_info.declAbbrevCode(wip_nav.unit, nav_gop.value_ptr.*)) { .null => {}, else => unreachable, - .decl_empty_func, .decl_func, .decl_instance_empty_func, .decl_instance_func => return, - .decl_empty_func_generic, + .decl_nullary_func, .decl_func, .decl_instance_nullary_func, .decl_instance_func => return, + .decl_nullary_func_generic, .decl_func_generic, - .decl_instance_empty_func_generic, + .decl_instance_nullary_func_generic, .decl_instance_func_generic, => dwarf.debug_info.section.getUnit(wip_nav.unit).getEntry(nav_gop.value_ptr.*).clear(), } else nav_gop.value_ptr.* = try dwarf.addCommonEntry(wip_nav.unit); wip_nav.entry = nav_gop.value_ptr.*; const func_type = ip.indexToKey(func.ty).func_type; + const is_nullary = !func_type.is_var_args and for (0..func_type.param_types.len) |param_index| { + if (!func_type.paramIsComptime(std.math.cast(u5, param_index) orelse break false)) break false; + } else true; const diw = wip_nav.debug_info.writer(dwarf.gpa); - try wip_nav.declCommon(if (func_type.param_types.len > 0 or func_type.is_var_args) .{ + try 
wip_nav.declCommon(if (is_nullary) .{ + .decl = .decl_nullary_func_generic, + .generic_decl = .generic_decl_func, + .decl_instance = .decl_instance_nullary_func_generic, + } else .{ .decl = .decl_func_generic, .generic_decl = .generic_decl_func, .decl_instance = .decl_instance_func_generic, - } else .{ - .decl = .decl_empty_func_generic, - .generic_decl = .generic_decl_func, - .decl_instance = .decl_instance_empty_func_generic, }, &nav, inst_info.file, &decl); try wip_nav.refType(.fromInterned(func_type.return_type)); - if (func_type.param_types.len > 0 or func_type.is_var_args) { + if (!is_nullary) { for (0..func_type.param_types.len) |param_index| { + if (std.math.cast(u5, param_index)) |small_param_index| + if (func_type.paramIsComptime(small_param_index)) continue; try wip_nav.abbrevCode(.func_type_param); try wip_nav.refType(.fromInterned(func_type.param_types.get(ip)[param_index])); } @@ -3129,7 +3144,7 @@ fn updateLazyType( pt: Zcu.PerThread, src_loc: Zcu.LazySrcLoc, type_index: InternPool.Index, - pending_lazy: *std.ArrayListUnmanaged(InternPool.Index), + pending_lazy: *WipNav.PendingLazy, ) UpdateError!void { const zcu = pt.zcu; const ip = &zcu.intern_pool; @@ -3568,12 +3583,14 @@ fn updateLazyType( }; try diw.writeByte(@intFromEnum(cc)); try wip_nav.refType(.fromInterned(func_type.return_type)); - for (0..func_type.param_types.len) |param_index| { - try wip_nav.abbrevCode(.func_type_param); - try wip_nav.refType(.fromInterned(func_type.param_types.get(ip)[param_index])); + if (!is_nullary) { + for (0..func_type.param_types.len) |param_index| { + try wip_nav.abbrevCode(.func_type_param); + try wip_nav.refType(.fromInterned(func_type.param_types.get(ip)[param_index])); + } + if (func_type.is_var_args) try wip_nav.abbrevCode(.is_var_args); + try uleb128(diw, @intFromEnum(AbbrevCode.null)); } - if (func_type.is_var_args) try wip_nav.abbrevCode(.is_var_args); - if (!is_nullary) try uleb128(diw, @intFromEnum(AbbrevCode.null)); }, .error_set_type => 
|error_set_type| { try wip_nav.abbrevCode(if (error_set_type.names.len == 0) .generated_empty_enum_type else .generated_enum_type); @@ -3629,7 +3646,7 @@ fn updateLazyValue( pt: Zcu.PerThread, src_loc: Zcu.LazySrcLoc, value_index: InternPool.Index, - pending_lazy: *std.ArrayListUnmanaged(InternPool.Index), + pending_lazy: *WipNav.PendingLazy, ) UpdateError!void { const zcu = pt.zcu; const ip = &zcu.intern_pool; @@ -4787,9 +4804,9 @@ const AbbrevCode = enum { decl_const_runtime_bits, decl_const_comptime_state, decl_const_runtime_bits_comptime_state, - decl_empty_func, + decl_nullary_func, decl_func, - decl_empty_func_generic, + decl_nullary_func_generic, decl_func_generic, generic_decl_var, generic_decl_const, @@ -4806,9 +4823,9 @@ const AbbrevCode = enum { decl_instance_const_runtime_bits, decl_instance_const_comptime_state, decl_instance_const_runtime_bits_comptime_state, - decl_instance_empty_func, + decl_instance_nullary_func, decl_instance_func, - decl_instance_empty_func_generic, + decl_instance_nullary_func_generic, decl_instance_func_generic, // the rest are unrestricted other than empty variants must not be longer // than the non-empty variant, and so should appear first @@ -5019,7 +5036,7 @@ const AbbrevCode = enum { .{ .ZIG_comptime_value, .ref_addr }, }, }, - .decl_empty_func = .{ + .decl_nullary_func = .{ .tag = .subprogram, .attrs = decl_abbrev_common_attrs ++ .{ .{ .linkage_name, .strp }, @@ -5044,7 +5061,7 @@ const AbbrevCode = enum { .{ .noreturn, .flag }, }, }, - .decl_empty_func_generic = .{ + .decl_nullary_func_generic = .{ .tag = .subprogram, .attrs = decl_abbrev_common_attrs ++ .{ .{ .type, .ref_addr }, @@ -5167,7 +5184,7 @@ const AbbrevCode = enum { .{ .ZIG_comptime_value, .ref_addr }, }, }, - .decl_instance_empty_func = .{ + .decl_instance_nullary_func = .{ .tag = .subprogram, .attrs = decl_instance_abbrev_common_attrs ++ .{ .{ .linkage_name, .strp }, @@ -5192,7 +5209,7 @@ const AbbrevCode = enum { .{ .noreturn, .flag }, }, }, - 
.decl_instance_empty_func_generic = .{ + .decl_instance_nullary_func_generic = .{ .tag = .subprogram, .attrs = decl_instance_abbrev_common_attrs ++ .{ .{ .type, .ref_addr }, diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 53f88101b1..591786cfbc 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -1596,8 +1596,8 @@ fn linkWithLLD(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: s man.hash.addListOfBytes(self.rpath_table.keys()); if (output_mode == .Exe) { man.hash.add(self.base.stack_size); - man.hash.add(self.base.build_id); } + man.hash.add(self.base.build_id); man.hash.addListOfBytes(self.symbol_wrap_set.keys()); man.hash.add(comp.skip_linker_dependencies); man.hash.add(self.z_nodelete); @@ -1753,20 +1753,14 @@ fn linkWithLLD(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: s }); } - if (is_exe_or_dyn_lib) { - switch (self.base.build_id) { - .none => {}, - .fast, .uuid, .sha1, .md5 => { - try argv.append(try std.fmt.allocPrint(arena, "--build-id={s}", .{ - @tagName(self.base.build_id), - })); - }, - .hexstring => |hs| { - try argv.append(try std.fmt.allocPrint(arena, "--build-id=0x{s}", .{ - std.fmt.fmtSliceHexLower(hs.toSlice()), - })); - }, - } + switch (self.base.build_id) { + .none => try argv.append("--build-id=none"), + .fast, .uuid, .sha1, .md5 => try argv.append(try std.fmt.allocPrint(arena, "--build-id={s}", .{ + @tagName(self.base.build_id), + })), + .hexstring => |hs| try argv.append(try std.fmt.allocPrint(arena, "--build-id=0x{s}", .{ + std.fmt.fmtSliceHexLower(hs.toSlice()), + })), } try argv.append(try std.fmt.allocPrint(arena, "--image-base={d}", .{self.image_base})); diff --git a/src/link/Elf/Atom.zig b/src/link/Elf/Atom.zig index 33a4663e79..cb145f772c 100644 --- a/src/link/Elf/Atom.zig +++ b/src/link/Elf/Atom.zig @@ -1783,9 +1783,9 @@ const aarch64 = struct { aarch64_util.writeAddImmInst(off, code); } else { const old_inst: Instruction = .{ - .add_subtract_immediate = mem.bytesToValue(std.meta.TagPayload( 
+ .add_subtract_immediate = mem.bytesToValue(@FieldType( Instruction, - Instruction.add_subtract_immediate, + @tagName(Instruction.add_subtract_immediate), ), code), }; const rd: Register = @enumFromInt(old_inst.add_subtract_immediate.rd); @@ -1797,9 +1797,9 @@ const aarch64 = struct { .TLSDESC_CALL => if (!target.flags.has_tlsdesc) { const old_inst: Instruction = .{ - .unconditional_branch_register = mem.bytesToValue(std.meta.TagPayload( + .unconditional_branch_register = mem.bytesToValue(@FieldType( Instruction, - Instruction.unconditional_branch_register, + @tagName(Instruction.unconditional_branch_register), ), code), }; const rn: Register = @enumFromInt(old_inst.unconditional_branch_register.rn); diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig index 5afb33826c..86abc5e7ae 100644 --- a/src/link/Elf/ZigObject.zig +++ b/src/link/Elf/ZigObject.zig @@ -1937,9 +1937,14 @@ pub fn allocateAtom(self: *ZigObject, atom_ptr: *Atom, requires_padding: bool, e const shdr = &slice.items(.shdr)[atom_ptr.output_section_index]; const last_atom_ref = &slice.items(.last_atom)[atom_ptr.output_section_index]; - // This only works if this atom is the only atom in the output section. In - // every other case, we need to redo the prev/next links. 
- if (last_atom_ref.eql(atom_ptr.ref())) last_atom_ref.* = .{}; + if (last_atom_ref.eql(atom_ptr.ref())) { + if (atom_ptr.prevAtom(elf_file)) |prev_atom| { + prev_atom.next_atom_ref = .{}; + last_atom_ref.* = prev_atom.ref(); + } else { + last_atom_ref.* = .{}; + } + } const alloc_res = try elf_file.allocateChunk(.{ .shndx = atom_ptr.output_section_index, diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 2b4bc3e557..834917f81d 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -416,7 +416,7 @@ pub fn flushModule( } if (comp.config.any_fuzz) { - try positionals.append(try link.openObjectInput(diags, comp.fuzzer_lib.?.full_object_path)); + try positionals.append(try link.openArchiveInput(diags, comp.fuzzer_lib.?.full_object_path, false, false)); } if (comp.ubsan_rt_lib) |crt_file| { diff --git a/src/link/MachO/Atom.zig b/src/link/MachO/Atom.zig index ab3b54bf1b..baa9e6172c 100644 --- a/src/link/MachO/Atom.zig +++ b/src/link/MachO/Atom.zig @@ -794,9 +794,9 @@ fn resolveRelocInner( aarch64.writeAddImmInst(@truncate(target), inst_code); } else { var inst = aarch64.Instruction{ - .load_store_register = mem.bytesToValue(std.meta.TagPayload( + .load_store_register = mem.bytesToValue(@FieldType( aarch64.Instruction, - aarch64.Instruction.load_store_register, + @tagName(aarch64.Instruction.load_store_register), ), inst_code), }; inst.load_store_register.offset = switch (inst.load_store_register.size) { @@ -843,9 +843,9 @@ fn resolveRelocInner( const inst_code = code[rel_offset..][0..4]; const reg_info: RegInfo = blk: { if (aarch64.isArithmeticOp(inst_code)) { - const inst = mem.bytesToValue(std.meta.TagPayload( + const inst = mem.bytesToValue(@FieldType( aarch64.Instruction, - aarch64.Instruction.add_subtract_immediate, + @tagName(aarch64.Instruction.add_subtract_immediate), ), inst_code); break :blk .{ .rd = inst.rd, @@ -853,9 +853,9 @@ fn resolveRelocInner( .size = inst.sf, }; } else { - const inst = mem.bytesToValue(std.meta.TagPayload( + const inst = 
mem.bytesToValue(@FieldType( aarch64.Instruction, - aarch64.Instruction.load_store_register, + @tagName(aarch64.Instruction.load_store_register), ), inst_code); break :blk .{ .rd = inst.rt, diff --git a/src/link/NvPtx.zig b/src/link/NvPtx.zig index ab82bb9de8..b035d8af09 100644 --- a/src/link/NvPtx.zig +++ b/src/link/NvPtx.zig @@ -53,6 +53,7 @@ pub fn createEmpty( .tag = .nvptx, .comp = comp, .emit = emit, + .zcu_object_sub_path = emit.sub_path, .gc_sections = options.gc_sections orelse false, .print_gc_sections = options.print_gc_sections, .stack_size = options.stack_size orelse 0, @@ -116,11 +117,7 @@ pub fn flushModule(self: *NvPtx, arena: Allocator, tid: Zcu.PerThread.Id, prog_n if (build_options.skip_non_native) @panic("Attempted to compile for architecture that was disabled by build configuration"); - // The code that was here before mutated the Compilation's file emission mechanism. - // That's not supposed to happen in flushModule, so I deleted the code. - _ = arena; - _ = self; - _ = prog_node; _ = tid; - @panic("TODO: rewrite the NvPtx.flushModule function"); + + try self.base.emitLlvmObject(arena, self.llvm_object, prog_node); } diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index 3643166e51..c09dc17c67 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -1090,7 +1090,9 @@ fn updateLazySymbolAtom( ) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.CodegenFail => return error.LinkFailure, - error.Overflow => return diags.fail("codegen failure: encountered number too big for compiler", .{}), + error.Overflow, + error.RelocationNotByteAligned, + => return diags.fail("unable to codegen: {s}", .{@errorName(err)}), }; const code = code_buffer.items; // duped_code is freed when the atom is freed diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index dda48b09d5..da722b0531 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -4067,6 +4067,17 @@ fn linkWithLLD(wasm: *Wasm, arena: Allocator, tid: 
Zcu.PerThread.Id, prog_node: try std.fmt.allocPrint(arena, "stack-size={d}", .{wasm.base.stack_size}), }); + switch (wasm.base.build_id) { + .none => try argv.append("--build-id=none"), + .fast, .uuid, .sha1 => try argv.append(try std.fmt.allocPrint(arena, "--build-id={s}", .{ + @tagName(wasm.base.build_id), + })), + .hexstring => |hs| try argv.append(try std.fmt.allocPrint(arena, "--build-id=0x{s}", .{ + std.fmt.fmtSliceHexLower(hs.toSlice()), + })), + .md5 => {}, + } + if (wasm.import_symbols) { try argv.append("--allow-undefined"); } @@ -4078,11 +4089,6 @@ fn linkWithLLD(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: try argv.append("--pie"); } - // XXX - TODO: add when wasm-ld supports --build-id. - // if (wasm.base.build_id) { - // try argv.append("--build-id=tree"); - // } - try argv.appendSlice(&.{ "-o", full_out_path }); if (target.cpu.arch == .wasm64) { @@ -4611,10 +4617,13 @@ fn convertZcuFnType( try params_buffer.append(gpa, .i32); // memory address is always a 32-bit handle } else if (return_type.hasRuntimeBitsIgnoreComptime(zcu)) { if (cc == .wasm_mvp) { - const res_classes = abi.classifyType(return_type, zcu); - assert(res_classes[0] == .direct and res_classes[1] == .none); - const scalar_type = abi.scalarType(return_type, zcu); - try returns_buffer.append(gpa, CodeGen.typeToValtype(scalar_type, zcu, target)); + switch (abi.classifyType(return_type, zcu)) { + .direct => |scalar_ty| { + assert(!abi.lowerAsDoubleI64(scalar_ty, zcu)); + try returns_buffer.append(gpa, CodeGen.typeToValtype(scalar_ty, zcu, target)); + }, + .indirect => unreachable, + } } else { try returns_buffer.append(gpa, CodeGen.typeToValtype(return_type, zcu, target)); } @@ -4629,18 +4638,16 @@ fn convertZcuFnType( switch (cc) { .wasm_mvp => { - const param_classes = abi.classifyType(param_type, zcu); - if (param_classes[1] == .none) { - if (param_classes[0] == .direct) { - const scalar_type = abi.scalarType(param_type, zcu); - try params_buffer.append(gpa, 
CodeGen.typeToValtype(scalar_type, zcu, target)); - } else { - try params_buffer.append(gpa, CodeGen.typeToValtype(param_type, zcu, target)); - } - } else { - // i128/f128 - try params_buffer.append(gpa, .i64); - try params_buffer.append(gpa, .i64); + switch (abi.classifyType(param_type, zcu)) { + .direct => |scalar_ty| { + if (!abi.lowerAsDoubleI64(scalar_ty, zcu)) { + try params_buffer.append(gpa, CodeGen.typeToValtype(scalar_ty, zcu, target)); + } else { + try params_buffer.append(gpa, .i64); + try params_buffer.append(gpa, .i64); + } + }, + .indirect => try params_buffer.append(gpa, CodeGen.typeToValtype(param_type, zcu, target)), } }, else => try params_buffer.append(gpa, CodeGen.typeToValtype(param_type, zcu, target)), diff --git a/src/link/aarch64.zig b/src/link/aarch64.zig index 8b79810c8d..d86939a156 100644 --- a/src/link/aarch64.zig +++ b/src/link/aarch64.zig @@ -5,9 +5,9 @@ pub inline fn isArithmeticOp(inst: *const [4]u8) bool { pub fn writeAddImmInst(value: u12, code: *[4]u8) void { var inst = Instruction{ - .add_subtract_immediate = mem.bytesToValue(std.meta.TagPayload( + .add_subtract_immediate = mem.bytesToValue(@FieldType( Instruction, - Instruction.add_subtract_immediate, + @tagName(Instruction.add_subtract_immediate), ), code), }; inst.add_subtract_immediate.imm12 = value; @@ -16,9 +16,9 @@ pub fn writeAddImmInst(value: u12, code: *[4]u8) void { pub fn writeLoadStoreRegInst(value: u12, code: *[4]u8) void { var inst: Instruction = .{ - .load_store_register = mem.bytesToValue(std.meta.TagPayload( + .load_store_register = mem.bytesToValue(@FieldType( Instruction, - Instruction.load_store_register, + @tagName(Instruction.load_store_register), ), code), }; inst.load_store_register.offset = value; @@ -34,9 +34,9 @@ pub fn calcNumberOfPages(saddr: i64, taddr: i64) error{Overflow}!i21 { pub fn writeAdrpInst(pages: u21, code: *[4]u8) void { var inst = Instruction{ - .pc_relative_address = mem.bytesToValue(std.meta.TagPayload( + .pc_relative_address = 
mem.bytesToValue(@FieldType( Instruction, - Instruction.pc_relative_address, + @tagName(Instruction.pc_relative_address), ), code), }; inst.pc_relative_address.immhi = @as(u19, @truncate(pages >> 2)); @@ -46,9 +46,9 @@ pub fn writeAdrpInst(pages: u21, code: *[4]u8) void { pub fn writeBranchImm(disp: i28, code: *[4]u8) void { var inst = Instruction{ - .unconditional_branch_immediate = mem.bytesToValue(std.meta.TagPayload( + .unconditional_branch_immediate = mem.bytesToValue(@FieldType( Instruction, - Instruction.unconditional_branch_immediate, + @tagName(Instruction.unconditional_branch_immediate), ), code), }; inst.unconditional_branch_immediate.imm26 = @as(u26, @truncate(@as(u28, @bitCast(disp >> 2)))); diff --git a/src/link/riscv.zig b/src/link/riscv.zig index bb16cb7d80..31f26b7287 100644 --- a/src/link/riscv.zig +++ b/src/link/riscv.zig @@ -50,27 +50,27 @@ pub fn writeAddend( } pub fn writeInstU(code: *[4]u8, value: u32) void { - var data: Instruction = .{ .U = mem.bytesToValue(std.meta.TagPayload(Instruction, .U), code) }; + var data: Instruction = .{ .U = mem.bytesToValue(@FieldType(Instruction, "U"), code) }; const compensated: u32 = @bitCast(@as(i32, @bitCast(value)) + 0x800); data.U.imm12_31 = bitSlice(compensated, 31, 12); mem.writeInt(u32, code, data.toU32(), .little); } pub fn writeInstI(code: *[4]u8, value: u32) void { - var data: Instruction = .{ .I = mem.bytesToValue(std.meta.TagPayload(Instruction, .I), code) }; + var data: Instruction = .{ .I = mem.bytesToValue(@FieldType(Instruction, "I"), code) }; data.I.imm0_11 = bitSlice(value, 11, 0); mem.writeInt(u32, code, data.toU32(), .little); } pub fn writeInstS(code: *[4]u8, value: u32) void { - var data: Instruction = .{ .S = mem.bytesToValue(std.meta.TagPayload(Instruction, .S), code) }; + var data: Instruction = .{ .S = mem.bytesToValue(@FieldType(Instruction, "S"), code) }; data.S.imm0_4 = bitSlice(value, 4, 0); data.S.imm5_11 = bitSlice(value, 11, 5); mem.writeInt(u32, code, data.toU32(), .little); 
} pub fn writeInstJ(code: *[4]u8, value: u32) void { - var data: Instruction = .{ .J = mem.bytesToValue(std.meta.TagPayload(Instruction, .J), code) }; + var data: Instruction = .{ .J = mem.bytesToValue(@FieldType(Instruction, "J"), code) }; data.J.imm1_10 = bitSlice(value, 10, 1); data.J.imm11 = bitSlice(value, 11, 11); data.J.imm12_19 = bitSlice(value, 19, 12); @@ -79,7 +79,7 @@ pub fn writeInstJ(code: *[4]u8, value: u32) void { } pub fn writeInstB(code: *[4]u8, value: u32) void { - var data: Instruction = .{ .B = mem.bytesToValue(std.meta.TagPayload(Instruction, .B), code) }; + var data: Instruction = .{ .B = mem.bytesToValue(@FieldType(Instruction, "B"), code) }; data.B.imm1_4 = bitSlice(value, 4, 1); data.B.imm5_10 = bitSlice(value, 10, 5); data.B.imm11 = bitSlice(value, 11, 11); diff --git a/src/main.zig b/src/main.zig index 1075993846..911e1dc1af 100644 --- a/src/main.zig +++ b/src/main.zig @@ -468,8 +468,6 @@ const usage_build_generic = \\ -fno-dll-export-fns Force-disable marking exported functions as DLL exports \\ -freference-trace[=num] Show num lines of reference trace per compile error \\ -fno-reference-trace Disable reference trace - \\ -fbuiltin Enable implicit builtin knowledge of functions - \\ -fno-builtin Disable implicit builtin knowledge of functions \\ -ffunction-sections Places each function in a separate section \\ -fno-function-sections All functions go into same section \\ -fdata-sections Places each data in a separate section @@ -499,9 +497,18 @@ const usage_build_generic = \\ hex (planned feature) Intel IHEX \\ raw (planned feature) Dump machine code directly \\ -mcpu [cpu] Specify target CPU and feature set - \\ -mcmodel=[default|tiny| Limit range of code and data virtual addresses - \\ small|kernel| - \\ medium|large] + \\ -mcmodel=[model] Limit range of code and data virtual addresses + \\ default + \\ extreme + \\ kernel + \\ large + \\ medany + \\ medium + \\ medlow + \\ medmid + \\ normal + \\ small + \\ tiny \\ -mred-zone 
Force-enable the "red-zone" \\ -mno-red-zone Force-disable the "red-zone" \\ -fomit-frame-pointer Omit the stack frame pointer @@ -520,6 +527,8 @@ const usage_build_generic = \\ -fno-sanitize-thread Disable Thread Sanitizer \\ -ffuzz Enable fuzz testing instrumentation \\ -fno-fuzz Disable fuzz testing instrumentation + \\ -fbuiltin Enable implicit builtin knowledge of functions + \\ -fno-builtin Disable implicit builtin knowledge of functions \\ -funwind-tables Always produce unwind table entries for all functions \\ -fasync-unwind-tables Always produce asynchronous unwind table entries for all functions \\ -fno-unwind-tables Never produce unwind table entries @@ -3065,6 +3074,12 @@ fn buildOutputType( const target = main_mod.resolved_target.result; + if (target.cpu.arch.isNvptx()) { + if (emit_bin != .no and create_module.resolved_options.use_llvm) { + fatal("cannot emit PTX binary with the LLVM backend; only '-femit-asm' is supported", .{}); + } + } + if (target.os.tag == .windows and major_subsystem_version == null and minor_subsystem_version == null) { major_subsystem_version, minor_subsystem_version = switch (target.os.version_range.windows.min) { .nt4 => .{ 4, 0 }, @@ -4043,7 +4058,7 @@ fn createModule( }; } - if (builtin.target.os.tag == .windows and (target.abi == .msvc or target.abi == .itanium) and + if (target.os.tag == .windows and (target.abi == .msvc or target.abi == .itanium) and any_name_queries_remaining) { if (create_module.libc_installation == null) { @@ -4054,11 +4069,10 @@ fn createModule( }) catch |err| { fatal("unable to find native libc installation: {s}", .{@errorName(err)}); }; - - try create_module.lib_directories.ensureUnusedCapacity(arena, 2); - addLibDirectoryWarn(&create_module.lib_directories, create_module.libc_installation.?.msvc_lib_dir.?); - addLibDirectoryWarn(&create_module.lib_directories, create_module.libc_installation.?.kernel32_lib_dir.?); } + try create_module.lib_directories.ensureUnusedCapacity(arena, 2); + 
addLibDirectoryWarn(&create_module.lib_directories, create_module.libc_installation.?.msvc_lib_dir.?); + addLibDirectoryWarn(&create_module.lib_directories, create_module.libc_installation.?.kernel32_lib_dir.?); } // Destructively mutates but does not transfer ownership of `unresolved_link_inputs`. diff --git a/src/mingw.zig b/src/mingw.zig index d24921909c..fbedf638bf 100644 --- a/src/mingw.zig +++ b/src/mingw.zig @@ -14,7 +14,7 @@ const dev = @import("dev.zig"); pub const CrtFile = enum { crt2_o, dllcrt2_o, - mingw32_lib, + libmingw32_lib, }; /// TODO replace anyerror with explicit error set, recording user-friendly errors with @@ -69,7 +69,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre }); }, - .mingw32_lib => { + .libmingw32_lib => { var c_source_files = std.ArrayList(Compilation.CSourceFile).init(arena); { @@ -173,7 +173,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre } } - return comp.build_crt_file("mingw32", .Lib, .@"mingw-w64 mingw32.lib", prog_node, c_source_files.items, .{ + return comp.build_crt_file("libmingw32", .Lib, .@"mingw-w64 libmingw32.lib", prog_node, c_source_files.items, .{ .unwind_tables = unwind_tables, // https://github.com/llvm/llvm-project/issues/43698#issuecomment-2542660611 .allow_lto = false, diff --git a/src/register_manager.zig b/src/register_manager.zig index 5621c8f750..90fe09980a 100644 --- a/src/register_manager.zig +++ b/src/register_manager.zig @@ -14,14 +14,7 @@ const link = @import("link.zig"); const log = std.log.scoped(.register_manager); -pub const AllocationError = error{ - OutOfRegisters, - OutOfMemory, - /// Compiler was asked to operate on a number larger than supported. - Overflow, - /// Indicates the error is already stored in `failed_codegen` on the Zcu. 
- CodegenFail, -}; +pub const AllocationError = @import("codegen.zig").CodeGenError || error{OutOfRegisters}; pub fn RegisterManager( comptime Function: type, diff --git a/src/target.zig b/src/target.zig index 76eec4fa6e..611fbf2301 100644 --- a/src/target.zig +++ b/src/target.zig @@ -474,6 +474,53 @@ pub fn arePointersLogical(target: std.Target, as: AddressSpace) bool { }; } +pub fn isDynamicAMDGCNFeature(target: std.Target, feature: std.Target.Cpu.Feature) bool { + if (target.cpu.arch != .amdgcn) return false; + + const sramecc_only = &[_]*const std.Target.Cpu.Model{ + &std.Target.amdgcn.cpu.gfx1010, + &std.Target.amdgcn.cpu.gfx1011, + &std.Target.amdgcn.cpu.gfx1012, + &std.Target.amdgcn.cpu.gfx1013, + }; + const xnack_or_sramecc = &[_]*const std.Target.Cpu.Model{ + &std.Target.amdgcn.cpu.gfx1030, + &std.Target.amdgcn.cpu.gfx1031, + &std.Target.amdgcn.cpu.gfx1032, + &std.Target.amdgcn.cpu.gfx1033, + &std.Target.amdgcn.cpu.gfx1034, + &std.Target.amdgcn.cpu.gfx1035, + &std.Target.amdgcn.cpu.gfx1036, + &std.Target.amdgcn.cpu.gfx1100, + &std.Target.amdgcn.cpu.gfx1101, + &std.Target.amdgcn.cpu.gfx1102, + &std.Target.amdgcn.cpu.gfx1103, + &std.Target.amdgcn.cpu.gfx1150, + &std.Target.amdgcn.cpu.gfx1151, + &std.Target.amdgcn.cpu.gfx1152, + &std.Target.amdgcn.cpu.gfx1200, + &std.Target.amdgcn.cpu.gfx1201, + }; + const feature_tag: std.Target.amdgcn.Feature = @enumFromInt(feature.index); + + if (feature_tag == .sramecc) { + if (std.mem.indexOfScalar( + *const std.Target.Cpu.Model, + sramecc_only ++ xnack_or_sramecc, + target.cpu.model, + )) |_| return true; + } + if (feature_tag == .xnack) { + if (std.mem.indexOfScalar( + *const std.Target.Cpu.Model, + xnack_or_sramecc, + target.cpu.model, + )) |_| return true; + } + + return false; +} + pub fn llvmMachineAbi(target: std.Target) ?[:0]const u8 { // LLD does not support ELFv1. Rather than having LLVM produce ELFv1 code and then linking it // into a broken ELFv2 binary, just force LLVM to use ELFv2 as well. 
This will break when glibc diff --git a/src/translate_c.zig b/src/translate_c.zig index 19a2fde13c..dda2ee8e2e 100644 --- a/src/translate_c.zig +++ b/src/translate_c.zig @@ -325,7 +325,7 @@ fn declVisitorNamesOnly(c: *Context, decl: *const clang.Decl) Error!void { fn declVisitor(c: *Context, decl: *const clang.Decl) Error!void { switch (decl.getKind()) { .Function => { - return visitFnDecl(c, @as(*const clang.FunctionDecl, @ptrCast(decl))); + return transFnDecl(c, &c.global_scope.base, @as(*const clang.FunctionDecl, @ptrCast(decl))); }, .Typedef => { try transTypeDef(c, &c.global_scope.base, @as(*const clang.TypedefNameDecl, @ptrCast(decl))); @@ -367,7 +367,7 @@ fn transFileScopeAsm(c: *Context, scope: *Scope, file_scope_asm: *const clang.Fi try scope.appendNode(comptime_node); } -fn visitFnDecl(c: *Context, fn_decl: *const clang.FunctionDecl) Error!void { +fn transFnDecl(c: *Context, scope: *Scope, fn_decl: *const clang.FunctionDecl) Error!void { const fn_name = try c.str(@as(*const clang.NamedDecl, @ptrCast(fn_decl)).getName_bytes_begin()); if (c.global_scope.sym_table.contains(fn_name)) return; // Avoid processing this decl twice @@ -375,7 +375,7 @@ fn visitFnDecl(c: *Context, fn_decl: *const clang.FunctionDecl) Error!void { // Skip this declaration if a proper definition exists if (!fn_decl.isThisDeclarationADefinition()) { if (fn_decl.getDefinition()) |def| - return visitFnDecl(c, def); + return transFnDecl(c, scope, def); } const fn_decl_loc = fn_decl.getLocation(); @@ -403,22 +403,26 @@ fn visitFnDecl(c: *Context, fn_decl: *const clang.FunctionDecl) Error!void { switch (fn_type.getTypeClass()) { .Attributed => { - const attr_type = @as(*const clang.AttributedType, @ptrCast(fn_type)); + const attr_type: *const clang.AttributedType = @ptrCast(fn_type); fn_qt = attr_type.getEquivalentType(); }, .Paren => { - const paren_type = @as(*const clang.ParenType, @ptrCast(fn_type)); + const paren_type: *const clang.ParenType = @ptrCast(fn_type); fn_qt = 
paren_type.getInnerType(); }, + .MacroQualified => { + const macroqualified_ty: *const clang.MacroQualifiedType = @ptrCast(fn_type); + fn_qt = macroqualified_ty.getModifiedType(); + }, else => break fn_type, } }; - const fn_ty = @as(*const clang.FunctionType, @ptrCast(fn_type)); + const fn_ty: *const clang.FunctionType = @ptrCast(fn_type); const return_qt = fn_ty.getReturnType(); const proto_node = switch (fn_type.getTypeClass()) { .FunctionProto => blk: { - const fn_proto_type = @as(*const clang.FunctionProtoType, @ptrCast(fn_type)); + const fn_proto_type: *const clang.FunctionProtoType = @ptrCast(fn_type); if (has_body and fn_proto_type.isVariadic()) { decl_ctx.has_body = false; decl_ctx.storage_class = .Extern; @@ -434,7 +438,7 @@ fn visitFnDecl(c: *Context, fn_decl: *const clang.FunctionDecl) Error!void { }; }, .FunctionNoProto => blk: { - const fn_no_proto_type = @as(*const clang.FunctionType, @ptrCast(fn_type)); + const fn_no_proto_type: *const clang.FunctionType = @ptrCast(fn_type); break :blk transFnNoProto(c, fn_no_proto_type, fn_decl_loc, decl_ctx, true) catch |err| switch (err) { error.UnsupportedType => { return failDecl(c, fn_decl_loc, fn_name, "unable to resolve prototype of function", .{}); @@ -446,6 +450,9 @@ fn visitFnDecl(c: *Context, fn_decl: *const clang.FunctionDecl) Error!void { }; if (!decl_ctx.has_body) { + if (scope.id != .root) { + return addLocalExternFnDecl(c, scope, fn_name, Node.initPayload(&proto_node.base)); + } return addTopLevelDecl(c, fn_name, Node.initPayload(&proto_node.base)); } @@ -455,7 +462,7 @@ fn visitFnDecl(c: *Context, fn_decl: *const clang.FunctionDecl) Error!void { block_scope.return_type = return_qt; defer block_scope.deinit(); - const scope = &block_scope.base; + const top_scope = &block_scope.base; var param_id: c_uint = 0; for (proto_node.data.params) |*param| { @@ -487,7 +494,7 @@ fn visitFnDecl(c: *Context, fn_decl: *const clang.FunctionDecl) Error!void { param_id += 1; } - const casted_body = @as(*const 
clang.CompoundStmt, @ptrCast(body_stmt)); + const casted_body: *const clang.CompoundStmt = @ptrCast(body_stmt); transCompoundStmtInline(c, casted_body, &block_scope) catch |err| switch (err) { error.OutOfMemory => |e| return e, error.UnsupportedTranslation, @@ -508,7 +515,7 @@ fn visitFnDecl(c: *Context, fn_decl: *const clang.FunctionDecl) Error!void { break :blk; } - const rhs = transZeroInitExpr(c, scope, fn_decl_loc, return_qt.getTypePtr()) catch |err| switch (err) { + const rhs = transZeroInitExpr(c, top_scope, fn_decl_loc, return_qt.getTypePtr()) catch |err| switch (err) { error.OutOfMemory => |e| return e, error.UnsupportedTranslation, error.UnsupportedType, @@ -1874,7 +1881,7 @@ fn transDeclStmtOne( try transEnumDecl(c, scope, @as(*const clang.EnumDecl, @ptrCast(decl))); }, .Function => { - try visitFnDecl(c, @as(*const clang.FunctionDecl, @ptrCast(decl))); + try transFnDecl(c, scope, @as(*const clang.FunctionDecl, @ptrCast(decl))); }, else => { const decl_name = try c.str(decl.getDeclKindName()); @@ -1903,11 +1910,19 @@ fn transDeclRefExpr( const name = try c.str(@as(*const clang.NamedDecl, @ptrCast(value_decl)).getName_bytes_begin()); const mangled_name = scope.getAlias(name); const decl_is_var = @as(*const clang.Decl, @ptrCast(value_decl)).getKind() == .Var; - const potential_local_extern = if (decl_is_var) ((@as(*const clang.VarDecl, @ptrCast(value_decl)).getStorageClass() == .Extern) and (scope.id != .root)) else false; + const storage_class = @as(*const clang.VarDecl, @ptrCast(value_decl)).getStorageClass(); + const potential_local_extern = if (decl_is_var) ((storage_class == .Extern) and (scope.id != .root)) else false; var confirmed_local_extern = false; + var confirmed_local_extern_fn = false; var ref_expr = val: { if (cIsFunctionDeclRef(@as(*const clang.Expr, @ptrCast(expr)))) { + if (scope.id != .root) { + if (scope.getLocalExternAlias(name)) |v| { + confirmed_local_extern_fn = true; + break :val try Tag.identifier.create(c.arena, v); + } + } 
break :val try Tag.fn_identifier.create(c.arena, mangled_name); } else if (potential_local_extern) { if (scope.getLocalExternAlias(name)) |v| { @@ -1934,6 +1949,11 @@ fn transDeclRefExpr( .field_name = name, // by necessity, name will always == mangled_name }); } + } else if (confirmed_local_extern_fn) { + ref_expr = try Tag.field_access.create(c.arena, .{ + .lhs = ref_expr, + .field_name = name, // by necessity, name will always == mangled_name + }); } scope.skipVariableDiscard(mangled_name); return ref_expr; @@ -1964,7 +1984,14 @@ fn transImplicitCastExpr( return maybeSuppressResult(c, result_used, sub_expr_node); } - const addr = try Tag.address_of.create(c.arena, sub_expr_node); + const index_val = try Tag.integer_literal.create(c.arena, "0"); + const index = try Tag.as.create(c.arena, .{ + .lhs = try Tag.type.create(c.arena, "usize"), + .rhs = try Tag.int_cast.create(c.arena, index_val), + }); + const array0_node = try Tag.array_access.create(c.arena, .{ .lhs = sub_expr_node, .rhs = index }); + // Convert array to pointer by expression: addr = &sub_expr[0] + const addr = try Tag.address_of.create(c.arena, array0_node); const casted = try transCPtrCast(c, scope, expr.getBeginLoc(), dest_type, src_type, addr); return maybeSuppressResult(c, result_used, casted); }, @@ -4206,6 +4233,23 @@ fn addTopLevelDecl(c: *Context, name: []const u8, decl_node: Node) !void { } } +/// Add an "extern" function prototype declaration that's been declared within a scoped block. +/// Similar to static local variables, this will be wrapped in a struct to work with Zig's syntax requirements. +/// +fn addLocalExternFnDecl(c: *Context, scope: *Scope, name: []const u8, decl_node: Node) !void { + const bs: *Scope.Block = try scope.findBlockScope(c); + + // Special naming convention for local extern function wrapper struct, + // this named "ExternLocal_[name]". 
+ const struct_name = try std.fmt.allocPrint(c.arena, "{s}_{s}", .{ Scope.Block.extern_inner_prepend, name }); + + // Outer Node for the wrapper struct + const node = try Tag.extern_local_fn.create(c.arena, .{ .name = struct_name, .init = decl_node }); + + try bs.statements.append(node); + try bs.discardVariable(c, struct_name); +} + fn transQualTypeInitializedStringLiteral(c: *Context, elem_ty: Node, string_lit: *const clang.StringLiteral) TypeError!Node { const string_lit_size = string_lit.getLength(); const array_size = @as(usize, @intCast(string_lit_size)); @@ -5186,6 +5230,7 @@ const MacroCtx = struct { loc: clang.SourceLocation, name: []const u8, refs_var_decl: bool = false, + fn_params: ?[]const ast.Payload.Param = null, fn peek(self: *MacroCtx) ?CToken.Id { if (self.i >= self.list.len) return null; @@ -5258,6 +5303,15 @@ const MacroCtx = struct { } return null; } + + fn checkFnParam(self: *MacroCtx, str: []const u8) bool { + if (self.fn_params == null) return false; + + for (self.fn_params.?) |param| { + if (mem.eql(u8, param.name.?, str)) return true; + } + return false; + } }; fn getMacroText(unit: *const clang.ASTUnit, c: *const Context, macro: *const clang.MacroDefinitionRecord) ![]const u8 { @@ -5434,10 +5488,9 @@ fn transMacroFnDefine(c: *Context, m: *MacroCtx) ParseError!void { defer fn_params.deinit(); while (true) { - switch (m.peek().?) 
{ - .identifier, .extended_identifier => _ = m.next(), - else => break, - } + if (!m.peek().?.isMacroIdentifier()) break; + + _ = m.next(); const mangled_name = try block_scope.makeMangledName(c, m.slice()); try fn_params.append(.{ @@ -5450,6 +5503,8 @@ fn transMacroFnDefine(c: *Context, m: *MacroCtx) ParseError!void { _ = m.next(); } + m.fn_params = fn_params.items; + try m.skip(c, .r_paren); if (m.checkTranslatableMacro(scope, fn_params.items)) |err| switch (err) { @@ -5865,38 +5920,41 @@ fn parseCPrimaryExpr(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!Node { .pp_num => { return parseCNumLit(c, m); }, - .identifier, .extended_identifier => { - if (c.global_scope.blank_macros.contains(slice)) { - return parseCPrimaryExpr(c, m, scope); - } - const mangled_name = scope.getAlias(slice); - if (builtin_typedef_map.get(mangled_name)) |ty| return Tag.type.create(c.arena, ty); - const identifier = try Tag.identifier.create(c.arena, mangled_name); - scope.skipVariableDiscard(identifier.castTag(.identifier).?.data); - refs_var: { - const ident_node = c.global_scope.sym_table.get(slice) orelse break :refs_var; - const var_decl_node = ident_node.castTag(.var_decl) orelse break :refs_var; - if (!var_decl_node.data.is_const) m.refs_var_decl = true; - } - return identifier; - }, .l_paren => { const inner_node = try parseCExpr(c, m, scope); try m.skip(c, .r_paren); return inner_node; }, - else => { - // for handling type macros (EVIL) - // TODO maybe detect and treat type macros as typedefs in parseCSpecifierQualifierList? - m.i -= 1; - if (try parseCTypeName(c, m, scope, true)) |type_name| { - return type_name; - } - try m.fail(c, "unable to translate C expr: unexpected token '{s}'", .{tok.symbol()}); - return error.ParseError; - }, + else => {}, } + + // The C preprocessor has no knowledge of C, so C keywords aren't special in macros. + // Thus the current token should be treated like an identifier if its name matches a parameter. 
+ if (tok == .identifier or tok == .extended_identifier or m.checkFnParam(slice)) { + if (c.global_scope.blank_macros.contains(slice)) { + return parseCPrimaryExpr(c, m, scope); + } + const mangled_name = scope.getAlias(slice); + if (builtin_typedef_map.get(mangled_name)) |ty| return Tag.type.create(c.arena, ty); + const identifier = try Tag.identifier.create(c.arena, mangled_name); + scope.skipVariableDiscard(identifier.castTag(.identifier).?.data); + refs_var: { + const ident_node = c.global_scope.sym_table.get(slice) orelse break :refs_var; + const var_decl_node = ident_node.castTag(.var_decl) orelse break :refs_var; + if (!var_decl_node.data.is_const) m.refs_var_decl = true; + } + return identifier; + } + + // for handling type macros (EVIL) + // TODO maybe detect and treat type macros as typedefs in parseCSpecifierQualifierList? + m.i -= 1; + if (try parseCTypeName(c, m, scope, true)) |type_name| { + return type_name; + } + try m.fail(c, "unable to translate C expr: unexpected token '{s}'", .{tok.symbol()}); + return error.ParseError; } fn macroIntFromBool(c: *Context, node: Node) !Node { @@ -6159,41 +6217,50 @@ fn parseCTypeName(c: *Context, m: *MacroCtx, scope: *Scope, allow_fail: bool) Pa fn parseCSpecifierQualifierList(c: *Context, m: *MacroCtx, scope: *Scope, allow_fail: bool) ParseError!?Node { const tok = m.next().?; - switch (tok) { - .identifier, .extended_identifier => { - if (c.global_scope.blank_macros.contains(m.slice())) { - return try parseCSpecifierQualifierList(c, m, scope, allow_fail); - } - const mangled_name = scope.getAlias(m.slice()); - if (!allow_fail or c.typedefs.contains(mangled_name)) { - if (builtin_typedef_map.get(mangled_name)) |ty| return try Tag.type.create(c.arena, ty); - return try Tag.identifier.create(c.arena, mangled_name); - } - }, - .keyword_void => return try Tag.type.create(c.arena, "anyopaque"), - .keyword_bool => return try Tag.type.create(c.arena, "bool"), - .keyword_char, - .keyword_int, - .keyword_short, - 
.keyword_long, - .keyword_float, - .keyword_double, - .keyword_signed, - .keyword_unsigned, - .keyword_complex, - => { - m.i -= 1; - return try parseCNumericType(c, m); - }, - .keyword_enum, .keyword_struct, .keyword_union => { - // struct Foo will be declared as struct_Foo by transRecordDecl - const slice = m.slice(); - try m.skip(c, .identifier); + const slice = m.slice(); + const mangled_name = scope.getAlias(slice); + if (!m.checkFnParam(mangled_name)) { + switch (tok) { + .identifier, .extended_identifier => { + if (c.global_scope.blank_macros.contains(m.slice())) { + return try parseCSpecifierQualifierList(c, m, scope, allow_fail); + } + if (!allow_fail or c.typedefs.contains(mangled_name)) { + if (builtin_typedef_map.get(mangled_name)) |ty| return try Tag.type.create(c.arena, ty); + return try Tag.identifier.create(c.arena, mangled_name); + } + }, + .keyword_void => return try Tag.type.create(c.arena, "anyopaque"), + .keyword_bool => return try Tag.type.create(c.arena, "bool"), + .keyword_char, + .keyword_int, + .keyword_short, + .keyword_long, + .keyword_float, + .keyword_double, + .keyword_signed, + .keyword_unsigned, + .keyword_complex, + => { + m.i -= 1; + return try parseCNumericType(c, m); + }, + .keyword_enum, .keyword_struct, .keyword_union => { + // struct Foo will be declared as struct_Foo by transRecordDecl + try m.skip(c, .identifier); - const name = try std.fmt.allocPrint(c.arena, "{s}_{s}", .{ slice, m.slice() }); - return try Tag.identifier.create(c.arena, name); - }, - else => {}, + const name = try std.fmt.allocPrint(c.arena, "{s}_{s}", .{ slice, m.slice() }); + return try Tag.identifier.create(c.arena, name); + }, + else => {}, + } + } else { + if (allow_fail) { + m.i -= 1; + return null; + } else { + return try Tag.identifier.create(c.arena, mangled_name); + } } if (allow_fail) { @@ -6471,7 +6538,7 @@ fn parseCPostfixExprInner(c: *Context, m: *MacroCtx, scope: *Scope, type_name: ? 
} fn parseCUnaryExpr(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!Node { - switch (m.next().?) { + sw: switch (m.next().?) { .bang => { const operand = try macroIntToBool(c, try parseCCastExpr(c, m, scope)); return Tag.not.create(c.arena, operand); @@ -6494,6 +6561,9 @@ fn parseCUnaryExpr(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!Node { return Tag.address_of.create(c.arena, operand); }, .keyword_sizeof => { + // 'sizeof' could be used as a parameter to a macro function. + if (m.checkFnParam(m.slice())) break :sw; + const operand = if (m.peek().? == .l_paren) blk: { _ = m.next(); const inner = (try parseCTypeName(c, m, scope, false)).?; @@ -6504,6 +6574,9 @@ fn parseCUnaryExpr(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!Node { return Tag.helpers_sizeof.create(c.arena, operand); }, .keyword_alignof => { + // 'alignof' could be used as a parameter to a macro function. + if (m.checkFnParam(m.slice())) break :sw; + // TODO this won't work if using 's // #define alignof _Alignof try m.skip(c, .l_paren); @@ -6516,11 +6589,11 @@ fn parseCUnaryExpr(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!Node { try m.fail(c, "TODO unary inc/dec expr", .{}); return error.ParseError; }, - else => { - m.i -= 1; - return try parseCPostfixExpr(c, m, scope, null); - }, + else => {}, } + + m.i -= 1; + return try parseCPostfixExpr(c, m, scope, null); } fn getContainer(c: *Context, node: Node) ?Node { diff --git a/src/wasi_libc.zig b/src/wasi_libc.zig index 7f1094c81e..2bce01436e 100644 --- a/src/wasi_libc.zig +++ b/src/wasi_libc.zig @@ -1255,6 +1255,6 @@ const emulated_signal_bottom_half_src_files = &[_][]const u8{ }; const emulated_signal_top_half_src_files = &[_][]const u8{ - "wasi/libc-top-half/musl/src/signal/psignal.c", - "wasi/libc-top-half/musl/src/string/strsignal.c", + "musl/src/signal/psignal.c", + "musl/src/string/strsignal.c", }; diff --git a/stage1/wasi.c b/stage1/wasi.c index 9d35efd280..0c9ca18c57 100644 --- a/stage1/wasi.c +++ b/stage1/wasi.c 
@@ -1003,6 +1003,8 @@ uint32_t wasi_snapshot_preview1_fd_seek(uint32_t fd, uint64_t in_offset, uint32_ default: panic("unimplemented"); } + if (fds[fd].stream == NULL) return wasi_errno_success; + int seek_whence; switch (whence) { case wasi_whence_set: diff --git a/test/behavior/align.zig b/test/behavior/align.zig index 2e015ea11f..5454fcf756 100644 --- a/test/behavior/align.zig +++ b/test/behavior/align.zig @@ -635,3 +635,13 @@ test "function pointer @intFromPtr/@ptrFromInt roundtrip" { try std.testing.expectEqual(nothing_ptr, nothing_ptr2); } + +test "function pointer align mask" { + if (!(builtin.cpu.arch.isArm() or builtin.cpu.arch.isMIPS())) return error.SkipZigTest; + + const a: *const fn () callconv(.c) void = @ptrFromInt(0x20202021); + _ = &a; + + const b: *align(16) const fn () callconv(.c) void = @alignCast(a); + _ = &b; +} diff --git a/test/behavior/array.zig b/test/behavior/array.zig index 61225aa3e0..14b2a9694b 100644 --- a/test/behavior/array.zig +++ b/test/behavior/array.zig @@ -1094,3 +1094,44 @@ test "@splat zero-length array" { try S.doTheTest(?*anyopaque, null); try comptime S.doTheTest(?*anyopaque, null); } + +test "initialize slice with reference to empty array initializer" { + const a: []const u8 = &.{}; + comptime assert(a.len == 0); +} + +test "initialize many-pointer with reference to empty array initializer" { + const a: [*]const u8 = &.{}; + _ = a; // nothing meaningful to test; points to zero bits +} + +test "initialize sentinel-terminated slice with reference to empty array initializer" { + const a: [:0]const u8 = &.{}; + comptime assert(a.len == 0); + comptime assert(a[0] == 0); +} + +test "initialize sentinel-terminated many-pointer with reference to empty array initializer" { + const a: [*:0]const u8 = &.{}; + comptime assert(a[0] == 0); +} + +test "pass pointer to empty array initializer to anytype parameter" { + const S = struct { + fn TypeOf(x: anytype) type { + return @TypeOf(x); + } + }; + comptime assert(S.TypeOf(&.{}) == 
@TypeOf(&.{})); +} + +test "initialize pointer to anyopaque with reference to empty array initializer" { + const ptr: *const anyopaque = &.{}; + // The above acts like an untyped initializer, since the `.{}` has no result type. + // So, `ptr` points in memory to an empty tuple (`@TypeOf(.{})`). + const casted: *const @TypeOf(.{}) = @alignCast(@ptrCast(ptr)); + const loaded = casted.*; + // `val` should be a `@TypeOf(.{})`, as expected. + // We can't check the value, but it's zero-bit, so the type matching is good enough. + comptime assert(@TypeOf(loaded) == @TypeOf(.{})); +} diff --git a/test/behavior/comptime_memory.zig b/test/behavior/comptime_memory.zig index baca3da72d..64b494b23e 100644 --- a/test/behavior/comptime_memory.zig +++ b/test/behavior/comptime_memory.zig @@ -515,3 +515,66 @@ fn fieldPtrTest() u32 { test "pointer in aggregate field can mutate comptime state" { try comptime std.testing.expect(fieldPtrTest() == 2); } + +test "comptime store of extern struct with void field" { + comptime { + var x: extern struct { a: u8, b: void } = undefined; + x = .{ .a = 123, .b = {} }; + std.debug.assert(x.a == 123); + } +} + +test "comptime store of extern struct with void field into array" { + comptime { + var x: [3]extern struct { a: u8, b: void } = undefined; + x[1] = .{ .a = 123, .b = {} }; + std.debug.assert(x[1].a == 123); + } +} + +test "comptime store of packed struct with void field" { + comptime { + var x: packed struct { a: u8, b: void } = undefined; + x = .{ .a = 123, .b = {} }; + std.debug.assert(x.a == 123); + } +} + +test "comptime store of packed struct with void field into array" { + comptime { + var x: [3]packed struct { a: u8, b: void } = undefined; + x[1] = .{ .a = 123, .b = {} }; + std.debug.assert(x[1].a == 123); + } +} + +test "comptime store of reinterpreted zero-bit type" { + const S = struct { + fn doTheTest(comptime T: type) void { + comptime var buf: T = undefined; + const ptr: *void = @ptrCast(&buf); + ptr.* = {}; + } + }; + 
S.doTheTest(void); + S.doTheTest(u0); + S.doTheTest([0]u8); + S.doTheTest([1]u0); + S.doTheTest([5]u0); + S.doTheTest([5]void); + S.doTheTest(packed struct(u0) {}); +} + +test "comptime store to extern struct reinterpreted as byte array" { + const T = extern struct { + x: u32, + y: f32, + z: [2]void, + }; + comptime var val: T = undefined; + + const bytes: *[@sizeOf(T)]u8 = @ptrCast(&val); + @memset(bytes, 0); + + comptime std.debug.assert(val.x == 0); +} diff --git a/test/behavior/floatop.zig b/test/behavior/floatop.zig index f2fa2f4b0a..4b11f61f19 100644 --- a/test/behavior/floatop.zig +++ b/test/behavior/floatop.zig @@ -132,13 +132,20 @@ test "cmp f16" { try comptime testCmp(f16); } -test "cmp f32/f64" { +test "cmp f32" { + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.cpu.arch.isArm() and builtin.target.abi.float() == .soft) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21234 + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + + try testCmp(f32); + try comptime testCmp(f32); +} + +test "cmp f64" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.cpu.arch.isArm() and builtin.target.abi.float() == .soft) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21234 - try testCmp(f32); - try comptime testCmp(f32); try testCmp(f64); try comptime testCmp(f64); } @@ -224,6 +231,98 @@ fn testCmp(comptime T: type) !void { } } +test "vector cmp f16" { + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64) return 
error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + if (builtin.cpu.arch.isArm()) return error.SkipZigTest; + if (builtin.cpu.arch.isPowerPC64()) return error.SkipZigTest; + + try testCmpVector(f16); + try comptime testCmpVector(f16); +} + +test "vector cmp f32" { + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + if (builtin.cpu.arch.isArm()) return error.SkipZigTest; + if (builtin.cpu.arch.isPowerPC64()) return error.SkipZigTest; + + try testCmpVector(f32); + try comptime testCmpVector(f32); +} + +test "vector cmp f64" { + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + if (builtin.cpu.arch.isArm()) return error.SkipZigTest; + if (builtin.cpu.arch.isPowerPC64()) return error.SkipZigTest; + + try testCmpVector(f64); + try comptime testCmpVector(f64); +} + +test "vector cmp f128" { + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + if (builtin.cpu.arch.isArm()) return error.SkipZigTest; + if (builtin.cpu.arch.isPowerPC64()) return 
error.SkipZigTest; + + try testCmpVector(f128); + try comptime testCmpVector(f128); +} + +test "vector cmp f80/c_longdouble" { + if (true) return error.SkipZigTest; + + try testCmpVector(f80); + try comptime testCmpVector(f80); + try testCmpVector(c_longdouble); + try comptime testCmpVector(c_longdouble); +} +fn testCmpVector(comptime T: type) !void { + var edges = [_]T{ + -math.inf(T), + -math.floatMax(T), + -math.floatMin(T), + -math.floatTrueMin(T), + -0.0, + math.nan(T), + 0.0, + math.floatTrueMin(T), + math.floatMin(T), + math.floatMax(T), + math.inf(T), + }; + _ = &edges; + for (edges, 0..) |rhs, rhs_i| { + const rhs_v: @Vector(4, T) = .{ rhs, rhs, rhs, rhs }; + for (edges, 0..) |lhs, lhs_i| { + const no_nan = lhs_i != 5 and rhs_i != 5; + const lhs_order = if (lhs_i < 5) lhs_i else lhs_i - 2; + const rhs_order = if (rhs_i < 5) rhs_i else rhs_i - 2; + const lhs_v: @Vector(4, T) = .{ lhs, lhs, lhs, lhs }; + try expect(@reduce(.And, (lhs_v == rhs_v)) == (no_nan and lhs_order == rhs_order)); + try expect(@reduce(.And, (lhs_v != rhs_v)) == !(no_nan and lhs_order == rhs_order)); + try expect(@reduce(.And, (lhs_v < rhs_v)) == (no_nan and lhs_order < rhs_order)); + try expect(@reduce(.And, (lhs_v > rhs_v)) == (no_nan and lhs_order > rhs_order)); + try expect(@reduce(.And, (lhs_v <= rhs_v)) == (no_nan and lhs_order <= rhs_order)); + try expect(@reduce(.And, (lhs_v >= rhs_v)) == (no_nan and lhs_order >= rhs_order)); + } + } +} + test "different sized float comparisons" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO @@ -1703,3 +1802,33 @@ test "optimized float mode" { try expect(S.optimized(small) == small); try expect(S.strict(small) == tiny); } + +fn MakeType(comptime x: anytype) type { + return struct { + fn get() @TypeOf(x) { + return x; + } + }; +} + +const nan_a: f32 = @bitCast(@as(u32, 0xffc00000)); +const nan_b: f32 = @bitCast(@as(u32, 0xffe00000)); + +fn 
testMemoization() !void { + try expect(MakeType(nan_a) == MakeType(nan_a)); + try expect(MakeType(nan_b) == MakeType(nan_b)); + try expect(MakeType(nan_a) != MakeType(nan_b)); +} + +fn testVectorMemoization(comptime T: type) !void { + const nan_a_v: T = @splat(nan_a); + const nan_b_v: T = @splat(nan_b); + try expect(MakeType(nan_a_v) == MakeType(nan_a_v)); + try expect(MakeType(nan_b_v) == MakeType(nan_b_v)); + try expect(MakeType(nan_a_v) != MakeType(nan_b_v)); +} + +test "comptime calls are only memoized when float arguments are bit-for-bit equal" { + try comptime testMemoization(); + try comptime testVectorMemoization(@Vector(4, f32)); +} diff --git a/test/behavior/fn.zig b/test/behavior/fn.zig index 27be4f9586..cff4cf567e 100644 --- a/test/behavior/fn.zig +++ b/test/behavior/fn.zig @@ -732,3 +732,27 @@ test "inline function return type is evaluated at comptime" { comptime assert(@TypeOf(result) == u16); try expect(result == 123); } + +test "coerce generic function making concrete parameter generic" { + const S = struct { + fn foo(_: anytype, x: u32) u32 { + comptime assert(@TypeOf(x) == u32); + return x; + } + }; + const coerced: fn (anytype, anytype) u32 = S.foo; + const result = coerced({}, 123); + try expect(result == 123); +} + +test "coerce generic function making generic parameter concrete" { + const S = struct { + fn foo(_: anytype, x: anytype) u32 { + comptime assert(@TypeOf(x) == u32); + return x; + } + }; + const coerced: fn (anytype, u32) u32 = S.foo; + const result = coerced({}, 123); + try expect(result == 123); +} diff --git a/test/behavior/for.zig b/test/behavior/for.zig index 465a0f7d46..697497970f 100644 --- a/test/behavior/for.zig +++ b/test/behavior/for.zig @@ -535,3 +535,12 @@ test "return from inline for" { }; try std.testing.expect(!S.do()); } + +test "for loop 0 length range" { + const map: []const u8 = &.{}; + for (map, 0..map.len) |i, j| { + _ = i; + _ = j; + comptime unreachable; + } +} diff --git a/test/behavior/generics.zig 
b/test/behavior/generics.zig index f54d0aac74..4fc07f7b6f 100644 --- a/test/behavior/generics.zig +++ b/test/behavior/generics.zig @@ -619,3 +619,55 @@ test "generic parameter resolves to comptime-only type but is not marked comptim const ct_result = comptime S.foo(u8, false, S.bar); comptime std.debug.assert(ct_result == 123); } + +test "instantiate coerced generic function" { + const S = struct { + fn generic(comptime T: type, arg: *const u8) !void { + _ = T; + _ = arg; + } + }; + const coerced: fn (comptime type, *u8) anyerror!void = S.generic; + var x: u8 = 20; + try coerced(u8, &x); +} + +test "generic struct captures slice of another struct" { + const S = struct { + const Foo = struct { x: u32 }; + const foo_array: [2]Foo = undefined; + + fn Bar(foo_slice: []const Foo) type { + return struct { + const foo_ptr: [*]const Foo = foo_slice.ptr; + }; + } + }; + const T = S.Bar(&S.foo_array); + comptime std.debug.assert(T.foo_ptr == &S.foo_array); +} + +test "noalias paramters with generic return type" { + const S = struct { + pub fn a(noalias _: *u8, im_noalias: usize) im_noalias {} + pub fn b(noalias _: *u8, im_noalias: usize, x: *isize) x { + _ = im_noalias; + } + pub fn c(noalias _: *u8, im_noalias: usize, x: isize) struct { x } { + _ = im_noalias; + } + pub fn d(noalias _: *u8, im_noalias: usize, _: anytype) struct { im_noalias } {} + pub fn e(noalias _: *u8, _: usize, im_noalias: [5]u9) switch (@TypeOf(im_noalias)) { + else => void, + } {} + pub fn f(noalias _: *u8, _: anytype, im_noalias: u8) switch (@TypeOf(im_noalias)) { + else => enum { x, y, z }, + } {} + }; + _ = S.a; + _ = S.b; + _ = S.c; + _ = S.d; + _ = S.e; + _ = S.f; +} diff --git a/test/behavior/globals.zig b/test/behavior/globals.zig index c11fa7cb25..b07c5116f8 100644 --- a/test/behavior/globals.zig +++ b/test/behavior/globals.zig @@ -167,3 +167,31 @@ test "global var can be indirectly self-referential" { try std.testing.expect(S.bar.other == &S.foo); try std.testing.expect(S.bar.other.other == 
&S.bar); } + +pub const Callbacks = extern struct { + key_callback: *const fn (key: i32) callconv(.c) i32, +}; + +var callbacks: Callbacks = undefined; +var callbacks_loaded: bool = false; + +test "function pointer field call on global extern struct, conditional on global" { + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + + if (callbacks_loaded) { + try std.testing.expectEqual(42, callbacks.key_callback(42)); + } +} + +test "function pointer field call on global extern struct" { + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + + const S = struct { + fn keyCallback(key: i32) callconv(.c) i32 { + return key; + } + }; + + callbacks = Callbacks{ .key_callback = S.keyCallback }; + try std.testing.expectEqual(42, callbacks.key_callback(42)); +} diff --git a/test/behavior/memcpy.zig b/test/behavior/memcpy.zig index cf6367d078..810494b774 100644 --- a/test/behavior/memcpy.zig +++ b/test/behavior/memcpy.zig @@ -69,6 +69,27 @@ fn testMemcpyDestManyPtr() !void { try expect(buf[4] == 'o'); } +test "@memcpy C pointer" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + + try testMemcpyCPointer(); + try comptime testMemcpyCPointer(); +} + +fn testMemcpyCPointer() !void { + const src = "hello"; + var buf: [5]u8 = undefined; + @memcpy(@as([*c]u8, &buf), src); + try expect(buf[0] == 'h'); + try expect(buf[1] == 'e'); + try expect(buf[2] == 'l'); + try expect(buf[3] == 'l'); + try expect(buf[4] == 'o'); +} + test "@memcpy slice" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; @@ -133,3 +154,34 @@ test "@memcpy zero-bit type with aliasing" { S.doTheTest(); comptime S.doTheTest(); } + +test "@memcpy with sentinel" 
{ + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + + const S = struct { + fn doTheTest() void { + const field = @typeInfo(struct { a: u32 }).@"struct".fields[0]; + var buffer: [field.name.len]u8 = undefined; + @memcpy(&buffer, field.name); + } + }; + + S.doTheTest(); + comptime S.doTheTest(); +} + +test "@memcpy no sentinel source into sentinel destination" { + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + + const S = struct { + fn doTheTest() void { + const src: []const u8 = &.{ 1, 2, 3 }; + comptime var dest_buf: [3:0]u8 = @splat(0); + const dest: [:0]u8 = &dest_buf; + @memcpy(dest, src); + } + }; + + S.doTheTest(); + comptime S.doTheTest(); +} diff --git a/test/behavior/packed-struct.zig b/test/behavior/packed-struct.zig index 666a7427e9..5ebdb17abb 100644 --- a/test/behavior/packed-struct.zig +++ b/test/behavior/packed-struct.zig @@ -1349,3 +1349,45 @@ test "assign packed struct initialized with RLS to packed struct literal field" try expect(outer.inner.x == x); try expect(outer.x == x); } + +test "byte-aligned packed relocation" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; + + const S = struct { + var global: u8 align(2) = 0; + var packed_value: packed struct { x: u8, y: *align(2) u8 } = .{ .x = 111, .y = &global }; + }; + try expect(S.packed_value.x == 111); + try expect(S.packed_value.y == &S.global); +} + +test "packed struct store of comparison result" { + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + + const S1 = packed struct { + val1: u3, + val2: u3, + }; + const S2 = packed struct { + a: bool, + b: bool, + }; + + var A: S1 = .{ .val1 = 1, .val2 = 1 }; + A.val2 += 1; + try expectEqual(1, A.val1); + try expectEqual(2, A.val2); + try expect((A.val2 & 
1) != 1); + const result1: S2 = .{ .a = (A.val2 & 1) != 1, .b = (A.val1 & 1) != 1 }; + try expect(result1.a); + try expect(!result1.b); + + try expect((A.val2 == 3) == false); + try expect((A.val2 == 2) == true); + const result2: S2 = .{ .a = !(A.val2 == 3), .b = (A.val1 == 2) }; + try expect(result2.a); + try expect(!result2.b); +} diff --git a/test/behavior/pointers.zig b/test/behavior/pointers.zig index e892918571..b8f81d74af 100644 --- a/test/behavior/pointers.zig +++ b/test/behavior/pointers.zig @@ -760,3 +760,27 @@ test "comptime pointer equality through distinct elements with well-defined layo comptime assert(buf[1] == 456); comptime assert(second_elem.* == 456); } + +test "pointers to elements of slice of zero-bit type" { + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + + var slice: []const u0 = undefined; + slice = &.{ 0, 0 }; + + const a = &slice[0]; + const b = &slice[1]; + + try expect(a == b); +} + +test "pointers to elements of many-ptr to zero-bit type" { + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + + var many_ptr: [*]const u0 = undefined; + many_ptr = &.{ 0, 0 }; + + const a = &many_ptr[0]; + const b = &many_ptr[1]; + + try expect(a == b); +} diff --git a/test/behavior/ptrcast.zig b/test/behavior/ptrcast.zig index 84a87bba9c..ac29197438 100644 --- a/test/behavior/ptrcast.zig +++ b/test/behavior/ptrcast.zig @@ -509,3 +509,23 @@ test "@ptrCast array pointer to slice with complex length decrease" { try S.doTheTest(@splat(0)); try comptime S.doTheTest(@splat(0)); } + +test "@ptrCast slice of zero-bit type to different slice" { + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + + const S = struct { + fn doTheTest(comptime T: type, zero_bits: 
[]const T) !void { + const out: []const u8 = @ptrCast(zero_bits); + try expect(out.len == 0); + } + }; + try S.doTheTest(void, &.{ {}, {}, {} }); + try S.doTheTest(u0, &.{ 0, 0, 0, 0 }); + try S.doTheTest(packed struct(u0) {}, &.{ .{}, .{} }); + try comptime S.doTheTest(void, &.{ {}, {}, {} }); + try comptime S.doTheTest(u0, &.{ 0, 0, 0, 0 }); + try comptime S.doTheTest(packed struct(u0) {}, &.{ .{}, .{} }); +} diff --git a/test/behavior/select.zig b/test/behavior/select.zig index f551ff9533..604227b17f 100644 --- a/test/behavior/select.zig +++ b/test/behavior/select.zig @@ -66,3 +66,23 @@ fn selectArrays() !void { const xyz = @select(f32, x, y, z); try expect(mem.eql(f32, &@as([4]f32, xyz), &[4]f32{ 0.0, 312.1, -145.9, -3381.233 })); } + +test "@select compare result" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + + const S = struct { + fn min(comptime V: type, lhs: V, rhs: V) V { + return @select(@typeInfo(V).vector.child, lhs < rhs, lhs, rhs); + } + + fn doTheTest() !void { + try expect(@reduce(.And, min(@Vector(4, f32), .{ -1, 2, -3, 4 }, .{ 1, -2, 3, -4 }) == @Vector(4, f32){ -1, -2, -3, -4 })); + try expect(@reduce(.And, min(@Vector(2, f64), .{ -1, 2 }, .{ 1, -2 }) == @Vector(2, f64){ -1, -2 })); + } + }; + + try S.doTheTest(); + try comptime S.doTheTest(); +} diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig index 367d11588f..94d8268e86 100644 --- a/test/behavior/struct.zig +++ b/test/behavior/struct.zig @@ -1529,7 +1529,8 @@ test "optional generic function label struct field" { } test "struct fields get automatically reordered" { - if (builtin.zig_backend != .stage2_llvm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == 
.stage2_wasm) return error.SkipZigTest; const S1 = struct { a: u32, @@ -2140,3 +2141,45 @@ test "anonymous struct equivalence" { comptime assert(A != C); comptime assert(B != C); } + +test "field access through mem ptr arg" { + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + + const S = struct { + fn nestedFieldAccess( + _: usize, + _: usize, + _: usize, + _: usize, + _: usize, + _: usize, + _: usize, + _: usize, + ptr_struct: *const struct { field: u32 }, + ) u32 { + return ptr_struct.field; + } + }; + try expect(S.nestedFieldAccess( + undefined, + undefined, + undefined, + undefined, + undefined, + undefined, + undefined, + undefined, + &.{ .field = 0x6b00a2eb }, + ) == 0x6b00a2eb); + comptime assert(S.nestedFieldAccess( + undefined, + undefined, + undefined, + undefined, + undefined, + undefined, + undefined, + undefined, + &.{ .field = 0x0ced271f }, + ) == 0x0ced271f); +} diff --git a/test/behavior/this.zig b/test/behavior/this.zig index 3f8fe13316..9f10348b46 100644 --- a/test/behavior/this.zig +++ b/test/behavior/this.zig @@ -1,4 +1,5 @@ -const expect = @import("std").testing.expect; +const std = @import("std"); +const expect = std.testing.expect; const builtin = @import("builtin"); const module = @This(); @@ -55,3 +56,10 @@ test "this used as optional function parameter" { global.enter = prev; global.enter(null); } + +test "@This() in opaque" { + const T = opaque { + const Self = @This(); + }; + comptime std.debug.assert(T.Self == T); +} diff --git a/test/behavior/tuple.zig b/test/behavior/tuple.zig index 0a0ed1d620..d014d9cf97 100644 --- a/test/behavior/tuple.zig +++ b/test/behavior/tuple.zig @@ -603,3 +603,21 @@ test "empty union in tuple" { try std.testing.expectEqualStrings("0", info.@"struct".fields[0].name); try std.testing.expect(@typeInfo(info.@"struct".fields[0].type) == .@"union"); } + +test "field pointer of underaligned tuple" { + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + const S = struct { + 
fn doTheTest() !void { + const T = struct { u8, u32 }; + var val: T align(2) = .{ 1, 2 }; + + comptime assert(@TypeOf(&val[0]) == *u8); // `u8` field pointer isn't overaligned + comptime assert(@TypeOf(&val[1]) == *align(2) u32); // `u32` field pointer is correctly underaligned + + try expect(val[0] == 1); + try expect(val[1] == 2); + } + }; + try S.doTheTest(); + try comptime S.doTheTest(); +} diff --git a/test/behavior/type.zig b/test/behavior/type.zig index 61c56bfa18..6964f2d26a 100644 --- a/test/behavior/type.zig +++ b/test/behavior/type.zig @@ -808,3 +808,10 @@ test "reify enum where fields refers to part of array" { try testing.expect(b == .bar); try testing.expect(a != b); } + +test "undefined type value" { + const S = struct { + const undef_type: type = undefined; + }; + comptime assert(@TypeOf(S.undef_type) == type); +} diff --git a/test/behavior/var_args.zig b/test/behavior/var_args.zig index a2aa154c72..19cb5c0ba5 100644 --- a/test/behavior/var_args.zig +++ b/test/behavior/var_args.zig @@ -106,6 +106,7 @@ test "simple variadic function" { } if (builtin.cpu.arch == .x86_64 and builtin.os.tag == .windows) return error.SkipZigTest; // TODO if (builtin.cpu.arch == .s390x and builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21350 + if (builtin.cpu.arch.isSPARC() and builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/23718 const S = struct { fn simple(...) 
callconv(.c) c_int { @@ -200,6 +201,7 @@ test "variadic functions" { } if (builtin.cpu.arch == .x86_64 and builtin.os.tag == .windows) return error.SkipZigTest; // TODO if (builtin.cpu.arch == .s390x and builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21350 + if (builtin.cpu.arch.isSPARC() and builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/23718 const S = struct { fn printf(list_ptr: *std.ArrayList(u8), format: [*:0]const u8, ...) callconv(.c) void { @@ -245,6 +247,7 @@ test "copy VaList" { } if (builtin.cpu.arch == .x86_64 and builtin.os.tag == .windows) return error.SkipZigTest; // TODO if (builtin.cpu.arch == .s390x and builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21350 + if (builtin.cpu.arch.isSPARC() and builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/23718 const S = struct { fn add(count: c_int, ...) callconv(.c) c_int { @@ -282,6 +285,7 @@ test "unused VaList arg" { return error.SkipZigTest; // TODO } if (builtin.cpu.arch == .s390x and builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21350 + if (builtin.cpu.arch.isSPARC() and builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/23718 const S = struct { fn thirdArg(dummy: c_int, ...) 
callconv(.c) c_int { diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig index d55d416608..346de5c4c4 100644 --- a/test/behavior/vector.zig +++ b/test/behavior/vector.zig @@ -768,6 +768,7 @@ test "vector reduce operation" { if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.cpu.arch.isMIPS64()) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21091 + if (builtin.cpu.arch.isSPARC()) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/23719 const S = struct { fn testReduce(comptime op: std.builtin.ReduceOp, x: anytype, expected: anytype) !void { diff --git a/test/c_abi/cfuncs.c b/test/c_abi/cfuncs.c index 92f95c339c..4a71ebd37c 100644 --- a/test/c_abi/cfuncs.c +++ b/test/c_abi/cfuncs.c @@ -227,6 +227,38 @@ void c_struct_u64_u64_8(size_t, size_t, size_t, size_t, size_t, size_t, size_t, assert_or_panic(s.b == 40); } +struct Struct_f32 { + float a; +}; + +struct Struct_f32 zig_ret_struct_f32(void); + +void zig_struct_f32(struct Struct_f32); + +struct Struct_f32 c_ret_struct_f32(void) { + return (struct Struct_f32){ 2.5f }; +} + +void c_struct_f32(struct Struct_f32 s) { + assert_or_panic(s.a == 2.5f); +} + +struct Struct_f64 { + double a; +}; + +struct Struct_f64 zig_ret_struct_f64(void); + +void zig_struct_f64(struct Struct_f64); + +struct Struct_f64 c_ret_struct_f64(void) { + return (struct Struct_f64){ 2.5 }; +} + +void c_struct_f64(struct Struct_f64 s) { + assert_or_panic(s.a == 2.5); +} + struct Struct_f32f32_f32 { struct { float b, c; @@ -296,6 +328,13 @@ void c_struct_u32_union_u32_u32u32(struct Struct_u32_Union_u32_u32u32 s) { assert_or_panic(s.b.c.e == 3); } +struct Struct_i32_i32 { + int32_t a; + int32_t b; +}; + +void zig_struct_i32_i32(struct Struct_i32_i32); + struct BigStruct { uint64_t a; uint64_t b; @@ -2674,6 +2713,18 @@ void run_c_tests(void) { } #if !defined(ZIG_RISCV64) + { + struct 
Struct_f32 s = zig_ret_struct_f32(); + assert_or_panic(s.a == 2.5f); + zig_struct_f32((struct Struct_f32){ 2.5f }); + } + + { + struct Struct_f64 s = zig_ret_struct_f64(); + assert_or_panic(s.a == 2.5); + zig_struct_f64((struct Struct_f64){ 2.5 }); + } + { struct Struct_f32f32_f32 s = zig_ret_struct_f32f32_f32(); assert_or_panic(s.a.b == 1.0f); @@ -2699,6 +2750,10 @@ void run_c_tests(void) { assert_or_panic(s.b.c.e == 3); zig_struct_u32_union_u32_u32u32(s); } + { + struct Struct_i32_i32 s = {1, 2}; + zig_struct_i32_i32(s); + } #endif { @@ -5024,6 +5079,21 @@ double complex c_cmultd(double complex a, double complex b) { return 1.5 + I * 13.5; } +struct Struct_i32_i32 c_mut_struct_i32_i32(struct Struct_i32_i32 s) { + assert_or_panic(s.a == 1); + assert_or_panic(s.b == 2); + s.a += 100; + s.b += 250; + assert_or_panic(s.a == 101); + assert_or_panic(s.b == 252); + return s; +} + +void c_struct_i32_i32(struct Struct_i32_i32 s) { + assert_or_panic(s.a == 1); + assert_or_panic(s.b == 2); +} + void c_big_struct(struct BigStruct x) { assert_or_panic(x.a == 1); assert_or_panic(x.b == 2); diff --git a/test/c_abi/main.zig b/test/c_abi/main.zig index 4b42eb637b..c8bfb926fb 100644 --- a/test/c_abi/main.zig +++ b/test/c_abi/main.zig @@ -13,7 +13,7 @@ const expectEqual = std.testing.expectEqual; const have_i128 = builtin.cpu.arch != .x86 and !builtin.cpu.arch.isArm() and !builtin.cpu.arch.isMIPS() and !builtin.cpu.arch.isPowerPC32(); -const have_f128 = builtin.cpu.arch.isX86() and !builtin.os.tag.isDarwin(); +const have_f128 = builtin.cpu.arch.isWasm() or (builtin.cpu.arch.isX86() and !builtin.os.tag.isDarwin()); const have_f80 = builtin.cpu.arch.isX86(); extern fn run_c_tests() void; @@ -339,6 +339,56 @@ test "C ABI struct u64 u64" { c_struct_u64_u64_8(0, 1, 2, 3, 4, 5, 6, 7, .{ .a = 39, .b = 40 }); } +const Struct_f32 = extern struct { + a: f32, +}; + +export fn zig_ret_struct_f32() Struct_f32 { + return .{ .a = 2.5 }; +} + +export fn zig_struct_f32(s: Struct_f32) void { + 
expect(s.a == 2.5) catch @panic("test failure"); +} + +extern fn c_ret_struct_f32() Struct_f32; + +extern fn c_struct_f32(Struct_f32) void; + +test "C ABI struct f32" { + if (builtin.cpu.arch.isMIPS64()) return error.SkipZigTest; + if (builtin.cpu.arch.isPowerPC32()) return error.SkipZigTest; + + const s = c_ret_struct_f32(); + try expect(s.a == 2.5); + c_struct_f32(.{ .a = 2.5 }); +} + +const Struct_f64 = extern struct { + a: f64, +}; + +export fn zig_ret_struct_f64() Struct_f64 { + return .{ .a = 2.5 }; +} + +export fn zig_struct_f64(s: Struct_f64) void { + expect(s.a == 2.5) catch @panic("test failure"); +} + +extern fn c_ret_struct_f64() Struct_f64; + +extern fn c_struct_f64(Struct_f64) void; + +test "C ABI struct f64" { + if (builtin.cpu.arch.isMIPS64()) return error.SkipZigTest; + if (builtin.cpu.arch.isPowerPC32()) return error.SkipZigTest; + + const s = c_ret_struct_f64(); + try expect(s.a == 2.5); + c_struct_f64(.{ .a = 2.5 }); +} + const Struct_f32f32_f32 = extern struct { a: extern struct { b: f32, c: f32 }, d: f32, @@ -434,6 +484,34 @@ test "C ABI struct{u32,union{u32,struct{u32,u32}}}" { c_struct_u32_union_u32_u32u32(.{ .a = 1, .b = .{ .c = .{ .d = 2, .e = 3 } } }); } +const Struct_i32_i32 = extern struct { + a: i32, + b: i32, +}; +extern fn c_mut_struct_i32_i32(Struct_i32_i32) Struct_i32_i32; +extern fn c_struct_i32_i32(Struct_i32_i32) void; + +test "C ABI struct i32 i32" { + if (builtin.cpu.arch.isMIPS64()) return error.SkipZigTest; + if (builtin.cpu.arch.isPowerPC()) return error.SkipZigTest; + + const s: Struct_i32_i32 = .{ + .a = 1, + .b = 2, + }; + const mut_res = c_mut_struct_i32_i32(s); + try expect(s.a == 1); + try expect(s.b == 2); + try expect(mut_res.a == 101); + try expect(mut_res.b == 252); + c_struct_i32_i32(s); +} + +export fn zig_struct_i32_i32(s: Struct_i32_i32) void { + expect(s.a == 1) catch @panic("test failure: zig_struct_i32_i32 1"); + expect(s.b == 2) catch @panic("test failure: zig_struct_i32_i32 2"); +} + const BigStruct = 
extern struct { a: u64, b: u64, @@ -5591,64 +5669,56 @@ test "f80 extra struct" { try expect(a.b == 24); } -comptime { - skip: { - if (builtin.target.cpu.arch.isWasm()) break :skip; +export fn zig_f128(x: f128) f128 { + expect(x == 12) catch @panic("test failure"); + return 34; +} +extern fn c_f128(f128) f128; +test "f128 bare" { + if (!have_f128) return error.SkipZigTest; - _ = struct { - export fn zig_f128(x: f128) f128 { - expect(x == 12) catch @panic("test failure"); - return 34; - } - extern fn c_f128(f128) f128; - test "f128 bare" { - if (!have_f128) return error.SkipZigTest; + const a = c_f128(12.34); + try expect(@as(f64, @floatCast(a)) == 56.78); +} - const a = c_f128(12.34); - try expect(@as(f64, @floatCast(a)) == 56.78); - } +const f128_struct = extern struct { + a: f128, +}; +export fn zig_f128_struct(a: f128_struct) f128_struct { + expect(a.a == 12345) catch @panic("test failure"); + return .{ .a = 98765 }; +} +extern fn c_f128_struct(f128_struct) f128_struct; +test "f128 struct" { + if (!have_f128) return error.SkipZigTest; - const f128_struct = extern struct { - a: f128, - }; - export fn zig_f128_struct(a: f128_struct) f128_struct { - expect(a.a == 12345) catch @panic("test failure"); - return .{ .a = 98765 }; - } - extern fn c_f128_struct(f128_struct) f128_struct; - test "f128 struct" { - if (!have_f128) return error.SkipZigTest; + const a = c_f128_struct(.{ .a = 12.34 }); + try expect(@as(f64, @floatCast(a.a)) == 56.78); - const a = c_f128_struct(.{ .a = 12.34 }); - try expect(@as(f64, @floatCast(a.a)) == 56.78); + const b = c_f128_f128_struct(.{ .a = 12.34, .b = 87.65 }); + try expect(@as(f64, @floatCast(b.a)) == 56.78); + try expect(@as(f64, @floatCast(b.b)) == 43.21); +} - const b = c_f128_f128_struct(.{ .a = 12.34, .b = 87.65 }); - try expect(@as(f64, @floatCast(b.a)) == 56.78); - try expect(@as(f64, @floatCast(b.b)) == 43.21); - } +const f128_f128_struct = extern struct { + a: f128, + b: f128, +}; +export fn zig_f128_f128_struct(a: 
f128_f128_struct) f128_f128_struct { + expect(a.a == 13) catch @panic("test failure"); + expect(a.b == 57) catch @panic("test failure"); + return .{ .a = 24, .b = 68 }; +} +extern fn c_f128_f128_struct(f128_f128_struct) f128_f128_struct; +test "f128 f128 struct" { + if (!have_f128) return error.SkipZigTest; - const f128_f128_struct = extern struct { - a: f128, - b: f128, - }; - export fn zig_f128_f128_struct(a: f128_f128_struct) f128_f128_struct { - expect(a.a == 13) catch @panic("test failure"); - expect(a.b == 57) catch @panic("test failure"); - return .{ .a = 24, .b = 68 }; - } - extern fn c_f128_f128_struct(f128_f128_struct) f128_f128_struct; - test "f128 f128 struct" { - if (!have_f128) return error.SkipZigTest; + const a = c_f128_struct(.{ .a = 12.34 }); + try expect(@as(f64, @floatCast(a.a)) == 56.78); - const a = c_f128_struct(.{ .a = 12.34 }); - try expect(@as(f64, @floatCast(a.a)) == 56.78); - - const b = c_f128_f128_struct(.{ .a = 12.34, .b = 87.65 }); - try expect(@as(f64, @floatCast(b.a)) == 56.78); - try expect(@as(f64, @floatCast(b.b)) == 43.21); - } - }; - } + const b = c_f128_f128_struct(.{ .a = 12.34, .b = 87.65 }); + try expect(@as(f64, @floatCast(b.a)) == 56.78); + try expect(@as(f64, @floatCast(b.b)) == 43.21); } // The stdcall attribute on C functions is ignored when compiled on non-x86 diff --git a/test/cases/compile_errors/aggregate_too_large.zig b/test/cases/compile_errors/aggregate_too_large.zig new file mode 100644 index 0000000000..4a4daeda93 --- /dev/null +++ b/test/cases/compile_errors/aggregate_too_large.zig @@ -0,0 +1,31 @@ +const S = struct { + data: [1 << 32]u8, +}; + +const T = struct { + d1: [1 << 31]u8, + d2: [1 << 31]u8, +}; + +const U = union { + a: u32, + b: [1 << 32]u8, +}; + +const V = union { + a: u32, + b: T, +}; + +comptime { + _ = S; + _ = T; + _ = U; + _ = V; +} + +// error +// +// :1:11: error: struct layout requires size 4294967296, this compiler implementation supports up to 4294967295 +// :5:11: error: struct 
layout requires size 4294967296, this compiler implementation supports up to 4294967295 +// :10:11: error: union layout requires size 4294967300, this compiler implementation supports up to 4294967295 diff --git a/test/cases/compile_errors/runtime_index_into_comptime_only_many_ptr.zig b/test/cases/compile_errors/runtime_index_into_comptime_only_many_ptr.zig new file mode 100644 index 0000000000..4a2d475b1f --- /dev/null +++ b/test/cases/compile_errors/runtime_index_into_comptime_only_many_ptr.zig @@ -0,0 +1,10 @@ +var rt: usize = 0; +export fn foo() void { + const x: [*]const type = &.{ u8, u16 }; + _ = &x[rt]; +} + +// error +// +// :4:12: error: values of type '[*]const type' must be comptime-known, but index value is runtime-known +// :4:11: note: types are not available at runtime diff --git a/test/cases/compile_errors/runtime_store_to_comptime_field.zig b/test/cases/compile_errors/runtime_store_to_comptime_field.zig new file mode 100644 index 0000000000..0c5d6a7ad3 --- /dev/null +++ b/test/cases/compile_errors/runtime_store_to_comptime_field.zig @@ -0,0 +1,19 @@ +const init: u32 = 1; +fn rt() u32 { + return 3; +} + +var tuple_val = .{init}; +export fn tuple_field() void { + tuple_val[0] = rt(); +} + +var struct_val = .{ .x = init }; +export fn struct_field() void { + struct_val.x = rt(); +} + +// error +// +// :8:14: error: cannot store runtime value in compile time variable +// :13:15: error: cannot store runtime value in compile time variable diff --git a/test/cases/compile_errors/union_field_ordered_differently_than_enum.zig b/test/cases/compile_errors/union_field_ordered_differently_than_enum.zig new file mode 100644 index 0000000000..5c86fb4080 --- /dev/null +++ b/test/cases/compile_errors/union_field_ordered_differently_than_enum.zig @@ -0,0 +1,27 @@ +const Tag = enum { a, b }; + +const Union = union(Tag) { + b, + a, +}; + +const BaseUnion = union(enum) { + a, + b, +}; + +const GeneratedTagUnion = union(@typeInfo(BaseUnion).@"union".tag_type.?) 
{ + b, + a, +}; + +export fn entry() usize { + return @sizeOf(Union) + @sizeOf(GeneratedTagUnion); +} + +// error +// +// :4:5: error: union field 'b' ordered differently than corresponding enum field +// :1:23: note: enum field here +// :14:5: error: union field 'b' ordered differently than corresponding enum field +// :10:5: note: enum field here diff --git a/test/cases/run_translated_c/sub_scope_extern_local_var_ref.c b/test/cases/run_translated_c/sub_scope_extern_local_var_ref.c new file mode 100644 index 0000000000..f3461f8559 --- /dev/null +++ b/test/cases/run_translated_c/sub_scope_extern_local_var_ref.c @@ -0,0 +1,23 @@ +#include +int a = 42; +int foo(int bar) { + extern int a; + if (bar) { + return a; + } + return 0; +} +int main() { + int result1 = foo(0); + if (result1 != 0) abort(); + int result2 = foo(1); + if (result2 != 42) abort(); + a = 100; + int result3 = foo(1); + if (result3 != 100) abort(); + return 0; +} + +// run-translated-c +// c_frontend=clang +// link_libc=true diff --git a/test/cases/translate_c/c_keywords_as_macro_function_parameters.c b/test/cases/translate_c/c_keywords_as_macro_function_parameters.c new file mode 100644 index 0000000000..8485909a94 --- /dev/null +++ b/test/cases/translate_c/c_keywords_as_macro_function_parameters.c @@ -0,0 +1,82 @@ +#define GUARDED_INT_ADDITION(int) ((int) + 1) + +#define UNGUARDED_INT_SUBTRACTION(int) (int - 2) + +#define GUARDED_INT_MULTIPLY(int) ((int) * 3) + +#define UNGUARDED_INT_DIVIDE(int) (int / 4) + +#define WRAPPED_RETURN(return) ((return) % 2) + +#define UNWRAPPED_RETURN(return) (return ^ 0x7F) + +#define WITH_TWO_PARAMETERS(signed, x) ((signed) + (x) + 9) + +#define GUARDED_ALIGNOF(_Alignof) ((_Alignof) & 0x55) + +#define UNGUARDED_ALIGNOF(_Alignof) (_Alignof | 0x80) + +#define GUARDED_SIZEOF(sizeof) ((sizeof) == 64) + +#define UNGUARDED_SIZEOF(sizeof) (sizeof < 64) + +#define SIZEOF(x) ((int)sizeof(x)) + +#define SIZEOF2(x) ((int)sizeof x) + +// translate-c +// c_frontend=clang +// +// 
pub inline fn GUARDED_INT_ADDITION(int: anytype) @TypeOf(int + @as(c_int, 1)) { +// _ = ∫ +// return int + @as(c_int, 1); +// } +// pub inline fn UNGUARDED_INT_SUBTRACTION(int: anytype) @TypeOf(int - @as(c_int, 2)) { +// _ = ∫ +// return int - @as(c_int, 2); +// } +// pub inline fn GUARDED_INT_MULTIPLY(int: anytype) @TypeOf(int * @as(c_int, 3)) { +// _ = ∫ +// return int * @as(c_int, 3); +// } +// pub inline fn UNGUARDED_INT_DIVIDE(int: anytype) @TypeOf(@import("std").zig.c_translation.MacroArithmetic.div(int, @as(c_int, 4))) { +// _ = ∫ +// return @import("std").zig.c_translation.MacroArithmetic.div(int, @as(c_int, 4)); +// } +// pub inline fn WRAPPED_RETURN(@"return": anytype) @TypeOf(@import("std").zig.c_translation.MacroArithmetic.rem(@"return", @as(c_int, 2))) { +// _ = &@"return"; +// return @import("std").zig.c_translation.MacroArithmetic.rem(@"return", @as(c_int, 2)); +// } +// pub inline fn UNWRAPPED_RETURN(@"return": anytype) @TypeOf(@"return" ^ @as(c_int, 0x7F)) { +// _ = &@"return"; +// return @"return" ^ @as(c_int, 0x7F); +// } +// pub inline fn WITH_TWO_PARAMETERS(signed: anytype, x: anytype) @TypeOf((signed + x) + @as(c_int, 9)) { +// _ = &signed; +// _ = &x; +// return (signed + x) + @as(c_int, 9); +// } +// pub inline fn GUARDED_ALIGNOF(_Alignof: anytype) @TypeOf(_Alignof & @as(c_int, 0x55)) { +// _ = &_Alignof; +// return _Alignof & @as(c_int, 0x55); +// } +// pub inline fn UNGUARDED_ALIGNOF(_Alignof: anytype) @TypeOf(_Alignof | @as(c_int, 0x80)) { +// _ = &_Alignof; +// return _Alignof | @as(c_int, 0x80); +// } +// pub inline fn GUARDED_SIZEOF(sizeof: anytype) @TypeOf(sizeof == @as(c_int, 64)) { +// _ = &sizeof; +// return sizeof == @as(c_int, 64); +// } +// pub inline fn UNGUARDED_SIZEOF(sizeof: anytype) @TypeOf(sizeof < @as(c_int, 64)) { +// _ = &sizeof; +// return sizeof < @as(c_int, 64); +// } +// pub inline fn SIZEOF(x: anytype) c_int { +// _ = &x; +// return @import("std").zig.c_translation.cast(c_int, 
@import("std").zig.c_translation.sizeof(x)); +// } +// pub inline fn SIZEOF2(x: anytype) c_int { +// _ = &x; +// return @import("std").zig.c_translation.cast(c_int, @import("std").zig.c_translation.sizeof(x)); +// } diff --git a/test/cases/translate_c/macro_calling_convention.c b/test/cases/translate_c/macro_calling_convention.c new file mode 100644 index 0000000000..01331e572c --- /dev/null +++ b/test/cases/translate_c/macro_calling_convention.c @@ -0,0 +1,9 @@ +#define SYSV_ABI __attribute__((sysv_abi)) +void SYSV_ABI foo(void); + + +// translate-c +// c_frontend=clang +// target=x86_64-windows +// +// pub extern fn foo() callconv(.{ .x86_64_sysv = .{} }) void; diff --git a/test/incremental/no_change_preserves_tag_names b/test/incremental/no_change_preserves_tag_names new file mode 100644 index 0000000000..55219b8356 --- /dev/null +++ b/test/incremental/no_change_preserves_tag_names @@ -0,0 +1,20 @@ +#target=x86_64-linux-selfhosted +#target=x86_64-linux-cbe +#target=x86_64-windows-cbe +//#target=wasm32-wasi-selfhosted +#update=initial version +#file=main.zig +const std = @import("std"); +var some_enum: enum { first, second } = .first; +pub fn main() !void { + try std.io.getStdOut().writeAll(@tagName(some_enum)); +} +#expect_stdout="first" +#update=no change +#file=main.zig +const std = @import("std"); +var some_enum: enum { first, second } = .first; +pub fn main() !void { + try std.io.getStdOut().writeAll(@tagName(some_enum)); +} +#expect_stdout="first" diff --git a/test/link/elf.zig b/test/link/elf.zig index 60203f0a78..b4c5699f20 100644 --- a/test/link/elf.zig +++ b/test/link/elf.zig @@ -114,7 +114,8 @@ pub fn testAll(b: *Build, build_opts: BuildOptions) *Step { elf_step.dependOn(testLargeBss(b, .{ .target = gnu_target })); elf_step.dependOn(testLinkOrder(b, .{ .target = gnu_target })); elf_step.dependOn(testLdScript(b, .{ .target = gnu_target })); - elf_step.dependOn(testLdScriptPathError(b, .{ .target = gnu_target })); + // 
https://github.com/ziglang/zig/issues/23125 + // elf_step.dependOn(testLdScriptPathError(b, .{ .target = gnu_target })); elf_step.dependOn(testLdScriptAllowUndefinedVersion(b, .{ .target = gnu_target, .use_lld = true })); elf_step.dependOn(testLdScriptDisallowUndefinedVersion(b, .{ .target = gnu_target, .use_lld = true })); // https://github.com/ziglang/zig/issues/17451 diff --git a/test/src/Debugger.zig b/test/src/Debugger.zig index 179a7c2cbf..b34e17a627 100644 --- a/test/src/Debugger.zig +++ b/test/src/Debugger.zig @@ -2442,8 +2442,10 @@ fn addTest( db_argv2: []const []const u8, expected_output: []const []const u8, ) void { - for (db.options.test_filters) |test_filter| { - if (std.mem.indexOf(u8, name, test_filter)) |_| return; + if (db.options.test_filters.len > 0) { + for (db.options.test_filters) |test_filter| { + if (std.mem.indexOf(u8, name, test_filter) != null) break; + } else return; } if (db.options.test_target_filters.len > 0) { const triple_txt = target.resolved.result.zigTriple(db.b.allocator) catch @panic("OOM"); diff --git a/test/standalone/windows_spawn/main.zig b/test/standalone/windows_spawn/main.zig index 9496895d61..3b0d0efe75 100644 --- a/test/standalone/windows_spawn/main.zig +++ b/test/standalone/windows_spawn/main.zig @@ -51,6 +51,8 @@ pub fn main() anyerror!void { try testExec(allocator, "HeLLo.exe", "hello from exe\n"); // without extension should find the .exe (case insensitive) try testExec(allocator, "heLLo", "hello from exe\n"); + // with invalid cwd + try std.testing.expectError(error.FileNotFound, testExecWithCwd(allocator, "hello.exe", "missing_dir", "")); // now add a .bat try tmp.dir.writeFile(.{ .sub_path = "hello.bat", .data = "@echo hello from bat" }); diff --git a/test/translate_c.zig b/test/translate_c.zig index 933983c364..835e48ce10 100644 --- a/test/translate_c.zig +++ b/test/translate_c.zig @@ -3537,9 +3537,12 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\ return bar(1, 2); \\} , &[_][]const u8{ - \\pub 
extern fn bar(c_int, c_int) c_int; \\pub export fn foo() c_int { - \\ return bar(@as(c_int, 1), @as(c_int, 2)); + \\ const ExternLocal_bar = struct { + \\ pub extern fn bar(c_int, c_int) c_int; + \\ }; + \\ _ = &ExternLocal_bar; + \\ return ExternLocal_bar.bar(@as(c_int, 1), @as(c_int, 2)); \\} }); diff --git a/tools/doctest.zig b/tools/doctest.zig index 025d59517f..301bd8ed98 100644 --- a/tools/doctest.zig +++ b/tools/doctest.zig @@ -89,7 +89,18 @@ pub fn main() !void { const out = bw.writer(); try printSourceBlock(arena, out, source, fs.path.basename(input_path)); - try printOutput(arena, out, code, input_path, zig_path, opt_zig_lib_dir, tmp_dir_path); + try printOutput( + arena, + out, + code, + tmp_dir_path, + try std.fs.path.relative(arena, tmp_dir_path, zig_path), + try std.fs.path.relative(arena, tmp_dir_path, input_path), + if (opt_zig_lib_dir) |zig_lib_dir| + try std.fs.path.relative(arena, tmp_dir_path, zig_lib_dir) + else + null, + ); try bw.flush(); } @@ -98,10 +109,14 @@ fn printOutput( arena: Allocator, out: anytype, code: Code, - input_path: []const u8, - zig_exe: []const u8, - opt_zig_lib_dir: ?[]const u8, + /// Relative to this process' cwd. tmp_dir_path: []const u8, + /// Relative to `tmp_dir_path`. + zig_exe: []const u8, + /// Relative to `tmp_dir_path`. + input_path: []const u8, + /// Relative to `tmp_dir_path`. 
+ opt_zig_lib_dir: ?[]const u8, ) !void { var env_map = try process.getEnvMap(arena); try env_map.put("CLICOLOR_FORCE", "1"); @@ -304,7 +319,7 @@ fn printOutput( }, } } - const result = run(arena, &env_map, null, test_args.items) catch + const result = run(arena, &env_map, tmp_dir_path, test_args.items) catch fatal("test failed", .{}); const escaped_stderr = try escapeHtml(arena, result.stderr); const escaped_stdout = try escapeHtml(arena, result.stdout); @@ -339,6 +354,7 @@ fn printOutput( .allocator = arena, .argv = test_args.items, .env_map = &env_map, + .cwd = tmp_dir_path, .max_output_bytes = max_doc_file_size, }); switch (result.term) { @@ -395,6 +411,7 @@ fn printOutput( .allocator = arena, .argv = test_args.items, .env_map = &env_map, + .cwd = tmp_dir_path, .max_output_bytes = max_doc_file_size, }); switch (result.term) { @@ -432,10 +449,7 @@ fn printOutput( zig_exe, "build-obj", "--color", "on", "--name", code_name, - input_path, - try std.fmt.allocPrint(arena, "-femit-bin={s}{c}{s}", .{ - tmp_dir_path, fs.path.sep, name_plus_obj_ext, - }), + input_path, try std.fmt.allocPrint(arena, "-femit-bin={s}", .{name_plus_obj_ext}), }); if (opt_zig_lib_dir) |zig_lib_dir| { try build_args.appendSlice(&.{ "--zig-lib-dir", zig_lib_dir }); @@ -465,6 +479,7 @@ fn printOutput( .allocator = arena, .argv = build_args.items, .env_map = &env_map, + .cwd = tmp_dir_path, .max_output_bytes = max_doc_file_size, }); switch (result.term) { @@ -489,7 +504,7 @@ fn printOutput( const colored_stderr = try termColor(arena, escaped_stderr); try shell_out.print("\n{s} ", .{colored_stderr}); } else { - _ = run(arena, &env_map, null, build_args.items) catch fatal("example failed to compile", .{}); + _ = run(arena, &env_map, tmp_dir_path, build_args.items) catch fatal("example failed to compile", .{}); } try shell_out.writeAll("\n"); }, @@ -505,10 +520,7 @@ fn printOutput( try test_args.appendSlice(&[_][]const u8{ zig_exe, "build-lib", - input_path, - try std.fmt.allocPrint(arena, 
"-femit-bin={s}{s}{s}", .{ - tmp_dir_path, fs.path.sep_str, bin_basename, - }), + input_path, try std.fmt.allocPrint(arena, "-femit-bin={s}", .{bin_basename}), }); if (opt_zig_lib_dir) |zig_lib_dir| { try test_args.appendSlice(&.{ "--zig-lib-dir", zig_lib_dir }); @@ -542,7 +554,7 @@ fn printOutput( try test_args.append(option); try shell_out.print("{s} ", .{option}); } - const result = run(arena, &env_map, null, test_args.items) catch fatal("test failed", .{}); + const result = run(arena, &env_map, tmp_dir_path, test_args.items) catch fatal("test failed", .{}); const escaped_stderr = try escapeHtml(arena, result.stderr); const escaped_stdout = try escapeHtml(arena, result.stdout); try shell_out.print("\n{s}{s}\n", .{ escaped_stderr, escaped_stdout }); @@ -1076,7 +1088,7 @@ fn in(slice: []const u8, number: u8) bool { fn run( allocator: Allocator, env_map: *process.EnvMap, - cwd: ?[]const u8, + cwd: []const u8, args: []const []const u8, ) !process.Child.RunResult { const result = try process.Child.run(.{