Compare commits

...

63 commits

Author SHA1 Message Date
Andrew Kelley
b0ba8b728f Release 0.12.1 2024-06-06 16:19:30 -07:00
Andrew Kelley
404a057f77 disable failing IoUring test
tracked by #20212
2024-06-06 11:14:39 -07:00
Andrew Kelley
7a29161e8e Merge pull request #20000 from Frojdholm/fix-gpa-crash-when-deallocating-metadata
Fix GeneralPurposeAllocator crash when deallocating metadata
2024-06-06 10:46:28 -07:00
Andrew Kelley
236fb915cc seriously people, don't put "zig-" in your package names
related #20178
2024-06-06 10:46:11 -07:00
Michael Dusan
e9388fde64 ld.lld: fix -m option for big-endian arm/aarch64 2024-06-06 10:46:02 -07:00
Veikka Tuominen
46a28175b3 Merge pull request #20084 from Vexu/missing-errors
Add missing errors to `@ptrFromInt` and Signal calling convention validation
2024-06-06 10:45:10 -07:00
George Thayamkery
59dd7a0fbd not android check on std.debug.getContext
have_getcontext must be false for android, this makes sure that
std.debug.getContext wont call the non-existant function (and thus hit a
compileError)
2024-06-06 10:44:39 -07:00
expikr
0aeeff0d94 math.hypot: fix incorrect over/underflow behavior (#19472) 2024-06-06 10:44:28 -07:00
Tim Culverhouse
ce9d2eda73 init: clarify .paths usage in build.zig.zon
Clarify the usage of .paths in build.zig.zon. Follow the recommendation
of the comments to explicitly list paths by explicitly listing the paths
in the init project.
2024-06-06 10:44:15 -07:00
T. M
d6eac43a5b std: Avoid overflowing in the midpoint calculation in upperBound 2024-06-06 10:44:04 -07:00
Linus Groh
0039cb7ef2 std.Build.step.Compile: Fix lib{c,cpp} mixup in dependsOnSystemLibrary() 2024-06-06 10:43:54 -07:00
Matthew Lugg
88146ea704 std.process.Child: prevent racing children from inheriting one another's pipes
The added comment explains the issue here relatively well. The new
progress API made this bug obvious because it became visibly clear that
certain Compile steps were seemingly "hanging" until other steps
completed. As it turned out, these child processes had raced to spawn,
and hence one had inherited the other's stdio pipes, meaning the `poll`
call in `std.Build.Step.evalZigProcess` was not identifying the child
stdout as closed until an unrelated process terminated.
2024-06-06 10:43:14 -07:00
Jacob Young
6e469bc44d EnumMap: fix init 2024-06-06 10:41:03 -07:00
Andrew Kelley
2d4b264527 std autodocs server: don't trim all URLs
This is a partial revert of 6635360dbd.
2024-05-22 07:18:14 -07:00
Veikka Tuominen
95845ba2ac llvm: fix @wasmMemory{Size,Grow} for wasm64
Closes #19942
2024-05-22 07:17:50 -07:00
Jiacai Liu
4a09703f62 std-docs: use open for macOS. 2024-05-22 07:15:30 -07:00
Andrew Kelley
82908b525d Merge pull request #19987 from Frojdholm/fix-gpa-double-free-stack-traces
gpa: Fix GeneralPurposeAllocator double free stack traces
2024-05-22 07:05:17 -07:00
Wes Koerber
aba1dbc954 fix: incorrect field sizes in std.os.linux.ifmap 2024-05-22 07:04:56 -07:00
Andrew Kelley
6ce3c2423e Merge pull request #19926 from squeek502/windows-sdk-cachepath-registry
WindowsSdk: Fix finding the _Instances directory when it's not in the default location
2024-05-22 07:04:47 -07:00
Ronald Chen
52150b701c [std] Fixed bug missing optional for lpName param on CreateEventExW. fixes #19946
https://learn.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-createeventexw
2024-05-22 07:04:35 -07:00
Dominic
869880adac astgen: fix result info for catch switch_block_err_union 2024-05-22 07:04:01 -07:00
190n
511aa28983 Do not run asserts for WASI alignment when not targeting WASI 2024-05-22 07:03:54 -07:00
Lucas Santos
e57c9c0931 Avoid unnecessary operation in PageAllocator.
There's no need to call `alignForward` before `VirtualAlloc`.
From [MSDN](https://learn.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-virtualalloc):
```
If the lpAddress parameter is NULL, this value is rounded up to the next page boundary
```
2024-05-22 07:03:25 -07:00
Abhinav Gupta
d37182383d ChildProcess: document StdIo behaviors (#17553)
Add some basic documentation for the different ChildProcess.StdIo
behaviors and the fields they affect.
2024-05-22 07:03:09 -07:00
Veikka Tuominen
1216050520 define an error set for std.io.tty.Config.setColor 2024-05-22 07:02:54 -07:00
Pyry Kovanen
8832314acf llvm: always include debug information for global variables 2024-05-22 07:02:27 -07:00
Karl Bohlmark
4616fb0937 fix integer overflow in IoUring buffer ring size calculation 2024-05-22 07:02:23 -07:00
Pavel Verigo
fa4a626fac std.compress.flate: fix panic when reading into empty buffer 2024-05-22 07:02:16 -07:00
Jacob Young
33c3bf0631 cmake: make static curses configurable
Not sure why this exists in the first place, but disabling it reduces
the required dependencies of a CI runner.
2024-05-22 07:01:21 -07:00
Jacob Young
2563d32425 fs: handle OBJECT_NAME_COLLISION in makeOpenPath
This fixes a race condition when two threads/processes try to
`makeOpenPath` the same path simultaneously.
2024-05-22 07:01:14 -07:00
mlugg
33809a0c53 InternPool: eliminate var_args_param_type
This was a "fake" type used to handle C varargs parameters, much like
generic poison. In fact, it is treated identically to generic poison in
all cases other than one (the final coercion of a call argument), which
is trivially special-cased. Thus, it makes sense to remove this special
tag and instead use `generic_poison_type` in its place. This fixes
several bugs in Sema related to missing handling of this tag.

Resolves: #19781
2024-05-22 07:01:07 -07:00
Jacob Young
2648e3651e link: restore lost passthrough behavior
Fixes bug introduced by 3b5be9fb6e
2024-05-22 07:00:19 -07:00
Jacob Young
d09afc08da lld: use a response file on NameTooLong 2024-05-22 06:59:22 -07:00
Ryan Liptak
ddde99bdfa Build system: Allow specifying Win32 resource include paths using LazyPath
Adds an `include_paths` field to RcSourceFile that takes a slice of LazyPaths. The paths are resolved and subsequently appended to the -rcflags as `/I <resolved path>`.

This fixes an accidental regression from https://github.com/ziglang/zig/pull/19174. Before that PR, all Win32 resource compilation would inherit the CC flags (via `addCCArgs`), which included things like include directories. After that PR, though, that is no longer the case.

However, this commit intentionally does not restore the previous behavior (inheriting the C include paths). Instead, each .rc file will need to have its include paths specified directly and the include paths only apply to one particular resource script. This allows more fine-grained control and has less potentially surprising behavior (at the cost of some convenience).

Closes #19605
2024-05-22 06:58:09 -07:00
reokodoku
f2110b0c0d Change std.os to std.posix in error messages 2024-05-22 06:57:16 -07:00
Anton Lilja
7ce03acb9d LLVM: Fix panic when using tagged union backed by enum with negative values 2024-05-22 06:56:49 -07:00
Antonio Gomes
c3aa32e984 Sema: Don't generate runtime instructions on zirSplat if dest_ty doesn't have runtime bits 2024-05-22 06:56:30 -07:00
Igor Anić
efe06c5f31 std.tar: fix finding tar root_dir (#19829)
For issue #19820.
2024-05-01 12:05:29 -07:00
Jacob Young
7b908cb024 cbe: fix ub with integer @abs 2024-05-01 12:05:25 -07:00
Andrew Kelley
16d3f7b2fa std.Build.Step.CheckObject: fix parseDumpNames
This function incorrectly assumed that module name subsections, function
name subsections, and local name subsections are encoded the same,
however according to
[the specification](https://webassembly.github.io/spec/core/appendix/custom.html)
they are encoded differently.

This commit adds support for parsing module name subsections correctly,
which started appearing after upgrading to LLVM 18.
2024-05-01 12:05:20 -07:00
Andrew Kelley
fa5011aa31 C backend: avoid memcpy when len=0
As of Clang 18, calling memcpy() with a misaligned pointer trips UBSAN,
even if the length is zero. This unfortunately includes any call to
`@memcpy` when source or destination are undefined and the length is
zero.

This patch makes the C backend avoid calling memcpy when the length is
zero, thereby avoiding undefined behavior.

A zig1.wasm update will be needed in the llvm18 branch to activate this
code.
2024-05-01 12:05:12 -07:00
Alain Greppin
595a7f8b08 NetBSD: avoid ldd error on executables. zig cc produce 4 PT_LOAD segments,
wheras on NetBSD, only 2 PT_LOAD are usually produced by other compilers
(tested with host gcc and clang).

    $ ldd -v main_4segs
    .../main_4segs: wrong number of segments (4 != 2)
    .../main_4segs: invalid ELF class 2; expected 1
2024-05-01 12:05:05 -07:00
daurnimator
6c482b8033 test/link/glibc_compat: fix incorrect strlcpy result 2024-05-01 12:04:52 -07:00
Julian
ad63be7eb9 Sema+llvm: properly implement Interrupt callconv
Co-authored-by: Veikka Tuominen <git@vexu.eu>
2024-05-01 12:04:44 -07:00
Alexandre Janon
704f8f4013 Fix ELF alignment for freestanding targets (#19766)
* Fix the ELF binaries for freestanding target created with the self-hosted linker.

    The ELF specification (generic ABI) states that ``loadable process segments must have congruent
    values for p_vaddr and p_offset, modulo the page size''. Linux refuses to load binaries that
    don't meet this requirement (execve() fails with EINVAL).
2024-05-01 12:04:19 -07:00
zhylmzr
ef9fb428b7 fix: object size error in archive 2024-05-01 12:04:03 -07:00
Andrew Kelley
600b652825 Merge pull request #19698 from squeek502/windows-batbadbut
std.process.Child: Mitigate arbitrary command execution vulnerability on Windows (BatBadBut)
2024-04-24 13:50:29 -07:00
Ryan Liptak
e36bf2baff windows.GetFinalPathNameByHandle: Support volumes mounted as paths
A volume can be mounted as a NTFS path, e.g. as C:\Mnt\Foo. In that case, IOCTL_MOUNTMGR_QUERY_POINTS gives us a mount point with a symlink value something like `\??\Volume{383da0b0-717f-41b6-8c36-00500992b58d}`. In order to get the `C:\Mnt\Foo` path, we can query the mountmgr again using IOCTL_MOUNTMGR_QUERY_DOS_VOLUME_PATH.

Fixes #19731
2024-04-24 13:45:36 -07:00
IntegratedQuantum
4babedf1be Clarify the blocking behavior of RwLock.lockShared(). (#19752) 2024-04-24 13:45:29 -07:00
Sean
0dc64d9064 Update fmt.zig tests
Changed uses of `std.testing.expect` to `std.testing.expectEqual`, `std.testing.expectError`, and `std.testing.expectEqualStrings` where appropriate
2024-04-24 13:45:18 -07:00
Andrew Kelley
5d29b9fad6 CI: enable builds of 0.12.x branch 2024-04-23 17:23:29 -07:00
Eric Joldasov
6de152ec7c std.zig.system: fix ELF file search
* Adjust buffer length a bit.
 * Fix detecting if file is a script. Logic below was unreachable,
 because 99% of scripts failed "At least 255 bytes long" check and were detected as ELF files.
 It should be "At least 4" instead (minimum value of "ELF magic length" and "smallest possible interpreter path length").
 * Fix parsing interpreter path, when text after shebang:
     1. does not have newline,
     2. has leading spaces and tabs,
     3. separates interpreter and arguments by tab or NUL.
 * Remove empty error set from `defaultAbiAndDynamicLinker`.

Signed-off-by: Eric Joldasov <bratishkaerik@landless-city.net>
2024-04-23 17:22:04 -07:00
clickingbuttons
cc25f75478 std.crypto: make ff.ct_unprotected.limbsCmpLt compile (#19741)
* std.crypto: make ff.ct_unprotected.limbsCmpLt compile

* std.crypto: add ff.ct test

* fix testCt to work on x86

* disable test on stage2-c

---------

Co-authored-by: Frank Denis <124872+jedisct1@users.noreply.github.com>
2024-04-23 17:17:02 -07:00
David Rubin
eb28c8aa35 error on undefined end index 2024-04-23 17:16:54 -07:00
Frank Denis
f6773232ac Fix WASI threads, again
Properly call the entrypoint when it doesn't return an optional error,
and use the per-thread copy of the arguments list.
2024-04-23 17:16:08 -07:00
Jacob Young
9deea9b1d8 x86_64: fix C abi for unions
Closes #19721
2024-04-23 17:16:03 -07:00
Jakub Konka
8e0a802ea1 link/macho: make --verbose-link represent the actual linker line 2024-04-23 17:15:47 -07:00
Jakub Konka
eb5d67b146 Merge pull request #19714 from ziglang/elf-merge-strings
link/elf: implement string merging
2024-04-23 17:14:03 -07:00
Jakub Konka
082e5091af Merge pull request #19710 from jacobly0/elf-segment-align
Elf: fix unaligned segments on non-linux
2024-04-23 17:13:22 -07:00
Jared Baur
3b1f9b476c Fix usage of unexpectedErrno
`unexpectedErrno` comes from `std.posix`, not `std.os`.
2024-04-23 17:12:24 -07:00
Marco F
2478b5bb0e update list of missing features in no-LLVM built zig2 2024-04-23 17:12:07 -07:00
Linus Groh
7974576967 std.fs.Dir.openDir: use wasi libc API when -lc
Same as #19680 but for directories.
2024-04-23 17:11:48 -07:00
Andrew Kelley
011a78325e start the 0.12.1 release cycle 2024-04-23 17:10:15 -07:00
92 changed files with 3777 additions and 1636 deletions

View file

@ -4,6 +4,7 @@ on:
push:
branches:
- master
- 0.12.x
concurrency:
# Cancels pending runs when a PR gets updated.
group: ${{ github.head_ref || github.run_id }}-${{ github.actor }}

View file

@ -37,7 +37,7 @@ set(CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/cmake ${CMAKE_MODULE_PATH})
set(ZIG_VERSION_MAJOR 0)
set(ZIG_VERSION_MINOR 12)
set(ZIG_VERSION_PATCH 0)
set(ZIG_VERSION_PATCH 1)
set(ZIG_VERSION "" CACHE STRING "Override Zig version string. Default is to find out with git.")
if("${ZIG_VERSION}" STREQUAL "")
@ -92,6 +92,12 @@ set(ZIG_SHARED_LLVM off CACHE BOOL "Prefer linking against shared LLVM libraries
set(ZIG_STATIC_LLVM ${ZIG_STATIC} CACHE BOOL "Prefer linking against static LLVM libraries")
set(ZIG_STATIC_ZLIB ${ZIG_STATIC} CACHE BOOL "Prefer linking against static zlib")
set(ZIG_STATIC_ZSTD ${ZIG_STATIC} CACHE BOOL "Prefer linking against static zstd")
if(APPLE AND ZIG_STATIC)
set(ZIG_STATIC_CURSES on)
else()
set(ZIG_STATIC_CURSES off)
endif()
set(ZIG_STATIC_CURSES ${ZIG_STATIC_CURSES} CACHE BOOL "Prefer linking against static curses")
set(ZIG_USE_CCACHE off CACHE BOOL "Use ccache")
if(ZIG_USE_CCACHE)
@ -160,7 +166,7 @@ if(ZIG_STATIC_ZSTD)
list(APPEND LLVM_LIBRARIES "${ZSTD}")
endif()
if(APPLE AND ZIG_STATIC)
if(ZIG_STATIC_CURSES)
list(REMOVE_ITEM LLVM_LIBRARIES "-lcurses")
find_library(CURSES NAMES libcurses.a libncurses.a NAMES_PER_DIR
PATHS

View file

@ -85,8 +85,6 @@ therefore lacking these features:
- [Some ELF linking features](https://github.com/ziglang/zig/issues/17749)
- [Most COFF/PE linking features](https://github.com/ziglang/zig/issues/17751)
- [Some WebAssembly linking features](https://github.com/ziglang/zig/issues/17750)
- [Ability to output LLVM bitcode](https://github.com/ziglang/zig/issues/13265)
- [Windows resource file compilation](https://github.com/ziglang/zig/issues/17752)
- [Ability to create import libs from def files](https://github.com/ziglang/zig/issues/17807)
- [Automatic importlib file generation for Windows DLLs](https://github.com/ziglang/zig/issues/17753)
- [Ability to create static archives from object files](https://github.com/ziglang/zig/issues/9828)

View file

@ -9,7 +9,7 @@ const fs = std.fs;
const InstallDirectoryOptions = std.Build.InstallDirectoryOptions;
const assert = std.debug.assert;
const zig_version = std.SemanticVersion{ .major = 0, .minor = 12, .patch = 0 };
const zig_version = std.SemanticVersion{ .major = 0, .minor = 12, .patch = 1 };
const stack_size = 32 * 1024 * 1024;
pub fn build(b: *std.Build) !void {

View file

@ -12,6 +12,14 @@ build.zig.
String. Required.
This is the default name used by packages depending on this one. For example,
when a user runs `zig fetch --save <url>`, this field is used as the key in the
`dependencies` table. Although the user can choose a different name, most users
will stick with this provided value.
It is redundant to include "zig" in this name because it is already within the
Zig package namespace.
### `version`
String. Required.

View file

@ -314,8 +314,8 @@
<a href="https://ziglang.org/documentation/0.9.1/">0.9.1</a> |
<a href="https://ziglang.org/documentation/0.10.1/">0.10.1</a> |
<a href="https://ziglang.org/documentation/0.11.0/">0.11.0</a> |
<a href="https://ziglang.org/documentation/0.12.0/">0.12.0</a> |
master
0.12.1 |
<a href="https://ziglang.org/documentation/master/">master</a>
</nav>
<nav aria-labelledby="table-of-contents">
<h2 id="table-of-contents">Table of Contents</h2>
@ -9040,7 +9040,7 @@ test "integer cast panic" {
{#header_close#}
{#header_open|@wasmMemorySize#}
<pre>{#syntax#}@wasmMemorySize(index: u32) u32{#endsyntax#}</pre>
<pre>{#syntax#}@wasmMemorySize(index: u32) usize{#endsyntax#}</pre>
<p>
This function returns the size of the Wasm memory identified by {#syntax#}index{#endsyntax#} as
an unsigned value in units of Wasm pages. Note that each Wasm page is 64KB in size.
@ -9054,7 +9054,7 @@ test "integer cast panic" {
{#header_close#}
{#header_open|@wasmMemoryGrow#}
<pre>{#syntax#}@wasmMemoryGrow(index: u32, delta: u32) i32{#endsyntax#}</pre>
<pre>{#syntax#}@wasmMemoryGrow(index: u32, delta: usize) isize{#endsyntax#}</pre>
<p>
This function increases the size of the Wasm memory identified by {#syntax#}index{#endsyntax#} by
{#syntax#}delta{#endsyntax#} in units of unsigned number of Wasm pages. Note that each Wasm page

View file

@ -117,6 +117,7 @@ const Context = struct {
fn serveRequest(request: *std.http.Server.Request, context: *Context) !void {
if (std.mem.eql(u8, request.head.target, "/") or
std.mem.eql(u8, request.head.target, "/debug") or
std.mem.eql(u8, request.head.target, "/debug/"))
{
try serveDocsFile(request, context, "docs/index.html", "text/html");
@ -433,6 +434,7 @@ fn openBrowserTab(gpa: Allocator, url: []const u8) !void {
fn openBrowserTabThread(gpa: Allocator, url: []const u8) !void {
const main_exe = switch (builtin.os.tag) {
.windows => "explorer",
.macos => "open",
else => "xdg-open",
};
var child = std.ChildProcess.init(&.{ main_exe, url }, gpa);

View file

@ -1,5 +1,13 @@
.{
// This is the default name used by packages depending on this one. For
// example, when a user runs `zig fetch --save <url>`, this field is used
// as the key in the `dependencies` table. Although the user can choose a
// different name, most users will stick with this provided value.
//
// It is redundant to include "zig" in this name because it is already
// within the Zig package namespace.
.name = "$",
// This is a [Semantic Version](https://semver.org/).
// In a future version of Zig it will be used for package deduplication.
.version = "0.0.0",
@ -47,20 +55,17 @@
// Specifies the set of files and directories that are included in this package.
// Only files and directories listed here are included in the `hash` that
// is computed for this package.
// is computed for this package. Only files listed here will remain on disk
// when using the zig package manager. As a rule of thumb, one should list
// files required for compilation plus any license(s).
// Paths are relative to the build root. Use the empty string (`""`) to refer to
// the build root itself.
// A directory listed here means that all files within, recursively, are included.
.paths = .{
// This makes *all* files, recursively, included in this package. It is generally
// better to explicitly list the files and directories instead, to insure that
// fetching from tarballs, file system paths, and version control all result
// in the same contents hash.
"",
"build.zig",
"build.zig.zon",
"src",
// For example...
//"build.zig",
//"build.zig.zon",
//"src",
//"LICENSE",
//"README.md",
},

View file

@ -110,11 +110,18 @@ pub const RcSourceFile = struct {
/// /x (ignore the INCLUDE environment variable)
/// /D_DEBUG or /DNDEBUG depending on the optimization mode
flags: []const []const u8 = &.{},
/// Include paths that may or may not exist yet and therefore need to be
/// specified as a LazyPath. Each path will be appended to the flags
/// as `/I <resolved path>`.
include_paths: []const LazyPath = &.{},
pub fn dupe(self: RcSourceFile, b: *std.Build) RcSourceFile {
const include_paths = b.allocator.alloc(LazyPath, self.include_paths.len) catch @panic("OOM");
for (include_paths, self.include_paths) |*dest, lazy_path| dest.* = lazy_path.dupe(b);
return .{
.file = self.file.dupe(b),
.flags = b.dupeStrings(self.flags),
.include_paths = include_paths,
};
}
};
@ -503,6 +510,9 @@ pub fn addWin32ResourceFile(m: *Module, source: RcSourceFile) void {
rc_source_file.* = source.dupe(b);
m.link_objects.append(allocator, .{ .win32_resource_file = rc_source_file }) catch @panic("OOM");
addLazyPathDependenciesOnly(m, source.file);
for (source.include_paths) |include_path| {
addLazyPathDependenciesOnly(m, include_path);
}
}
pub fn addAssemblyFile(m: *Module, source: LazyPath) void {

View file

@ -2475,12 +2475,12 @@ const WasmDumper = struct {
try writer.print("params {d}\n", .{params});
var index: u32 = 0;
while (index < params) : (index += 1) {
try parseDumpType(step, std.wasm.Valtype, reader, writer);
_ = try parseDumpType(step, std.wasm.Valtype, reader, writer);
} else index = 0;
const returns = try std.leb.readULEB128(u32, reader);
try writer.print("returns {d}\n", .{returns});
while (index < returns) : (index += 1) {
try parseDumpType(step, std.wasm.Valtype, reader, writer);
_ = try parseDumpType(step, std.wasm.Valtype, reader, writer);
}
}
},
@ -2512,11 +2512,11 @@ const WasmDumper = struct {
try parseDumpLimits(reader, writer);
},
.global => {
try parseDumpType(step, std.wasm.Valtype, reader, writer);
_ = try parseDumpType(step, std.wasm.Valtype, reader, writer);
try writer.print("mutable {}\n", .{0x01 == try std.leb.readULEB128(u32, reader)});
},
.table => {
try parseDumpType(step, std.wasm.RefType, reader, writer);
_ = try parseDumpType(step, std.wasm.RefType, reader, writer);
try parseDumpLimits(reader, writer);
},
}
@ -2531,7 +2531,7 @@ const WasmDumper = struct {
.table => {
var i: u32 = 0;
while (i < entries) : (i += 1) {
try parseDumpType(step, std.wasm.RefType, reader, writer);
_ = try parseDumpType(step, std.wasm.RefType, reader, writer);
try parseDumpLimits(reader, writer);
}
},
@ -2544,7 +2544,7 @@ const WasmDumper = struct {
.global => {
var i: u32 = 0;
while (i < entries) : (i += 1) {
try parseDumpType(step, std.wasm.Valtype, reader, writer);
_ = try parseDumpType(step, std.wasm.Valtype, reader, writer);
try writer.print("mutable {}\n", .{0x01 == try std.leb.readULEB128(u1, reader)});
try parseDumpInit(step, reader, writer);
}
@ -2605,12 +2605,13 @@ const WasmDumper = struct {
}
}
fn parseDumpType(step: *Step, comptime WasmType: type, reader: anytype, writer: anytype) !void {
const type_byte = try reader.readByte();
const valtype = std.meta.intToEnum(WasmType, type_byte) catch {
return step.fail("Invalid wasm type value '{d}'", .{type_byte});
fn parseDumpType(step: *Step, comptime E: type, reader: anytype, writer: anytype) !E {
const byte = try reader.readByte();
const tag = std.meta.intToEnum(E, byte) catch {
return step.fail("invalid wasm type value '{d}'", .{byte});
};
try writer.print("type {s}\n", .{@tagName(valtype)});
try writer.print("type {s}\n", .{@tagName(tag)});
return tag;
}
fn parseDumpLimits(reader: anytype, writer: anytype) !void {
@ -2642,29 +2643,54 @@ const WasmDumper = struct {
}
}
/// https://webassembly.github.io/spec/core/appendix/custom.html
fn parseDumpNames(step: *Step, reader: anytype, writer: anytype, data: []const u8) !void {
while (reader.context.pos < data.len) {
try parseDumpType(step, std.wasm.NameSubsection, reader, writer);
const size = try std.leb.readULEB128(u32, reader);
const entries = try std.leb.readULEB128(u32, reader);
try writer.print(
\\size {d}
\\names {d}
, .{ size, entries });
try writer.writeByte('\n');
var i: u32 = 0;
while (i < entries) : (i += 1) {
const index = try std.leb.readULEB128(u32, reader);
const name_len = try std.leb.readULEB128(u32, reader);
const pos = reader.context.pos;
const name = data[pos..][0..name_len];
reader.context.pos += name_len;
switch (try parseDumpType(step, std.wasm.NameSubsection, reader, writer)) {
// The module name subsection ... consists of a single name
// that is assigned to the module itself.
.module => {
const size = try std.leb.readULEB128(u32, reader);
const name_len = try std.leb.readULEB128(u32, reader);
if (size != name_len + 1) return error.BadSubsectionSize;
if (reader.context.pos + name_len > data.len) return error.UnexpectedEndOfStream;
try writer.print("name {s}\n", .{data[reader.context.pos..][0..name_len]});
reader.context.pos += name_len;
},
try writer.print(
\\index {d}
\\name {s}
, .{ index, name });
try writer.writeByte('\n');
// The function name subsection ... consists of a name map
// assigning function names to function indices.
.function, .global, .data_segment => {
const size = try std.leb.readULEB128(u32, reader);
const entries = try std.leb.readULEB128(u32, reader);
try writer.print(
\\size {d}
\\names {d}
\\
, .{ size, entries });
for (0..entries) |_| {
const index = try std.leb.readULEB128(u32, reader);
const name_len = try std.leb.readULEB128(u32, reader);
if (reader.context.pos + name_len > data.len) return error.UnexpectedEndOfStream;
const name = data[reader.context.pos..][0..name_len];
reader.context.pos += name.len;
try writer.print(
\\index {d}
\\name {s}
\\
, .{ index, name });
}
},
// The local name subsection ... consists of an indirect name
// map assigning local names to local indices grouped by
// function indices.
.local => {
return step.fail("TODO implement parseDumpNames for local subsections", .{});
},
else => |t| return step.fail("invalid subsection type: {s}", .{@tagName(t)}),
}
}
}

View file

@ -591,7 +591,7 @@ pub fn dependsOnSystemLibrary(self: *const Compile, name: []const u8) bool {
else => continue,
}
}
is_linking_libc = is_linking_libc or module.link_libcpp == true;
is_linking_libc = is_linking_libc or module.link_libc == true;
is_linking_libcpp = is_linking_libcpp or module.link_libcpp == true;
}
@ -1277,7 +1277,7 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void {
.win32_resource_file => |rc_source_file| l: {
if (!my_responsibility) break :l;
if (rc_source_file.flags.len == 0) {
if (rc_source_file.flags.len == 0 and rc_source_file.include_paths.len == 0) {
if (prev_has_rcflags) {
try zig_args.append("-rcflags");
try zig_args.append("--");
@ -1288,6 +1288,10 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void {
for (rc_source_file.flags) |arg| {
try zig_args.append(arg);
}
for (rc_source_file.include_paths) |include_path| {
try zig_args.append("/I");
try zig_args.append(include_path.getPath2(module.owner, step));
}
try zig_args.append("--");
prev_has_rcflags = true;
}

View file

@ -844,19 +844,19 @@ const WasiThreadImpl = struct {
const bad_fn_ret = "expected return type of startFn to be 'u8', 'noreturn', 'void', or '!void'";
switch (@typeInfo(@typeInfo(@TypeOf(f)).Fn.return_type.?)) {
.NoReturn, .Void => {
@call(.auto, w, args);
@call(.auto, f, w.args);
},
.Int => |info| {
if (info.bits != 8) {
@compileError(bad_fn_ret);
}
_ = @call(.auto, w, args); // WASI threads don't support exit status, ignore value
_ = @call(.auto, f, w.args); // WASI threads don't support exit status, ignore value
},
.ErrorUnion => |info| {
if (info.payload != void) {
@compileError(bad_fn_ret);
}
@call(.auto, f, args) catch |err| {
@call(.auto, f, w.args) catch |err| {
std.debug.print("error: {s}\n", .{@errorName(err)});
if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace.*);

View file

@ -41,7 +41,9 @@ pub fn tryLockShared(rwl: *RwLock) bool {
return rwl.impl.tryLockShared();
}
/// Blocks until shared lock ownership is acquired.
/// Obtains shared lock ownership.
/// Blocks if another thread has exclusive ownership.
/// May block if another thread is attempting to get exclusive ownership.
pub fn lockShared(rwl: *RwLock) void {
return rwl.impl.lockShared();
}

View file

@ -31,10 +31,23 @@ pub const ChildProcess = struct {
allocator: mem.Allocator,
/// The writing end of the child process's standard input pipe.
/// Usage requires `stdin_behavior == StdIo.Pipe`.
/// Available after calling `spawn()`.
stdin: ?File,
/// The reading end of the child process's standard output pipe.
/// Usage requires `stdout_behavior == StdIo.Pipe`.
/// Available after calling `spawn()`.
stdout: ?File,
/// The reading end of the child process's standard error pipe.
/// Usage requires `stderr_behavior == StdIo.Pipe`.
/// Available after calling `spawn()`.
stderr: ?File,
/// Terminated state of the child process.
/// Available after calling `wait()`.
term: ?(SpawnError!Term),
argv: []const []const u8,
@ -136,6 +149,14 @@ pub const ChildProcess = struct {
/// Windows-only. `cwd` was provided, but the path did not exist when spawning the child process.
CurrentWorkingDirectoryUnlinked,
/// Windows-only. NUL (U+0000), LF (U+000A), CR (U+000D) are not allowed
/// within arguments when executing a `.bat`/`.cmd` script.
/// - NUL/LF signifiies end of arguments, so anything afterwards
/// would be lost after execution.
/// - CR is stripped by `cmd.exe`, so any CR codepoints
/// would be lost after execution.
InvalidBatchScriptArg,
} ||
posix.ExecveError ||
posix.SetIdError ||
@ -151,10 +172,23 @@ pub const ChildProcess = struct {
Unknown: u32,
};
/// Behavior of the child process's standard input, output, and error
/// streams.
pub const StdIo = enum {
/// Inherit the stream from the parent process.
Inherit,
/// Pass a null stream to the child process.
/// This is /dev/null on POSIX and NUL on Windows.
Ignore,
/// Create a pipe for the stream.
/// The corresponding field (`stdout`, `stderr`, or `stdin`)
/// will be assigned a `File` object that can be used
/// to read from or write to the pipe.
Pipe,
/// Close the stream after the child process spawns.
Close,
};
@ -495,7 +529,18 @@ pub const ChildProcess = struct {
}
fn spawnPosix(self: *ChildProcess) SpawnError!void {
const pipe_flags: posix.O = .{};
// The child process does need to access (one end of) these pipes. However,
// we must initially set CLOEXEC to avoid a race condition. If another thread
// is racing to spawn a different child process, we don't want it to inherit
// these FDs in any scenario; that would mean that, for instance, calls to
// `poll` from the parent would not report the child's stdout as closing when
// expected, since the other child may retain a reference to the write end of
// the pipe. So, we create the pipes with CLOEXEC initially. After fork, we
// need to do something in the new child to make sure we preserve the reference
// we want. We could use `fcntl` to remove CLOEXEC from the FD, but as it
// turns out, we `dup2` everything anyway, so there's no need!
const pipe_flags: posix.O = .{ .CLOEXEC = true };
const stdin_pipe = if (self.stdin_behavior == StdIo.Pipe) try posix.pipe2(pipe_flags) else undefined;
errdefer if (self.stdin_behavior == StdIo.Pipe) {
destroyPipe(stdin_pipe);
@ -583,19 +628,6 @@ pub const ChildProcess = struct {
setUpChildIo(self.stdout_behavior, stdout_pipe[1], posix.STDOUT_FILENO, dev_null_fd) catch |err| forkChildErrReport(err_pipe[1], err);
setUpChildIo(self.stderr_behavior, stderr_pipe[1], posix.STDERR_FILENO, dev_null_fd) catch |err| forkChildErrReport(err_pipe[1], err);
if (self.stdin_behavior == .Pipe) {
posix.close(stdin_pipe[0]);
posix.close(stdin_pipe[1]);
}
if (self.stdout_behavior == .Pipe) {
posix.close(stdout_pipe[0]);
posix.close(stdout_pipe[1]);
}
if (self.stderr_behavior == .Pipe) {
posix.close(stderr_pipe[0]);
posix.close(stderr_pipe[1]);
}
if (self.cwd_dir) |cwd| {
posix.fchdir(cwd.fd) catch |err| forkChildErrReport(err_pipe[1], err);
} else if (self.cwd) |cwd| {
@ -814,17 +846,20 @@ pub const ChildProcess = struct {
const app_name_w = try unicode.wtf8ToWtf16LeAllocZ(self.allocator, app_basename_wtf8);
defer self.allocator.free(app_name_w);
const cmd_line_w = argvToCommandLineWindows(self.allocator, self.argv) catch |err| switch (err) {
// argv[0] contains unsupported characters that will never resolve to a valid exe.
error.InvalidArg0 => return error.FileNotFound,
else => |e| return e,
};
defer self.allocator.free(cmd_line_w);
run: {
const PATH: [:0]const u16 = std.process.getenvW(unicode.utf8ToUtf16LeStringLiteral("PATH")) orelse &[_:0]u16{};
const PATHEXT: [:0]const u16 = std.process.getenvW(unicode.utf8ToUtf16LeStringLiteral("PATHEXT")) orelse &[_:0]u16{};
// In case the command ends up being a .bat/.cmd script, we need to escape things using the cmd.exe rules
// and invoke cmd.exe ourselves in order to mitigate arbitrary command execution from maliciously
// constructed arguments.
//
// We'll need to wait until we're actually trying to run the command to know for sure
// if the resolved command has the `.bat` or `.cmd` extension, so we defer actually
// serializing the command line until we determine how it should be serialized.
var cmd_line_cache = WindowsCommandLineCache.init(self.allocator, self.argv);
defer cmd_line_cache.deinit();
var app_buf = std.ArrayListUnmanaged(u16){};
defer app_buf.deinit(self.allocator);
@ -846,8 +881,10 @@ pub const ChildProcess = struct {
dir_buf.shrinkRetainingCapacity(normalized_len);
}
windowsCreateProcessPathExt(self.allocator, &dir_buf, &app_buf, PATHEXT, cmd_line_w.ptr, envp_ptr, cwd_w_ptr, &siStartInfo, &piProcInfo) catch |no_path_err| {
windowsCreateProcessPathExt(self.allocator, &dir_buf, &app_buf, PATHEXT, &cmd_line_cache, envp_ptr, cwd_w_ptr, &siStartInfo, &piProcInfo) catch |no_path_err| {
const original_err = switch (no_path_err) {
// argv[0] contains unsupported characters that will never resolve to a valid exe.
error.InvalidArg0 => return error.FileNotFound,
error.FileNotFound, error.InvalidExe, error.AccessDenied => |e| e,
error.UnrecoverableInvalidExe => return error.InvalidExe,
else => |e| return e,
@ -872,9 +909,11 @@ pub const ChildProcess = struct {
const normalized_len = windows.normalizePath(u16, dir_buf.items) catch continue;
dir_buf.shrinkRetainingCapacity(normalized_len);
if (windowsCreateProcessPathExt(self.allocator, &dir_buf, &app_buf, PATHEXT, cmd_line_w.ptr, envp_ptr, cwd_w_ptr, &siStartInfo, &piProcInfo)) {
if (windowsCreateProcessPathExt(self.allocator, &dir_buf, &app_buf, PATHEXT, &cmd_line_cache, envp_ptr, cwd_w_ptr, &siStartInfo, &piProcInfo)) {
break :run;
} else |err| switch (err) {
// argv[0] contains unsupported characters that will never resolve to a valid exe.
error.InvalidArg0 => return error.FileNotFound,
error.FileNotFound, error.AccessDenied, error.InvalidExe => continue,
error.UnrecoverableInvalidExe => return error.InvalidExe,
else => |e| return e,
@ -935,7 +974,7 @@ fn windowsCreateProcessPathExt(
dir_buf: *std.ArrayListUnmanaged(u16),
app_buf: *std.ArrayListUnmanaged(u16),
pathext: [:0]const u16,
cmd_line: [*:0]u16,
cmd_line_cache: *WindowsCommandLineCache,
envp_ptr: ?[*]u16,
cwd_ptr: ?[*:0]u16,
lpStartupInfo: *windows.STARTUPINFOW,
@ -1069,7 +1108,26 @@ fn windowsCreateProcessPathExt(
try dir_buf.append(allocator, 0);
const full_app_name = dir_buf.items[0 .. dir_buf.items.len - 1 :0];
if (windowsCreateProcess(full_app_name.ptr, cmd_line, envp_ptr, cwd_ptr, lpStartupInfo, lpProcessInformation)) |_| {
const is_bat_or_cmd = bat_or_cmd: {
const app_name = app_buf.items[0..app_name_len];
const ext_start = std.mem.lastIndexOfScalar(u16, app_name, '.') orelse break :bat_or_cmd false;
const ext = app_name[ext_start..];
const ext_enum = windowsCreateProcessSupportsExtension(ext) orelse break :bat_or_cmd false;
switch (ext_enum) {
.cmd, .bat => break :bat_or_cmd true,
else => break :bat_or_cmd false,
}
};
const cmd_line_w = if (is_bat_or_cmd)
try cmd_line_cache.scriptCommandLine(full_app_name)
else
try cmd_line_cache.commandLine();
const app_name_w = if (is_bat_or_cmd)
try cmd_line_cache.cmdExePath()
else
full_app_name;
if (windowsCreateProcess(app_name_w.ptr, cmd_line_w.ptr, envp_ptr, cwd_ptr, lpStartupInfo, lpProcessInformation)) |_| {
return;
} else |err| switch (err) {
error.FileNotFound,
@ -1111,7 +1169,20 @@ fn windowsCreateProcessPathExt(
try dir_buf.append(allocator, 0);
const full_app_name = dir_buf.items[0 .. dir_buf.items.len - 1 :0];
if (windowsCreateProcess(full_app_name.ptr, cmd_line, envp_ptr, cwd_ptr, lpStartupInfo, lpProcessInformation)) |_| {
const is_bat_or_cmd = switch (ext_enum) {
.cmd, .bat => true,
else => false,
};
const cmd_line_w = if (is_bat_or_cmd)
try cmd_line_cache.scriptCommandLine(full_app_name)
else
try cmd_line_cache.commandLine();
const app_name_w = if (is_bat_or_cmd)
try cmd_line_cache.cmdExePath()
else
full_app_name;
if (windowsCreateProcess(app_name_w.ptr, cmd_line_w.ptr, envp_ptr, cwd_ptr, lpStartupInfo, lpProcessInformation)) |_| {
return;
} else |err| switch (err) {
error.FileNotFound => continue,
@ -1236,6 +1307,223 @@ test windowsCreateProcessSupportsExtension {
try std.testing.expect(windowsCreateProcessSupportsExtension(&[_]u16{ '.', 'e', 'X', 'e', 'c' }) == null);
}
/// Serializes argv into a WTF-16 encoded command-line string for use with CreateProcessW.
///
/// Serialization is done on-demand and the result is cached in order to allow for:
/// - Only serializing the particular type of command line needed (`.bat`/`.cmd`
/// command line serialization is different from `.exe`/etc)
/// - Reusing the serialized command lines if necessary (i.e. if the execution
/// of a command fails and the PATH is going to be continued to be searched
/// for more candidates)
pub const WindowsCommandLineCache = struct {
cmd_line: ?[:0]u16 = null,
script_cmd_line: ?[:0]u16 = null,
cmd_exe_path: ?[:0]u16 = null,
argv: []const []const u8,
allocator: mem.Allocator,
pub fn init(allocator: mem.Allocator, argv: []const []const u8) WindowsCommandLineCache {
return .{
.allocator = allocator,
.argv = argv,
};
}
pub fn deinit(self: *WindowsCommandLineCache) void {
if (self.cmd_line) |cmd_line| self.allocator.free(cmd_line);
if (self.script_cmd_line) |script_cmd_line| self.allocator.free(script_cmd_line);
if (self.cmd_exe_path) |cmd_exe_path| self.allocator.free(cmd_exe_path);
}
pub fn commandLine(self: *WindowsCommandLineCache) ![:0]u16 {
if (self.cmd_line == null) {
self.cmd_line = try argvToCommandLineWindows(self.allocator, self.argv);
}
return self.cmd_line.?;
}
/// Not cached, since the path to the batch script will change during PATH searching.
/// `script_path` should be as qualified as possible, e.g. if the PATH is being searched,
/// then script_path should include both the search path and the script filename
/// (this allows avoiding cmd.exe having to search the PATH again).
pub fn scriptCommandLine(self: *WindowsCommandLineCache, script_path: []const u16) ![:0]u16 {
if (self.script_cmd_line) |v| self.allocator.free(v);
self.script_cmd_line = try argvToScriptCommandLineWindows(
self.allocator,
script_path,
self.argv[1..],
);
return self.script_cmd_line.?;
}
pub fn cmdExePath(self: *WindowsCommandLineCache) ![:0]u16 {
if (self.cmd_exe_path == null) {
self.cmd_exe_path = try windowsCmdExePath(self.allocator);
}
return self.cmd_exe_path.?;
}
};
pub fn windowsCmdExePath(allocator: mem.Allocator) error{ OutOfMemory, Unexpected }![:0]u16 {
var buf = try std.ArrayListUnmanaged(u16).initCapacity(allocator, 128);
errdefer buf.deinit(allocator);
while (true) {
const unused_slice = buf.unusedCapacitySlice();
// TODO: Get the system directory from PEB.ReadOnlyStaticServerData
const len = windows.kernel32.GetSystemDirectoryW(@ptrCast(unused_slice), @intCast(unused_slice.len));
if (len == 0) {
switch (windows.kernel32.GetLastError()) {
else => |err| return windows.unexpectedError(err),
}
}
if (len > unused_slice.len) {
try buf.ensureUnusedCapacity(allocator, len);
} else {
buf.items.len = len;
break;
}
}
switch (buf.items[buf.items.len - 1]) {
'/', '\\' => {},
else => try buf.append(allocator, fs.path.sep),
}
try buf.appendSlice(allocator, std.unicode.utf8ToUtf16LeStringLiteral("cmd.exe"));
return try buf.toOwnedSliceSentinel(allocator, 0);
}
pub const ArgvToScriptCommandLineError = error{
OutOfMemory,
InvalidWtf8,
/// NUL (U+0000), LF (U+000A), CR (U+000D) are not allowed
/// within arguments when executing a `.bat`/`.cmd` script.
/// - NUL/LF signifiies end of arguments, so anything afterwards
/// would be lost after execution.
/// - CR is stripped by `cmd.exe`, so any CR codepoints
/// would be lost after execution.
InvalidBatchScriptArg,
};
/// Serializes `argv` to a Windows command-line string that uses `cmd.exe /c` and `cmd.exe`-specific
/// escaping rules. The caller owns the returned slice.
///
/// Escapes `argv` using the suggested mitigation against arbitrary command execution from:
/// https://flatt.tech/research/posts/batbadbut-you-cant-securely-execute-commands-on-windows/
pub fn argvToScriptCommandLineWindows(
allocator: mem.Allocator,
/// Path to the `.bat`/`.cmd` script. If this path is relative, it is assumed to be relative to the CWD.
/// The script must have been verified to exist at this path before calling this function.
script_path: []const u16,
/// Arguments, not including the script name itself. Expected to be encoded as WTF-8.
script_args: []const []const u8,
) ArgvToScriptCommandLineError![:0]u16 {
var buf = try std.ArrayList(u8).initCapacity(allocator, 64);
defer buf.deinit();
// `/d` disables execution of AutoRun commands.
// `/e:ON` and `/v:OFF` are needed for BatBadBut mitigation:
// > If delayed expansion is enabled via the registry value DelayedExpansion,
// > it must be disabled by explicitly calling cmd.exe with the /V:OFF option.
// > Escaping for % requires the command extension to be enabled.
// > If its disabled via the registry value EnableExtensions, it must be enabled with the /E:ON option.
// https://flatt.tech/research/posts/batbadbut-you-cant-securely-execute-commands-on-windows/
buf.appendSliceAssumeCapacity("cmd.exe /d /e:ON /v:OFF /c \"");
// Always quote the path to the script arg
buf.appendAssumeCapacity('"');
// We always want the path to the batch script to include a path separator in order to
// avoid cmd.exe searching the PATH for the script. This is not part of the arbitrary
// command execution mitigation, we just know exactly what script we want to execute
// at this point, and potentially making cmd.exe re-find it is unnecessary.
//
// If the script path does not have a path separator, then we know its relative to CWD and
// we can just put `.\` in the front.
if (mem.indexOfAny(u16, script_path, &[_]u16{ mem.nativeToLittle(u16, '\\'), mem.nativeToLittle(u16, '/') }) == null) {
try buf.appendSlice(".\\");
}
// Note that we don't do any escaping/mitigations for this argument, since the relevant
// characters (", %, etc) are illegal in file paths and this function should only be called
// with script paths that have been verified to exist.
try std.unicode.wtf16LeToWtf8ArrayList(&buf, script_path);
buf.appendAssumeCapacity('"');
for (script_args) |arg| {
// Literal carriage returns get stripped when run through cmd.exe
// and NUL/newlines act as 'end of command.' Because of this, it's basically
// always a mistake to include these characters in argv, so it's
// an error condition in order to ensure that the return of this
// function can always roundtrip through cmd.exe.
if (std.mem.indexOfAny(u8, arg, "\x00\r\n") != null) {
return error.InvalidBatchScriptArg;
}
// Separate args with a space.
try buf.append(' ');
// Need to quote if the argument is empty (otherwise the arg would just be lost)
// or if the last character is a `\`, since then something like "%~2" in a .bat
// script would cause the closing " to be escaped which we don't want.
var needs_quotes = arg.len == 0 or arg[arg.len - 1] == '\\';
if (!needs_quotes) {
for (arg) |c| {
switch (c) {
// Known good characters that don't need to be quoted
'A'...'Z', 'a'...'z', '0'...'9', '#', '$', '*', '+', '-', '.', '/', ':', '?', '@', '\\', '_' => {},
// When in doubt, quote
else => {
needs_quotes = true;
break;
},
}
}
}
if (needs_quotes) {
try buf.append('"');
}
var backslashes: usize = 0;
for (arg) |c| {
switch (c) {
'\\' => {
backslashes += 1;
},
'"' => {
try buf.appendNTimes('\\', backslashes);
try buf.append('"');
backslashes = 0;
},
// Replace `%` with `%%cd:~,%`.
//
// cmd.exe allows extracting a substring from an environment
// variable with the syntax: `%foo:~<start_index>,<end_index>%`.
// Therefore, `%cd:~,%` will always expand to an empty string
// since both the start and end index are blank, and it is assumed
// that `%cd%` is always available since it is a built-in variable
// that corresponds to the current directory.
//
// This means that replacing `%foo%` with `%%cd:~,%foo%%cd:~,%`
// will stop `%foo%` from being expanded and *after* expansion
// we'll still be left with `%foo%` (the literal string).
'%' => {
// the trailing `%` is appended outside the switch
try buf.appendSlice("%%cd:~,");
backslashes = 0;
},
else => {
backslashes = 0;
},
}
try buf.append(c);
}
if (needs_quotes) {
try buf.appendNTimes('\\', backslashes);
try buf.append('"');
}
}
try buf.append('"');
return try unicode.wtf8ToWtf16LeAllocZ(allocator, buf.items);
}
pub const ArgvToCommandLineError = error{ OutOfMemory, InvalidWtf8, InvalidArg0 };
/// Serializes `argv` to a Windows command-line string suitable for passing to a child process and

View file

@ -347,6 +347,7 @@ pub fn Inflate(comptime container: Container, comptime LookaheadType: type, comp
/// If the number of bytes read is 0, it means end of stream.
/// End of stream is not an error condition.
pub fn read(self: *Self, buffer: []u8) Error!usize {
if (buffer.len == 0) return 0;
const out = try self.get(buffer.len);
@memcpy(buffer[0..out.len], out);
return out.len;
@ -556,3 +557,14 @@ test "bug 18966" {
try decompress(.gzip, in.reader(), out.writer());
try testing.expectEqualStrings(expect, out.items);
}
test "bug 19895" {
const input = &[_]u8{
0b0000_0001, 0b0000_1100, 0x00, 0b1111_0011, 0xff, // deflate fixed buffer header len, nlen
'H', 'e', 'l', 'l', 'o', ' ', 'w', 'o', 'r', 'l', 'd', 0x0a, // non compressed data
};
var in = std.io.fixedBufferStream(input);
var decomp = decompressor(.raw, in.reader());
var buf: [0]u8 = undefined;
try testing.expectEqual(0, try decomp.read(&buf));
}

View file

@ -843,7 +843,7 @@ const ct_protected = struct {
// Compares two big integers in constant time, returning true if x >= y.
fn limbsCmpGeq(x: anytype, y: @TypeOf(x)) bool {
return !ct.limbsCmpLt(x, y);
return !limbsCmpLt(x, y);
}
// Multiplies two limbs and returns the result as a wide limb.
@ -878,11 +878,11 @@ const ct_unprotected = struct {
// Compares two big integers in constant time, returning true if x < y.
fn limbsCmpLt(x: anytype, y: @TypeOf(x)) bool {
assert(x.limbs_count() == y.limbs_count());
const x_limbs = x.limbs.constSlice();
const y_limbs = y.limbs.constSlice();
const x_limbs = x.limbsConst();
const y_limbs = y.limbsConst();
assert(x_limbs.len == y_limbs.len);
var i = x.limbs_count();
var i = x_limbs.len;
while (i != 0) {
i -= 1;
if (x_limbs[i] != y_limbs[i]) {
@ -894,7 +894,7 @@ const ct_unprotected = struct {
// Compares two big integers in constant time, returning true if x >= y.
fn limbsCmpGeq(x: anytype, y: @TypeOf(x)) bool {
return !ct.limbsCmpLt(x, y);
return !limbsCmpLt(x, y);
}
// Multiplies two limbs and returns the result as a wide limb.
@ -961,3 +961,28 @@ test "finite field arithmetic" {
try testing.expect(x_sq3.eql(x_sq4));
try m.fromMontgomery(&x);
}
fn testCt(ct_: anytype) !void {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
const l0: Limb = 0;
const l1: Limb = 1;
try testing.expectEqual(l1, ct_.select(true, l1, l0));
try testing.expectEqual(l0, ct_.select(false, l1, l0));
try testing.expectEqual(false, ct_.eql(l1, l0));
try testing.expectEqual(true, ct_.eql(l1, l1));
const M = Modulus(256);
const m = try M.fromPrimitive(u256, 3429938563481314093726330772853735541133072814650493833233);
const x = try M.Fe.fromPrimitive(u256, m, 80169837251094269539116136208111827396136208141182357733);
const y = try M.Fe.fromPrimitive(u256, m, 24620149608466364616251608466389896540098571);
try testing.expectEqual(false, ct_.limbsCmpLt(x.v, y.v));
try testing.expectEqual(true, ct_.limbsCmpGeq(x.v, y.v));
try testing.expectEqual(WideLimb{ .hi = 0, .lo = 0x88 }, ct_.mulWide(1 << 3, (1 << 4) + 1));
}
test ct {
try testCt(ct_protected);
try testCt(ct_unprotected);
}

View file

@ -228,8 +228,8 @@ pub fn relocateContext(context: *ThreadContext) void {
};
}
pub const have_getcontext = @hasDecl(posix.system, "getcontext") and
native_os != .openbsd and native_os != .haiku and
pub const have_getcontext = native_os != .openbsd and native_os != .haiku and
!builtin.target.isAndroid() and
(native_os != .linux or switch (builtin.cpu.arch) {
.x86,
.x86_64,

View file

@ -452,7 +452,7 @@ pub fn EnumMap(comptime E: type, comptime V: type) type {
values: [Indexer.count]Value = undefined,
/// Initializes the map using a sparse struct of optionals
pub fn init(init_values: EnumFieldStruct(E, ?Value, null)) Self {
pub fn init(init_values: EnumFieldStruct(E, ?Value, @as(?Value, null))) Self {
@setEvalBranchQuota(2 * @typeInfo(E).Enum.fields.len);
var result: Self = .{};
if (@typeInfo(E).Enum.is_exhaustive) {
@ -652,6 +652,19 @@ pub fn EnumMap(comptime E: type, comptime V: type) type {
};
}
test EnumMap {
const Ball = enum { red, green, blue };
const some = EnumMap(Ball, u8).init(.{
.green = 0xff,
.blue = 0x80,
});
try testing.expectEqual(2, some.count());
try testing.expectEqual(null, some.get(.red));
try testing.expectEqual(0xff, some.get(.green));
try testing.expectEqual(0x80, some.get(.blue));
}
/// A multiset of enum elements up to a count of usize. Backed
/// by an EnumArray. This type does no dynamic allocation and can
/// be copied by value.

View file

@ -1501,9 +1501,9 @@ pub fn parseInt(comptime T: type, buf: []const u8, base: u8) ParseIntError!T {
}
test parseInt {
try std.testing.expect((try parseInt(i32, "-10", 10)) == -10);
try std.testing.expect((try parseInt(i32, "+10", 10)) == 10);
try std.testing.expect((try parseInt(u32, "+10", 10)) == 10);
try std.testing.expectEqual(-10, try parseInt(i32, "-10", 10));
try std.testing.expectEqual(10, try parseInt(i32, "+10", 10));
try std.testing.expectEqual(10, try parseInt(u32, "+10", 10));
try std.testing.expectError(error.Overflow, parseInt(u32, "-10", 10));
try std.testing.expectError(error.InvalidCharacter, parseInt(u32, " 10", 10));
try std.testing.expectError(error.InvalidCharacter, parseInt(u32, "10 ", 10));
@ -1511,17 +1511,17 @@ test parseInt {
try std.testing.expectError(error.InvalidCharacter, parseInt(u32, "0x_10_", 10));
try std.testing.expectError(error.InvalidCharacter, parseInt(u32, "0x10_", 10));
try std.testing.expectError(error.InvalidCharacter, parseInt(u32, "0x_10", 10));
try std.testing.expect((try parseInt(u8, "255", 10)) == 255);
try std.testing.expectEqual(255, try parseInt(u8, "255", 10));
try std.testing.expectError(error.Overflow, parseInt(u8, "256", 10));
// +0 and -0 should work for unsigned
try std.testing.expect((try parseInt(u8, "-0", 10)) == 0);
try std.testing.expect((try parseInt(u8, "+0", 10)) == 0);
try std.testing.expectEqual(0, try parseInt(u8, "-0", 10));
try std.testing.expectEqual(0, try parseInt(u8, "+0", 10));
// ensure minInt is parsed correctly
try std.testing.expect((try parseInt(i1, "-1", 10)) == math.minInt(i1));
try std.testing.expect((try parseInt(i8, "-128", 10)) == math.minInt(i8));
try std.testing.expect((try parseInt(i43, "-4398046511104", 10)) == math.minInt(i43));
try std.testing.expectEqual(math.minInt(i1), try parseInt(i1, "-1", 10));
try std.testing.expectEqual(math.minInt(i8), try parseInt(i8, "-128", 10));
try std.testing.expectEqual(math.minInt(i43), try parseInt(i43, "-4398046511104", 10));
// empty string or bare +- is invalid
try std.testing.expectError(error.InvalidCharacter, parseInt(u32, "", 10));
@ -1532,22 +1532,22 @@ test parseInt {
try std.testing.expectError(error.InvalidCharacter, parseInt(i32, "-", 10));
// autodectect the base
try std.testing.expect((try parseInt(i32, "111", 0)) == 111);
try std.testing.expect((try parseInt(i32, "1_1_1", 0)) == 111);
try std.testing.expect((try parseInt(i32, "1_1_1", 0)) == 111);
try std.testing.expect((try parseInt(i32, "+0b111", 0)) == 7);
try std.testing.expect((try parseInt(i32, "+0B111", 0)) == 7);
try std.testing.expect((try parseInt(i32, "+0b1_11", 0)) == 7);
try std.testing.expect((try parseInt(i32, "+0o111", 0)) == 73);
try std.testing.expect((try parseInt(i32, "+0O111", 0)) == 73);
try std.testing.expect((try parseInt(i32, "+0o11_1", 0)) == 73);
try std.testing.expect((try parseInt(i32, "+0x111", 0)) == 273);
try std.testing.expect((try parseInt(i32, "-0b111", 0)) == -7);
try std.testing.expect((try parseInt(i32, "-0b11_1", 0)) == -7);
try std.testing.expect((try parseInt(i32, "-0o111", 0)) == -73);
try std.testing.expect((try parseInt(i32, "-0x111", 0)) == -273);
try std.testing.expect((try parseInt(i32, "-0X111", 0)) == -273);
try std.testing.expect((try parseInt(i32, "-0x1_11", 0)) == -273);
try std.testing.expectEqual(111, try parseInt(i32, "111", 0));
try std.testing.expectEqual(111, try parseInt(i32, "1_1_1", 0));
try std.testing.expectEqual(111, try parseInt(i32, "1_1_1", 0));
try std.testing.expectEqual(7, try parseInt(i32, "+0b111", 0));
try std.testing.expectEqual(7, try parseInt(i32, "+0B111", 0));
try std.testing.expectEqual(7, try parseInt(i32, "+0b1_11", 0));
try std.testing.expectEqual(73, try parseInt(i32, "+0o111", 0));
try std.testing.expectEqual(73, try parseInt(i32, "+0O111", 0));
try std.testing.expectEqual(73, try parseInt(i32, "+0o11_1", 0));
try std.testing.expectEqual(273, try parseInt(i32, "+0x111", 0));
try std.testing.expectEqual(-7, try parseInt(i32, "-0b111", 0));
try std.testing.expectEqual(-7, try parseInt(i32, "-0b11_1", 0));
try std.testing.expectEqual(-73, try parseInt(i32, "-0o111", 0));
try std.testing.expectEqual(-273, try parseInt(i32, "-0x111", 0));
try std.testing.expectEqual(-273, try parseInt(i32, "-0X111", 0));
try std.testing.expectEqual(-273, try parseInt(i32, "-0x1_11", 0));
// bare binary/octal/decimal prefix is invalid
try std.testing.expectError(error.InvalidCharacter, parseInt(u32, "0b", 0));
@ -1643,31 +1643,31 @@ pub fn parseUnsigned(comptime T: type, buf: []const u8, base: u8) ParseIntError!
}
test parseUnsigned {
try std.testing.expect((try parseUnsigned(u16, "050124", 10)) == 50124);
try std.testing.expect((try parseUnsigned(u16, "65535", 10)) == 65535);
try std.testing.expect((try parseUnsigned(u16, "65_535", 10)) == 65535);
try std.testing.expectEqual(50124, try parseUnsigned(u16, "050124", 10));
try std.testing.expectEqual(65535, try parseUnsigned(u16, "65535", 10));
try std.testing.expectEqual(65535, try parseUnsigned(u16, "65_535", 10));
try std.testing.expectError(error.Overflow, parseUnsigned(u16, "65536", 10));
try std.testing.expect((try parseUnsigned(u64, "0ffffffffffffffff", 16)) == 0xffffffffffffffff);
try std.testing.expect((try parseUnsigned(u64, "0f_fff_fff_fff_fff_fff", 16)) == 0xffffffffffffffff);
try std.testing.expectEqual(0xffffffffffffffff, try parseUnsigned(u64, "0ffffffffffffffff", 16));
try std.testing.expectEqual(0xffffffffffffffff, try parseUnsigned(u64, "0f_fff_fff_fff_fff_fff", 16));
try std.testing.expectError(error.Overflow, parseUnsigned(u64, "10000000000000000", 16));
try std.testing.expect((try parseUnsigned(u32, "DeadBeef", 16)) == 0xDEADBEEF);
try std.testing.expectEqual(0xDEADBEEF, try parseUnsigned(u32, "DeadBeef", 16));
try std.testing.expect((try parseUnsigned(u7, "1", 10)) == 1);
try std.testing.expect((try parseUnsigned(u7, "1000", 2)) == 8);
try std.testing.expectEqual(1, try parseUnsigned(u7, "1", 10));
try std.testing.expectEqual(8, try parseUnsigned(u7, "1000", 2));
try std.testing.expectError(error.InvalidCharacter, parseUnsigned(u32, "f", 10));
try std.testing.expectError(error.InvalidCharacter, parseUnsigned(u8, "109", 8));
try std.testing.expect((try parseUnsigned(u32, "NUMBER", 36)) == 1442151747);
try std.testing.expectEqual(1442151747, try parseUnsigned(u32, "NUMBER", 36));
// these numbers should fit even though the base itself doesn't fit in the destination type
try std.testing.expect((try parseUnsigned(u1, "0", 10)) == 0);
try std.testing.expect((try parseUnsigned(u1, "1", 10)) == 1);
try std.testing.expectEqual(0, try parseUnsigned(u1, "0", 10));
try std.testing.expectEqual(1, try parseUnsigned(u1, "1", 10));
try std.testing.expectError(error.Overflow, parseUnsigned(u1, "2", 10));
try std.testing.expect((try parseUnsigned(u1, "001", 16)) == 1);
try std.testing.expect((try parseUnsigned(u2, "3", 16)) == 3);
try std.testing.expectEqual(1, try parseUnsigned(u1, "001", 16));
try std.testing.expectEqual(3, try parseUnsigned(u2, "3", 16));
try std.testing.expectError(error.Overflow, parseUnsigned(u2, "4", 16));
// parseUnsigned does not expect a sign
@ -1717,15 +1717,15 @@ pub fn parseIntSizeSuffix(buf: []const u8, digit_base: u8) ParseIntError!usize {
}
test parseIntSizeSuffix {
try std.testing.expect(try parseIntSizeSuffix("2", 10) == 2);
try std.testing.expect(try parseIntSizeSuffix("2B", 10) == 2);
try std.testing.expect(try parseIntSizeSuffix("2kB", 10) == 2000);
try std.testing.expect(try parseIntSizeSuffix("2k", 10) == 2000);
try std.testing.expect(try parseIntSizeSuffix("2KiB", 10) == 2048);
try std.testing.expect(try parseIntSizeSuffix("2Ki", 10) == 2048);
try std.testing.expect(try parseIntSizeSuffix("aKiB", 16) == 10240);
try std.testing.expect(parseIntSizeSuffix("", 10) == error.InvalidCharacter);
try std.testing.expect(parseIntSizeSuffix("2iB", 10) == error.InvalidCharacter);
try std.testing.expectEqual(2, try parseIntSizeSuffix("2", 10));
try std.testing.expectEqual(2, try parseIntSizeSuffix("2B", 10));
try std.testing.expectEqual(2000, try parseIntSizeSuffix("2kB", 10));
try std.testing.expectEqual(2000, try parseIntSizeSuffix("2k", 10));
try std.testing.expectEqual(2048, try parseIntSizeSuffix("2KiB", 10));
try std.testing.expectEqual(2048, try parseIntSizeSuffix("2Ki", 10));
try std.testing.expectEqual(10240, try parseIntSizeSuffix("aKiB", 16));
try std.testing.expectError(error.InvalidCharacter, parseIntSizeSuffix("", 10));
try std.testing.expectError(error.InvalidCharacter, parseIntSizeSuffix("2iB", 10));
}
pub const parseFloat = @import("fmt/parse_float.zig").parseFloat;
@ -1854,7 +1854,7 @@ test "parse u64 digit too big" {
test "parse unsigned comptime" {
comptime {
try std.testing.expect((try parseUnsigned(usize, "2", 10)) == 2);
try std.testing.expectEqual(2, try parseUnsigned(usize, "2", 10));
}
}
@ -1963,15 +1963,15 @@ test "buffer" {
var buf1: [32]u8 = undefined;
var fbs = std.io.fixedBufferStream(&buf1);
try formatType(1234, "", FormatOptions{}, fbs.writer(), std.options.fmt_max_depth);
try std.testing.expect(mem.eql(u8, fbs.getWritten(), "1234"));
try std.testing.expectEqualStrings("1234", fbs.getWritten());
fbs.reset();
try formatType('a', "c", FormatOptions{}, fbs.writer(), std.options.fmt_max_depth);
try std.testing.expect(mem.eql(u8, fbs.getWritten(), "a"));
try std.testing.expectEqualStrings("a", fbs.getWritten());
fbs.reset();
try formatType(0b1100, "b", FormatOptions{}, fbs.writer(), std.options.fmt_max_depth);
try std.testing.expect(mem.eql(u8, fbs.getWritten(), "1100"));
try std.testing.expectEqualStrings("1100", fbs.getWritten());
}
}
@ -2372,10 +2372,10 @@ test "union" {
var buf: [100]u8 = undefined;
const uu_result = try bufPrint(buf[0..], "{}", .{uu_inst});
try std.testing.expect(mem.eql(u8, uu_result[0..18], "fmt.test.union.UU@"));
try std.testing.expectEqualStrings("fmt.test.union.UU@", uu_result[0..18]);
const eu_result = try bufPrint(buf[0..], "{}", .{eu_inst});
try std.testing.expect(mem.eql(u8, eu_result[0..18], "fmt.test.union.EU@"));
try std.testing.expectEqualStrings("fmt.test.union.EU@", eu_result[0..18]);
}
test "struct.self-referential" {
@ -2476,7 +2476,7 @@ test "formatIntValue with comptime_int" {
var buf: [20]u8 = undefined;
var fbs = std.io.fixedBufferStream(&buf);
try formatIntValue(value, "", FormatOptions{}, fbs.writer());
try std.testing.expect(mem.eql(u8, fbs.getWritten(), "123456789123456789"));
try std.testing.expectEqualStrings("123456789123456789", fbs.getWritten());
}
test "formatFloatValue with comptime_float" {
@ -2542,19 +2542,19 @@ test "formatType max_depth" {
var buf: [1000]u8 = undefined;
var fbs = std.io.fixedBufferStream(&buf);
try formatType(inst, "", FormatOptions{}, fbs.writer(), 0);
try std.testing.expect(mem.eql(u8, fbs.getWritten(), "fmt.test.formatType max_depth.S{ ... }"));
try std.testing.expectEqualStrings("fmt.test.formatType max_depth.S{ ... }", fbs.getWritten());
fbs.reset();
try formatType(inst, "", FormatOptions{}, fbs.writer(), 1);
try std.testing.expect(mem.eql(u8, fbs.getWritten(), "fmt.test.formatType max_depth.S{ .a = fmt.test.formatType max_depth.S{ ... }, .tu = fmt.test.formatType max_depth.TU{ ... }, .e = fmt.test.formatType max_depth.E.Two, .vec = (10.200,2.220) }"));
try std.testing.expectEqualStrings("fmt.test.formatType max_depth.S{ .a = fmt.test.formatType max_depth.S{ ... }, .tu = fmt.test.formatType max_depth.TU{ ... }, .e = fmt.test.formatType max_depth.E.Two, .vec = (10.200,2.220) }", fbs.getWritten());
fbs.reset();
try formatType(inst, "", FormatOptions{}, fbs.writer(), 2);
try std.testing.expect(mem.eql(u8, fbs.getWritten(), "fmt.test.formatType max_depth.S{ .a = fmt.test.formatType max_depth.S{ .a = fmt.test.formatType max_depth.S{ ... }, .tu = fmt.test.formatType max_depth.TU{ ... }, .e = fmt.test.formatType max_depth.E.Two, .vec = (10.200,2.220) }, .tu = fmt.test.formatType max_depth.TU{ .ptr = fmt.test.formatType max_depth.TU{ ... } }, .e = fmt.test.formatType max_depth.E.Two, .vec = (10.200,2.220) }"));
try std.testing.expectEqualStrings("fmt.test.formatType max_depth.S{ .a = fmt.test.formatType max_depth.S{ .a = fmt.test.formatType max_depth.S{ ... }, .tu = fmt.test.formatType max_depth.TU{ ... }, .e = fmt.test.formatType max_depth.E.Two, .vec = (10.200,2.220) }, .tu = fmt.test.formatType max_depth.TU{ .ptr = fmt.test.formatType max_depth.TU{ ... } }, .e = fmt.test.formatType max_depth.E.Two, .vec = (10.200,2.220) }", fbs.getWritten());
fbs.reset();
try formatType(inst, "", FormatOptions{}, fbs.writer(), 3);
try std.testing.expect(mem.eql(u8, fbs.getWritten(), "fmt.test.formatType max_depth.S{ .a = fmt.test.formatType max_depth.S{ .a = fmt.test.formatType max_depth.S{ .a = fmt.test.formatType max_depth.S{ ... }, .tu = fmt.test.formatType max_depth.TU{ ... }, .e = fmt.test.formatType max_depth.E.Two, .vec = (10.200,2.220) }, .tu = fmt.test.formatType max_depth.TU{ .ptr = fmt.test.formatType max_depth.TU{ ... } }, .e = fmt.test.formatType max_depth.E.Two, .vec = (10.200,2.220) }, .tu = fmt.test.formatType max_depth.TU{ .ptr = fmt.test.formatType max_depth.TU{ .ptr = fmt.test.formatType max_depth.TU{ ... } } }, .e = fmt.test.formatType max_depth.E.Two, .vec = (10.200,2.220) }"));
try std.testing.expectEqualStrings("fmt.test.formatType max_depth.S{ .a = fmt.test.formatType max_depth.S{ .a = fmt.test.formatType max_depth.S{ .a = fmt.test.formatType max_depth.S{ ... }, .tu = fmt.test.formatType max_depth.TU{ ... }, .e = fmt.test.formatType max_depth.E.Two, .vec = (10.200,2.220) }, .tu = fmt.test.formatType max_depth.TU{ .ptr = fmt.test.formatType max_depth.TU{ ... } }, .e = fmt.test.formatType max_depth.E.Two, .vec = (10.200,2.220) }, .tu = fmt.test.formatType max_depth.TU{ .ptr = fmt.test.formatType max_depth.TU{ .ptr = fmt.test.formatType max_depth.TU{ ... } } }, .e = fmt.test.formatType max_depth.E.Two, .vec = (10.200,2.220) }", fbs.getWritten());
}
test "positional" {

View file

@ -1104,27 +1104,29 @@ pub fn createFileW(self: Dir, sub_path_w: []const u16, flags: File.CreateFlags)
return file;
}
pub const MakeError = posix.MakeDirError;
/// Creates a single directory with a relative or absolute path.
/// To create multiple directories to make an entire path, see `makePath`.
/// To operate on only absolute paths, see `makeDirAbsolute`.
/// On Windows, `sub_path` should be encoded as [WTF-8](https://simonsapin.github.io/wtf-8/).
/// On WASI, `sub_path` should be encoded as valid UTF-8.
/// On other platforms, `sub_path` is an opaque sequence of bytes with no particular encoding.
pub fn makeDir(self: Dir, sub_path: []const u8) !void {
pub fn makeDir(self: Dir, sub_path: []const u8) MakeError!void {
try posix.mkdirat(self.fd, sub_path, default_mode);
}
/// Same as `makeDir`, but `sub_path` is null-terminated.
/// To create multiple directories to make an entire path, see `makePath`.
/// To operate on only absolute paths, see `makeDirAbsoluteZ`.
pub fn makeDirZ(self: Dir, sub_path: [*:0]const u8) !void {
pub fn makeDirZ(self: Dir, sub_path: [*:0]const u8) MakeError!void {
try posix.mkdiratZ(self.fd, sub_path, default_mode);
}
/// Creates a single directory with a relative or absolute null-terminated WTF-16 LE-encoded path.
/// To create multiple directories to make an entire path, see `makePath`.
/// To operate on only absolute paths, see `makeDirAbsoluteW`.
pub fn makeDirW(self: Dir, sub_path: [*:0]const u16) !void {
pub fn makeDirW(self: Dir, sub_path: [*:0]const u16) MakeError!void {
try posix.mkdiratW(self.fd, sub_path, default_mode);
}
@ -1144,7 +1146,7 @@ pub fn makeDirW(self: Dir, sub_path: [*:0]const u16) !void {
/// - On other platforms, `..` are not resolved before the path is passed to `mkdirat`,
/// meaning a `sub_path` like "first/../second" will create both a `./first`
/// and a `./second` directory.
pub fn makePath(self: Dir, sub_path: []const u8) !void {
pub fn makePath(self: Dir, sub_path: []const u8) (MakeError || StatFileError)!void {
var it = try fs.path.componentIterator(sub_path);
var component = it.last() orelse return;
while (true) {
@ -1178,7 +1180,7 @@ pub fn makePath(self: Dir, sub_path: []const u8) !void {
/// This function is not atomic, and if it returns an error, the file system may
/// have been modified regardless.
/// `sub_path` should be encoded as [WTF-8](https://simonsapin.github.io/wtf-8/).
fn makeOpenPathAccessMaskW(self: Dir, sub_path: []const u8, access_mask: u32, no_follow: bool) OpenError!Dir {
fn makeOpenPathAccessMaskW(self: Dir, sub_path: []const u8, access_mask: u32, no_follow: bool) (MakeError || OpenError || StatFileError)!Dir {
const w = windows;
var it = try fs.path.componentIterator(sub_path);
// If there are no components in the path, then create a dummy component with the full path.
@ -1198,12 +1200,27 @@ fn makeOpenPathAccessMaskW(self: Dir, sub_path: []const u8, access_mask: u32, no
component = it.previous() orelse return e;
continue;
},
error.PathAlreadyExists => result: {
assert(!is_last);
// stat the file and return an error if it's not a directory
// this is important because otherwise a dangling symlink
// could cause an infinite loop
check_dir: {
// workaround for windows, see https://github.com/ziglang/zig/issues/16738
const fstat = self.statFile(component.path) catch |stat_err| switch (stat_err) {
error.IsDir => break :check_dir,
else => |e| return e,
};
if (fstat.kind != .directory) return error.NotDir;
}
break :result null;
},
else => |e| return e,
};
component = it.next() orelse return result;
// Don't leak the intermediate file handles
result.close();
errdefer if (result) |*dir| dir.close();
component = it.next() orelse return result.?;
}
}
@ -1213,7 +1230,7 @@ fn makeOpenPathAccessMaskW(self: Dir, sub_path: []const u8, access_mask: u32, no
/// On Windows, `sub_path` should be encoded as [WTF-8](https://simonsapin.github.io/wtf-8/).
/// On WASI, `sub_path` should be encoded as valid UTF-8.
/// On other platforms, `sub_path` is an opaque sequence of bytes with no particular encoding.
pub fn makeOpenPath(self: Dir, sub_path: []const u8, open_dir_options: OpenDirOptions) !Dir {
pub fn makeOpenPath(self: Dir, sub_path: []const u8, open_dir_options: OpenDirOptions) (MakeError || OpenError || StatFileError)!Dir {
return switch (native_os) {
.windows => {
const w = windows;
@ -1401,7 +1418,7 @@ pub fn openDir(self: Dir, sub_path: []const u8, args: OpenDirOptions) OpenError!
const sub_path_w = try windows.sliceToPrefixedFileW(self.fd, sub_path);
return self.openDirW(sub_path_w.span().ptr, args);
},
.wasi => {
.wasi => if (!builtin.link_libc) {
var base: std.os.wasi.rights_t = .{
.FD_FILESTAT_GET = true,
.FD_FDSTAT_SET_FLAGS = true,
@ -1446,11 +1463,10 @@ pub fn openDir(self: Dir, sub_path: []const u8, args: OpenDirOptions) OpenError!
};
return .{ .fd = fd };
},
else => {
const sub_path_c = try posix.toPosixPath(sub_path);
return self.openDirZ(&sub_path_c, args);
},
else => {},
}
const sub_path_c = try posix.toPosixPath(sub_path);
return self.openDirZ(&sub_path_c, args);
}
/// Same as `openDir` except the parameter is null-terminated.
@ -1460,7 +1476,9 @@ pub fn openDirZ(self: Dir, sub_path_c: [*:0]const u8, args: OpenDirOptions) Open
const sub_path_w = try windows.cStrToPrefixedFileW(self.fd, sub_path_c);
return self.openDirW(sub_path_w.span().ptr, args);
},
.wasi => {
// Use the libc API when libc is linked because it implements things
// such as opening absolute directory paths.
.wasi => if (!builtin.link_libc) {
return openDir(self, mem.sliceTo(sub_path_c, 0), args);
},
.haiku => {
@ -1484,19 +1502,27 @@ pub fn openDirZ(self: Dir, sub_path_c: [*:0]const u8, args: OpenDirOptions) Open
else => |err| return posix.unexpectedErrno(err),
}
},
else => {
var symlink_flags: posix.O = .{
.ACCMODE = .RDONLY,
.NOFOLLOW = args.no_follow,
.DIRECTORY = true,
.CLOEXEC = true,
};
if (@hasField(posix.O, "PATH") and !args.iterate)
symlink_flags.PATH = true;
return self.openDirFlagsZ(sub_path_c, symlink_flags);
},
else => {},
}
var symlink_flags: posix.O = switch (native_os) {
.wasi => .{
.read = true,
.NOFOLLOW = args.no_follow,
.DIRECTORY = true,
},
else => .{
.ACCMODE = .RDONLY,
.NOFOLLOW = args.no_follow,
.DIRECTORY = true,
.CLOEXEC = true,
},
};
if (@hasField(posix.O, "PATH") and !args.iterate)
symlink_flags.PATH = true;
return self.openDirFlagsZ(sub_path_c, symlink_flags);
}
/// Same as `openDir` except the path parameter is WTF-16 LE encoded, NT-prefixed.
@ -1507,10 +1533,17 @@ pub fn openDirW(self: Dir, sub_path_w: [*:0]const u16, args: OpenDirOptions) Ope
const base_flags = w.STANDARD_RIGHTS_READ | w.FILE_READ_ATTRIBUTES | w.FILE_READ_EA |
w.SYNCHRONIZE | w.FILE_TRAVERSE;
const flags: u32 = if (args.iterate) base_flags | w.FILE_LIST_DIRECTORY else base_flags;
const dir = try self.makeOpenDirAccessMaskW(sub_path_w, flags, .{
const dir = self.makeOpenDirAccessMaskW(sub_path_w, flags, .{
.no_follow = args.no_follow,
.create_disposition = w.FILE_OPEN,
});
}) catch |err| switch (err) {
error.ReadOnlyFileSystem => unreachable,
error.DiskQuota => unreachable,
error.NoSpaceLeft => unreachable,
error.PathAlreadyExists => unreachable,
error.LinkQuotaExceeded => unreachable,
else => |e| return e,
};
return dir;
}
@ -1535,7 +1568,7 @@ const MakeOpenDirAccessMaskWOptions = struct {
create_disposition: u32,
};
fn makeOpenDirAccessMaskW(self: Dir, sub_path_w: [*:0]const u16, access_mask: u32, flags: MakeOpenDirAccessMaskWOptions) OpenError!Dir {
fn makeOpenDirAccessMaskW(self: Dir, sub_path_w: [*:0]const u16, access_mask: u32, flags: MakeOpenDirAccessMaskWOptions) (MakeError || OpenError)!Dir {
const w = windows;
var result = Dir{
@ -1576,6 +1609,7 @@ fn makeOpenDirAccessMaskW(self: Dir, sub_path_w: [*:0]const u16, access_mask: u3
.SUCCESS => return result,
.OBJECT_NAME_INVALID => return error.BadPathName,
.OBJECT_NAME_NOT_FOUND => return error.FileNotFound,
.OBJECT_NAME_COLLISION => return error.PathAlreadyExists,
.OBJECT_PATH_NOT_FOUND => return error.FileNotFound,
.NOT_A_DIRECTORY => return error.NotDir,
// This can happen if the directory has 'List folder contents' permission set to 'Deny'

View file

@ -19,18 +19,22 @@ fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 {
_ = log2_align;
assert(n > 0);
if (n > maxInt(usize) - (mem.page_size - 1)) return null;
const aligned_len = mem.alignForward(usize, n, mem.page_size);
if (native_os == .windows) {
const addr = windows.VirtualAlloc(
null,
aligned_len,
// VirtualAlloc will round the length to a multiple of page size.
// VirtualAlloc docs: If the lpAddress parameter is NULL, this value is rounded up to the next page boundary
n,
windows.MEM_COMMIT | windows.MEM_RESERVE,
windows.PAGE_READWRITE,
) catch return null;
return @ptrCast(addr);
}
const aligned_len = mem.alignForward(usize, n, mem.page_size);
const hint = @atomicLoad(@TypeOf(std.heap.next_mmap_addr_hint), &std.heap.next_mmap_addr_hint, .unordered);
const slice = posix.mmap(
hint,

View file

@ -297,6 +297,12 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
const stack_addresses = bucket.stackTracePtr(size_class, slot_index, trace_kind);
collectStackTrace(ret_addr, stack_addresses);
}
/// Only valid for buckets within `empty_buckets`, and relies on the `alloc_cursor`
/// of empty buckets being set to `slot_count` when they are added to `empty_buckets`
fn emptyBucketSizeClass(bucket: *BucketHeader) usize {
return @divExact(page_size, bucket.alloc_cursor);
}
};
pub fn allocator(self: *Self) Allocator {
@ -439,15 +445,18 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
}
}
// free retained metadata for small allocations
var empty_it = self.empty_buckets.inorderIterator();
while (empty_it.next()) |node| {
while (self.empty_buckets.getMin()) |node| {
// remove the node from the tree before destroying it
var entry = self.empty_buckets.getEntryForExisting(node);
entry.set(null);
var bucket = node.key;
if (config.never_unmap) {
// free page that was intentionally leaked by never_unmap
self.backing_allocator.free(bucket.page[0..page_size]);
}
// alloc_cursor was set to slot count when bucket added to empty_buckets
self.freeBucket(bucket, @divExact(page_size, bucket.alloc_cursor));
self.freeBucket(bucket, bucket.emptyBucketSizeClass());
self.bucket_node_pool.destroy(node);
}
self.empty_buckets.root = null;
@ -726,6 +735,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
if (!self.large_allocations.contains(@intFromPtr(old_mem.ptr))) {
// object not in active buckets or a large allocation, so search empty buckets
if (searchBucket(&self.empty_buckets, @intFromPtr(old_mem.ptr), null)) |bucket| {
size_class = bucket.emptyBucketSizeClass();
// bucket is empty so is_used below will always be false and we exit there
break :blk bucket;
} else {
@ -844,6 +854,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
if (!self.large_allocations.contains(@intFromPtr(old_mem.ptr))) {
// object not in active buckets or a large allocation, so search empty buckets
if (searchBucket(&self.empty_buckets, @intFromPtr(old_mem.ptr), null)) |bucket| {
size_class = bucket.emptyBucketSizeClass();
// bucket is empty so is_used below will always be false and we exit there
break :blk bucket;
} else {
@ -1418,6 +1429,23 @@ test "double frees" {
try std.testing.expect(!gpa.large_allocations.contains(@intFromPtr(large.ptr)));
}
test "empty bucket size class" {
const GPA = GeneralPurposeAllocator(.{ .safety = true, .never_unmap = true, .retain_metadata = true });
var gpa = GPA{};
defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
const allocator = gpa.allocator();
// allocate and free to create an empty bucket
const size_class: usize = @as(usize, 1) << 6;
const small = try allocator.alloc(u8, size_class);
allocator.free(small);
// the metadata tracking system relies on alloc_cursor of empty buckets
// being set to the slot count so that we can get back the size class.
const empty_bucket = GPA.searchBucket(&gpa.empty_buckets, @intFromPtr(small.ptr), null).?;
try std.testing.expect(empty_bucket.emptyBucketSizeClass() == size_class);
}
test "bug 9995 fix, large allocs count requested size not backing size" {
// with AtLeast, buffer likely to be larger than requested, especially when shrinking
var gpa = GeneralPurposeAllocator(.{ .enable_memory_limit = true }){};
@ -1430,3 +1458,19 @@ test "bug 9995 fix, large allocs count requested size not backing size" {
buf = try allocator.realloc(buf, 2);
try std.testing.expect(gpa.total_requested_bytes == 2);
}
test "retain metadata and never unmap" {
var gpa = std.heap.GeneralPurposeAllocator(.{
.safety = true,
.never_unmap = true,
.retain_metadata = true,
}){};
defer std.debug.assert(gpa.deinit() == .ok);
const allocator = gpa.allocator();
const alloc = try allocator.alloc(u8, 8);
allocator.free(alloc);
const alloc2 = try allocator.alloc(u8, 8);
allocator.free(alloc2);
}

View file

@ -70,7 +70,12 @@ pub const Config = union(enum) {
reset_attributes: u16,
};
pub fn setColor(conf: Config, out_stream: anytype, color: Color) !void {
pub fn setColor(
conf: Config,
writer: anytype,
color: Color,
) (@typeInfo(@TypeOf(writer.writeAll(""))).ErrorUnion.error_set ||
windows.SetConsoleTextAttributeError)!void {
nosuspend switch (conf) {
.no_color => return,
.escape_codes => {
@ -95,7 +100,7 @@ pub const Config = union(enum) {
.dim => "\x1b[2m",
.reset => "\x1b[0m",
};
try out_stream.writeAll(color_string);
try writer.writeAll(color_string);
},
.windows_api => |ctx| if (native_os == .windows) {
const attributes = switch (color) {

View file

@ -52,6 +52,7 @@ pub const floatTrueMin = @import("math/float.zig").floatTrueMin;
pub const floatMin = @import("math/float.zig").floatMin;
pub const floatMax = @import("math/float.zig").floatMax;
pub const floatEps = @import("math/float.zig").floatEps;
pub const floatEpsAt = @import("math/float.zig").floatEpsAt;
pub const inf = @import("math/float.zig").inf;
pub const nan = @import("math/float.zig").nan;
pub const snan = @import("math/float.zig").snan;

View file

@ -94,6 +94,19 @@ pub inline fn floatEps(comptime T: type) T {
return reconstructFloat(T, -floatFractionalBits(T), mantissaOne(T));
}
/// Returns the local epsilon of floating point type T.
pub inline fn floatEpsAt(comptime T: type, x: T) T {
switch (@typeInfo(T)) {
.Float => |F| {
const U: type = @Type(.{ .Int = .{ .signedness = .unsigned, .bits = F.bits } });
const u: U = @bitCast(x);
const y: T = @bitCast(u ^ 1);
return @abs(x - y);
},
else => @compileError("floatEpsAt only supports floats"),
}
}
/// Returns the value inf for floating point type T.
pub inline fn inf(comptime T: type) T {
return reconstructFloat(T, floatExponentMax(T) + 1, mantissaOne(T));

View file

@ -1,13 +1,14 @@
// Ported from musl, which is licensed under the MIT license:
// https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
//
// https://git.musl-libc.org/cgit/musl/tree/src/math/hypotf.c
// https://git.musl-libc.org/cgit/musl/tree/src/math/hypot.c
const std = @import("../std.zig");
const math = std.math;
const expect = std.testing.expect;
const maxInt = std.math.maxInt;
const isNan = math.isNan;
const isInf = math.isInf;
const inf = math.inf;
const nan = math.nan;
const floatEpsAt = math.floatEpsAt;
const floatEps = math.floatEps;
const floatMin = math.floatMin;
const floatMax = math.floatMax;
/// Returns sqrt(x * x + y * y), avoiding unnecessary overflow and underflow.
///
@ -15,162 +16,116 @@ const maxInt = std.math.maxInt;
///
/// | x | y | hypot |
/// |-------|-------|-------|
/// | +inf | num | +inf |
/// | num | +-inf | +inf |
/// | nan | any | nan |
/// | any | nan | nan |
/// | +-inf | any | +inf |
/// | any | +-inf | +inf |
/// | nan | fin | nan |
/// | fin | nan | nan |
pub fn hypot(x: anytype, y: anytype) @TypeOf(x, y) {
const T = @TypeOf(x, y);
return switch (T) {
f32 => hypot32(x, y),
f64 => hypot64(x, y),
switch (@typeInfo(T)) {
.Float => {},
.ComptimeFloat => return @sqrt(x * x + y * y),
else => @compileError("hypot not implemented for " ++ @typeName(T)),
};
}
const lower = @sqrt(floatMin(T));
const upper = @sqrt(floatMax(T) / 2);
const incre = @sqrt(floatEps(T) / 2);
const scale = floatEpsAt(T, incre);
const hypfn = if (emulateFma(T)) hypotUnfused else hypotFused;
var major: T = x;
var minor: T = y;
if (isInf(major) or isInf(minor)) return inf(T);
if (isNan(major) or isNan(minor)) return nan(T);
if (T == f16) return @floatCast(@sqrt(@mulAdd(f32, x, x, @as(f32, y) * y)));
if (T == f32) return @floatCast(@sqrt(@mulAdd(f64, x, x, @as(f64, y) * y)));
major = @abs(major);
minor = @abs(minor);
if (minor > major) {
const tempo = major;
major = minor;
minor = tempo;
}
if (major * incre >= minor) return major;
if (major > upper) return hypfn(T, major * scale, minor * scale) / scale;
if (minor < lower) return hypfn(T, major / scale, minor / scale) * scale;
return hypfn(T, major, minor);
}
fn hypot32(x: f32, y: f32) f32 {
var ux = @as(u32, @bitCast(x));
var uy = @as(u32, @bitCast(y));
ux &= maxInt(u32) >> 1;
uy &= maxInt(u32) >> 1;
if (ux < uy) {
const tmp = ux;
ux = uy;
uy = tmp;
}
var xx = @as(f32, @bitCast(ux));
var yy = @as(f32, @bitCast(uy));
if (uy == 0xFF << 23) {
return yy;
}
if (ux >= 0xFF << 23 or uy == 0 or ux - uy >= (25 << 23)) {
return xx + yy;
}
var z: f32 = 1.0;
if (ux >= (0x7F + 60) << 23) {
z = 0x1.0p90;
xx *= 0x1.0p-90;
yy *= 0x1.0p-90;
} else if (uy < (0x7F - 60) << 23) {
z = 0x1.0p-90;
xx *= 0x1.0p-90;
yy *= 0x1.0p-90;
}
return z * @sqrt(@as(f32, @floatCast(@as(f64, x) * x + @as(f64, y) * y)));
inline fn emulateFma(comptime T: type) bool {
// If @mulAdd lowers to the software implementation,
// hypotUnfused should be used in place of hypotFused.
// This takes an educated guess, but ideally we should
// properly detect at comptime when that fallback will
// occur.
return (T == f128 or T == f80);
}
fn sq(hi: *f64, lo: *f64, x: f64) void {
const split: f64 = 0x1.0p27 + 1.0;
const xc = x * split;
const xh = x - xc + xc;
const xl = x - xh;
hi.* = x * x;
lo.* = xh * xh - hi.* + 2 * xh * xl + xl * xl;
inline fn hypotFused(comptime F: type, x: F, y: F) F {
const r = @sqrt(@mulAdd(F, x, x, y * y));
const rr = r * r;
const xx = x * x;
const z = @mulAdd(F, -y, y, rr - xx) + @mulAdd(F, r, r, -rr) - @mulAdd(F, x, x, -xx);
return r - z / (2 * r);
}
fn hypot64(x: f64, y: f64) f64 {
var ux = @as(u64, @bitCast(x));
var uy = @as(u64, @bitCast(y));
ux &= maxInt(u64) >> 1;
uy &= maxInt(u64) >> 1;
if (ux < uy) {
const tmp = ux;
ux = uy;
uy = tmp;
inline fn hypotUnfused(comptime F: type, x: F, y: F) F {
const r = @sqrt(x * x + y * y);
if (r <= 2 * y) { // 30deg or steeper
const dx = r - y;
const z = x * (2 * dx - x) + (dx - 2 * (x - y)) * dx;
return r - z / (2 * r);
} else { // shallower than 30 deg
const dy = r - x;
const z = 2 * dy * (x - 2 * y) + (4 * dy - y) * y + dy * dy;
return r - z / (2 * r);
}
const ex = ux >> 52;
const ey = uy >> 52;
var xx = @as(f64, @bitCast(ux));
var yy = @as(f64, @bitCast(uy));
// hypot(inf, nan) == inf
if (ey == 0x7FF) {
return yy;
}
if (ex == 0x7FF or uy == 0) {
return xx;
}
// hypot(x, y) ~= x + y * y / x / 2 with inexact for small y/x
if (ex - ey > 64) {
return xx + yy;
}
var z: f64 = 1;
if (ex > 0x3FF + 510) {
z = 0x1.0p700;
xx *= 0x1.0p-700;
yy *= 0x1.0p-700;
} else if (ey < 0x3FF - 450) {
z = 0x1.0p-700;
xx *= 0x1.0p700;
yy *= 0x1.0p700;
}
var hx: f64 = undefined;
var lx: f64 = undefined;
var hy: f64 = undefined;
var ly: f64 = undefined;
sq(&hx, &lx, x);
sq(&hy, &ly, y);
return z * @sqrt(ly + lx + hy + hx);
}
const hypot_test_cases = .{
.{ 0.0, -1.2, 1.2 },
.{ 0.2, -0.34, 0.3944616584663203993612799816649560759946493601889826495362 },
.{ 0.8923, 2.636890, 2.7837722899152509525110650481670176852603253522923737962880 },
.{ 1.5, 5.25, 5.4600824169603887033229768686452745953332522619323580787836 },
.{ 37.45, 159.835, 164.16372840856167640478217141034363907565754072954443805164 },
.{ 89.123, 382.028905, 392.28687638576315875933966414927490685367196874260165618371 },
.{ 123123.234375, 529428.707813, 543556.88524707706887251269205923830745438413088753096759371 },
};
test hypot {
const x32: f32 = 0.0;
const y32: f32 = -1.2;
const x64: f64 = 0.0;
const y64: f64 = -1.2;
try expect(hypot(x32, y32) == hypot32(0.0, -1.2));
try expect(hypot(x64, y64) == hypot64(0.0, -1.2));
try expect(hypot(0.3, 0.4) == 0.5);
}
test hypot32 {
const epsilon = 0.000001;
try expect(math.approxEqAbs(f32, hypot32(0.0, -1.2), 1.2, epsilon));
try expect(math.approxEqAbs(f32, hypot32(0.2, -0.34), 0.394462, epsilon));
try expect(math.approxEqAbs(f32, hypot32(0.8923, 2.636890), 2.783772, epsilon));
try expect(math.approxEqAbs(f32, hypot32(1.5, 5.25), 5.460083, epsilon));
try expect(math.approxEqAbs(f32, hypot32(37.45, 159.835), 164.163742, epsilon));
try expect(math.approxEqAbs(f32, hypot32(89.123, 382.028905), 392.286865, epsilon));
try expect(math.approxEqAbs(f32, hypot32(123123.234375, 529428.707813), 543556.875, epsilon));
test "hypot.correct" {
inline for (.{ f16, f32, f64, f128 }) |T| {
inline for (hypot_test_cases) |v| {
const a: T, const b: T, const c: T = v;
try expect(math.approxEqRel(T, hypot(a, b), c, @sqrt(floatEps(T))));
}
}
}
test hypot64 {
const epsilon = 0.000001;
try expect(math.approxEqAbs(f64, hypot64(0.0, -1.2), 1.2, epsilon));
try expect(math.approxEqAbs(f64, hypot64(0.2, -0.34), 0.394462, epsilon));
try expect(math.approxEqAbs(f64, hypot64(0.8923, 2.636890), 2.783772, epsilon));
try expect(math.approxEqAbs(f64, hypot64(1.5, 5.25), 5.460082, epsilon));
try expect(math.approxEqAbs(f64, hypot64(37.45, 159.835), 164.163728, epsilon));
try expect(math.approxEqAbs(f64, hypot64(89.123, 382.028905), 392.286876, epsilon));
try expect(math.approxEqAbs(f64, hypot64(123123.234375, 529428.707813), 543556.885247, epsilon));
test "hypot.precise" {
inline for (.{ f16, f32, f64 }) |T| { // f128 seems to be 5 ulp
inline for (hypot_test_cases) |v| {
const a: T, const b: T, const c: T = v;
try expect(math.approxEqRel(T, hypot(a, b), c, floatEps(T)));
}
}
}
test "hypot32.special" {
try expect(math.isPositiveInf(hypot32(math.inf(f32), 0.0)));
try expect(math.isPositiveInf(hypot32(-math.inf(f32), 0.0)));
try expect(math.isPositiveInf(hypot32(0.0, math.inf(f32))));
try expect(math.isPositiveInf(hypot32(0.0, -math.inf(f32))));
try expect(math.isNan(hypot32(math.nan(f32), 0.0)));
try expect(math.isNan(hypot32(0.0, math.nan(f32))));
}
test "hypot.special" {
inline for (.{ f16, f32, f64, f128 }) |T| {
try expect(math.isNan(hypot(nan(T), 0.0)));
try expect(math.isNan(hypot(0.0, nan(T))));
test "hypot64.special" {
try expect(math.isPositiveInf(hypot64(math.inf(f64), 0.0)));
try expect(math.isPositiveInf(hypot64(-math.inf(f64), 0.0)));
try expect(math.isPositiveInf(hypot64(0.0, math.inf(f64))));
try expect(math.isPositiveInf(hypot64(0.0, -math.inf(f64))));
try expect(math.isNan(hypot64(math.nan(f64), 0.0)));
try expect(math.isNan(hypot64(0.0, math.nan(f64))));
try expect(math.isPositiveInf(hypot(inf(T), 0.0)));
try expect(math.isPositiveInf(hypot(0.0, inf(T))));
try expect(math.isPositiveInf(hypot(inf(T), nan(T))));
try expect(math.isPositiveInf(hypot(nan(T), inf(T))));
try expect(math.isPositiveInf(hypot(-inf(T), 0.0)));
try expect(math.isPositiveInf(hypot(0.0, -inf(T))));
try expect(math.isPositiveInf(hypot(-inf(T), nan(T))));
try expect(math.isPositiveInf(hypot(nan(T), -inf(T))));
}
}

View file

@ -1893,7 +1893,7 @@ pub fn sched_setaffinity(pid: pid_t, set: *const cpu_set_t) !void {
switch (std.os.errno(rc)) {
.SUCCESS => return,
else => |err| return std.os.unexpectedErrno(err),
else => |err| return std.posix.unexpectedErrno(err),
}
}
@ -5918,8 +5918,8 @@ pub const SIOCGIFINDEX = 0x8933;
pub const IFNAMESIZE = 16;
pub const ifmap = extern struct {
mem_start: u32,
mem_end: u32,
mem_start: usize,
mem_end: usize,
base_addr: u16,
irq: u8,
dma: u8,

View file

@ -1559,7 +1559,7 @@ pub fn setup_buf_ring(fd: posix.fd_t, entries: u16, group_id: u16) !*align(mem.p
if (entries == 0 or entries > 1 << 15) return error.EntriesNotInRange;
if (!std.math.isPowerOfTwo(entries)) return error.EntriesNotPowerOfTwo;
const mmap_size = entries * @sizeOf(linux.io_uring_buf);
const mmap_size = @as(usize, entries) * @sizeOf(linux.io_uring_buf);
const mmap = try posix.mmap(
null,
mmap_size,
@ -3503,6 +3503,10 @@ test "accept multishot" {
}
test "accept/connect/send_zc/recv" {
if (true) {
// https://github.com/ziglang/zig/issues/20212
return error.SkipZigTest;
}
try skipKernelLessThan(.{ .major = 6, .minor = 0, .patch = 0 });
var ring = IoUring.init(16, 0) catch |err| switch (err) {

View file

@ -1,6 +1,6 @@
const std = @import("../../std.zig");
const errno = linux.E.init;
const unexpectedErrno = std.os.unexpectedErrno;
const unexpectedErrno = std.posix.unexpectedErrno;
const expectEqual = std.testing.expectEqual;
const expectError = std.testing.expectError;
const expect = std.testing.expect;

View file

@ -7,14 +7,16 @@ const std = @import("std");
const assert = std.debug.assert;
comptime {
assert(@alignOf(i8) == 1);
assert(@alignOf(u8) == 1);
assert(@alignOf(i16) == 2);
assert(@alignOf(u16) == 2);
assert(@alignOf(i32) == 4);
assert(@alignOf(u32) == 4);
// assert(@alignOf(i64) == 8);
// assert(@alignOf(u64) == 8);
if (builtin.os.tag == .wasi) {
assert(@alignOf(i8) == 1);
assert(@alignOf(u8) == 1);
assert(@alignOf(i16) == 2);
assert(@alignOf(u16) == 2);
assert(@alignOf(i32) == 4);
assert(@alignOf(u32) == 4);
assert(@alignOf(i64) == 8);
assert(@alignOf(u64) == 8);
}
}
pub const iovec_t = std.posix.iovec;

View file

@ -304,7 +304,7 @@ pub fn CreateEventEx(attributes: ?*SECURITY_ATTRIBUTES, name: []const u8, flags:
return CreateEventExW(attributes, nameW.span().ptr, flags, desired_access);
}
pub fn CreateEventExW(attributes: ?*SECURITY_ATTRIBUTES, nameW: [*:0]const u16, flags: DWORD, desired_access: DWORD) !HANDLE {
pub fn CreateEventExW(attributes: ?*SECURITY_ATTRIBUTES, nameW: ?LPCWSTR, flags: DWORD, desired_access: DWORD) !HANDLE {
const handle = kernel32.CreateEventExW(attributes, nameW, flags, desired_access);
if (handle) |h| {
return h;
@ -1368,6 +1368,61 @@ pub fn GetFinalPathNameByHandle(
return error.BadPathName;
}
return out_buffer[0..total_len];
} else if (mountmgrIsVolumeName(symlink)) {
// If the symlink is a volume GUID like \??\Volume{383da0b0-717f-41b6-8c36-00500992b58d},
// then it is a volume mounted as a path rather than a drive letter. We need to
// query the mount manager again to get the DOS path for the volume.
// 49 is the maximum length accepted by mountmgrIsVolumeName
const vol_input_size = @sizeOf(MOUNTMGR_TARGET_NAME) + (49 * 2);
var vol_input_buf: [vol_input_size]u8 align(@alignOf(MOUNTMGR_TARGET_NAME)) = [_]u8{0} ** vol_input_size;
// Note: If the path exceeds MAX_PATH, the Disk Management GUI doesn't accept the full path,
// and instead if must be specified using a shortened form (e.g. C:\FOO~1\BAR~1\<...>).
// However, just to be sure we can handle any path length, we use PATH_MAX_WIDE here.
const min_output_size = @sizeOf(MOUNTMGR_VOLUME_PATHS) + (PATH_MAX_WIDE * 2);
var vol_output_buf: [min_output_size]u8 align(@alignOf(MOUNTMGR_VOLUME_PATHS)) = undefined;
var vol_input_struct: *MOUNTMGR_TARGET_NAME = @ptrCast(&vol_input_buf[0]);
vol_input_struct.DeviceNameLength = @intCast(symlink.len * 2);
@memcpy(@as([*]WCHAR, &vol_input_struct.DeviceName)[0..symlink.len], symlink);
DeviceIoControl(mgmt_handle, IOCTL_MOUNTMGR_QUERY_DOS_VOLUME_PATH, &vol_input_buf, &vol_output_buf) catch |err| switch (err) {
error.AccessDenied => return error.Unexpected,
else => |e| return e,
};
const volume_paths_struct: *const MOUNTMGR_VOLUME_PATHS = @ptrCast(&vol_output_buf[0]);
const volume_path = std.mem.sliceTo(@as(
[*]const u16,
&volume_paths_struct.MultiSz,
)[0 .. volume_paths_struct.MultiSzLength / 2], 0);
if (out_buffer.len < volume_path.len + file_name_u16.len) return error.NameTooLong;
// `out_buffer` currently contains the memory of `file_name_u16`, so it can overlap with where
// we want to place the filename before returning. Here are the possible overlapping cases:
//
// out_buffer: [filename]
// dest: [___(a)___] [___(b)___]
//
// In the case of (a), we need to copy forwards, and in the case of (b) we need
// to copy backwards. We also need to do this before copying the volume path because
// it could overwrite the file_name_u16 memory.
const file_name_dest = out_buffer[volume_path.len..][0..file_name_u16.len];
const file_name_byte_offset = @intFromPtr(file_name_u16.ptr) - @intFromPtr(out_buffer.ptr);
const file_name_index = file_name_byte_offset / @sizeOf(u16);
if (volume_path.len > file_name_index)
mem.copyBackwards(u16, file_name_dest, file_name_u16)
else
mem.copyForwards(u16, file_name_dest, file_name_u16);
@memcpy(out_buffer[0..volume_path.len], volume_path);
const total_len = volume_path.len + file_name_u16.len;
// Validate that DOS does not contain any spurious nul bytes.
if (mem.indexOfScalar(u16, out_buffer[0..total_len], 0)) |_| {
return error.BadPathName;
}
return out_buffer[0..total_len];
}
}
@ -1379,6 +1434,32 @@ pub fn GetFinalPathNameByHandle(
}
}
/// Equivalent to the MOUNTMGR_IS_VOLUME_NAME macro in mountmgr.h
fn mountmgrIsVolumeName(name: []const u16) bool {
return (name.len == 48 or (name.len == 49 and name[48] == mem.nativeToLittle(u16, '\\'))) and
name[0] == mem.nativeToLittle(u16, '\\') and
(name[1] == mem.nativeToLittle(u16, '?') or name[1] == mem.nativeToLittle(u16, '\\')) and
name[2] == mem.nativeToLittle(u16, '?') and
name[3] == mem.nativeToLittle(u16, '\\') and
mem.startsWith(u16, name[4..], std.unicode.utf8ToUtf16LeStringLiteral("Volume{")) and
name[19] == mem.nativeToLittle(u16, '-') and
name[24] == mem.nativeToLittle(u16, '-') and
name[29] == mem.nativeToLittle(u16, '-') and
name[34] == mem.nativeToLittle(u16, '-') and
name[47] == mem.nativeToLittle(u16, '}');
}
test mountmgrIsVolumeName {
const L = std.unicode.utf8ToUtf16LeStringLiteral;
try std.testing.expect(mountmgrIsVolumeName(L("\\\\?\\Volume{383da0b0-717f-41b6-8c36-00500992b58d}")));
try std.testing.expect(mountmgrIsVolumeName(L("\\??\\Volume{383da0b0-717f-41b6-8c36-00500992b58d}")));
try std.testing.expect(mountmgrIsVolumeName(L("\\\\?\\Volume{383da0b0-717f-41b6-8c36-00500992b58d}\\")));
try std.testing.expect(mountmgrIsVolumeName(L("\\??\\Volume{383da0b0-717f-41b6-8c36-00500992b58d}\\")));
try std.testing.expect(!mountmgrIsVolumeName(L("\\\\.\\Volume{383da0b0-717f-41b6-8c36-00500992b58d}")));
try std.testing.expect(!mountmgrIsVolumeName(L("\\??\\Volume{383da0b0-717f-41b6-8c36-00500992b58d}\\foo")));
try std.testing.expect(!mountmgrIsVolumeName(L("\\??\\Volume{383da0b0-717f-41b6-8c36-00500992b58}")));
}
test GetFinalPathNameByHandle {
if (builtin.os.tag != .windows)
return;
@ -4845,6 +4926,8 @@ pub const SYMLINK_FLAG_RELATIVE: ULONG = 0x1;
pub const SYMBOLIC_LINK_FLAG_DIRECTORY: DWORD = 0x1;
pub const SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE: DWORD = 0x2;
pub const MOUNTMGRCONTROLTYPE = 0x0000006D;
pub const MOUNTMGR_MOUNT_POINT = extern struct {
SymbolicLinkNameOffset: ULONG,
SymbolicLinkNameLength: USHORT,
@ -4861,7 +4944,17 @@ pub const MOUNTMGR_MOUNT_POINTS = extern struct {
NumberOfMountPoints: ULONG,
MountPoints: [1]MOUNTMGR_MOUNT_POINT,
};
pub const IOCTL_MOUNTMGR_QUERY_POINTS: ULONG = 0x6d0008;
pub const IOCTL_MOUNTMGR_QUERY_POINTS = CTL_CODE(MOUNTMGRCONTROLTYPE, 2, .METHOD_BUFFERED, FILE_ANY_ACCESS);
pub const MOUNTMGR_TARGET_NAME = extern struct {
DeviceNameLength: USHORT,
DeviceName: [1]WCHAR,
};
pub const MOUNTMGR_VOLUME_PATHS = extern struct {
MultiSzLength: ULONG,
MultiSz: [1]WCHAR,
};
pub const IOCTL_MOUNTMGR_QUERY_DOS_VOLUME_PATH = CTL_CODE(MOUNTMGRCONTROLTYPE, 12, .METHOD_BUFFERED, FILE_ANY_ACCESS);
pub const OBJECT_INFORMATION_CLASS = enum(c_int) {
ObjectBasicInformation = 0,

View file

@ -82,7 +82,7 @@ pub extern "kernel32" fn SetEndOfFile(hFile: HANDLE) callconv(WINAPI) BOOL;
pub extern "kernel32" fn CreateEventExW(
lpEventAttributes: ?*SECURITY_ATTRIBUTES,
lpName: [*:0]const u16,
lpName: ?LPCWSTR,
dwFlags: DWORD,
dwDesiredAccess: DWORD,
) callconv(WINAPI) ?HANDLE;
@ -243,6 +243,8 @@ pub extern "kernel32" fn GetSystemInfo(lpSystemInfo: *SYSTEM_INFO) callconv(WINA
pub extern "kernel32" fn GetSystemTimeAsFileTime(*FILETIME) callconv(WINAPI) void;
pub extern "kernel32" fn IsProcessorFeaturePresent(ProcessorFeature: DWORD) BOOL;
pub extern "kernel32" fn GetSystemDirectoryW(lpBuffer: LPWSTR, uSize: UINT) callconv(WINAPI) UINT;
pub extern "kernel32" fn HeapCreate(flOptions: DWORD, dwInitialSize: SIZE_T, dwMaximumSize: SIZE_T) callconv(WINAPI) ?HANDLE;
pub extern "kernel32" fn HeapDestroy(hHeap: HANDLE) callconv(WINAPI) BOOL;
pub extern "kernel32" fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: *anyopaque, dwBytes: SIZE_T) callconv(WINAPI) ?*anyopaque;

View file

@ -542,7 +542,7 @@ pub fn reboot(cmd: RebootCommand) RebootError!void {
))) {
.SUCCESS => {},
.PERM => return error.PermissionDenied,
else => |err| return std.os.unexpectedErrno(err),
else => |err| return std.posix.unexpectedErrno(err),
}
switch (cmd) {
.CAD_OFF => {},
@ -717,7 +717,7 @@ pub fn raise(sig: u8) RaiseError!void {
}
}
@compileError("std.os.raise unimplemented for this target");
@compileError("std.posix.raise unimplemented for this target");
}
pub const KillError = error{ ProcessNotFound, PermissionDenied } || UnexpectedError;
@ -1932,7 +1932,7 @@ pub fn execvpeZ(
/// See also `getenvZ`.
pub fn getenv(key: []const u8) ?[:0]const u8 {
if (native_os == .windows) {
@compileError("std.os.getenv is unavailable for Windows because environment strings are in WTF-16 format. See std.process.getEnvVarOwned for a cross-platform API or std.process.getenvW for a Windows-specific API.");
@compileError("std.posix.getenv is unavailable for Windows because environment strings are in WTF-16 format. See std.process.getEnvVarOwned for a cross-platform API or std.process.getenvW for a Windows-specific API.");
}
if (builtin.link_libc) {
var ptr = std.c.environ;
@ -1948,7 +1948,7 @@ pub fn getenv(key: []const u8) ?[:0]const u8 {
return null;
}
if (native_os == .wasi) {
@compileError("std.os.getenv is unavailable for WASI. See std.process.getEnvMap or std.process.getEnvVarOwned for a cross-platform API.");
@compileError("std.posix.getenv is unavailable for WASI. See std.process.getEnvMap or std.process.getEnvVarOwned for a cross-platform API.");
}
// The simplified start logic doesn't populate environ.
if (std.start.simplified_logic) return null;
@ -1972,7 +1972,7 @@ pub fn getenvZ(key: [*:0]const u8) ?[:0]const u8 {
return mem.sliceTo(value, 0);
}
if (native_os == .windows) {
@compileError("std.os.getenvZ is unavailable for Windows because environment string is in WTF-16 format. See std.process.getEnvVarOwned for cross-platform API or std.process.getenvW for Windows-specific API.");
@compileError("std.posix.getenvZ is unavailable for Windows because environment string is in WTF-16 format. See std.process.getEnvVarOwned for cross-platform API or std.process.getenvW for Windows-specific API.");
}
return getenv(mem.sliceTo(key, 0));
}

View file

@ -616,7 +616,7 @@ pub fn upperBound(
var right: usize = items.len;
while (left < right) {
const mid = (right + left) / 2;
const mid = left + (right - left) / 2;
if (!lessThan(context, key, items[mid])) {
left = mid + 1;
} else {

View file

@ -29,7 +29,7 @@ pub const Diagnostics = struct {
allocator: std.mem.Allocator,
errors: std.ArrayListUnmanaged(Error) = .{},
root_entries: usize = 0,
entries: usize = 0,
root_dir: []const u8 = "",
pub const Error = union(enum) {
@ -48,41 +48,40 @@ pub const Diagnostics = struct {
},
};
fn findRoot(d: *Diagnostics, path: []const u8, kind: FileKind) !void {
if (rootDir(path)) |root_dir| {
d.root_entries += 1;
if (kind == .directory and d.root_entries == 1) {
d.root_dir = try d.allocator.dupe(u8, root_dir);
return;
}
d.allocator.free(d.root_dir);
d.root_dir = "";
fn findRoot(d: *Diagnostics, path: []const u8) !void {
if (path.len == 0) return;
d.entries += 1;
const root_dir = rootDir(path);
if (d.entries == 1) {
d.root_dir = try d.allocator.dupe(u8, root_dir);
return;
}
if (d.root_dir.len == 0 or std.mem.eql(u8, root_dir, d.root_dir))
return;
d.allocator.free(d.root_dir);
d.root_dir = "";
}
// If path is package root returns root_dir name, otherwise null.
fn rootDir(path: []const u8) ?[]const u8 {
if (path.len == 0) return null;
// Returns root dir of the path, assumes non empty path.
fn rootDir(path: []const u8) []const u8 {
const start_index: usize = if (path[0] == '/') 1 else 0;
const end_index: usize = if (path[path.len - 1] == '/') path.len - 1 else path.len;
const buf = path[start_index..end_index];
return if (std.mem.indexOfScalarPos(u8, buf, 0, '/') == null)
buf
else
null;
if (std.mem.indexOfScalarPos(u8, buf, 0, '/')) |idx| {
return buf[0..idx];
}
return buf;
}
test rootDir {
const expectEqualStrings = testing.expectEqualStrings;
const expect = testing.expect;
try expectEqualStrings("a", rootDir("a").?);
try expectEqualStrings("b", rootDir("b").?);
try expectEqualStrings("c", rootDir("/c").?);
try expectEqualStrings("d", rootDir("/d/").?);
try expect(rootDir("a/b") == null);
try expect(rootDir("") == null);
try expectEqualStrings("a", rootDir("a"));
try expectEqualStrings("b", rootDir("b"));
try expectEqualStrings("c", rootDir("/c"));
try expectEqualStrings("d", rootDir("/d/"));
try expectEqualStrings("a", rootDir("a/b"));
try expectEqualStrings("a", rootDir("a/b/c"));
}
pub fn deinit(d: *Diagnostics) void {
@ -625,7 +624,7 @@ pub fn pipeToFileSystem(dir: std.fs.Dir, reader: anytype, options: PipeOptions)
while (try iter.next()) |file| {
const file_name = stripComponents(file.name, options.strip_components);
if (options.diagnostics) |d| {
try d.findRoot(file_name, file.kind);
try d.findRoot(file_name);
}
switch (file.kind) {
@ -1056,7 +1055,7 @@ test "pipeToFileSystem root_dir" {
// there is no root_dir
try testing.expectEqual(0, diagnostics.root_dir.len);
try testing.expectEqual(3, diagnostics.root_entries);
try testing.expectEqual(5, diagnostics.entries);
}
// with strip_components = 0
@ -1078,10 +1077,25 @@ test "pipeToFileSystem root_dir" {
// root_dir found
try testing.expectEqualStrings("example", diagnostics.root_dir);
try testing.expectEqual(1, diagnostics.root_entries);
try testing.expectEqual(6, diagnostics.entries);
}
}
test "findRoot without explicit root dir" {
const data = @embedFile("tar/testdata/19820.tar");
var fbs = std.io.fixedBufferStream(data);
const reader = fbs.reader();
var tmp = testing.tmpDir(.{});
defer tmp.cleanup();
var diagnostics: Diagnostics = .{ .allocator = testing.allocator };
defer diagnostics.deinit();
try pipeToFileSystem(tmp.dir, reader, .{ .diagnostics = &diagnostics });
try testing.expectEqualStrings("root", diagnostics.root_dir);
}
fn normalizePath(bytes: []u8) []u8 {
const canonical_sep = std.fs.path.sep_posix;
if (std.fs.path.sep == canonical_sep) return bytes;

BIN
lib/std/tar/testdata/19820.tar vendored Normal file

Binary file not shown.

View file

@ -934,7 +934,7 @@ fn utf16LeToUtf8ArrayListImpl(
.cannot_encode_surrogate_half => Utf16LeToUtf8AllocError,
.can_encode_surrogate_half => mem.Allocator.Error,
})!void {
assert(result.capacity >= utf16le.len);
assert(result.unusedCapacitySlice().len >= utf16le.len);
var remaining = utf16le;
vectorized: {
@ -979,7 +979,7 @@ fn utf16LeToUtf8ArrayListImpl(
pub const Utf16LeToUtf8AllocError = mem.Allocator.Error || Utf16LeToUtf8Error;
pub fn utf16LeToUtf8ArrayList(result: *std.ArrayList(u8), utf16le: []const u16) Utf16LeToUtf8AllocError!void {
try result.ensureTotalCapacityPrecise(utf16le.len);
try result.ensureUnusedCapacity(utf16le.len);
return utf16LeToUtf8ArrayListImpl(result, utf16le, .cannot_encode_surrogate_half);
}
@ -1138,7 +1138,7 @@ test utf16LeToUtf8 {
}
fn utf8ToUtf16LeArrayListImpl(result: *std.ArrayList(u16), utf8: []const u8, comptime surrogates: Surrogates) !void {
assert(result.capacity >= utf8.len);
assert(result.unusedCapacitySlice().len >= utf8.len);
var remaining = utf8;
vectorized: {
@ -1176,7 +1176,7 @@ fn utf8ToUtf16LeArrayListImpl(result: *std.ArrayList(u16), utf8: []const u8, com
}
pub fn utf8ToUtf16LeArrayList(result: *std.ArrayList(u16), utf8: []const u8) error{ InvalidUtf8, OutOfMemory }!void {
try result.ensureTotalCapacityPrecise(utf8.len);
try result.ensureUnusedCapacity(utf8.len);
return utf8ToUtf16LeArrayListImpl(result, utf8, .cannot_encode_surrogate_half);
}
@ -1351,6 +1351,64 @@ test utf8ToUtf16LeAllocZ {
}
}
test "ArrayList functions on a re-used list" {
// utf8ToUtf16LeArrayList
{
var list = std.ArrayList(u16).init(testing.allocator);
defer list.deinit();
const init_slice = utf8ToUtf16LeStringLiteral("abcdefg");
try list.ensureTotalCapacityPrecise(init_slice.len);
list.appendSliceAssumeCapacity(init_slice);
try utf8ToUtf16LeArrayList(&list, "hijklmnopqrstuvwyxz");
try testing.expectEqualSlices(u16, utf8ToUtf16LeStringLiteral("abcdefghijklmnopqrstuvwyxz"), list.items);
}
// utf16LeToUtf8ArrayList
{
var list = std.ArrayList(u8).init(testing.allocator);
defer list.deinit();
const init_slice = "abcdefg";
try list.ensureTotalCapacityPrecise(init_slice.len);
list.appendSliceAssumeCapacity(init_slice);
try utf16LeToUtf8ArrayList(&list, utf8ToUtf16LeStringLiteral("hijklmnopqrstuvwyxz"));
try testing.expectEqualStrings("abcdefghijklmnopqrstuvwyxz", list.items);
}
// wtf8ToWtf16LeArrayList
{
var list = std.ArrayList(u16).init(testing.allocator);
defer list.deinit();
const init_slice = utf8ToUtf16LeStringLiteral("abcdefg");
try list.ensureTotalCapacityPrecise(init_slice.len);
list.appendSliceAssumeCapacity(init_slice);
try wtf8ToWtf16LeArrayList(&list, "hijklmnopqrstuvwyxz");
try testing.expectEqualSlices(u16, utf8ToUtf16LeStringLiteral("abcdefghijklmnopqrstuvwyxz"), list.items);
}
// wtf16LeToWtf8ArrayList
{
var list = std.ArrayList(u8).init(testing.allocator);
defer list.deinit();
const init_slice = "abcdefg";
try list.ensureTotalCapacityPrecise(init_slice.len);
list.appendSliceAssumeCapacity(init_slice);
try wtf16LeToWtf8ArrayList(&list, utf8ToUtf16LeStringLiteral("hijklmnopqrstuvwyxz"));
try testing.expectEqualStrings("abcdefghijklmnopqrstuvwyxz", list.items);
}
}
/// Converts a UTF-8 string literal into a UTF-16LE string literal.
pub fn utf8ToUtf16LeStringLiteral(comptime utf8: []const u8) *const [calcUtf16LeLen(utf8) catch |err| @compileError(err):0]u16 {
return comptime blk: {
@ -1685,7 +1743,7 @@ pub const Wtf8Iterator = struct {
};
pub fn wtf16LeToWtf8ArrayList(result: *std.ArrayList(u8), utf16le: []const u16) mem.Allocator.Error!void {
try result.ensureTotalCapacityPrecise(utf16le.len);
try result.ensureUnusedCapacity(utf16le.len);
return utf16LeToUtf8ArrayListImpl(result, utf16le, .can_encode_surrogate_half);
}
@ -1714,7 +1772,7 @@ pub fn wtf16LeToWtf8(wtf8: []u8, wtf16le: []const u16) usize {
}
pub fn wtf8ToWtf16LeArrayList(result: *std.ArrayList(u16), wtf8: []const u8) error{ InvalidWtf8, OutOfMemory }!void {
try result.ensureTotalCapacityPrecise(wtf8.len);
try result.ensureUnusedCapacity(wtf8.len);
return utf8ToUtf16LeArrayListImpl(result, wtf8, .can_encode_surrogate_half);
}

View file

@ -7071,8 +7071,10 @@ fn switchExprErrUnion(
.ctx = ri.ctx,
};
const payload_is_ref = node_ty == .@"if" and
if_full.payload_token != null and token_tags[if_full.payload_token.?] == .asterisk;
const payload_is_ref = switch (node_ty) {
.@"if" => if_full.payload_token != null and token_tags[if_full.payload_token.?] == .asterisk,
.@"catch" => ri.rl == .ref or ri.rl == .ref_coerced_ty,
};
// We need to call `rvalue` to write through to the pointer only if we had a
// result pointer and aren't forwarding it.
@ -9459,7 +9461,7 @@ fn builtinCall(
},
.wasm_memory_grow => {
const index_arg = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .u32_type } }, params[0]);
const delta_arg = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .u32_type } }, params[1]);
const delta_arg = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, params[1]);
const result = try gz.addExtendedPayload(.wasm_memory_grow, Zir.Inst.BinNode{
.node = gz.nodeIndexToRelative(node),
.lhs = index_arg,

View file

@ -23,7 +23,7 @@ pub fn find(allocator: std.mem.Allocator) error{ OutOfMemory, NotFound, PathTooL
if (builtin.os.tag != .windows) return error.NotFound;
//note(dimenus): If this key doesn't exist, neither the Win 8 SDK nor the Win 10 SDK is installed
const roots_key = RegistryWtf8.openKey(windows.HKEY_LOCAL_MACHINE, windows_kits_reg_key) catch |err| switch (err) {
const roots_key = RegistryWtf8.openKey(windows.HKEY_LOCAL_MACHINE, windows_kits_reg_key, .{ .wow64_32 = true }) catch |err| switch (err) {
error.KeyNotFound => return error.NotFound,
};
defer roots_key.closeKey();
@ -137,11 +137,17 @@ fn iterateAndFilterByVersion(
return dirs.toOwnedSlice();
}
const OpenOptions = struct {
/// Sets the KEY_WOW64_32KEY access flag.
/// https://learn.microsoft.com/en-us/windows/win32/winprog64/accessing-an-alternate-registry-view
wow64_32: bool = false,
};
const RegistryWtf8 = struct {
key: windows.HKEY,
/// Assert that `key` is valid WTF-8 string
pub fn openKey(hkey: windows.HKEY, key: []const u8) error{KeyNotFound}!RegistryWtf8 {
pub fn openKey(hkey: windows.HKEY, key: []const u8, options: OpenOptions) error{KeyNotFound}!RegistryWtf8 {
const key_wtf16le: [:0]const u16 = key_wtf16le: {
var key_wtf16le_buf: [RegistryWtf16Le.key_name_max_len]u16 = undefined;
const key_wtf16le_len: usize = std.unicode.wtf8ToWtf16Le(key_wtf16le_buf[0..], key) catch |err| switch (err) {
@ -151,7 +157,7 @@ const RegistryWtf8 = struct {
break :key_wtf16le key_wtf16le_buf[0..key_wtf16le_len :0];
};
const registry_wtf16le = try RegistryWtf16Le.openKey(hkey, key_wtf16le);
const registry_wtf16le = try RegistryWtf16Le.openKey(hkey, key_wtf16le, options);
return .{ .key = registry_wtf16le.key };
}
@ -239,15 +245,17 @@ const RegistryWtf16Le = struct {
pub const value_name_max_len = 16_383;
/// Under HKEY_LOCAL_MACHINE with flags:
/// KEY_QUERY_VALUE, KEY_WOW64_32KEY, and KEY_ENUMERATE_SUB_KEYS.
/// KEY_QUERY_VALUE, KEY_ENUMERATE_SUB_KEYS, optionally KEY_WOW64_32KEY.
/// After finishing work, call `closeKey`.
fn openKey(hkey: windows.HKEY, key_wtf16le: [:0]const u16) error{KeyNotFound}!RegistryWtf16Le {
fn openKey(hkey: windows.HKEY, key_wtf16le: [:0]const u16, options: OpenOptions) error{KeyNotFound}!RegistryWtf16Le {
var key: windows.HKEY = undefined;
var access: windows.REGSAM = windows.KEY_QUERY_VALUE | windows.KEY_ENUMERATE_SUB_KEYS;
if (options.wow64_32) access |= windows.KEY_WOW64_32KEY;
const return_code_int: windows.HRESULT = windows.advapi32.RegOpenKeyExW(
hkey,
key_wtf16le,
0,
windows.KEY_QUERY_VALUE | windows.KEY_WOW64_32KEY | windows.KEY_ENUMERATE_SUB_KEYS,
access,
&key,
);
const return_code: windows.Win32Error = @enumFromInt(return_code_int);
@ -484,13 +492,14 @@ pub const Installation = struct {
version_key_name: []const u8,
) error{ OutOfMemory, InstallationNotFound, PathTooLong, VersionTooLong }!Installation {
var key_name_buf: [RegistryWtf16Le.key_name_max_len]u8 = undefined;
const key = key: for ([_][]const u8{ "\\Wow6432Node", "" }) |wow6432node| {
const key_name = std.fmt.bufPrint(
&key_name_buf,
"SOFTWARE\\Microsoft\\Microsoft SDKs\\Windows\\{s}",
.{version_key_name},
) catch unreachable;
const key = key: for ([_]bool{ true, false }) |wow6432node| {
for ([_]windows.HKEY{ windows.HKEY_LOCAL_MACHINE, windows.HKEY_CURRENT_USER }) |hkey| {
break :key RegistryWtf8.openKey(hkey, std.fmt.bufPrint(
&key_name_buf,
"SOFTWARE{s}\\Microsoft\\Microsoft SDKs\\Windows\\{s}",
.{ wow6432node, version_key_name },
) catch unreachable) catch |err| switch (err) {
break :key RegistryWtf8.openKey(hkey, key_name, .{ .wow64_32 = wow6432node }) catch |err| switch (err) {
error.KeyNotFound => return error.InstallationNotFound,
};
}
@ -563,6 +572,7 @@ pub const Installation = struct {
const options_key = RegistryWtf8.openKey(
windows.HKEY_LOCAL_MACHINE,
reg_query_as_wtf8,
.{ .wow64_32 = true },
) catch |err| switch (err) {
error.KeyNotFound => return false,
};
@ -587,9 +597,34 @@ pub const Installation = struct {
};
const MsvcLibDir = struct {
fn findInstancesDirViaSetup(allocator: std.mem.Allocator) error{ OutOfMemory, PathNotFound }!std.fs.Dir {
const vs_setup_key_path = "SOFTWARE\\Microsoft\\VisualStudio\\Setup";
const vs_setup_key = RegistryWtf8.openKey(windows.HKEY_LOCAL_MACHINE, vs_setup_key_path, .{}) catch |err| switch (err) {
error.KeyNotFound => return error.PathNotFound,
};
defer vs_setup_key.closeKey();
const packages_path = vs_setup_key.getString(allocator, "", "CachePath") catch |err| switch (err) {
error.NotAString,
error.ValueNameNotFound,
error.StringNotFound,
=> return error.PathNotFound,
error.OutOfMemory => return error.OutOfMemory,
};
defer allocator.free(packages_path);
if (!std.fs.path.isAbsolute(packages_path)) return error.PathNotFound;
const instances_path = try std.fs.path.join(allocator, &.{ packages_path, "_Instances" });
defer allocator.free(instances_path);
return std.fs.openDirAbsolute(instances_path, .{ .iterate = true }) catch return error.PathNotFound;
}
fn findInstancesDirViaCLSID(allocator: std.mem.Allocator) error{ OutOfMemory, PathNotFound }!std.fs.Dir {
const setup_configuration_clsid = "{177f0c4a-1cd3-4de7-a32c-71dbbb9fa36d}";
const setup_config_key = RegistryWtf8.openKey(windows.HKEY_CLASSES_ROOT, "CLSID\\" ++ setup_configuration_clsid) catch |err| switch (err) {
const setup_config_key = RegistryWtf8.openKey(windows.HKEY_CLASSES_ROOT, "CLSID\\" ++ setup_configuration_clsid, .{}) catch |err| switch (err) {
error.KeyNotFound => return error.PathNotFound,
};
defer setup_config_key.closeKey();
@ -604,6 +639,8 @@ const MsvcLibDir = struct {
};
defer allocator.free(dll_path);
if (!std.fs.path.isAbsolute(dll_path)) return error.PathNotFound;
var path_it = std.fs.path.componentIterator(dll_path) catch return error.PathNotFound;
// the .dll filename
_ = path_it.last();
@ -622,22 +659,40 @@ const MsvcLibDir = struct {
}
fn findInstancesDir(allocator: std.mem.Allocator) error{ OutOfMemory, PathNotFound }!std.fs.Dir {
// First try to get the path from the .dll that would have been
// First, try getting the packages cache path from the registry.
// This only seems to exist when the path is different from the default.
method1: {
return findInstancesDirViaSetup(allocator) catch |err| switch (err) {
error.OutOfMemory => |e| return e,
error.PathNotFound => break :method1,
};
}
// Otherwise, try to get the path from the .dll that would have been
// loaded via COM for SetupConfiguration.
return findInstancesDirViaCLSID(allocator) catch |orig_err| {
// If that can't be found, fall back to manually appending
// `Microsoft\VisualStudio\Packages\_Instances` to %PROGRAMDATA%
method2: {
return findInstancesDirViaCLSID(allocator) catch |err| switch (err) {
error.OutOfMemory => |e| return e,
error.PathNotFound => break :method2,
};
}
// If that can't be found, fall back to manually appending
// `Microsoft\VisualStudio\Packages\_Instances` to %PROGRAMDATA%
method3: {
const program_data = std.process.getEnvVarOwned(allocator, "PROGRAMDATA") catch |err| switch (err) {
error.OutOfMemory => |e| return e,
else => return orig_err,
error.InvalidWtf8 => unreachable,
error.EnvironmentVariableNotFound => break :method3,
};
defer allocator.free(program_data);
if (!std.fs.path.isAbsolute(program_data)) break :method3;
const instances_path = try std.fs.path.join(allocator, &.{ program_data, "Microsoft", "VisualStudio", "Packages", "_Instances" });
defer allocator.free(instances_path);
return std.fs.openDirAbsolute(instances_path, .{ .iterate = true }) catch return orig_err;
};
return std.fs.openDirAbsolute(instances_path, .{ .iterate = true }) catch break :method3;
}
return error.PathNotFound;
}
/// Intended to be equivalent to `ISetupHelper.ParseVersion`
@ -896,7 +951,7 @@ const MsvcLibDir = struct {
}
}
const vs7_key = RegistryWtf8.openKey(windows.HKEY_LOCAL_MACHINE, "SOFTWARE\\Microsoft\\VisualStudio\\SxS\\VS7") catch return error.PathNotFound;
const vs7_key = RegistryWtf8.openKey(windows.HKEY_LOCAL_MACHINE, "SOFTWARE\\Microsoft\\VisualStudio\\SxS\\VS7", .{ .wow64_32 = true }) catch return error.PathNotFound;
defer vs7_key.closeKey();
try_vs7_key: {
const path_maybe_with_trailing_slash = vs7_key.getString(allocator, "", "14.0") catch |err| switch (err) {

View file

@ -2194,9 +2194,6 @@ pub const Inst = struct {
empty_struct,
generic_poison,
/// This tag is here to match Air and InternPool, however it is unused
/// for ZIR purposes.
var_args_param_type = std.math.maxInt(u32) - 1,
/// This Ref does not correspond to any ZIR instruction or constant
/// value and may instead be used as a sentinel to indicate null.
none = std.math.maxInt(u32),

View file

@ -983,7 +983,7 @@ fn detectAbiAndDynamicLinker(
// Best case scenario: the executable is dynamically linked, and we can iterate
// over our own shared objects and find a dynamic linker.
const elf_file = blk: {
const elf_file = elf_file: {
// This block looks for a shebang line in /usr/bin/env,
// if it finds one, then instead of using /usr/bin/env as the ELF file to examine, it uses the file it references instead,
// doing the same logic recursively in case it finds another shebang line.
@ -995,9 +995,22 @@ fn detectAbiAndDynamicLinker(
// Haiku does not have a /usr root directory.
.haiku => "/bin/env",
};
// #! (2) + 255 (max length of shebang line since Linux 5.1) + \n (1)
var buffer: [258]u8 = undefined;
// According to `man 2 execve`:
//
// The kernel imposes a maximum length on the text
// that follows the "#!" characters at the start of a script;
// characters beyond the limit are ignored.
// Before Linux 5.1, the limit is 127 characters.
// Since Linux 5.1, the limit is 255 characters.
//
// Tests show that bash and zsh consider 255 as total limit,
// *including* "#!" characters and ignoring newline.
// For safety, we set max length as 255 + \n (1).
var buffer: [255 + 1]u8 = undefined;
while (true) {
// Interpreter path can be relative on Linux, but
// for simplicity we are asserting it is an absolute path.
const file = fs.openFileAbsolute(file_name, .{}) catch |err| switch (err) {
error.NoSpaceLeft => unreachable,
error.NameTooLong => unreachable,
@ -1027,27 +1040,55 @@ fn detectAbiAndDynamicLinker(
else => |e| return e,
};
errdefer file.close();
var is_elf_file = false;
defer if (is_elf_file == false) file.close();
const len = preadAtLeast(file, &buffer, 0, buffer.len) catch |err| switch (err) {
// Shortest working interpreter path is "#!/i" (4)
// (interpreter is "/i", assuming all pathes are absolute, like in above comment).
// ELF magic number length is also 4.
//
// If file is shorter than that, it is definitely not ELF file
// nor file with "shebang" line.
const min_len: usize = 4;
const len = preadAtLeast(file, &buffer, 0, min_len) catch |err| switch (err) {
error.UnexpectedEndOfFile,
error.UnableToReadElfFile,
=> break :blk file,
=> return defaultAbiAndDynamicLinker(cpu, os, query),
else => |e| return e,
};
const newline = mem.indexOfScalar(u8, buffer[0..len], '\n') orelse break :blk file;
const line = buffer[0..newline];
if (!mem.startsWith(u8, line, "#!")) break :blk file;
var it = mem.tokenizeScalar(u8, line[2..], ' ');
file_name = it.next() orelse return defaultAbiAndDynamicLinker(cpu, os, query);
file.close();
const content = buffer[0..len];
if (mem.eql(u8, content[0..4], std.elf.MAGIC)) {
// It is very likely ELF file!
is_elf_file = true;
break :elf_file file;
} else if (mem.eql(u8, content[0..2], "#!")) {
// We detected shebang, now parse entire line.
// Trim leading "#!", spaces and tabs.
const trimmed_line = mem.trimLeft(u8, content[2..], &.{ ' ', '\t' });
// This line can have:
// * Interpreter path only,
// * Interpreter path and arguments, all separated by space, tab or NUL character.
// And optionally newline at the end.
const path_maybe_args = mem.trimRight(u8, trimmed_line, "\n");
// Separate path and args.
const path_end = mem.indexOfAny(u8, path_maybe_args, &.{ ' ', '\t', 0 }) orelse path_maybe_args.len;
file_name = path_maybe_args[0..path_end];
continue;
} else {
// Not a ELF file, not a shell script with "shebang line", invalid duck.
return defaultAbiAndDynamicLinker(cpu, os, query);
}
}
};
defer elf_file.close();
// If Zig is statically linked, such as via distributed binary static builds, the above
// trick (block self_exe) won't work. The next thing we fall back to is the same thing, but for elf_file.
// TODO: inline this function and combine the buffer we already read above to find
// the possible shebang line with the buffer we use for the ELF header.
return abiAndDynamicLinkerFromFile(elf_file, cpu, os, ld_info_list, query) catch |err| switch (err) {
@ -1075,7 +1116,7 @@ fn detectAbiAndDynamicLinker(
};
}
fn defaultAbiAndDynamicLinker(cpu: Target.Cpu, os: Target.Os, query: Target.Query) !Target {
fn defaultAbiAndDynamicLinker(cpu: Target.Cpu, os: Target.Os, query: Target.Query) Target {
const abi = query.abi orelse Target.Abi.default(cpu.arch, os);
return .{
.cpu = cpu,

View file

@ -530,6 +530,10 @@ typedef ptrdiff_t intptr_t;
return (val & UINT##w##_C(1) << (bits - UINT8_C(1))) != 0 \
? val | zig_minInt_i(w, bits) : val & zig_maxInt_i(w, bits); \
} \
\
static inline uint##w##_t zig_abs_i##w(int##w##_t val) { \
return (val < 0) ? -(uint##w##_t)val : (uint##w##_t)val; \
} \
\
zig_basic_operator(uint##w##_t, div_floor_u##w, /) \
\
@ -990,24 +994,6 @@ typedef unsigned long zig_Builtin64;
typedef unsigned long long zig_Builtin64;
#endif
#define zig_builtin8_rev(name, val) __builtin_##name(val)
#define zig_builtin16_rev(name, val) __builtin_##name(val)
#if INT_MIN <= INT32_MIN
#define zig_builtin32_rev(name, val) __builtin_##name(val)
#elif LONG_MIN <= INT32_MIN
#define zig_builtin32_rev(name, val) __builtin_l##name(val)
#endif
#if INT_MIN <= INT64_MIN
#define zig_builtin64_rev(name, val) __builtin_##name(val)
#elif LONG_MIN <= INT64_MIN
#define zig_builtin64_rev(name, val) __builtin_l##name(val)
#elif LLONG_MIN <= INT64_MIN
#define zig_builtin64_rev(name, val) __builtin_ll##name(val)
#endif
static inline uint8_t zig_byte_swap_u8(uint8_t val, uint8_t bits) {
return zig_wrap_u8(val >> (8 - bits), bits);
}
@ -1203,24 +1189,6 @@ zig_builtin_clz(16)
zig_builtin_clz(32)
zig_builtin_clz(64)
#if zig_has_builtin(abs) || defined(zig_gnuc)
#define zig_builtin_abs(w) \
static inline int##w##_t zig_abs_i##w(int##w##_t val) { \
return zig_builtin##w##_rev(abs, val); \
}
#else
#define zig_builtin_abs(w) \
static inline int##w##_t zig_abs_i##w(int##w##_t val) { \
if (val == INT##w##_MIN) return val; \
int##w##_t tmp = val >> (w - 1); \
return (val ^ tmp) - tmp; \
}
#endif
zig_builtin_abs(8)
zig_builtin_abs(16)
zig_builtin_abs(32)
zig_builtin_abs(64)
/* ======================== 128-bit Integer Support ========================= */
#if !defined(zig_has_int128)
@ -3375,31 +3343,31 @@ zig_float_negate_builtin(128, zig_make_u128, (UINT64_C(1) << 63, UINT64_C(0)))
zig_expand_concat(zig_float_binary_builtin_, zig_has_f##w)(f##w, sub, -) \
zig_expand_concat(zig_float_binary_builtin_, zig_has_f##w)(f##w, mul, *) \
zig_expand_concat(zig_float_binary_builtin_, zig_has_f##w)(f##w, div, /) \
zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(sqrt)))(zig_f##w, zig_float_fn_f##w##_sqrt, zig_libc_name_f##w(sqrt), (zig_f##w x), (x)) \
zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(sin)))(zig_f##w, zig_float_fn_f##w##_sin, zig_libc_name_f##w(sin), (zig_f##w x), (x)) \
zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(cos)))(zig_f##w, zig_float_fn_f##w##_cos, zig_libc_name_f##w(cos), (zig_f##w x), (x)) \
zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(tan)))(zig_f##w, zig_float_fn_f##w##_tan, zig_libc_name_f##w(tan), (zig_f##w x), (x)) \
zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(exp)))(zig_f##w, zig_float_fn_f##w##_exp, zig_libc_name_f##w(exp), (zig_f##w x), (x)) \
zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(exp2)))(zig_f##w, zig_float_fn_f##w##_exp2, zig_libc_name_f##w(exp2), (zig_f##w x), (x)) \
zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(log)))(zig_f##w, zig_float_fn_f##w##_log, zig_libc_name_f##w(log), (zig_f##w x), (x)) \
zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(log2)))(zig_f##w, zig_float_fn_f##w##_log2, zig_libc_name_f##w(log2), (zig_f##w x), (x)) \
zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(log10)))(zig_f##w, zig_float_fn_f##w##_log10, zig_libc_name_f##w(log10), (zig_f##w x), (x)) \
zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(fabs)))(zig_f##w, zig_float_fn_f##w##_fabs, zig_libc_name_f##w(fabs), (zig_f##w x), (x)) \
zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(floor)))(zig_f##w, zig_float_fn_f##w##_floor, zig_libc_name_f##w(floor), (zig_f##w x), (x)) \
zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(ceil)))(zig_f##w, zig_float_fn_f##w##_ceil, zig_libc_name_f##w(ceil), (zig_f##w x), (x)) \
zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(round)))(zig_f##w, zig_float_fn_f##w##_round, zig_libc_name_f##w(round), (zig_f##w x), (x)) \
zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(trunc)))(zig_f##w, zig_float_fn_f##w##_trunc, zig_libc_name_f##w(trunc), (zig_f##w x), (x)) \
zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(fmod)))(zig_f##w, zig_float_fn_f##w##_fmod, zig_libc_name_f##w(fmod), (zig_f##w x, zig_f##w y), (x, y)) \
zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(fmin)))(zig_f##w, zig_float_fn_f##w##_fmin, zig_libc_name_f##w(fmin), (zig_f##w x, zig_f##w y), (x, y)) \
zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(fmax)))(zig_f##w, zig_float_fn_f##w##_fmax, zig_libc_name_f##w(fmax), (zig_f##w x, zig_f##w y), (x, y)) \
zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(fma)))(zig_f##w, zig_float_fn_f##w##_fma, zig_libc_name_f##w(fma), (zig_f##w x, zig_f##w y, zig_f##w z), (x, y, z)) \
zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(sqrt)))(zig_f##w, zig_sqrt_f##w, zig_libc_name_f##w(sqrt), (zig_f##w x), (x)) \
zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(sin)))(zig_f##w, zig_sin_f##w, zig_libc_name_f##w(sin), (zig_f##w x), (x)) \
zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(cos)))(zig_f##w, zig_cos_f##w, zig_libc_name_f##w(cos), (zig_f##w x), (x)) \
zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(tan)))(zig_f##w, zig_tan_f##w, zig_libc_name_f##w(tan), (zig_f##w x), (x)) \
zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(exp)))(zig_f##w, zig_exp_f##w, zig_libc_name_f##w(exp), (zig_f##w x), (x)) \
zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(exp2)))(zig_f##w, zig_exp2_f##w, zig_libc_name_f##w(exp2), (zig_f##w x), (x)) \
zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(log)))(zig_f##w, zig_log_f##w, zig_libc_name_f##w(log), (zig_f##w x), (x)) \
zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(log2)))(zig_f##w, zig_log2_f##w, zig_libc_name_f##w(log2), (zig_f##w x), (x)) \
zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(log10)))(zig_f##w, zig_log10_f##w, zig_libc_name_f##w(log10), (zig_f##w x), (x)) \
zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(fabs)))(zig_f##w, zig_abs_f##w, zig_libc_name_f##w(fabs), (zig_f##w x), (x)) \
zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(floor)))(zig_f##w, zig_floor_f##w, zig_libc_name_f##w(floor), (zig_f##w x), (x)) \
zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(ceil)))(zig_f##w, zig_ceil_f##w, zig_libc_name_f##w(ceil), (zig_f##w x), (x)) \
zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(round)))(zig_f##w, zig_round_f##w, zig_libc_name_f##w(round), (zig_f##w x), (x)) \
zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(trunc)))(zig_f##w, zig_trunc_f##w, zig_libc_name_f##w(trunc), (zig_f##w x), (x)) \
zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(fmod)))(zig_f##w, zig_fmod_f##w, zig_libc_name_f##w(fmod), (zig_f##w x, zig_f##w y), (x, y)) \
zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(fmin)))(zig_f##w, zig_min_f##w, zig_libc_name_f##w(fmin), (zig_f##w x, zig_f##w y), (x, y)) \
zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(fmax)))(zig_f##w, zig_max_f##w, zig_libc_name_f##w(fmax), (zig_f##w x, zig_f##w y), (x, y)) \
zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(fma)))(zig_f##w, zig_fma_f##w, zig_libc_name_f##w(fma), (zig_f##w x, zig_f##w y, zig_f##w z), (x, y, z)) \
\
static inline zig_f##w zig_div_trunc_f##w(zig_f##w lhs, zig_f##w rhs) { \
return zig_float_fn_f##w##_trunc(zig_div_f##w(lhs, rhs)); \
return zig_trunc_f##w(zig_div_f##w(lhs, rhs)); \
} \
\
static inline zig_f##w zig_div_floor_f##w(zig_f##w lhs, zig_f##w rhs) { \
return zig_float_fn_f##w##_floor(zig_div_f##w(lhs, rhs)); \
return zig_floor_f##w(zig_div_f##w(lhs, rhs)); \
} \
\
static inline zig_f##w zig_mod_f##w(zig_f##w lhs, zig_f##w rhs) { \
@ -3503,7 +3471,7 @@ zig_float_builtins(64)
zig_##Type zig_atomicrmw_desired; \
zig_atomic_load(zig_atomicrmw_expected, obj, zig_memory_order_relaxed, Type, ReprType); \
do { \
zig_atomicrmw_desired = zig_float_fn_##Type##_fmin(zig_atomicrmw_expected, arg); \
zig_atomicrmw_desired = zig_min_##Type(zig_atomicrmw_expected, arg); \
} while (!zig_cmpxchg_weak(obj, zig_atomicrmw_expected, zig_atomicrmw_desired, order, zig_memory_order_relaxed, Type, ReprType)); \
res = zig_atomicrmw_expected; \
} while (0)
@ -3512,7 +3480,7 @@ zig_float_builtins(64)
zig_##Type zig_atomicrmw_desired; \
zig_atomic_load(zig_atomicrmw_expected, obj, zig_memory_order_relaxed, Type, ReprType); \
do { \
zig_atomicrmw_desired = zig_float_fn_##Type##_fmax(zig_atomicrmw_expected, arg); \
zig_atomicrmw_desired = zig_max_##Type(zig_atomicrmw_expected, arg); \
} while (!zig_cmpxchg_weak(obj, zig_atomicrmw_expected, zig_atomicrmw_desired, order, zig_memory_order_relaxed, Type, ReprType)); \
res = zig_atomicrmw_expected; \
} while (0)

View file

@ -782,13 +782,13 @@ pub const Inst = struct {
field_parent_ptr,
/// Implements @wasmMemorySize builtin.
/// Result type is always `u32`,
/// Result type is always `usize`,
/// Uses the `pl_op` field, payload represents the index of the target memory.
/// The operand is unused and always set to `Ref.none`.
wasm_memory_size,
/// Implements @wasmMemoryGrow builtin.
/// Result type is always `i32`,
/// Result type is always `isize`,
/// Uses the `pl_op` field, payload represents the index of the target memory.
wasm_memory_grow,
@ -891,8 +891,7 @@ pub const Inst = struct {
/// The most-significant bit of the value is a tag bit. This bit is 1 if the value represents an
/// instruction index and 0 if it represents an InternPool index.
///
/// The hardcoded refs `none` and `var_args_param_type` are exceptions to this rule: they have
/// their tag bit set but refer to the InternPool.
/// The ref `none` is an exception: it has the tag bit set but refers to the InternPool.
pub const Ref = enum(u32) {
u0_type = @intFromEnum(InternPool.Index.u0_type),
i0_type = @intFromEnum(InternPool.Index.i0_type),
@ -979,9 +978,6 @@ pub const Inst = struct {
empty_struct = @intFromEnum(InternPool.Index.empty_struct),
generic_poison = @intFromEnum(InternPool.Index.generic_poison),
/// This Ref does not correspond to any AIR instruction or constant
/// value. It is used to handle argument types of var args functions.
var_args_param_type = @intFromEnum(InternPool.Index.var_args_param_type),
/// This Ref does not correspond to any AIR instruction or constant
/// value and may instead be used as a sentinel to indicate null.
none = @intFromEnum(InternPool.Index.none),
@ -994,7 +990,6 @@ pub const Inst = struct {
pub fn toInternedAllowNone(ref: Ref) ?InternPool.Index {
return switch (ref) {
.var_args_param_type => .var_args_param_type,
.none => .none,
else => if (@intFromEnum(ref) >> 31 == 0)
@enumFromInt(@as(u31, @truncate(@intFromEnum(ref))))
@ -1010,7 +1005,7 @@ pub const Inst = struct {
pub fn toIndexAllowNone(ref: Ref) ?Index {
return switch (ref) {
.var_args_param_type, .none => null,
.none => null,
else => if (@intFromEnum(ref) >> 31 != 0)
@enumFromInt(@as(u31, @truncate(@intFromEnum(ref))))
else
@ -1476,8 +1471,8 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool)
.save_err_return_trace_index,
=> return Type.usize,
.wasm_memory_grow => return Type.i32,
.wasm_memory_size => return Type.u32,
.wasm_memory_grow => return Type.isize,
.wasm_memory_size => return Type.usize,
.int_from_bool => return Type.u1,
@ -1557,7 +1552,6 @@ pub fn deinit(air: *Air, gpa: std.mem.Allocator) void {
pub fn internedToRef(ip_index: InternPool.Index) Inst.Ref {
return switch (ip_index) {
.var_args_param_type => .var_args_param_type,
.none => .none,
else => {
assert(@intFromEnum(ip_index) >> 31 == 0);

View file

@ -5091,7 +5091,7 @@ fn spawnZigRc(
}
}
pub fn tmpFilePath(comp: *Compilation, ally: Allocator, suffix: []const u8) error{OutOfMemory}![]const u8 {
pub fn tmpFilePath(comp: Compilation, ally: Allocator, suffix: []const u8) error{OutOfMemory}![]const u8 {
const s = std.fs.path.sep_str;
const rand_int = std.crypto.random.int(u64);
if (comp.local_cache_directory.path) |p| {
@ -5894,14 +5894,16 @@ pub fn lockAndSetMiscFailure(
return setMiscFailure(comp, tag, format, args);
}
fn parseLldStderr(comp: *Compilation, comptime prefix: []const u8, stderr: []const u8) Allocator.Error!void {
fn parseLldStderr(comp: *Compilation, prefix: []const u8, stderr: []const u8) Allocator.Error!void {
var context_lines = std.ArrayList([]const u8).init(comp.gpa);
defer context_lines.deinit();
var current_err: ?*LldError = null;
var lines = mem.splitSequence(u8, stderr, if (builtin.os.tag == .windows) "\r\n" else "\n");
while (lines.next()) |line| {
if (mem.startsWith(u8, line, prefix ++ ":")) {
if (line.len > prefix.len + ":".len and
mem.eql(u8, line[0..prefix.len], prefix) and line[prefix.len] == ':')
{
if (current_err) |err| {
err.context_lines = try context_lines.toOwnedSlice();
}
@ -5933,7 +5935,7 @@ fn parseLldStderr(comp: *Compilation, comptime prefix: []const u8, stderr: []con
}
}
pub fn lockAndParseLldStderr(comp: *Compilation, comptime prefix: []const u8, stderr: []const u8) void {
pub fn lockAndParseLldStderr(comp: *Compilation, prefix: []const u8, stderr: []const u8) void {
comp.mutex.lock();
defer comp.mutex.unlock();

View file

@ -1921,7 +1921,7 @@ pub const LoadedUnionType = struct {
return self.flagsPtr(ip).layout;
}
pub fn fieldAlign(self: LoadedUnionType, ip: *const InternPool, field_index: u32) Alignment {
pub fn fieldAlign(self: LoadedUnionType, ip: *const InternPool, field_index: usize) Alignment {
if (self.field_aligns.len == 0) return .none;
return self.field_aligns.get(ip)[field_index];
}
@ -2087,41 +2087,41 @@ pub const LoadedStructType = struct {
/// Returns the already-existing field with the same name, if any.
pub fn addFieldName(
self: @This(),
self: LoadedStructType,
ip: *InternPool,
name: NullTerminatedString,
) ?u32 {
return ip.addFieldName(self.names_map.unwrap().?, self.field_names.start, name);
}
pub fn fieldAlign(s: @This(), ip: *const InternPool, i: usize) Alignment {
pub fn fieldAlign(s: LoadedStructType, ip: *const InternPool, i: usize) Alignment {
if (s.field_aligns.len == 0) return .none;
return s.field_aligns.get(ip)[i];
}
pub fn fieldInit(s: @This(), ip: *const InternPool, i: usize) Index {
pub fn fieldInit(s: LoadedStructType, ip: *const InternPool, i: usize) Index {
if (s.field_inits.len == 0) return .none;
assert(s.haveFieldInits(ip));
return s.field_inits.get(ip)[i];
}
/// Returns `none` in the case the struct is a tuple.
pub fn fieldName(s: @This(), ip: *const InternPool, i: usize) OptionalNullTerminatedString {
pub fn fieldName(s: LoadedStructType, ip: *const InternPool, i: usize) OptionalNullTerminatedString {
if (s.field_names.len == 0) return .none;
return s.field_names.get(ip)[i].toOptional();
}
pub fn fieldIsComptime(s: @This(), ip: *const InternPool, i: usize) bool {
pub fn fieldIsComptime(s: LoadedStructType, ip: *const InternPool, i: usize) bool {
return s.comptime_bits.getBit(ip, i);
}
pub fn setFieldComptime(s: @This(), ip: *InternPool, i: usize) void {
pub fn setFieldComptime(s: LoadedStructType, ip: *InternPool, i: usize) void {
s.comptime_bits.setBit(ip, i);
}
/// Reads the non-opv flag calculated during AstGen. Used to short-circuit more
/// complicated logic.
pub fn knownNonOpv(s: @This(), ip: *InternPool) bool {
pub fn knownNonOpv(s: LoadedStructType, ip: *InternPool) bool {
return switch (s.layout) {
.@"packed" => false,
.auto, .@"extern" => s.flagsPtr(ip).known_non_opv,
@ -2130,7 +2130,7 @@ pub const LoadedStructType = struct {
/// The returned pointer expires with any addition to the `InternPool`.
/// Asserts the struct is not packed.
pub fn flagsPtr(self: @This(), ip: *const InternPool) *Tag.TypeStruct.Flags {
pub fn flagsPtr(self: LoadedStructType, ip: *const InternPool) *Tag.TypeStruct.Flags {
assert(self.layout != .@"packed");
const flags_field_index = std.meta.fieldIndex(Tag.TypeStruct, "flags").?;
return @ptrCast(&ip.extra.items[self.extra_index + flags_field_index]);
@ -2138,13 +2138,13 @@ pub const LoadedStructType = struct {
/// The returned pointer expires with any addition to the `InternPool`.
/// Asserts that the struct is packed.
pub fn packedFlagsPtr(self: @This(), ip: *const InternPool) *Tag.TypeStructPacked.Flags {
pub fn packedFlagsPtr(self: LoadedStructType, ip: *const InternPool) *Tag.TypeStructPacked.Flags {
assert(self.layout == .@"packed");
const flags_field_index = std.meta.fieldIndex(Tag.TypeStructPacked, "flags").?;
return @ptrCast(&ip.extra.items[self.extra_index + flags_field_index]);
}
pub fn assumeRuntimeBitsIfFieldTypesWip(s: @This(), ip: *InternPool) bool {
pub fn assumeRuntimeBitsIfFieldTypesWip(s: LoadedStructType, ip: *InternPool) bool {
if (s.layout == .@"packed") return false;
const flags_ptr = s.flagsPtr(ip);
if (flags_ptr.field_types_wip) {
@ -2154,7 +2154,7 @@ pub const LoadedStructType = struct {
return false;
}
pub fn setTypesWip(s: @This(), ip: *InternPool) bool {
pub fn setTypesWip(s: LoadedStructType, ip: *InternPool) bool {
if (s.layout == .@"packed") return false;
const flags_ptr = s.flagsPtr(ip);
if (flags_ptr.field_types_wip) return true;
@ -2162,12 +2162,12 @@ pub const LoadedStructType = struct {
return false;
}
pub fn clearTypesWip(s: @This(), ip: *InternPool) void {
pub fn clearTypesWip(s: LoadedStructType, ip: *InternPool) void {
if (s.layout == .@"packed") return;
s.flagsPtr(ip).field_types_wip = false;
}
pub fn setLayoutWip(s: @This(), ip: *InternPool) bool {
pub fn setLayoutWip(s: LoadedStructType, ip: *InternPool) bool {
if (s.layout == .@"packed") return false;
const flags_ptr = s.flagsPtr(ip);
if (flags_ptr.layout_wip) return true;
@ -2175,12 +2175,12 @@ pub const LoadedStructType = struct {
return false;
}
pub fn clearLayoutWip(s: @This(), ip: *InternPool) void {
pub fn clearLayoutWip(s: LoadedStructType, ip: *InternPool) void {
if (s.layout == .@"packed") return;
s.flagsPtr(ip).layout_wip = false;
}
pub fn setAlignmentWip(s: @This(), ip: *InternPool) bool {
pub fn setAlignmentWip(s: LoadedStructType, ip: *InternPool) bool {
if (s.layout == .@"packed") return false;
const flags_ptr = s.flagsPtr(ip);
if (flags_ptr.alignment_wip) return true;
@ -2188,12 +2188,12 @@ pub const LoadedStructType = struct {
return false;
}
pub fn clearAlignmentWip(s: @This(), ip: *InternPool) void {
pub fn clearAlignmentWip(s: LoadedStructType, ip: *InternPool) void {
if (s.layout == .@"packed") return;
s.flagsPtr(ip).alignment_wip = false;
}
pub fn setInitsWip(s: @This(), ip: *InternPool) bool {
pub fn setInitsWip(s: LoadedStructType, ip: *InternPool) bool {
switch (s.layout) {
.@"packed" => {
const flag = &s.packedFlagsPtr(ip).field_inits_wip;
@ -2210,14 +2210,14 @@ pub const LoadedStructType = struct {
}
}
pub fn clearInitsWip(s: @This(), ip: *InternPool) void {
pub fn clearInitsWip(s: LoadedStructType, ip: *InternPool) void {
switch (s.layout) {
.@"packed" => s.packedFlagsPtr(ip).field_inits_wip = false,
.auto, .@"extern" => s.flagsPtr(ip).field_inits_wip = false,
}
}
pub fn setFullyResolved(s: @This(), ip: *InternPool) bool {
pub fn setFullyResolved(s: LoadedStructType, ip: *InternPool) bool {
if (s.layout == .@"packed") return true;
const flags_ptr = s.flagsPtr(ip);
if (flags_ptr.fully_resolved) return true;
@ -2225,13 +2225,13 @@ pub const LoadedStructType = struct {
return false;
}
pub fn clearFullyResolved(s: @This(), ip: *InternPool) void {
pub fn clearFullyResolved(s: LoadedStructType, ip: *InternPool) void {
s.flagsPtr(ip).fully_resolved = false;
}
/// The returned pointer expires with any addition to the `InternPool`.
/// Asserts the struct is not packed.
pub fn size(self: @This(), ip: *InternPool) *u32 {
pub fn size(self: LoadedStructType, ip: *InternPool) *u32 {
assert(self.layout != .@"packed");
const size_field_index = std.meta.fieldIndex(Tag.TypeStruct, "size").?;
return @ptrCast(&ip.extra.items[self.extra_index + size_field_index]);
@ -2241,50 +2241,50 @@ pub const LoadedStructType = struct {
/// this type or the user specifies it, it is stored here. This will be
/// set to `none` until the layout is resolved.
/// Asserts the struct is packed.
pub fn backingIntType(s: @This(), ip: *const InternPool) *Index {
pub fn backingIntType(s: LoadedStructType, ip: *const InternPool) *Index {
assert(s.layout == .@"packed");
const field_index = std.meta.fieldIndex(Tag.TypeStructPacked, "backing_int_ty").?;
return @ptrCast(&ip.extra.items[s.extra_index + field_index]);
}
/// Asserts the struct is not packed.
pub fn setZirIndex(s: @This(), ip: *InternPool, new_zir_index: TrackedInst.Index.Optional) void {
pub fn setZirIndex(s: LoadedStructType, ip: *InternPool, new_zir_index: TrackedInst.Index.Optional) void {
assert(s.layout != .@"packed");
const field_index = std.meta.fieldIndex(Tag.TypeStruct, "zir_index").?;
ip.extra.items[s.extra_index + field_index] = @intFromEnum(new_zir_index);
}
pub fn haveFieldTypes(s: @This(), ip: *const InternPool) bool {
pub fn haveFieldTypes(s: LoadedStructType, ip: *const InternPool) bool {
const types = s.field_types.get(ip);
return types.len == 0 or types[0] != .none;
}
pub fn haveFieldInits(s: @This(), ip: *const InternPool) bool {
pub fn haveFieldInits(s: LoadedStructType, ip: *const InternPool) bool {
return switch (s.layout) {
.@"packed" => s.packedFlagsPtr(ip).inits_resolved,
.auto, .@"extern" => s.flagsPtr(ip).inits_resolved,
};
}
pub fn setHaveFieldInits(s: @This(), ip: *InternPool) void {
pub fn setHaveFieldInits(s: LoadedStructType, ip: *InternPool) void {
switch (s.layout) {
.@"packed" => s.packedFlagsPtr(ip).inits_resolved = true,
.auto, .@"extern" => s.flagsPtr(ip).inits_resolved = true,
}
}
pub fn haveLayout(s: @This(), ip: *InternPool) bool {
pub fn haveLayout(s: LoadedStructType, ip: *InternPool) bool {
return switch (s.layout) {
.@"packed" => s.backingIntType(ip).* != .none,
.auto, .@"extern" => s.flagsPtr(ip).layout_resolved,
};
}
pub fn isTuple(s: @This(), ip: *InternPool) bool {
pub fn isTuple(s: LoadedStructType, ip: *InternPool) bool {
return s.layout != .@"packed" and s.flagsPtr(ip).is_tuple;
}
pub fn hasReorderedFields(s: @This()) bool {
pub fn hasReorderedFields(s: LoadedStructType) bool {
return s.layout == .auto;
}
@ -2318,7 +2318,7 @@ pub const LoadedStructType = struct {
/// Iterates over non-comptime fields in the order they are laid out in memory at runtime.
/// May or may not include zero-bit fields.
/// Asserts the struct is not packed.
pub fn iterateRuntimeOrder(s: @This(), ip: *InternPool) RuntimeOrderIterator {
pub fn iterateRuntimeOrder(s: LoadedStructType, ip: *InternPool) RuntimeOrderIterator {
assert(s.layout != .@"packed");
return .{
.ip = ip,
@ -2358,7 +2358,7 @@ pub const LoadedStructType = struct {
}
};
pub fn iterateRuntimeOrderReverse(s: @This(), ip: *InternPool) ReverseRuntimeOrderIterator {
pub fn iterateRuntimeOrderReverse(s: LoadedStructType, ip: *InternPool) ReverseRuntimeOrderIterator {
assert(s.layout != .@"packed");
return .{
.ip = ip,
@ -2818,7 +2818,6 @@ pub const Index = enum(u32) {
generic_poison,
/// Used by Air/Sema only.
var_args_param_type = std.math.maxInt(u32) - 1,
none = std.math.maxInt(u32),
_,
@ -8938,7 +8937,6 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index {
.memoized_call => unreachable,
},
.var_args_param_type => unreachable,
.none => unreachable,
};
}
@ -9153,8 +9151,6 @@ pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPois
.empty_struct => unreachable,
.generic_poison => unreachable,
.var_args_param_type => unreachable, // special tag
_ => switch (ip.items.items(.tag)[@intFromEnum(index)]) {
.removed => unreachable,

View file

@ -6140,18 +6140,18 @@ pub const UnionLayout = struct {
padding: u32,
};
pub fn getUnionLayout(mod: *Module, u: InternPool.LoadedUnionType) UnionLayout {
pub fn getUnionLayout(mod: *Module, loaded_union: InternPool.LoadedUnionType) UnionLayout {
const ip = &mod.intern_pool;
assert(u.haveLayout(ip));
assert(loaded_union.haveLayout(ip));
var most_aligned_field: u32 = undefined;
var most_aligned_field_size: u64 = undefined;
var biggest_field: u32 = undefined;
var payload_size: u64 = 0;
var payload_align: Alignment = .@"1";
for (u.field_types.get(ip), 0..) |field_ty, i| {
for (loaded_union.field_types.get(ip), 0..) |field_ty, field_index| {
if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(mod)) continue;
const explicit_align = u.fieldAlign(ip, @intCast(i));
const explicit_align = loaded_union.fieldAlign(ip, field_index);
const field_align = if (explicit_align != .none)
explicit_align
else
@ -6159,16 +6159,16 @@ pub fn getUnionLayout(mod: *Module, u: InternPool.LoadedUnionType) UnionLayout {
const field_size = Type.fromInterned(field_ty).abiSize(mod);
if (field_size > payload_size) {
payload_size = field_size;
biggest_field = @intCast(i);
biggest_field = @intCast(field_index);
}
if (field_align.compare(.gte, payload_align)) {
payload_align = field_align;
most_aligned_field = @intCast(i);
most_aligned_field = @intCast(field_index);
most_aligned_field_size = field_size;
}
}
const have_tag = u.flagsPtr(ip).runtime_tag.hasTag();
if (!have_tag or !Type.fromInterned(u.enum_tag_ty).hasRuntimeBits(mod)) {
const have_tag = loaded_union.flagsPtr(ip).runtime_tag.hasTag();
if (!have_tag or !Type.fromInterned(loaded_union.enum_tag_ty).hasRuntimeBits(mod)) {
return .{
.abi_size = payload_align.forward(payload_size),
.abi_align = payload_align,
@ -6183,10 +6183,10 @@ pub fn getUnionLayout(mod: *Module, u: InternPool.LoadedUnionType) UnionLayout {
};
}
const tag_size = Type.fromInterned(u.enum_tag_ty).abiSize(mod);
const tag_align = Type.fromInterned(u.enum_tag_ty).abiAlignment(mod).max(.@"1");
const tag_size = Type.fromInterned(loaded_union.enum_tag_ty).abiSize(mod);
const tag_align = Type.fromInterned(loaded_union.enum_tag_ty).abiAlignment(mod).max(.@"1");
return .{
.abi_size = u.size(ip).*,
.abi_size = loaded_union.size(ip).*,
.abi_align = tag_align.max(payload_align),
.most_aligned_field = most_aligned_field,
.most_aligned_field_size = most_aligned_field_size,
@ -6195,24 +6195,24 @@ pub fn getUnionLayout(mod: *Module, u: InternPool.LoadedUnionType) UnionLayout {
.payload_align = payload_align,
.tag_align = tag_align,
.tag_size = tag_size,
.padding = u.padding(ip).*,
.padding = loaded_union.padding(ip).*,
};
}
pub fn unionAbiSize(mod: *Module, u: InternPool.LoadedUnionType) u64 {
return mod.getUnionLayout(u).abi_size;
pub fn unionAbiSize(mod: *Module, loaded_union: InternPool.LoadedUnionType) u64 {
return mod.getUnionLayout(loaded_union).abi_size;
}
/// Returns 0 if the union is represented with 0 bits at runtime.
pub fn unionAbiAlignment(mod: *Module, u: InternPool.LoadedUnionType) Alignment {
pub fn unionAbiAlignment(mod: *Module, loaded_union: InternPool.LoadedUnionType) Alignment {
const ip = &mod.intern_pool;
const have_tag = u.flagsPtr(ip).runtime_tag.hasTag();
const have_tag = loaded_union.flagsPtr(ip).runtime_tag.hasTag();
var max_align: Alignment = .none;
if (have_tag) max_align = Type.fromInterned(u.enum_tag_ty).abiAlignment(mod);
for (u.field_types.get(ip), 0..) |field_ty, field_index| {
if (have_tag) max_align = Type.fromInterned(loaded_union.enum_tag_ty).abiAlignment(mod);
for (loaded_union.field_types.get(ip), 0..) |field_ty, field_index| {
if (!Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue;
const field_align = mod.unionFieldNormalAlignment(u, @intCast(field_index));
const field_align = mod.unionFieldNormalAlignment(loaded_union, @intCast(field_index));
max_align = max_align.max(field_align);
}
return max_align;
@ -6221,20 +6221,20 @@ pub fn unionAbiAlignment(mod: *Module, u: InternPool.LoadedUnionType) Alignment
/// Returns the field alignment, assuming the union is not packed.
/// Keep implementation in sync with `Sema.unionFieldAlignment`.
/// Prefer to call that function instead of this one during Sema.
pub fn unionFieldNormalAlignment(mod: *Module, u: InternPool.LoadedUnionType, field_index: u32) Alignment {
pub fn unionFieldNormalAlignment(mod: *Module, loaded_union: InternPool.LoadedUnionType, field_index: u32) Alignment {
const ip = &mod.intern_pool;
const field_align = u.fieldAlign(ip, field_index);
const field_align = loaded_union.fieldAlign(ip, field_index);
if (field_align != .none) return field_align;
const field_ty = Type.fromInterned(u.field_types.get(ip)[field_index]);
const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
return field_ty.abiAlignment(mod);
}
/// Returns the index of the active field, given the current tag value
pub fn unionTagFieldIndex(mod: *Module, u: InternPool.LoadedUnionType, enum_tag: Value) ?u32 {
pub fn unionTagFieldIndex(mod: *Module, loaded_union: InternPool.LoadedUnionType, enum_tag: Value) ?u32 {
const ip = &mod.intern_pool;
if (enum_tag.toIntern() == .none) return null;
assert(ip.typeOf(enum_tag.toIntern()) == u.enum_tag_ty);
return u.loadTagType(ip).tagValueIndex(ip, enum_tag.toIntern());
assert(ip.typeOf(enum_tag.toIntern()) == loaded_union.enum_tag_ty);
return loaded_union.loadTagType(ip).tagValueIndex(ip, enum_tag.toIntern());
}
/// Returns the field alignment of a non-packed struct in byte units.

View file

@ -1901,7 +1901,6 @@ pub fn resolveConstStringIntern(
pub fn resolveType(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) !Type {
const air_inst = try sema.resolveInst(zir_ref);
assert(air_inst != .var_args_param_type);
const ty = try sema.analyzeAsType(block, src, air_inst);
if (ty.isGenericPoison()) return error.GenericPoison;
return ty;
@ -4572,12 +4571,10 @@ fn zirValidateRefTy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr
const src = un_tok.src();
// In case of GenericPoison, we don't actually have a type, so this will be
// treated as an untyped address-of operator.
if (un_tok.operand == .var_args_param_type) return;
const operand_air_inst = sema.resolveInst(un_tok.operand) catch |err| switch (err) {
error.GenericPoison => return,
else => |e| return e,
};
if (operand_air_inst == .var_args_param_type) return;
const ty_operand = sema.analyzeAsType(block, src, operand_air_inst) catch |err| switch (err) {
error.GenericPoison => return,
else => |e| return e,
@ -7363,7 +7360,7 @@ const CallArgsInfo = union(enum) {
}
/// Analyzes the arg at `arg_index` and coerces it to `param_ty`.
/// `param_ty` may be `generic_poison` or `var_args_param`.
/// `param_ty` may be `generic_poison`. A value of `null` indicates a varargs parameter.
/// `func_ty_info` may be the type before instantiation, even if a generic
/// instantiation has been partially completed.
fn analyzeArg(
@ -7371,16 +7368,16 @@ const CallArgsInfo = union(enum) {
sema: *Sema,
block: *Block,
arg_index: usize,
param_ty: Type,
maybe_param_ty: ?Type,
func_ty_info: InternPool.Key.FuncType,
func_inst: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const param_count = func_ty_info.param_types.len;
switch (param_ty.toIntern()) {
.generic_poison_type, .var_args_param_type => {},
if (maybe_param_ty) |param_ty| switch (param_ty.toIntern()) {
.generic_poison_type => {},
else => try sema.queueFullTypeResolution(param_ty),
}
};
const uncoerced_arg: Air.Inst.Ref = switch (cai) {
inline .resolved, .call_builtin => |resolved| resolved.args[arg_index],
.zir_call => |zir_call| arg_val: {
@ -7409,7 +7406,8 @@ const CallArgsInfo = union(enum) {
// TODO set comptime_reason
}
// Give the arg its result type
sema.inst_map.putAssumeCapacity(zir_call.call_inst, Air.internedToRef(param_ty.toIntern()));
const provide_param_ty = if (maybe_param_ty) |t| t else Type.generic_poison;
sema.inst_map.putAssumeCapacity(zir_call.call_inst, Air.internedToRef(provide_param_ty.toIntern()));
// Resolve the arg!
const uncoerced_arg = try sema.resolveInlineBody(block, arg_body, zir_call.call_inst);
@ -7426,9 +7424,11 @@ const CallArgsInfo = union(enum) {
break :arg_val uncoerced_arg;
},
};
const param_ty = maybe_param_ty orelse {
return sema.coerceVarArgParam(block, uncoerced_arg, cai.argSrc(block, arg_index));
};
switch (param_ty.toIntern()) {
.generic_poison_type => return uncoerced_arg,
.var_args_param_type => return sema.coerceVarArgParam(block, uncoerced_arg, cai.argSrc(block, arg_index)),
else => return sema.coerceExtra(
block,
param_ty,
@ -7970,10 +7970,10 @@ fn analyzeCall(
const args = try sema.arena.alloc(Air.Inst.Ref, args_info.count());
for (args, 0..) |*arg_out, arg_idx| {
// Non-generic, so param types are already resolved
const param_ty = if (arg_idx < func_ty_info.param_types.len) ty: {
const param_ty: ?Type = if (arg_idx < func_ty_info.param_types.len) ty: {
break :ty Type.fromInterned(func_ty_info.param_types.get(ip)[arg_idx]);
} else Type.fromInterned(InternPool.Index.var_args_param_type);
assert(!param_ty.isGenericPoison());
} else null;
if (param_ty) |t| assert(!t.isGenericPoison());
arg_out.* = try args_info.analyzeArg(sema, block, arg_idx, param_ty, func_ty_info, func);
try sema.validateRuntimeValue(block, args_info.argSrc(block, arg_idx), arg_out.*);
if (sema.typeOf(arg_out.*).zigTypeTag(mod) == .NoReturn) {
@ -9700,6 +9700,18 @@ fn funcCommon(
{
return sema.fail(block, param_src, "non-pointer parameter declared noalias", .{});
}
switch (cc_resolved) {
.Interrupt => if (target.cpu.arch.isX86()) {
const err_code_size = target.ptrBitWidth();
switch (i) {
0 => if (param_ty.zigTypeTag(mod) != .Pointer) return sema.fail(block, param_src, "first parameter of function with 'Interrupt' calling convention must be a pointer type", .{}),
1 => if (param_ty.bitSize(mod) != err_code_size) return sema.fail(block, param_src, "second parameter of function with 'Interrupt' calling convention must be a {d}-bit integer", .{err_code_size}),
else => return sema.fail(block, param_src, "'Interrupt' calling convention supports up to 2 parameters, found {d}", .{i + 1}),
}
} else return sema.fail(block, param_src, "parameters are not allowed with 'Interrupt' calling convention", .{}),
.Signal => return sema.fail(block, param_src, "parameters are not allowed with 'Signal' calling convention", .{}),
else => {},
}
}
var ret_ty_requires_comptime = false;
@ -10005,6 +10017,16 @@ fn finishFunc(
return sema.failWithOwnedErrorMsg(block, msg);
}
switch (cc_resolved) {
.Interrupt, .Signal => if (return_type.zigTypeTag(mod) != .Void and return_type.zigTypeTag(mod) != .NoReturn) {
return sema.fail(block, ret_ty_src, "function with calling convention '{s}' must return 'void' or 'noreturn'", .{@tagName(cc_resolved)});
},
.Inline => if (is_noinline) {
return sema.fail(block, cc_src, "'noinline' function cannot have callconv 'Inline'", .{});
},
else => {},
}
const arch = target.cpu.arch;
if (@as(?[]const u8, switch (cc_resolved) {
.Unspecified, .C, .Naked, .Async, .Inline => null,
@ -10048,11 +10070,7 @@ fn finishFunc(
});
}
if (cc_resolved == .Inline and is_noinline) {
return sema.fail(block, cc_src, "'noinline' function cannot have callconv 'Inline'", .{});
}
if (is_generic and sema.no_partial_func_ty) return error.GenericPoison;
if (!final_is_generic and sema.wantErrorReturnTracing(return_type)) {
// Make sure that StackTrace's fields are resolved so that the backend can
// lower this fn type.
@ -10205,12 +10223,10 @@ fn analyzeAs(
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const operand = try sema.resolveInst(zir_operand);
if (zir_dest_type == .var_args_param_type) return operand;
const operand_air_inst = sema.resolveInst(zir_dest_type) catch |err| switch (err) {
error.GenericPoison => return operand,
else => |e| return e,
};
if (operand_air_inst == .var_args_param_type) return operand;
const dest_ty = sema.analyzeAsType(block, src, operand_air_inst) catch |err| switch (err) {
error.GenericPoison => return operand,
else => |e| return e,
@ -22688,7 +22704,16 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
.storage = .{ .elems = new_elems },
} }));
}
if (try sema.typeRequiresComptime(ptr_ty)) {
return sema.failWithOwnedErrorMsg(block, msg: {
const msg = try sema.errMsg(block, src, "pointer to comptime-only type '{}' must be comptime-known, but operand is runtime-known", .{ptr_ty.fmt(mod)});
errdefer msg.destroy(sema.gpa);
const src_decl = mod.declPtr(block.src_decl);
try sema.explainWhyTypeIsComptime(msg, src_decl.toSrcLoc(src, mod), ptr_ty);
break :msg msg;
});
}
try sema.requireRuntimeBlock(block, src, operand_src);
if (!is_vector) {
if (block.wantSafety() and (try sema.typeHasRuntimeBits(elem_ty) or elem_ty.zigTypeTag(mod) == .Fn)) {
@ -24274,6 +24299,14 @@ fn zirSplat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
if (!dest_ty.isVector(mod)) return sema.fail(block, src, "expected vector type, found '{}'", .{dest_ty.fmt(mod)});
if (!dest_ty.hasRuntimeBits(mod)) {
const empty_aggregate = try mod.intern(.{ .aggregate = .{
.ty = dest_ty.toIntern(),
.storage = .{ .elems = &[_]InternPool.Index{} },
} });
return Air.internedToRef(empty_aggregate);
}
const operand = try sema.resolveInst(extra.rhs);
const scalar_ty = dest_ty.childType(mod);
const scalar = try sema.coerce(block, scalar_ty, operand, scalar_src);
@ -26211,7 +26244,7 @@ fn zirWasmMemoryGrow(
const index: u32 = @intCast(try sema.resolveInt(block, index_src, extra.lhs, Type.u32, .{
.needed_comptime_reason = "wasm memory size index must be comptime-known",
}));
const delta = try sema.coerce(block, Type.u32, try sema.resolveInst(extra.rhs), delta_src);
const delta = try sema.coerce(block, Type.usize, try sema.resolveInst(extra.rhs), delta_src);
try sema.requireRuntimeBlock(block, builtin_src, null);
return block.addInst(.{
@ -32507,7 +32540,7 @@ fn analyzeSlice(
const uncasted_end = try sema.analyzeArithmetic(block, .add, start, len, src, start_src, end_src, false);
break :end try sema.coerce(block, Type.usize, uncasted_end, end_src);
} else try sema.coerce(block, Type.usize, uncasted_end_opt, end_src);
if (try sema.resolveValue(end)) |end_val| {
if (try sema.resolveDefinedValue(block, end_src, end)) |end_val| {
const len_s_val = try mod.intValue(
Type.usize,
array_ty.arrayLenIncludingSentinel(mod),
@ -35405,7 +35438,7 @@ pub fn resolveUnionAlignment(
const field_ty = Type.fromInterned(union_type.field_types.get(ip)[field_index]);
if (!(try sema.typeHasRuntimeBits(field_ty))) continue;
const explicit_align = union_type.fieldAlign(ip, @intCast(field_index));
const explicit_align = union_type.fieldAlign(ip, field_index);
const field_align = if (explicit_align != .none)
explicit_align
else
@ -35465,7 +35498,7 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void {
else => return err,
});
const explicit_align = union_type.fieldAlign(ip, @intCast(field_index));
const explicit_align = union_type.fieldAlign(ip, field_index);
const field_align = if (explicit_align != .none)
explicit_align
else
@ -35635,8 +35668,6 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!void {
const ty_ip = ty.toIntern();
switch (ty_ip) {
.var_args_param_type => unreachable,
.none => unreachable,
.u0_type,
@ -37155,7 +37186,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
.empty_struct,
.generic_poison,
// invalid
.var_args_param_type,
.none,
=> unreachable,

View file

@ -14316,7 +14316,7 @@ fn moveStrategy(self: *Self, ty: Type, class: Register.Class, aligned: bool) !Mo
.mmx => {},
.sse => switch (ty.zigTypeTag(mod)) {
else => {
const classes = mem.sliceTo(&abi.classifySystemV(ty, mod, .other), .none);
const classes = mem.sliceTo(&abi.classifySystemV(ty, mod, self.target.*, .other), .none);
assert(std.mem.indexOfNone(abi.Class, classes, &.{
.integer, .sse, .memory, .float, .float_combine,
}) == null);
@ -18450,7 +18450,7 @@ fn airVaArg(self: *Self, inst: Air.Inst.Index) !void {
const overflow_arg_area: MCValue = .{ .indirect = .{ .reg = ptr_arg_list_reg, .off = 8 } };
const reg_save_area: MCValue = .{ .indirect = .{ .reg = ptr_arg_list_reg, .off = 16 } };
const classes = mem.sliceTo(&abi.classifySystemV(promote_ty, mod, .arg), .none);
const classes = mem.sliceTo(&abi.classifySystemV(promote_ty, mod, self.target.*, .arg), .none);
switch (classes[0]) {
.integer => {
assert(classes.len == 1);
@ -18800,7 +18800,7 @@ fn resolveCallingConventionValues(
var ret_tracking_i: usize = 0;
const classes = switch (resolved_cc) {
.SysV => mem.sliceTo(&abi.classifySystemV(ret_ty, mod, .ret), .none),
.SysV => mem.sliceTo(&abi.classifySystemV(ret_ty, mod, self.target.*, .ret), .none),
.Win64 => &.{abi.classifyWindows(ret_ty, mod)},
else => unreachable,
};
@ -18875,7 +18875,7 @@ fn resolveCallingConventionValues(
var arg_mcv_i: usize = 0;
const classes = switch (resolved_cc) {
.SysV => mem.sliceTo(&abi.classifySystemV(ty, mod, .arg), .none),
.SysV => mem.sliceTo(&abi.classifySystemV(ty, mod, self.target.*, .arg), .none),
.Win64 => &.{abi.classifyWindows(ty, mod)},
else => unreachable,
};
@ -19090,7 +19090,7 @@ fn memSize(self: *Self, ty: Type) Memory.Size {
fn splitType(self: *Self, ty: Type) ![2]Type {
const mod = self.bin_file.comp.module.?;
const classes = mem.sliceTo(&abi.classifySystemV(ty, mod, .other), .none);
const classes = mem.sliceTo(&abi.classifySystemV(ty, mod, self.target.*, .other), .none);
var parts: [2]Type = undefined;
if (classes.len == 2) for (&parts, classes, 0..) |*part, class, part_i| {
part.* = switch (class) {

View file

@ -11,6 +11,37 @@ pub const Class = enum {
float,
float_combine,
integer_per_element,
fn isX87(class: Class) bool {
return switch (class) {
.x87, .x87up, .complex_x87 => true,
else => false,
};
}
/// Combine a field class with the prev one.
fn combineSystemV(prev_class: Class, next_class: Class) Class {
// "If both classes are equal, this is the resulting class."
if (prev_class == next_class)
return if (prev_class == .float) .float_combine else prev_class;
// "If one of the classes is NO_CLASS, the resulting class
// is the other class."
if (prev_class == .none) return next_class;
// "If one of the classes is MEMORY, the result is the MEMORY class."
if (prev_class == .memory or next_class == .memory) return .memory;
// "If one of the classes is INTEGER, the result is the INTEGER."
if (prev_class == .integer or next_class == .integer) return .integer;
// "If one of the classes is X87, X87UP, COMPLEX_X87 class,
// MEMORY is used as class."
if (prev_class.isX87() or next_class.isX87()) return .memory;
// "Otherwise class SSE is used."
return .sse;
}
};
pub fn classifyWindows(ty: Type, zcu: *Zcu) Class {
@ -69,9 +100,7 @@ pub const Context = enum { ret, arg, field, other };
/// There are a maximum of 8 possible return slots. Returned values are in
/// the beginning of the array; unused slots are filled with .none.
pub fn classifySystemV(ty: Type, zcu: *Zcu, ctx: Context) [8]Class {
const ip = &zcu.intern_pool;
const target = zcu.getTarget();
pub fn classifySystemV(ty: Type, zcu: *Zcu, target: std.Target, ctx: Context) [8]Class {
const memory_class = [_]Class{
.memory, .none, .none, .none,
.none, .none, .none, .none,
@ -231,121 +260,30 @@ pub fn classifySystemV(ty: Type, zcu: *Zcu, ctx: Context) [8]Class {
}
return memory_class;
},
.Struct => {
.Struct, .Union => {
// "If the size of an object is larger than eight eightbytes, or
// it contains unaligned fields, it has class MEMORY"
// "If the size of the aggregate exceeds a single eightbyte, each is classified
// separately.".
const loaded_struct = ip.loadStructType(ty.toIntern());
const ty_size = ty.abiSize(zcu);
if (loaded_struct.layout == .@"packed") {
assert(ty_size <= 16);
result[0] = .integer;
if (ty_size > 8) result[1] = .integer;
return result;
switch (ty.containerLayout(zcu)) {
.auto, .@"extern" => {},
.@"packed" => {
assert(ty_size <= 16);
result[0] = .integer;
if (ty_size > 8) result[1] = .integer;
return result;
},
}
if (ty_size > 64)
return memory_class;
var byte_offset: u64 = 0;
classifySystemVStruct(&result, &byte_offset, loaded_struct, zcu);
// Post-merger cleanup
// "If one of the classes is MEMORY, the whole argument is passed in memory"
// "If X87UP is not preceded by X87, the whole argument is passed in memory."
var found_sseup = false;
for (result, 0..) |item, i| switch (item) {
.memory => return memory_class,
.x87up => if (i == 0 or result[i - 1] != .x87) return memory_class,
.sseup => found_sseup = true,
else => continue,
};
// "If the size of the aggregate exceeds two eightbytes and the first eight-
// byte isnt SSE or any other eightbyte isnt SSEUP, the whole argument
// is passed in memory."
if (ty_size > 16 and (result[0] != .sse or !found_sseup)) return memory_class;
// "If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE."
for (&result, 0..) |*item, i| {
if (item.* == .sseup) switch (result[i - 1]) {
.sse, .sseup => continue,
else => item.* = .sse,
};
}
return result;
},
.Union => {
// "If the size of an object is larger than eight eightbytes, or
// it contains unaligned fields, it has class MEMORY"
// "If the size of the aggregate exceeds a single eightbyte, each is classified
// separately.".
const union_obj = zcu.typeToUnion(ty).?;
const ty_size = zcu.unionAbiSize(union_obj);
if (union_obj.getLayout(ip) == .@"packed") {
assert(ty_size <= 16);
result[0] = .integer;
if (ty_size > 8) result[1] = .integer;
return result;
}
if (ty_size > 64)
return memory_class;
for (union_obj.field_types.get(ip), 0..) |field_ty, field_index| {
const field_align = union_obj.fieldAlign(ip, @intCast(field_index));
if (field_align != .none and
field_align.compare(.lt, Type.fromInterned(field_ty).abiAlignment(zcu)))
{
return memory_class;
}
// Combine this field with the previous one.
const field_class = classifySystemV(Type.fromInterned(field_ty), zcu, .field);
for (&result, 0..) |*result_item, i| {
const field_item = field_class[i];
// "If both classes are equal, this is the resulting class."
if (result_item.* == field_item) {
continue;
}
// "If one of the classes is NO_CLASS, the resulting class
// is the other class."
if (result_item.* == .none) {
result_item.* = field_item;
continue;
}
if (field_item == .none) {
continue;
}
// "If one of the classes is MEMORY, the result is the MEMORY class."
if (result_item.* == .memory or field_item == .memory) {
result_item.* = .memory;
continue;
}
// "If one of the classes is INTEGER, the result is the INTEGER."
if (result_item.* == .integer or field_item == .integer) {
result_item.* = .integer;
continue;
}
// "If one of the classes is X87, X87UP, COMPLEX_X87 class,
// MEMORY is used as class."
if (result_item.* == .x87 or
result_item.* == .x87up or
result_item.* == .complex_x87 or
field_item == .x87 or
field_item == .x87up or
field_item == .complex_x87)
{
result_item.* = .memory;
continue;
}
// "Otherwise class SSE is used."
result_item.* = .sse;
}
}
_ = if (zcu.typeToStruct(ty)) |loaded_struct|
classifySystemVStruct(&result, 0, loaded_struct, zcu, target)
else if (zcu.typeToUnion(ty)) |loaded_union|
classifySystemVUnion(&result, 0, loaded_union, zcu, target)
else
unreachable;
// Post-merger cleanup
@ -391,78 +329,85 @@ pub fn classifySystemV(ty: Type, zcu: *Zcu, ctx: Context) [8]Class {
fn classifySystemVStruct(
result: *[8]Class,
byte_offset: *u64,
starting_byte_offset: u64,
loaded_struct: InternPool.LoadedStructType,
zcu: *Zcu,
) void {
target: std.Target,
) u64 {
const ip = &zcu.intern_pool;
var byte_offset = starting_byte_offset;
var field_it = loaded_struct.iterateRuntimeOrder(ip);
while (field_it.next()) |field_index| {
const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
const field_align = loaded_struct.fieldAlign(ip, field_index);
byte_offset.* = std.mem.alignForward(
byte_offset = std.mem.alignForward(
u64,
byte_offset.*,
byte_offset,
field_align.toByteUnits() orelse field_ty.abiAlignment(zcu).toByteUnits().?,
);
if (zcu.typeToStruct(field_ty)) |field_loaded_struct| {
if (field_loaded_struct.layout != .@"packed") {
classifySystemVStruct(result, byte_offset, field_loaded_struct, zcu);
continue;
switch (field_loaded_struct.layout) {
.auto, .@"extern" => {
byte_offset = classifySystemVStruct(result, byte_offset, field_loaded_struct, zcu, target);
continue;
},
.@"packed" => {},
}
} else if (zcu.typeToUnion(field_ty)) |field_loaded_union| {
switch (field_loaded_union.getLayout(ip)) {
.auto, .@"extern" => {
byte_offset = classifySystemVUnion(result, byte_offset, field_loaded_union, zcu, target);
continue;
},
.@"packed" => {},
}
}
const field_class = std.mem.sliceTo(&classifySystemV(field_ty, zcu, .field), .none);
const field_size = field_ty.abiSize(zcu);
combine: {
// Combine this field with the previous one.
const result_class = &result[@intCast(byte_offset.* / 8)];
// "If both classes are equal, this is the resulting class."
if (result_class.* == field_class[0]) {
if (result_class.* == .float) {
result_class.* = .float_combine;
}
break :combine;
}
// "If one of the classes is NO_CLASS, the resulting class
// is the other class."
if (result_class.* == .none) {
result_class.* = field_class[0];
break :combine;
}
assert(field_class[0] != .none);
// "If one of the classes is MEMORY, the result is the MEMORY class."
if (result_class.* == .memory or field_class[0] == .memory) {
result_class.* = .memory;
break :combine;
}
// "If one of the classes is INTEGER, the result is the INTEGER."
if (result_class.* == .integer or field_class[0] == .integer) {
result_class.* = .integer;
break :combine;
}
// "If one of the classes is X87, X87UP, COMPLEX_X87 class,
// MEMORY is used as class."
if (result_class.* == .x87 or
result_class.* == .x87up or
result_class.* == .complex_x87 or
field_class[0] == .x87 or
field_class[0] == .x87up or
field_class[0] == .complex_x87)
{
result_class.* = .memory;
break :combine;
}
// "Otherwise class SSE is used."
result_class.* = .sse;
}
@memcpy(result[@intCast(byte_offset.* / 8 + 1)..][0 .. field_class.len - 1], field_class[1..]);
byte_offset.* += field_size;
const field_classes = std.mem.sliceTo(&classifySystemV(field_ty, zcu, target, .field), .none);
for (result[@intCast(byte_offset / 8)..][0..field_classes.len], field_classes) |*result_class, field_class|
result_class.* = result_class.combineSystemV(field_class);
byte_offset += field_ty.abiSize(zcu);
}
const final_byte_offset = starting_byte_offset + loaded_struct.size(ip).*;
std.debug.assert(final_byte_offset == std.mem.alignForward(
u64,
byte_offset,
loaded_struct.flagsPtr(ip).alignment.toByteUnits().?,
));
return final_byte_offset;
}
fn classifySystemVUnion(
result: *[8]Class,
starting_byte_offset: u64,
loaded_union: InternPool.LoadedUnionType,
zcu: *Zcu,
target: std.Target,
) u64 {
const ip = &zcu.intern_pool;
for (0..loaded_union.field_types.len) |field_index| {
const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
if (zcu.typeToStruct(field_ty)) |field_loaded_struct| {
switch (field_loaded_struct.layout) {
.auto, .@"extern" => {
_ = classifySystemVStruct(result, starting_byte_offset, field_loaded_struct, zcu, target);
continue;
},
.@"packed" => {},
}
} else if (zcu.typeToUnion(field_ty)) |field_loaded_union| {
switch (field_loaded_union.getLayout(ip)) {
.auto, .@"extern" => {
_ = classifySystemVUnion(result, starting_byte_offset, field_loaded_union, zcu, target);
continue;
},
.@"packed" => {},
}
}
const field_classes = std.mem.sliceTo(&classifySystemV(field_ty, zcu, target, .field), .none);
for (result[@intCast(starting_byte_offset / 8)..][0..field_classes.len], field_classes) |*result_class, field_class|
result_class.* = result_class.combineSystemV(field_class);
}
return starting_byte_offset + loaded_union.size(ip).*;
}
pub const SysV = struct {

View file

@ -3176,6 +3176,7 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
const zcu = f.object.dg.zcu;
const ip = &zcu.intern_pool;
const air_tags = f.air.instructions.items(.tag);
const air_datas = f.air.instructions.items(.data);
for (body) |inst| {
if (f.liveness.isUnused(inst) and !f.air.mustLower(inst, ip))
@ -3203,23 +3204,23 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
.sub => try airBinOp(f, inst, "-", "sub", .none),
.mul => try airBinOp(f, inst, "*", "mul", .none),
.neg => try airFloatNeg(f, inst),
.neg => try airUnBuiltinCall(f, inst, air_datas[@intFromEnum(inst)].un_op, "neg", .none),
.div_float => try airBinBuiltinCall(f, inst, "div", .none),
.div_trunc, .div_exact => try airBinOp(f, inst, "/", "div_trunc", .none),
.rem => blk: {
const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const bin_op = air_datas[@intFromEnum(inst)].bin_op;
const lhs_scalar_ty = f.typeOf(bin_op.lhs).scalarType(zcu);
// For binary operations @TypeOf(lhs)==@TypeOf(rhs),
// so we only check one.
break :blk if (lhs_scalar_ty.isInt(zcu))
try airBinOp(f, inst, "%", "rem", .none)
else
try airBinFloatOp(f, inst, "fmod");
try airBinBuiltinCall(f, inst, "fmod", .none);
},
.div_floor => try airBinBuiltinCall(f, inst, "div_floor", .none),
.mod => try airBinBuiltinCall(f, inst, "mod", .none),
.abs => try airAbs(f, inst),
.abs => try airUnBuiltinCall(f, inst, air_datas[@intFromEnum(inst)].ty_op.operand, "abs", .none),
.add_wrap => try airBinBuiltinCall(f, inst, "addw", .bits),
.sub_wrap => try airBinBuiltinCall(f, inst, "subw", .bits),
@ -3230,19 +3231,19 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
.mul_sat => try airBinBuiltinCall(f, inst, "muls", .bits),
.shl_sat => try airBinBuiltinCall(f, inst, "shls", .bits),
.sqrt => try airUnFloatOp(f, inst, "sqrt"),
.sin => try airUnFloatOp(f, inst, "sin"),
.cos => try airUnFloatOp(f, inst, "cos"),
.tan => try airUnFloatOp(f, inst, "tan"),
.exp => try airUnFloatOp(f, inst, "exp"),
.exp2 => try airUnFloatOp(f, inst, "exp2"),
.log => try airUnFloatOp(f, inst, "log"),
.log2 => try airUnFloatOp(f, inst, "log2"),
.log10 => try airUnFloatOp(f, inst, "log10"),
.floor => try airUnFloatOp(f, inst, "floor"),
.ceil => try airUnFloatOp(f, inst, "ceil"),
.round => try airUnFloatOp(f, inst, "round"),
.trunc_float => try airUnFloatOp(f, inst, "trunc"),
.sqrt => try airUnBuiltinCall(f, inst, air_datas[@intFromEnum(inst)].un_op, "sqrt", .none),
.sin => try airUnBuiltinCall(f, inst, air_datas[@intFromEnum(inst)].un_op, "sin", .none),
.cos => try airUnBuiltinCall(f, inst, air_datas[@intFromEnum(inst)].un_op, "cos", .none),
.tan => try airUnBuiltinCall(f, inst, air_datas[@intFromEnum(inst)].un_op, "tan", .none),
.exp => try airUnBuiltinCall(f, inst, air_datas[@intFromEnum(inst)].un_op, "exp", .none),
.exp2 => try airUnBuiltinCall(f, inst, air_datas[@intFromEnum(inst)].un_op, "exp2", .none),
.log => try airUnBuiltinCall(f, inst, air_datas[@intFromEnum(inst)].un_op, "log", .none),
.log2 => try airUnBuiltinCall(f, inst, air_datas[@intFromEnum(inst)].un_op, "log2", .none),
.log10 => try airUnBuiltinCall(f, inst, air_datas[@intFromEnum(inst)].un_op, "log10", .none),
.floor => try airUnBuiltinCall(f, inst, air_datas[@intFromEnum(inst)].un_op, "floor", .none),
.ceil => try airUnBuiltinCall(f, inst, air_datas[@intFromEnum(inst)].un_op, "ceil", .none),
.round => try airUnBuiltinCall(f, inst, air_datas[@intFromEnum(inst)].un_op, "round", .none),
.trunc_float => try airUnBuiltinCall(f, inst, air_datas[@intFromEnum(inst)].un_op, "trunc", .none),
.mul_add => try airMulAdd(f, inst),
@ -3251,21 +3252,21 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
.mul_with_overflow => try airOverflow(f, inst, "mul", .bits),
.shl_with_overflow => try airOverflow(f, inst, "shl", .bits),
.min => try airMinMax(f, inst, '<', "fmin"),
.max => try airMinMax(f, inst, '>', "fmax"),
.min => try airMinMax(f, inst, '<', "min"),
.max => try airMinMax(f, inst, '>', "max"),
.slice => try airSlice(f, inst),
.cmp_gt => try airCmpOp(f, inst, f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op, .gt),
.cmp_gte => try airCmpOp(f, inst, f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op, .gte),
.cmp_lt => try airCmpOp(f, inst, f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op, .lt),
.cmp_lte => try airCmpOp(f, inst, f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op, .lte),
.cmp_gt => try airCmpOp(f, inst, air_datas[@intFromEnum(inst)].bin_op, .gt),
.cmp_gte => try airCmpOp(f, inst, air_datas[@intFromEnum(inst)].bin_op, .gte),
.cmp_lt => try airCmpOp(f, inst, air_datas[@intFromEnum(inst)].bin_op, .lt),
.cmp_lte => try airCmpOp(f, inst, air_datas[@intFromEnum(inst)].bin_op, .lte),
.cmp_eq => try airEquality(f, inst, .eq),
.cmp_neq => try airEquality(f, inst, .neq),
.cmp_vector => blk: {
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const ty_pl = air_datas[@intFromEnum(inst)].ty_pl;
const extra = f.air.extraData(Air.VectorCmp, ty_pl.payload).data;
break :blk try airCmpOp(f, inst, extra, extra.compareOperator());
},
@ -3324,11 +3325,11 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
.memcpy => try airMemcpy(f, inst),
.set_union_tag => try airSetUnionTag(f, inst),
.get_union_tag => try airGetUnionTag(f, inst),
.clz => try airUnBuiltinCall(f, inst, "clz", .bits),
.ctz => try airUnBuiltinCall(f, inst, "ctz", .bits),
.popcount => try airUnBuiltinCall(f, inst, "popcount", .bits),
.byte_swap => try airUnBuiltinCall(f, inst, "byte_swap", .bits),
.bit_reverse => try airUnBuiltinCall(f, inst, "bit_reverse", .bits),
.clz => try airUnBuiltinCall(f, inst, air_datas[@intFromEnum(inst)].ty_op.operand, "clz", .bits),
.ctz => try airUnBuiltinCall(f, inst, air_datas[@intFromEnum(inst)].ty_op.operand, "ctz", .bits),
.popcount => try airUnBuiltinCall(f, inst, air_datas[@intFromEnum(inst)].ty_op.operand, "popcount", .bits),
.byte_swap => try airUnBuiltinCall(f, inst, air_datas[@intFromEnum(inst)].ty_op.operand, "byte_swap", .bits),
.bit_reverse => try airUnBuiltinCall(f, inst, air_datas[@intFromEnum(inst)].ty_op.operand, "bit_reverse", .bits),
.tag_name => try airTagName(f, inst),
.error_name => try airErrorName(f, inst),
.splat => try airSplat(f, inst),
@ -4139,7 +4140,7 @@ fn airNot(f: *Function, inst: Air.Inst.Index) !CValue {
const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand_ty = f.typeOf(ty_op.operand);
const scalar_ty = operand_ty.scalarType(zcu);
if (scalar_ty.toIntern() != .bool_type) return try airUnBuiltinCall(f, inst, "not", .bits);
if (scalar_ty.toIntern() != .bool_type) return try airUnBuiltinCall(f, inst, ty_op.operand, "not", .bits);
const op = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
@ -4389,10 +4390,8 @@ fn airMinMax(f: *Function, inst: Air.Inst.Index, operator: u8, operation: []cons
const inst_ty = f.typeOfIndex(inst);
const inst_scalar_ty = inst_ty.scalarType(zcu);
if (inst_scalar_ty.isInt(zcu) and inst_scalar_ty.bitSize(zcu) > 64)
return try airBinBuiltinCall(f, inst, operation[1..], .none);
if (inst_scalar_ty.isRuntimeFloat())
return try airBinFloatOp(f, inst, operation);
if ((inst_scalar_ty.isInt(zcu) and inst_scalar_ty.bitSize(zcu) > 64) or inst_scalar_ty.isRuntimeFloat())
return try airBinBuiltinCall(f, inst, operation, .none);
const lhs = try f.resolveInst(bin_op.lhs);
const rhs = try f.resolveInst(bin_op.rhs);
@ -6276,17 +6275,17 @@ fn airIntFromPtr(f: *Function, inst: Air.Inst.Index) !CValue {
fn airUnBuiltinCall(
f: *Function,
inst: Air.Inst.Index,
operand_ref: Air.Inst.Ref,
operation: []const u8,
info: BuiltinInfo,
) !CValue {
const zcu = f.object.dg.zcu;
const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
const operand = try f.resolveInst(operand_ref);
try reap(f, inst, &.{operand_ref});
const inst_ty = f.typeOfIndex(inst);
const inst_scalar_ty = inst_ty.scalarType(zcu);
const operand_ty = f.typeOf(ty_op.operand);
const operand_ty = f.typeOf(operand_ref);
const scalar_ty = operand_ty.scalarType(zcu);
const inst_scalar_ctype = try f.ctypeFromType(inst_scalar_ty, .complete);
@ -6799,11 +6798,27 @@ fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue {
const src_ty = f.typeOf(bin_op.rhs);
const writer = f.object.writer();
if (dest_ty.ptrSize(zcu) != .One) {
try writer.writeAll("if (");
try writeArrayLen(f, writer, dest_ptr, dest_ty);
try writer.writeAll(" != 0) ");
}
try writer.writeAll("memcpy(");
try writeSliceOrPtr(f, writer, dest_ptr, dest_ty);
try writer.writeAll(", ");
try writeSliceOrPtr(f, writer, src_ptr, src_ty);
try writer.writeAll(", ");
try writeArrayLen(f, writer, dest_ptr, dest_ty);
try writer.writeAll(" * sizeof(");
try f.renderType(writer, dest_ty.elemType2(zcu));
try writer.writeAll("));\n");
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
return .none;
}
fn writeArrayLen(f: *Function, writer: ArrayListWriter, dest_ptr: CValue, dest_ty: Type) !void {
const zcu = f.object.dg.zcu;
switch (dest_ty.ptrSize(zcu)) {
.One => try writer.print("{}", .{
try f.fmtIntLiteral(try zcu.intValue(Type.usize, dest_ty.childType(zcu).arrayLen(zcu))),
@ -6811,12 +6826,6 @@ fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue {
.Many, .C => unreachable,
.Slice => try f.writeCValueMember(writer, dest_ptr, .{ .identifier = "len" }),
}
try writer.writeAll(" * sizeof(");
try f.renderType(writer, dest_ty.elemType2(zcu));
try writer.writeAll("));\n");
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
return .none;
}
fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
@ -6999,7 +7008,6 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
const use_operator = scalar_ty.bitSize(zcu) <= 64;
const op: union(enum) {
const Func = struct { operation: []const u8, info: BuiltinInfo = .none };
float_op: Func,
builtin: Func,
infix: []const u8,
ternary: []const u8,
@ -7008,30 +7016,22 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
.Or => if (use_operator) .{ .infix = " |= " } else .{ .builtin = .{ .operation = "or" } },
.Xor => if (use_operator) .{ .infix = " ^= " } else .{ .builtin = .{ .operation = "xor" } },
.Min => switch (scalar_ty.zigTypeTag(zcu)) {
.Int => if (use_operator) .{ .ternary = " < " } else .{
.builtin = .{ .operation = "min" },
},
.Float => .{ .float_op = .{ .operation = "fmin" } },
.Int => if (use_operator) .{ .ternary = " < " } else .{ .builtin = .{ .operation = "min" } },
.Float => .{ .builtin = .{ .operation = "min" } },
else => unreachable,
},
.Max => switch (scalar_ty.zigTypeTag(zcu)) {
.Int => if (use_operator) .{ .ternary = " > " } else .{
.builtin = .{ .operation = "max" },
},
.Float => .{ .float_op = .{ .operation = "fmax" } },
.Int => if (use_operator) .{ .ternary = " > " } else .{ .builtin = .{ .operation = "max" } },
.Float => .{ .builtin = .{ .operation = "max" } },
else => unreachable,
},
.Add => switch (scalar_ty.zigTypeTag(zcu)) {
.Int => if (use_operator) .{ .infix = " += " } else .{
.builtin = .{ .operation = "addw", .info = .bits },
},
.Int => if (use_operator) .{ .infix = " += " } else .{ .builtin = .{ .operation = "addw", .info = .bits } },
.Float => .{ .builtin = .{ .operation = "add" } },
else => unreachable,
},
.Mul => switch (scalar_ty.zigTypeTag(zcu)) {
.Int => if (use_operator) .{ .infix = " *= " } else .{
.builtin = .{ .operation = "mulw", .info = .bits },
},
.Int => if (use_operator) .{ .infix = " *= " } else .{ .builtin = .{ .operation = "mulw", .info = .bits } },
.Float => .{ .builtin = .{ .operation = "mul" } },
else => unreachable,
},
@ -7095,17 +7095,6 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
const v = try Vectorize.start(f, inst, writer, operand_ty);
try f.writeCValue(writer, accum, .Other);
switch (op) {
.float_op => |func| {
try writer.writeAll(" = zig_float_fn_");
try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty);
try writer.print("_{s}(", .{func.operation});
try f.writeCValue(writer, accum, .FunctionArgument);
try writer.writeAll(", ");
try f.writeCValue(writer, operand, .Other);
try v.elem(f, writer);
try f.object.dg.renderBuiltinInfo(writer, scalar_ty, func.info);
try writer.writeByte(')');
},
.builtin => |func| {
try writer.print(" = zig_{s}_", .{func.operation});
try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty);
@ -7386,110 +7375,6 @@ fn airWasmMemoryGrow(f: *Function, inst: Air.Inst.Index) !CValue {
return local;
}
fn airFloatNeg(f: *Function, inst: Air.Inst.Index) !CValue {
const zcu = f.object.dg.zcu;
const un_op = f.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try f.resolveInst(un_op);
try reap(f, inst, &.{un_op});
const operand_ty = f.typeOf(un_op);
const scalar_ty = operand_ty.scalarType(zcu);
const writer = f.object.writer();
const local = try f.allocLocal(inst, operand_ty);
const v = try Vectorize.start(f, inst, writer, operand_ty);
try f.writeCValue(writer, local, .Other);
try v.elem(f, writer);
try writer.writeAll(" = zig_neg_");
try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty);
try writer.writeByte('(');
try f.writeCValue(writer, operand, .FunctionArgument);
try v.elem(f, writer);
try writer.writeAll(");\n");
try v.end(f, inst, writer);
return local;
}
fn airAbs(f: *Function, inst: Air.Inst.Index) !CValue {
const zcu = f.object.dg.zcu;
const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try f.resolveInst(ty_op.operand);
const ty = f.typeOf(ty_op.operand);
const scalar_ty = ty.scalarType(zcu);
switch (scalar_ty.zigTypeTag(zcu)) {
.Int => if (ty.zigTypeTag(zcu) == .Vector) {
return f.fail("TODO implement airAbs for '{}'", .{ty.fmt(zcu)});
} else {
return airUnBuiltinCall(f, inst, "abs", .none);
},
.Float => return unFloatOp(f, inst, operand, ty, "fabs"),
else => unreachable,
}
}
fn unFloatOp(f: *Function, inst: Air.Inst.Index, operand: CValue, ty: Type, operation: []const u8) !CValue {
const zcu = f.object.dg.zcu;
const scalar_ty = ty.scalarType(zcu);
const writer = f.object.writer();
const local = try f.allocLocal(inst, ty);
const v = try Vectorize.start(f, inst, writer, ty);
try f.writeCValue(writer, local, .Other);
try v.elem(f, writer);
try writer.writeAll(" = zig_float_fn_");
try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty);
try writer.print("_{s}(", .{operation});
try f.writeCValue(writer, operand, .FunctionArgument);
try v.elem(f, writer);
try writer.writeAll(");\n");
try v.end(f, inst, writer);
return local;
}
fn airUnFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CValue {
const un_op = f.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try f.resolveInst(un_op);
try reap(f, inst, &.{un_op});
const inst_ty = f.typeOfIndex(inst);
return unFloatOp(f, inst, operand, inst_ty, operation);
}
fn airBinFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CValue {
const zcu = f.object.dg.zcu;
const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try f.resolveInst(bin_op.lhs);
const rhs = try f.resolveInst(bin_op.rhs);
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
const inst_ty = f.typeOfIndex(inst);
const inst_scalar_ty = inst_ty.scalarType(zcu);
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
const v = try Vectorize.start(f, inst, writer, inst_ty);
try f.writeCValue(writer, local, .Other);
try v.elem(f, writer);
try writer.writeAll(" = zig_float_fn_");
try f.object.dg.renderTypeForBuiltinFnName(writer, inst_scalar_ty);
try writer.print("_{s}(", .{operation});
try f.writeCValue(writer, lhs, .FunctionArgument);
try v.elem(f, writer);
try writer.writeAll(", ");
try f.writeCValue(writer, rhs, .FunctionArgument);
try v.elem(f, writer);
try writer.writeAll(");\n");
try v.end(f, inst, writer);
return local;
}
fn airMulAdd(f: *Function, inst: Air.Inst.Index) !CValue {
const zcu = f.object.dg.zcu;
const pl_op = f.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
@ -7508,9 +7393,9 @@ fn airMulAdd(f: *Function, inst: Air.Inst.Index) !CValue {
const v = try Vectorize.start(f, inst, writer, inst_ty);
try f.writeCValue(writer, local, .Other);
try v.elem(f, writer);
try writer.writeAll(" = zig_float_fn_");
try writer.writeAll(" = zig_fma_");
try f.object.dg.renderTypeForBuiltinFnName(writer, inst_scalar_ty);
try writer.writeAll("_fma(");
try writer.writeByte('(');
try f.writeCValue(writer, mulend1, .FunctionArgument);
try v.elem(f, writer);
try writer.writeAll(", ");

View file

@ -1468,7 +1468,6 @@ pub const Pool = struct {
.bool_false,
.empty_struct,
.generic_poison,
.var_args_param_type,
.none,
=> unreachable,
@ -1858,7 +1857,7 @@ pub const Pool = struct {
loaded_tag.names.get(ip)[field_index].toSlice(ip),
);
const field_alignas = AlignAs.fromAlignment(.{
.@"align" = loaded_union.fieldAlign(ip, @intCast(field_index)),
.@"align" = loaded_union.fieldAlign(ip, field_index),
.abi = field_type.abiAlignment(zcu),
});
pool.addHashedExtraAssumeCapacityTo(scratch, &hasher, Field, .{

View file

@ -1384,7 +1384,7 @@ pub const Object = struct {
const namespace = zcu.namespacePtr(decl.src_namespace);
const owner_mod = namespace.file_scope.mod;
const fn_info = zcu.typeToFunc(decl.typeOf(zcu)).?;
const target = zcu.getTarget();
const target = owner_mod.resolved_target.result;
const ip = &zcu.intern_pool;
var dg: DeclGen = .{
@ -1456,7 +1456,7 @@ pub const Object = struct {
var llvm_arg_i: u32 = 0;
// This gets the LLVM values from the function and stores them in `dg.args`.
const sret = firstParamSRet(fn_info, zcu);
const sret = firstParamSRet(fn_info, zcu, target);
const ret_ptr: Builder.Value = if (sret) param: {
const param = wip.arg(llvm_arg_i);
llvm_arg_i += 1;
@ -2755,7 +2755,7 @@ pub const Object = struct {
// Return type goes first.
if (Type.fromInterned(fn_info.return_type).hasRuntimeBitsIgnoreComptime(mod)) {
const sret = firstParamSRet(fn_info, mod);
const sret = firstParamSRet(fn_info, mod, target);
const ret_ty = if (sret) Type.void else Type.fromInterned(fn_info.return_type);
debug_param_types.appendAssumeCapacity(try o.lowerDebugType(ret_ty));
@ -2881,7 +2881,7 @@ pub const Object = struct {
assert(decl.has_tv);
const fn_info = zcu.typeToFunc(zig_fn_type).?;
const target = owner_mod.resolved_target.result;
const sret = firstParamSRet(fn_info, zcu);
const sret = firstParamSRet(fn_info, zcu, target);
const is_extern = decl.isExtern(zcu);
const function_index = try o.builder.addFunction(
@ -3235,7 +3235,6 @@ pub const Object = struct {
.bool_false,
.empty_struct,
.generic_poison,
.var_args_param_type,
.none,
=> unreachable,
else => switch (ip.indexToKey(t.toIntern())) {
@ -3604,7 +3603,7 @@ pub const Object = struct {
var llvm_params = std.ArrayListUnmanaged(Builder.Type){};
defer llvm_params.deinit(o.gpa);
if (firstParamSRet(fn_info, mod)) {
if (firstParamSRet(fn_info, mod, target)) {
try llvm_params.append(o.gpa, .ptr);
}
@ -4434,6 +4433,10 @@ pub const Object = struct {
if (!param_ty.isPtrLikeOptional(mod) and !ptr_info.flags.is_allowzero) {
try attributes.addParamAttr(llvm_arg_i, .nonnull, &o.builder);
}
if (fn_info.cc == .Interrupt) {
const child_type = try lowerType(o, Type.fromInterned(ptr_info.child));
try attributes.addParamAttr(llvm_arg_i, .{ .byval = child_type }, &o.builder);
}
if (ptr_info.flags.is_const) {
try attributes.addParamAttr(llvm_arg_i, .readonly, &o.builder);
}
@ -4646,8 +4649,8 @@ pub const DeclGen = struct {
debug_global_var,
debug_expression,
);
if (!is_internal_linkage or decl.isExtern(zcu))
variable_index.setGlobalVariableExpression(debug_global_var_expression, &o.builder);
variable_index.setGlobalVariableExpression(debug_global_var_expression, &o.builder);
try o.debug_globals.append(o.gpa, debug_global_var_expression);
}
}
@ -5130,7 +5133,7 @@ pub const FuncGen = struct {
const return_type = Type.fromInterned(fn_info.return_type);
const llvm_fn = try self.resolveInst(pl_op.operand);
const target = mod.getTarget();
const sret = firstParamSRet(fn_info, mod);
const sret = firstParamSRet(fn_info, mod, target);
var llvm_args = std.ArrayList(Builder.Value).init(self.gpa);
defer llvm_args.deinit();
@ -7496,7 +7499,8 @@ pub const FuncGen = struct {
const o = self.dg.object;
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const index = pl_op.payload;
return self.wip.callIntrinsic(.normal, .none, .@"wasm.memory.size", &.{.i32}, &.{
const llvm_usize = try o.lowerType(Type.usize);
return self.wip.callIntrinsic(.normal, .none, .@"wasm.memory.size", &.{llvm_usize}, &.{
try o.builder.intValue(.i32, index),
}, "");
}
@ -7505,7 +7509,8 @@ pub const FuncGen = struct {
const o = self.dg.object;
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const index = pl_op.payload;
return self.wip.callIntrinsic(.normal, .none, .@"wasm.memory.grow", &.{.i32}, &.{
const llvm_isize = try o.lowerType(Type.isize);
return self.wip.callIntrinsic(.normal, .none, .@"wasm.memory.grow", &.{llvm_isize}, &.{
try o.builder.intValue(.i32, index), try self.resolveInst(pl_op.operand),
}, "");
}
@ -9993,20 +9998,21 @@ pub const FuncGen = struct {
return self.wip.conv(.unsigned, small_int_val, int_llvm_ty, "");
}
const tag_int = blk: {
const tag_int_val = blk: {
const tag_ty = union_ty.unionTagTypeHypothetical(mod);
const union_field_name = union_obj.loadTagType(ip).names.get(ip)[extra.field_index];
const enum_field_index = tag_ty.enumFieldIndex(union_field_name, mod).?;
const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index);
const tag_int_val = try tag_val.intFromEnum(tag_ty, mod);
break :blk tag_int_val.toUnsignedInt(mod);
break :blk try tag_val.intFromEnum(tag_ty, mod);
};
if (layout.payload_size == 0) {
if (layout.tag_size == 0) {
return .none;
}
assert(!isByRef(union_ty, mod));
return o.builder.intValue(union_llvm_ty, tag_int);
var big_int_space: Value.BigIntSpace = undefined;
const tag_big_int = tag_int_val.toBigInt(&big_int_space, mod);
return try o.builder.bigIntValue(union_llvm_ty, tag_big_int);
}
assert(isByRef(union_ty, mod));
// The llvm type of the alloca will be the named LLVM union type, and will not
@ -10080,7 +10086,9 @@ pub const FuncGen = struct {
const indices: [2]Builder.Value = .{ usize_zero, try o.builder.intValue(.i32, tag_index) };
const field_ptr = try self.wip.gep(.inbounds, llvm_union_ty, result_ptr, &indices, "");
const tag_ty = try o.lowerType(Type.fromInterned(union_obj.enum_tag_ty));
const llvm_tag = try o.builder.intValue(tag_ty, tag_int);
var big_int_space: Value.BigIntSpace = undefined;
const tag_big_int = tag_int_val.toBigInt(&big_int_space, mod);
const llvm_tag = try o.builder.bigIntValue(tag_ty, tag_big_int);
const tag_alignment = Type.fromInterned(union_obj.enum_tag_ty).abiAlignment(mod).toLlvm();
_ = try self.wip.store(.normal, llvm_tag, field_ptr, tag_alignment);
}
@ -10865,38 +10873,38 @@ fn toLlvmGlobalAddressSpace(wanted_address_space: std.builtin.AddressSpace, targ
};
}
fn firstParamSRet(fn_info: InternPool.Key.FuncType, mod: *Module) bool {
fn firstParamSRet(fn_info: InternPool.Key.FuncType, zcu: *Zcu, target: std.Target) bool {
const return_type = Type.fromInterned(fn_info.return_type);
if (!return_type.hasRuntimeBitsIgnoreComptime(mod)) return false;
if (!return_type.hasRuntimeBitsIgnoreComptime(zcu)) return false;
const target = mod.getTarget();
switch (fn_info.cc) {
.Unspecified, .Inline => return isByRef(return_type, mod),
return switch (fn_info.cc) {
.Unspecified, .Inline => isByRef(return_type, zcu),
.C => switch (target.cpu.arch) {
.mips, .mipsel => return false,
.mips, .mipsel => false,
.x86 => isByRef(return_type, zcu),
.x86_64 => switch (target.os.tag) {
.windows => return x86_64_abi.classifyWindows(return_type, mod) == .memory,
else => return firstParamSRetSystemV(return_type, mod),
.windows => x86_64_abi.classifyWindows(return_type, zcu) == .memory,
else => firstParamSRetSystemV(return_type, zcu, target),
},
.wasm32 => return wasm_c_abi.classifyType(return_type, mod)[0] == .indirect,
.aarch64, .aarch64_be => return aarch64_c_abi.classifyType(return_type, mod) == .memory,
.arm, .armeb => switch (arm_c_abi.classifyType(return_type, mod, .ret)) {
.memory, .i64_array => return true,
.i32_array => |size| return size != 1,
.byval => return false,
.wasm32 => wasm_c_abi.classifyType(return_type, zcu)[0] == .indirect,
.aarch64, .aarch64_be => aarch64_c_abi.classifyType(return_type, zcu) == .memory,
.arm, .armeb => switch (arm_c_abi.classifyType(return_type, zcu, .ret)) {
.memory, .i64_array => true,
.i32_array => |size| size != 1,
.byval => false,
},
.riscv32, .riscv64 => return riscv_c_abi.classifyType(return_type, mod) == .memory,
else => return false, // TODO investigate C ABI for other architectures
.riscv32, .riscv64 => riscv_c_abi.classifyType(return_type, zcu) == .memory,
else => false, // TODO investigate C ABI for other architectures
},
.SysV => return firstParamSRetSystemV(return_type, mod),
.Win64 => return x86_64_abi.classifyWindows(return_type, mod) == .memory,
.Stdcall => return !isScalar(mod, return_type),
else => return false,
}
.SysV => firstParamSRetSystemV(return_type, zcu, target),
.Win64 => x86_64_abi.classifyWindows(return_type, zcu) == .memory,
.Stdcall => !isScalar(zcu, return_type),
else => false,
};
}
fn firstParamSRetSystemV(ty: Type, mod: *Module) bool {
const class = x86_64_abi.classifySystemV(ty, mod, .ret);
fn firstParamSRetSystemV(ty: Type, zcu: *Zcu, target: std.Target) bool {
const class = x86_64_abi.classifySystemV(ty, zcu, target, .ret);
if (class[0] == .memory) return true;
if (class[0] == .x87 and class[2] != .none) return true;
return false;
@ -10922,6 +10930,7 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Bu
.C => {
switch (target.cpu.arch) {
.mips, .mipsel => return o.lowerType(return_type),
.x86 => return if (isByRef(return_type, mod)) .void else o.lowerType(return_type),
.x86_64 => switch (target.os.tag) {
.windows => return lowerWin64FnRetTy(o, fn_info),
else => return lowerSystemVFnRetTy(o, fn_info),
@ -11014,7 +11023,8 @@ fn lowerSystemVFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.E
if (isScalar(mod, return_type)) {
return o.lowerType(return_type);
}
const classes = x86_64_abi.classifySystemV(return_type, mod, .ret);
const target = mod.getTarget();
const classes = x86_64_abi.classifySystemV(return_type, mod, target, .ret);
if (classes[0] == .memory) return .void;
var types_index: u32 = 0;
var types_buffer: [8]Builder.Type = undefined;
@ -11098,8 +11108,8 @@ const ParamTypeIterator = struct {
pub fn next(it: *ParamTypeIterator) Allocator.Error!?Lowering {
if (it.zig_index >= it.fn_info.param_types.len) return null;
const mod = it.object.module;
const ip = &mod.intern_pool;
const zcu = it.object.module;
const ip = &zcu.intern_pool;
const ty = it.fn_info.param_types.get(ip)[it.zig_index];
it.byval_attr = false;
return nextInner(it, Type.fromInterned(ty));
@ -11107,8 +11117,8 @@ const ParamTypeIterator = struct {
/// `airCall` uses this instead of `next` so that it can take into account variadic functions.
pub fn nextCall(it: *ParamTypeIterator, fg: *FuncGen, args: []const Air.Inst.Ref) Allocator.Error!?Lowering {
const mod = it.object.module;
const ip = &mod.intern_pool;
const zcu = it.object.module;
const ip = &zcu.intern_pool;
if (it.zig_index >= it.fn_info.param_types.len) {
if (it.zig_index >= args.len) {
return null;
@ -11121,10 +11131,10 @@ const ParamTypeIterator = struct {
}
fn nextInner(it: *ParamTypeIterator, ty: Type) Allocator.Error!?Lowering {
const mod = it.object.module;
const target = mod.getTarget();
const zcu = it.object.module;
const target = zcu.getTarget();
if (!ty.hasRuntimeBitsIgnoreComptime(mod)) {
if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) {
it.zig_index += 1;
return .no_bits;
}
@ -11132,12 +11142,12 @@ const ParamTypeIterator = struct {
.Unspecified, .Inline => {
it.zig_index += 1;
it.llvm_index += 1;
if (ty.isSlice(mod) or
(ty.zigTypeTag(mod) == .Optional and ty.optionalChild(mod).isSlice(mod) and !ty.ptrAllowsZero(mod)))
if (ty.isSlice(zcu) or
(ty.zigTypeTag(zcu) == .Optional and ty.optionalChild(zcu).isSlice(zcu) and !ty.ptrAllowsZero(zcu)))
{
it.llvm_index += 1;
return .slice;
} else if (isByRef(ty, mod)) {
} else if (isByRef(ty, zcu)) {
return .byref;
} else {
return .byval;
@ -11146,87 +11156,85 @@ const ParamTypeIterator = struct {
.Async => {
@panic("TODO implement async function lowering in the LLVM backend");
},
.C => {
switch (target.cpu.arch) {
.mips, .mipsel => {
it.zig_index += 1;
it.llvm_index += 1;
.C => switch (target.cpu.arch) {
.mips, .mipsel => {
it.zig_index += 1;
it.llvm_index += 1;
return .byval;
},
.x86_64 => switch (target.os.tag) {
.windows => return it.nextWin64(ty),
else => return it.nextSystemV(ty),
},
.wasm32 => {
it.zig_index += 1;
it.llvm_index += 1;
if (isScalar(zcu, ty)) {
return .byval;
},
.x86_64 => switch (target.os.tag) {
.windows => return it.nextWin64(ty),
else => return it.nextSystemV(ty),
},
.wasm32 => {
it.zig_index += 1;
it.llvm_index += 1;
if (isScalar(mod, ty)) {
return .byval;
}
const classes = wasm_c_abi.classifyType(ty, mod);
if (classes[0] == .indirect) {
}
const classes = wasm_c_abi.classifyType(ty, zcu);
if (classes[0] == .indirect) {
return .byref;
}
return .abi_sized_int;
},
.aarch64, .aarch64_be => {
it.zig_index += 1;
it.llvm_index += 1;
switch (aarch64_c_abi.classifyType(ty, zcu)) {
.memory => return .byref_mut,
.float_array => |len| return Lowering{ .float_array = len },
.byval => return .byval,
.integer => {
it.types_len = 1;
it.types_buffer[0] = .i64;
return .multiple_llvm_types;
},
.double_integer => return Lowering{ .i64_array = 2 },
}
},
.arm, .armeb => {
it.zig_index += 1;
it.llvm_index += 1;
switch (arm_c_abi.classifyType(ty, zcu, .arg)) {
.memory => {
it.byval_attr = true;
return .byref;
}
return .abi_sized_int;
},
.aarch64, .aarch64_be => {
it.zig_index += 1;
it.llvm_index += 1;
switch (aarch64_c_abi.classifyType(ty, mod)) {
.memory => return .byref_mut,
.float_array => |len| return Lowering{ .float_array = len },
.byval => return .byval,
.integer => {
it.types_len = 1;
it.types_buffer[0] = .i64;
return .multiple_llvm_types;
},
.double_integer => return Lowering{ .i64_array = 2 },
}
},
.arm, .armeb => {
it.zig_index += 1;
it.llvm_index += 1;
switch (arm_c_abi.classifyType(ty, mod, .arg)) {
.memory => {
it.byval_attr = true;
return .byref;
},
.byval => return .byval,
.i32_array => |size| return Lowering{ .i32_array = size },
.i64_array => |size| return Lowering{ .i64_array = size },
}
},
.riscv32, .riscv64 => {
it.zig_index += 1;
it.llvm_index += 1;
if (ty.toIntern() == .f16_type and
!std.Target.riscv.featureSetHas(target.cpu.features, .d)) return .as_u16;
switch (riscv_c_abi.classifyType(ty, mod)) {
.memory => return .byref_mut,
.byval => return .byval,
.integer => return .abi_sized_int,
.double_integer => return Lowering{ .i64_array = 2 },
.fields => {
it.types_len = 0;
for (0..ty.structFieldCount(mod)) |field_index| {
const field_ty = ty.structFieldType(field_index, mod);
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
it.types_buffer[it.types_len] = try it.object.lowerType(field_ty);
it.types_len += 1;
}
it.llvm_index += it.types_len - 1;
return .multiple_llvm_types;
},
}
},
// TODO investigate C ABI for other architectures
else => {
it.zig_index += 1;
it.llvm_index += 1;
return .byval;
},
}
},
.byval => return .byval,
.i32_array => |size| return Lowering{ .i32_array = size },
.i64_array => |size| return Lowering{ .i64_array = size },
}
},
.riscv32, .riscv64 => {
it.zig_index += 1;
it.llvm_index += 1;
if (ty.toIntern() == .f16_type and
!std.Target.riscv.featureSetHas(target.cpu.features, .d)) return .as_u16;
switch (riscv_c_abi.classifyType(ty, zcu)) {
.memory => return .byref_mut,
.byval => return .byval,
.integer => return .abi_sized_int,
.double_integer => return Lowering{ .i64_array = 2 },
.fields => {
it.types_len = 0;
for (0..ty.structFieldCount(zcu)) |field_index| {
const field_ty = ty.structFieldType(field_index, zcu);
if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
it.types_buffer[it.types_len] = try it.object.lowerType(field_ty);
it.types_len += 1;
}
it.llvm_index += it.types_len - 1;
return .multiple_llvm_types;
},
}
},
// TODO investigate C ABI for other architectures
else => {
it.zig_index += 1;
it.llvm_index += 1;
return .byval;
},
},
.Win64 => return it.nextWin64(ty),
.SysV => return it.nextSystemV(ty),
@ -11234,7 +11242,7 @@ const ParamTypeIterator = struct {
it.zig_index += 1;
it.llvm_index += 1;
if (isScalar(mod, ty)) {
if (isScalar(zcu, ty)) {
return .byval;
} else {
it.byval_attr = true;
@ -11250,10 +11258,10 @@ const ParamTypeIterator = struct {
}
fn nextWin64(it: *ParamTypeIterator, ty: Type) ?Lowering {
const mod = it.object.module;
switch (x86_64_abi.classifyWindows(ty, mod)) {
const zcu = it.object.module;
switch (x86_64_abi.classifyWindows(ty, zcu)) {
.integer => {
if (isScalar(mod, ty)) {
if (isScalar(zcu, ty)) {
it.zig_index += 1;
it.llvm_index += 1;
return .byval;
@ -11283,16 +11291,17 @@ const ParamTypeIterator = struct {
}
fn nextSystemV(it: *ParamTypeIterator, ty: Type) Allocator.Error!?Lowering {
const mod = it.object.module;
const ip = &mod.intern_pool;
const classes = x86_64_abi.classifySystemV(ty, mod, .arg);
const zcu = it.object.module;
const ip = &zcu.intern_pool;
const target = zcu.getTarget();
const classes = x86_64_abi.classifySystemV(ty, zcu, target, .arg);
if (classes[0] == .memory) {
it.zig_index += 1;
it.llvm_index += 1;
it.byval_attr = true;
return .byref;
}
if (isScalar(mod, ty)) {
if (isScalar(zcu, ty)) {
it.zig_index += 1;
it.llvm_index += 1;
return .byval;

View file

@ -19,6 +19,8 @@ const InternPool = @import("InternPool.zig");
const Type = @import("type.zig").Type;
const Value = @import("Value.zig");
const LlvmObject = @import("codegen/llvm.zig").Object;
const lldMain = @import("main.zig").lldMain;
const Package = @import("Package.zig");
/// When adding a new field, remember to update `hashAddSystemLibs`.
/// These are *always* dynamically linked. Static libraries will be
@ -982,3 +984,113 @@ pub const File = struct {
pub const NvPtx = @import("link/NvPtx.zig");
pub const Dwarf = @import("link/Dwarf.zig");
};
pub fn spawnLld(
comp: *Compilation,
arena: Allocator,
argv: []const []const u8,
) !void {
if (comp.verbose_link) {
// Skip over our own name so that the LLD linker name is the first argv item.
Compilation.dump_argv(argv[1..]);
}
// If possible, we run LLD as a child process because it does not always
// behave properly as a library, unfortunately.
// https://github.com/ziglang/zig/issues/3825
if (!std.process.can_spawn) {
const exit_code = try lldMain(arena, argv, false);
if (exit_code == 0) return;
if (comp.clang_passthrough_mode) std.process.exit(exit_code);
return error.LLDReportedFailure;
}
var stderr: []u8 = &.{};
defer comp.gpa.free(stderr);
var child = std.process.Child.init(argv, arena);
const term = (if (comp.clang_passthrough_mode) term: {
child.stdin_behavior = .Inherit;
child.stdout_behavior = .Inherit;
child.stderr_behavior = .Inherit;
break :term child.spawnAndWait();
} else term: {
child.stdin_behavior = .Ignore;
child.stdout_behavior = .Ignore;
child.stderr_behavior = .Pipe;
child.spawn() catch |err| break :term err;
stderr = try child.stderr.?.reader().readAllAlloc(comp.gpa, std.math.maxInt(usize));
break :term child.wait();
}) catch |first_err| term: {
const err = switch (first_err) {
error.NameTooLong => err: {
const s = fs.path.sep_str;
const rand_int = std.crypto.random.int(u64);
const rsp_path = "tmp" ++ s ++ Package.Manifest.hex64(rand_int) ++ ".rsp";
const rsp_file = try comp.local_cache_directory.handle.createFileZ(rsp_path, .{});
defer comp.local_cache_directory.handle.deleteFileZ(rsp_path) catch |err|
log.warn("failed to delete response file {s}: {s}", .{ rsp_path, @errorName(err) });
{
defer rsp_file.close();
var rsp_buf = std.io.bufferedWriter(rsp_file.writer());
const rsp_writer = rsp_buf.writer();
for (argv[2..]) |arg| {
try rsp_writer.writeByte('"');
for (arg) |c| {
switch (c) {
'\"', '\\' => try rsp_writer.writeByte('\\'),
else => {},
}
try rsp_writer.writeByte(c);
}
try rsp_writer.writeByte('"');
try rsp_writer.writeByte('\n');
}
try rsp_buf.flush();
}
var rsp_child = std.process.Child.init(&.{ argv[0], argv[1], try std.fmt.allocPrint(
arena,
"@{s}",
.{try comp.local_cache_directory.join(arena, &.{rsp_path})},
) }, arena);
if (comp.clang_passthrough_mode) {
rsp_child.stdin_behavior = .Inherit;
rsp_child.stdout_behavior = .Inherit;
rsp_child.stderr_behavior = .Inherit;
break :term rsp_child.spawnAndWait() catch |err| break :err err;
} else {
rsp_child.stdin_behavior = .Ignore;
rsp_child.stdout_behavior = .Ignore;
rsp_child.stderr_behavior = .Pipe;
rsp_child.spawn() catch |err| break :err err;
stderr = try rsp_child.stderr.?.reader().readAllAlloc(comp.gpa, std.math.maxInt(usize));
break :term rsp_child.wait() catch |err| break :err err;
}
},
else => first_err,
};
log.err("unable to spawn {s}: {s}", .{ argv[0], @errorName(err) });
return error.UnableToSpawnSelf;
};
switch (term) {
.Exited => |code| if (code != 0) {
if (comp.clang_passthrough_mode) std.process.exit(code);
comp.lockAndParseLldStderr(argv[1], stderr);
return error.LLDReportedFailure;
},
else => {
if (comp.clang_passthrough_mode) std.process.abort();
log.err("{s} terminated with stderr:\n{s}", .{ argv[0], stderr });
return error.LLDCrashed;
},
}
if (stderr.len > 0) log.warn("unexpected LLD stderr:\n{s}", .{stderr});
}

View file

@ -9,7 +9,6 @@ const Cache = std.Build.Cache;
const mingw = @import("../../mingw.zig");
const link = @import("../../link.zig");
const lldMain = @import("../../main.zig").lldMain;
const trace = @import("../../tracy.zig").trace;
const Allocator = mem.Allocator;
@ -502,74 +501,7 @@ pub fn linkWithLLD(self: *Coff, arena: Allocator, prog_node: *std.Progress.Node)
return error.DllImportLibraryNotFound;
}
if (comp.verbose_link) {
// Skip over our own name so that the LLD linker name is the first argv item.
Compilation.dump_argv(argv.items[1..]);
}
if (std.process.can_spawn) {
// If possible, we run LLD as a child process because it does not always
// behave properly as a library, unfortunately.
// https://github.com/ziglang/zig/issues/3825
var child = std.ChildProcess.init(argv.items, arena);
if (comp.clang_passthrough_mode) {
child.stdin_behavior = .Inherit;
child.stdout_behavior = .Inherit;
child.stderr_behavior = .Inherit;
const term = child.spawnAndWait() catch |err| {
log.err("unable to spawn {s}: {s}", .{ argv.items[0], @errorName(err) });
return error.UnableToSpawnSelf;
};
switch (term) {
.Exited => |code| {
if (code != 0) {
std.process.exit(code);
}
},
else => std.process.abort(),
}
} else {
child.stdin_behavior = .Ignore;
child.stdout_behavior = .Ignore;
child.stderr_behavior = .Pipe;
try child.spawn();
const stderr = try child.stderr.?.reader().readAllAlloc(arena, std.math.maxInt(usize));
const term = child.wait() catch |err| {
log.err("unable to spawn {s}: {s}", .{ argv.items[0], @errorName(err) });
return error.UnableToSpawnSelf;
};
switch (term) {
.Exited => |code| {
if (code != 0) {
comp.lockAndParseLldStderr(linker_command, stderr);
return error.LLDReportedFailure;
}
},
else => {
log.err("{s} terminated with stderr:\n{s}", .{ argv.items[0], stderr });
return error.LLDCrashed;
},
}
if (stderr.len != 0) {
log.warn("unexpected LLD stderr:\n{s}", .{stderr});
}
}
} else {
const exit_code = try lldMain(arena, argv.items, false);
if (exit_code != 0) {
if (comp.clang_passthrough_mode) {
std.process.exit(exit_code);
} else {
return error.LLDReportedFailure;
}
}
}
try link.spawnLld(comp, arena, argv.items);
}
if (!self.base.disable_lld_caching) {

View file

@ -205,10 +205,19 @@ num_ifunc_dynrelocs: usize = 0,
/// List of atoms that are owned directly by the linker.
atoms: std.ArrayListUnmanaged(Atom) = .{},
atoms_extra: std.ArrayListUnmanaged(u32) = .{},
/// List of range extension thunks.
thunks: std.ArrayListUnmanaged(Thunk) = .{},
/// List of output merge sections with deduped contents.
merge_sections: std.ArrayListUnmanaged(MergeSection) = .{},
/// List of output merge subsections.
/// Each subsection is akin to Atom but belongs to a MergeSection.
merge_subsections: std.ArrayListUnmanaged(MergeSubsection) = .{},
/// List of input merge sections as parsed from input relocatables.
merge_input_sections: std.ArrayListUnmanaged(InputMergeSection) = .{},
/// Table of last atom index in a section and matching atom free list if any.
last_atom_and_free_list_table: LastAtomAndFreeListTable = .{},
@ -369,6 +378,7 @@ pub fn createEmpty(
try self.symbols_extra.append(gpa, 0);
// Allocate atom index 0 to null atom
try self.atoms.append(gpa, .{});
try self.atoms_extra.append(gpa, 0);
// Append null file at index 0
try self.files.append(gpa, .null);
// Append null byte to string tables
@ -378,6 +388,8 @@ pub fn createEmpty(
_ = try self.addSection(.{ .name = "" });
// Append null symbol in output symtab
try self.symtab.append(gpa, null_sym);
// Append null input merge section.
try self.merge_input_sections.append(gpa, .{});
if (!is_obj_or_ar) {
try self.dynstrtab.append(gpa, 0);
@ -491,7 +503,20 @@ pub fn deinit(self: *Elf) void {
self.start_stop_indexes.deinit(gpa);
self.atoms.deinit(gpa);
self.atoms_extra.deinit(gpa);
for (self.thunks.items) |*th| {
th.deinit(gpa);
}
self.thunks.deinit(gpa);
for (self.merge_sections.items) |*sect| {
sect.deinit(gpa);
}
self.merge_sections.deinit(gpa);
self.merge_subsections.deinit(gpa);
for (self.merge_input_sections.items) |*sect| {
sect.deinit(gpa);
}
self.merge_input_sections.deinit(gpa);
for (self.last_atom_and_free_list_table.values()) |*value| {
value.free_list.deinit(gpa);
}
@ -623,7 +648,6 @@ pub fn initMetadata(self: *Elf, options: InitMetadataOptions) !void {
const ptr_size = self.ptrWidthBytes();
const target = self.base.comp.root_mod.resolved_target.result;
const ptr_bit_width = target.ptrBitWidth();
const is_linux = target.os.tag == .linux;
const zig_object = self.zigObjectPtr().?;
const fillSection = struct {
@ -659,9 +683,7 @@ pub fn initMetadata(self: *Elf, options: InitMetadataOptions) !void {
}
if (self.phdr_zig_got_index == null) {
// We really only need ptr alignment but since we are using PROGBITS, linux requires
// page align.
const alignment = if (is_linux) self.page_size else @as(u16, ptr_size);
const alignment = self.page_size;
const filesz = @as(u64, ptr_size) * options.symbol_count_hint;
const off = self.findFreeSpace(filesz, alignment);
self.phdr_zig_got_index = try self.addPhdr(.{
@ -676,7 +698,7 @@ pub fn initMetadata(self: *Elf, options: InitMetadataOptions) !void {
}
if (self.phdr_zig_load_ro_index == null) {
const alignment = if (is_linux) self.page_size else @as(u16, ptr_size);
const alignment = self.page_size;
const filesz: u64 = 1024;
const off = self.findFreeSpace(filesz, alignment);
self.phdr_zig_load_ro_index = try self.addPhdr(.{
@ -691,7 +713,7 @@ pub fn initMetadata(self: *Elf, options: InitMetadataOptions) !void {
}
if (self.phdr_zig_load_rw_index == null) {
const alignment = if (is_linux) self.page_size else @as(u16, ptr_size);
const alignment = self.page_size;
const filesz: u64 = 1024;
const off = self.findFreeSpace(filesz, alignment);
self.phdr_zig_load_rw_index = try self.addPhdr(.{
@ -706,7 +728,7 @@ pub fn initMetadata(self: *Elf, options: InitMetadataOptions) !void {
}
if (self.phdr_zig_load_zerofill_index == null) {
const alignment = if (is_linux) self.page_size else @as(u16, ptr_size);
const alignment = self.page_size;
self.phdr_zig_load_zerofill_index = try self.addPhdr(.{
.type = elf.PT_LOAD,
.addr = if (ptr_bit_width >= 32) 0x14000000 else 0xf000,
@ -1289,6 +1311,7 @@ pub fn flushModule(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node)
// symbol for potential resolution at load-time.
self.resolveSymbols();
self.markEhFrameAtomsDead();
try self.resolveMergeSections();
try self.convertCommonSymbols();
self.markImportsExports();
@ -1313,7 +1336,10 @@ pub fn flushModule(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node)
else => |e| return e,
};
try self.addCommentString();
try self.finalizeMergeSections();
try self.initOutputSections();
try self.initMergeSections();
try self.addLinkerDefinedSymbols();
self.claimUnresolved();
@ -1332,6 +1358,7 @@ pub fn flushModule(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node)
self.sortDynamicSymtab();
try self.setHashSections();
try self.setVersionSymtab();
try self.updateMergeSectionSizes();
try self.updateSectionSizes();
try self.allocatePhdrTable();
@ -1359,7 +1386,7 @@ pub fn flushModule(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node)
if (shdr.sh_type == elf.SHT_NOBITS) continue;
const code = try zig_object.codeAlloc(self, atom_index);
defer gpa.free(code);
const file_offset = shdr.sh_offset + atom_ptr.value;
const file_offset = shdr.sh_offset + @as(u64, @intCast(atom_ptr.value));
atom_ptr.resolveRelocsAlloc(self, code) catch |err| switch (err) {
error.RelocFailure, error.RelaxFailure => has_reloc_errors = true,
error.UnsupportedCpuArch => {
@ -1377,6 +1404,7 @@ pub fn flushModule(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node)
try self.writePhdrTable();
try self.writeShdrTable();
try self.writeAtoms();
try self.writeMergeSections();
self.writeSyntheticSections() catch |err| switch (err) {
error.RelocFailure => return error.FlushFailure,
error.UnsupportedCpuArch => {
@ -2480,7 +2508,7 @@ fn linkWithLLD(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node) !voi
try argv.append("-pie");
}
if (is_dyn_lib and target.os.tag == .netbsd) {
if (is_exe_or_dyn_lib and target.os.tag == .netbsd) {
// Add options to produce shared objects with only 2 PT_LOAD segments.
// NetBSD expects 2 PT_LOAD segments in a shared object, otherwise
// ld.elf_so fails loading dynamic libraries with "not found" error.
@ -2698,74 +2726,7 @@ fn linkWithLLD(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node) !voi
try argv.append("-Bsymbolic");
}
if (comp.verbose_link) {
// Skip over our own name so that the LLD linker name is the first argv item.
Compilation.dump_argv(argv.items[1..]);
}
if (std.process.can_spawn) {
// If possible, we run LLD as a child process because it does not always
// behave properly as a library, unfortunately.
// https://github.com/ziglang/zig/issues/3825
var child = std.ChildProcess.init(argv.items, arena);
if (comp.clang_passthrough_mode) {
child.stdin_behavior = .Inherit;
child.stdout_behavior = .Inherit;
child.stderr_behavior = .Inherit;
const term = child.spawnAndWait() catch |err| {
log.err("unable to spawn {s}: {s}", .{ argv.items[0], @errorName(err) });
return error.UnableToSpawnSelf;
};
switch (term) {
.Exited => |code| {
if (code != 0) {
std.process.exit(code);
}
},
else => std.process.abort(),
}
} else {
child.stdin_behavior = .Ignore;
child.stdout_behavior = .Ignore;
child.stderr_behavior = .Pipe;
try child.spawn();
const stderr = try child.stderr.?.reader().readAllAlloc(arena, std.math.maxInt(usize));
const term = child.wait() catch |err| {
log.err("unable to spawn {s}: {s}", .{ argv.items[0], @errorName(err) });
return error.UnableToSpawnSelf;
};
switch (term) {
.Exited => |code| {
if (code != 0) {
comp.lockAndParseLldStderr(linker_command, stderr);
return error.LLDReportedFailure;
}
},
else => {
log.err("{s} terminated with stderr:\n{s}", .{ argv.items[0], stderr });
return error.LLDCrashed;
},
}
if (stderr.len != 0) {
log.warn("unexpected LLD stderr:\n{s}", .{stderr});
}
}
} else {
const exit_code = try lldMain(arena, argv.items, false);
if (exit_code != 0) {
if (comp.clang_passthrough_mode) {
std.process.exit(exit_code);
} else {
return error.LLDReportedFailure;
}
}
}
try link.spawnLld(comp, arena, argv.items);
}
if (!self.base.disable_lld_caching) {
@ -2946,7 +2907,10 @@ pub fn writeElfHeader(self: *Elf) !void {
mem.writeInt(u32, hdr_buf[index..][0..4], 1, endian);
index += 4;
const e_entry = if (self.entry_index) |entry_index| self.symbol(entry_index).address(.{}, self) else 0;
const e_entry = if (self.entry_index) |entry_index|
@as(u64, @intCast(self.symbol(entry_index).address(.{}, self)))
else
0;
const phdr_table_offset = if (self.phdr_table_index) |phndx| self.phdrs.items[phndx].p_offset else 0;
switch (self.ptr_width) {
.p32 => {
@ -3132,14 +3096,14 @@ fn allocateLinkerDefinedSymbols(self: *Elf) void {
if (self.dynamic_section_index) |shndx| {
const shdr = &self.shdrs.items[shndx];
const symbol_ptr = self.symbol(self.dynamic_index.?);
symbol_ptr.value = shdr.sh_addr;
symbol_ptr.value = @intCast(shdr.sh_addr);
symbol_ptr.output_section_index = shndx;
}
// __ehdr_start
{
const symbol_ptr = self.symbol(self.ehdr_start_index.?);
symbol_ptr.value = self.image_base;
symbol_ptr.value = @intCast(self.image_base);
symbol_ptr.output_section_index = 1;
}
@ -3149,9 +3113,9 @@ fn allocateLinkerDefinedSymbols(self: *Elf) void {
const end_sym = self.symbol(self.init_array_end_index.?);
const shdr = &self.shdrs.items[shndx];
start_sym.output_section_index = shndx;
start_sym.value = shdr.sh_addr;
start_sym.value = @intCast(shdr.sh_addr);
end_sym.output_section_index = shndx;
end_sym.value = shdr.sh_addr + shdr.sh_size;
end_sym.value = @intCast(shdr.sh_addr + shdr.sh_size);
}
// __fini_array_start, __fini_array_end
@ -3160,9 +3124,9 @@ fn allocateLinkerDefinedSymbols(self: *Elf) void {
const end_sym = self.symbol(self.fini_array_end_index.?);
const shdr = &self.shdrs.items[shndx];
start_sym.output_section_index = shndx;
start_sym.value = shdr.sh_addr;
start_sym.value = @intCast(shdr.sh_addr);
end_sym.output_section_index = shndx;
end_sym.value = shdr.sh_addr + shdr.sh_size;
end_sym.value = @intCast(shdr.sh_addr + shdr.sh_size);
}
// __preinit_array_start, __preinit_array_end
@ -3171,9 +3135,9 @@ fn allocateLinkerDefinedSymbols(self: *Elf) void {
const end_sym = self.symbol(self.preinit_array_end_index.?);
const shdr = &self.shdrs.items[shndx];
start_sym.output_section_index = shndx;
start_sym.value = shdr.sh_addr;
start_sym.value = @intCast(shdr.sh_addr);
end_sym.output_section_index = shndx;
end_sym.value = shdr.sh_addr + shdr.sh_size;
end_sym.value = @intCast(shdr.sh_addr + shdr.sh_size);
}
// _GLOBAL_OFFSET_TABLE_
@ -3181,14 +3145,14 @@ fn allocateLinkerDefinedSymbols(self: *Elf) void {
if (self.got_plt_section_index) |shndx| {
const shdr = self.shdrs.items[shndx];
const sym = self.symbol(self.got_index.?);
sym.value = shdr.sh_addr;
sym.value = @intCast(shdr.sh_addr);
sym.output_section_index = shndx;
}
} else {
if (self.got_section_index) |shndx| {
const shdr = self.shdrs.items[shndx];
const sym = self.symbol(self.got_index.?);
sym.value = shdr.sh_addr;
sym.value = @intCast(shdr.sh_addr);
sym.output_section_index = shndx;
}
}
@ -3197,7 +3161,7 @@ fn allocateLinkerDefinedSymbols(self: *Elf) void {
if (self.plt_section_index) |shndx| {
const shdr = &self.shdrs.items[shndx];
const symbol_ptr = self.symbol(self.plt_index.?);
symbol_ptr.value = shdr.sh_addr;
symbol_ptr.value = @intCast(shdr.sh_addr);
symbol_ptr.output_section_index = shndx;
}
@ -3205,7 +3169,7 @@ fn allocateLinkerDefinedSymbols(self: *Elf) void {
if (self.dso_handle_index) |index| {
const shdr = &self.shdrs.items[1];
const symbol_ptr = self.symbol(index);
symbol_ptr.value = shdr.sh_addr;
symbol_ptr.value = @intCast(shdr.sh_addr);
symbol_ptr.output_section_index = 0;
}
@ -3213,7 +3177,7 @@ fn allocateLinkerDefinedSymbols(self: *Elf) void {
if (self.eh_frame_hdr_section_index) |shndx| {
const shdr = &self.shdrs.items[shndx];
const symbol_ptr = self.symbol(self.gnu_eh_frame_hdr_index.?);
symbol_ptr.value = shdr.sh_addr;
symbol_ptr.value = @intCast(shdr.sh_addr);
symbol_ptr.output_section_index = shndx;
}
@ -3225,9 +3189,9 @@ fn allocateLinkerDefinedSymbols(self: *Elf) void {
const start_addr = end_addr - self.calcNumIRelativeRelocs() * @sizeOf(elf.Elf64_Rela);
const start_sym = self.symbol(self.rela_iplt_start_index.?);
const end_sym = self.symbol(self.rela_iplt_end_index.?);
start_sym.value = start_addr;
start_sym.value = @intCast(start_addr);
start_sym.output_section_index = shndx;
end_sym.value = end_addr;
end_sym.value = @intCast(end_addr);
end_sym.output_section_index = shndx;
}
@ -3236,7 +3200,7 @@ fn allocateLinkerDefinedSymbols(self: *Elf) void {
const end_symbol = self.symbol(self.end_index.?);
for (self.shdrs.items, 0..) |shdr, shndx| {
if (shdr.sh_flags & elf.SHF_ALLOC != 0) {
end_symbol.value = shdr.sh_addr + shdr.sh_size;
end_symbol.value = @intCast(shdr.sh_addr + shdr.sh_size);
end_symbol.output_section_index = @intCast(shndx);
}
}
@ -3251,9 +3215,9 @@ fn allocateLinkerDefinedSymbols(self: *Elf) void {
const stop = self.symbol(self.start_stop_indexes.items[index + 1]);
const shndx = self.sectionByName(name["__start_".len..]).?;
const shdr = &self.shdrs.items[shndx];
start.value = shdr.sh_addr;
start.value = @intCast(shdr.sh_addr);
start.output_section_index = shndx;
stop.value = shdr.sh_addr + shdr.sh_size;
stop.value = @intCast(shdr.sh_addr + shdr.sh_size);
stop.output_section_index = shndx;
}
}
@ -3263,7 +3227,7 @@ fn allocateLinkerDefinedSymbols(self: *Elf) void {
const sym = self.symbol(index);
if (self.sectionByName(".sdata")) |shndx| {
const shdr = self.shdrs.items[shndx];
sym.value = shdr.sh_addr + 0x800;
sym.value = @intCast(shdr.sh_addr + 0x800);
sym.output_section_index = shndx;
} else {
sym.value = 0;
@ -3293,12 +3257,122 @@ fn checkDuplicates(self: *Elf) !void {
try self.reportDuplicates(dupes);
}
pub fn addCommentString(self: *Elf) !void {
const msec_index = try self.getOrCreateMergeSection(".comment", elf.SHF_MERGE | elf.SHF_STRINGS, elf.SHT_PROGBITS);
const msec = self.mergeSection(msec_index);
const res = try msec.insertZ(self.base.comp.gpa, "zig " ++ builtin.zig_version_string);
if (res.found_existing) return;
const msub_index = try self.addMergeSubsection();
const msub = self.mergeSubsection(msub_index);
msub.merge_section_index = msec_index;
msub.string_index = res.key.pos;
msub.alignment = .@"1";
msub.size = res.key.len;
msub.entsize = 1;
msub.alive = true;
res.sub.* = msub_index;
}
pub fn resolveMergeSections(self: *Elf) !void {
const tracy = trace(@src());
defer tracy.end();
var has_errors = false;
for (self.objects.items) |index| {
const file_ptr = self.file(index).?;
if (!file_ptr.isAlive()) continue;
file_ptr.object.initMergeSections(self) catch |err| switch (err) {
error.MalformedObject => has_errors = true,
else => |e| return e,
};
}
if (has_errors) return error.FlushFailure;
for (self.objects.items) |index| {
const file_ptr = self.file(index).?;
if (!file_ptr.isAlive()) continue;
file_ptr.object.resolveMergeSubsections(self) catch |err| switch (err) {
error.MalformedObject => has_errors = true,
else => |e| return e,
};
}
if (has_errors) return error.FlushFailure;
}
pub fn finalizeMergeSections(self: *Elf) !void {
for (self.merge_sections.items) |*msec| {
try msec.finalize(self);
}
}
pub fn updateMergeSectionSizes(self: *Elf) !void {
for (self.merge_sections.items) |*msec| {
const shdr = &self.shdrs.items[msec.output_section_index];
for (msec.subsections.items) |msub_index| {
const msub = self.mergeSubsection(msub_index);
assert(msub.alive);
const offset = msub.alignment.forward(shdr.sh_size);
const padding = offset - shdr.sh_size;
msub.value = @intCast(offset);
shdr.sh_size += padding + msub.size;
shdr.sh_addralign = @max(shdr.sh_addralign, msub.alignment.toByteUnits() orelse 1);
}
}
}
pub fn writeMergeSections(self: *Elf) !void {
const gpa = self.base.comp.gpa;
var buffer = std.ArrayList(u8).init(gpa);
defer buffer.deinit();
for (self.merge_sections.items) |msec| {
const shdr = self.shdrs.items[msec.output_section_index];
const size = math.cast(usize, shdr.sh_size) orelse return error.Overflow;
try buffer.ensureTotalCapacity(size);
buffer.appendNTimesAssumeCapacity(0, size);
for (msec.subsections.items) |msub_index| {
const msub = self.mergeSubsection(msub_index);
assert(msub.alive);
const string = msub.getString(self);
const off = math.cast(usize, msub.value) orelse return error.Overflow;
@memcpy(buffer.items[off..][0..string.len], string);
}
try self.base.file.?.pwriteAll(buffer.items, shdr.sh_offset);
buffer.clearRetainingCapacity();
}
}
fn initOutputSections(self: *Elf) !void {
for (self.objects.items) |index| {
try self.file(index).?.object.initOutputSections(self);
}
}
pub fn initMergeSections(self: *Elf) !void {
for (self.merge_sections.items) |*msec| {
if (msec.subsections.items.len == 0) continue;
const name = msec.name(self);
const shndx = self.sectionByName(name) orelse try self.addSection(.{
.name = name,
.type = msec.type,
.flags = msec.flags,
});
msec.output_section_index = shndx;
var entsize = self.mergeSubsection(msec.subsections.items[0]).entsize;
for (msec.subsections.items) |index| {
const msub = self.mergeSubsection(index);
entsize = @min(entsize, msub.entsize);
}
const shdr = &self.shdrs.items[shndx];
shdr.sh_entsize = entsize;
}
}
fn initSyntheticSections(self: *Elf) !void {
const comp = self.base.comp;
const target = comp.root_mod.resolved_target.result;
@ -3965,6 +4039,10 @@ fn resetShdrIndexes(self: *Elf, backlinks: []const u32) !void {
}
}
for (self.merge_sections.items) |*msec| {
msec.output_section_index = backlinks[msec.output_section_index];
}
{
var output_rela_sections = try self.output_rela_sections.clone(gpa);
defer output_rela_sections.deinit(gpa);
@ -4052,7 +4130,7 @@ fn updateSectionSizes(self: *Elf) !void {
if (!atom_ptr.flags.alive) continue;
const offset = atom_ptr.alignment.forward(shdr.sh_size);
const padding = offset - shdr.sh_size;
atom_ptr.value = offset;
atom_ptr.value = @intCast(offset);
shdr.sh_size += padding + atom_ptr.size;
shdr.sh_addralign = @max(shdr.sh_addralign, atom_ptr.alignment.toByteUnits() orelse 1);
}
@ -4535,7 +4613,7 @@ fn writeAtoms(self: *Elf) !void {
const atom_ptr = self.atom(atom_index).?;
assert(atom_ptr.flags.alive);
const offset = math.cast(usize, atom_ptr.value - base_offset) orelse
const offset = math.cast(usize, atom_ptr.value - @as(i64, @intCast(base_offset))) orelse
return error.Overflow;
const size = math.cast(usize, atom_ptr.size) orelse return error.Overflow;
@ -4576,7 +4654,7 @@ fn writeAtoms(self: *Elf) !void {
const thunk_size = th.size(self);
try buffer.ensureUnusedCapacity(thunk_size);
const shdr = self.shdrs.items[th.output_section_index];
const offset = th.value + shdr.sh_offset;
const offset = @as(u64, @intCast(th.value)) + shdr.sh_offset;
try th.write(self, buffer.writer());
assert(buffer.items.len == thunk_size);
try self.base.file.?.pwriteAll(buffer.items, offset);
@ -4611,6 +4689,7 @@ pub fn updateSymtabSize(self: *Elf) !void {
if (self.eh_frame_section_index) |_| {
nlocals += 1;
}
nlocals += @intCast(self.merge_sections.items.len);
if (self.requiresThunks()) for (self.thunks.items) |*th| {
th.output_symtab_ctx.ilocal = nlocals + 1;
@ -4947,12 +5026,30 @@ fn writeSectionSymbols(self: *Elf) void {
};
ilocal += 1;
}
for (self.merge_sections.items) |msec| {
const shdr = self.shdrs.items[msec.output_section_index];
const out_sym = &self.symtab.items[ilocal];
out_sym.* = .{
.st_name = 0,
.st_value = shdr.sh_addr,
.st_info = elf.STT_SECTION,
.st_shndx = @intCast(msec.output_section_index),
.st_size = 0,
.st_other = 0,
};
ilocal += 1;
}
}
pub fn sectionSymbolOutputSymtabIndex(self: Elf, shndx: u32) u32 {
if (self.eh_frame_section_index) |index| {
if (index == shndx) return @intCast(self.output_sections.keys().len + 1);
}
const base: usize = if (self.eh_frame_section_index == null) 0 else 1;
for (self.merge_sections.items, 0..) |msec, index| {
if (msec.output_section_index == shndx) return @intCast(self.output_sections.keys().len + 1 + index + base);
}
return @intCast(self.output_sections.getIndex(shndx).? + 1);
}
@ -5003,9 +5100,9 @@ fn getLDMOption(target: std.Target) ?[]const u8 {
switch (target.cpu.arch) {
.x86 => return "elf_i386",
.aarch64 => return "aarch64linux",
.aarch64_be => return "aarch64_be_linux",
.aarch64_be => return "aarch64linuxb",
.arm, .thumb => return "armelf_linux_eabi",
.armeb, .thumbeb => return "armebelf_linux_eabi",
.armeb, .thumbeb => return "armelfb_linux_eabi",
.powerpc => return "elf32ppclinux",
.powerpc64 => return "elf64ppc",
.powerpc64le => return "elf64lppc",
@ -5458,6 +5555,50 @@ pub fn addAtom(self: *Elf) !Atom.Index {
return index;
}
pub fn addAtomExtra(self: *Elf, extra: Atom.Extra) !u32 {
const fields = @typeInfo(Atom.Extra).Struct.fields;
try self.atoms_extra.ensureUnusedCapacity(self.base.comp.gpa, fields.len);
return self.addAtomExtraAssumeCapacity(extra);
}
pub fn addAtomExtraAssumeCapacity(self: *Elf, extra: Atom.Extra) u32 {
const index = @as(u32, @intCast(self.atoms_extra.items.len));
const fields = @typeInfo(Atom.Extra).Struct.fields;
inline for (fields) |field| {
self.atoms_extra.appendAssumeCapacity(switch (field.type) {
u32 => @field(extra, field.name),
else => @compileError("bad field type"),
});
}
return index;
}
pub fn atomExtra(self: *Elf, index: u32) ?Atom.Extra {
if (index == 0) return null;
const fields = @typeInfo(Atom.Extra).Struct.fields;
var i: usize = index;
var result: Atom.Extra = undefined;
inline for (fields) |field| {
@field(result, field.name) = switch (field.type) {
u32 => self.atoms_extra.items[i],
else => @compileError("bad field type"),
};
i += 1;
}
return result;
}
pub fn setAtomExtra(self: *Elf, index: u32, extra: Atom.Extra) void {
assert(index > 0);
const fields = @typeInfo(Atom.Extra).Struct.fields;
inline for (fields, 0..) |field, i| {
self.atoms_extra.items[index + i] = switch (field.type) {
u32 => @field(extra, field.name),
else => @compileError("bad field type"),
};
}
}
pub fn addThunk(self: *Elf) !Thunk.Index {
const index = @as(Thunk.Index, @intCast(self.thunks.items.len));
const th = try self.thunks.addOne(self.base.comp.gpa);
@ -5637,35 +5778,88 @@ pub fn comdatGroupOwner(self: *Elf, index: ComdatGroupOwner.Index) *ComdatGroupO
return &self.comdat_groups_owners.items[index];
}
pub fn gotAddress(self: *Elf) u64 {
pub fn addInputMergeSection(self: *Elf) !InputMergeSection.Index {
const index: InputMergeSection.Index = @intCast(self.merge_input_sections.items.len);
const msec = try self.merge_input_sections.addOne(self.base.comp.gpa);
msec.* = .{};
return index;
}
pub fn inputMergeSection(self: *Elf, index: InputMergeSection.Index) ?*InputMergeSection {
if (index == 0) return null;
return &self.merge_input_sections.items[index];
}
pub fn addMergeSubsection(self: *Elf) !MergeSubsection.Index {
const index: MergeSubsection.Index = @intCast(self.merge_subsections.items.len);
const msec = try self.merge_subsections.addOne(self.base.comp.gpa);
msec.* = .{};
return index;
}
pub fn mergeSubsection(self: *Elf, index: MergeSubsection.Index) *MergeSubsection {
assert(index < self.merge_subsections.items.len);
return &self.merge_subsections.items[index];
}
pub fn getOrCreateMergeSection(self: *Elf, name: []const u8, flags: u64, @"type": u32) !MergeSection.Index {
const gpa = self.base.comp.gpa;
const out_name = name: {
if (self.base.isRelocatable()) break :name name;
if (mem.eql(u8, name, ".rodata") or mem.startsWith(u8, name, ".rodata"))
break :name if (flags & elf.SHF_STRINGS != 0) ".rodata.str" else ".rodata.cst";
break :name name;
};
const out_off = try self.strings.insert(gpa, out_name);
const out_flags = flags & ~@as(u64, elf.SHF_COMPRESSED | elf.SHF_GROUP);
for (self.merge_sections.items, 0..) |msec, index| {
if (msec.name_offset == out_off) return @intCast(index);
}
const index = @as(MergeSection.Index, @intCast(self.merge_sections.items.len));
const msec = try self.merge_sections.addOne(gpa);
msec.* = .{
.name_offset = out_off,
.flags = out_flags,
.type = @"type",
};
return index;
}
pub fn mergeSection(self: *Elf, index: MergeSection.Index) *MergeSection {
assert(index < self.merge_sections.items.len);
return &self.merge_sections.items[index];
}
pub fn gotAddress(self: *Elf) i64 {
const shndx = blk: {
if (self.getTarget().cpu.arch == .x86_64 and self.got_plt_section_index != null)
break :blk self.got_plt_section_index.?;
break :blk if (self.got_section_index) |shndx| shndx else null;
};
return if (shndx) |index| self.shdrs.items[index].sh_addr else 0;
return if (shndx) |index| @intCast(self.shdrs.items[index].sh_addr) else 0;
}
pub fn tpAddress(self: *Elf) u64 {
pub fn tpAddress(self: *Elf) i64 {
const index = self.phdr_tls_index orelse return 0;
const phdr = self.phdrs.items[index];
return switch (self.getTarget().cpu.arch) {
const addr = switch (self.getTarget().cpu.arch) {
.x86_64 => mem.alignForward(u64, phdr.p_vaddr + phdr.p_memsz, phdr.p_align),
.aarch64 => mem.alignBackward(u64, phdr.p_vaddr - 16, phdr.p_align),
else => @panic("TODO implement getTpAddress for this arch"),
};
return @intCast(addr);
}
pub fn dtpAddress(self: *Elf) u64 {
pub fn dtpAddress(self: *Elf) i64 {
const index = self.phdr_tls_index orelse return 0;
const phdr = self.phdrs.items[index];
return phdr.p_vaddr;
return @intCast(phdr.p_vaddr);
}
pub fn tlsAddress(self: *Elf) u64 {
pub fn tlsAddress(self: *Elf) i64 {
const index = self.phdr_tls_index orelse return 0;
const phdr = self.phdrs.items[index];
return phdr.p_vaddr;
return @intCast(phdr.p_vaddr);
}
const ErrorWithNotes = struct {
@ -6043,6 +6237,11 @@ fn fmtDumpState(
try writer.print(" shdr({d}) : COMDAT({d})\n", .{ cg.shndx, cg.cg_index });
}
try writer.writeAll("\nOutput merge sections\n");
for (self.merge_sections.items) |msec| {
try writer.print(" shdr({d}) : {}\n", .{ msec.output_section_index, msec.fmt(self) });
}
try writer.writeAll("\nOutput shdrs\n");
for (self.shdrs.items, 0..) |shdr, shndx| {
try writer.print(" shdr({d}) : phdr({?d}) : {}\n", .{
@ -6234,7 +6433,7 @@ const eh_frame = @import("Elf/eh_frame.zig");
const gc = @import("Elf/gc.zig");
const glibc = @import("../glibc.zig");
const link = @import("../link.zig");
const lldMain = @import("../main.zig").lldMain;
const merge_section = @import("Elf/merge_section.zig");
const musl = @import("../musl.zig");
const relocatable = @import("Elf/relocatable.zig");
const relocation = @import("Elf/relocation.zig");
@ -6260,10 +6459,13 @@ const GnuHashSection = synthetic_sections.GnuHashSection;
const GotSection = synthetic_sections.GotSection;
const GotPltSection = synthetic_sections.GotPltSection;
const HashSection = synthetic_sections.HashSection;
const InputMergeSection = merge_section.InputMergeSection;
const LdScript = @import("Elf/LdScript.zig");
const LinkerDefined = @import("Elf/LinkerDefined.zig");
const Liveness = @import("../Liveness.zig");
const LlvmObject = @import("../codegen/llvm.zig").Object;
const MergeSection = merge_section.MergeSection;
const MergeSubsection = merge_section.MergeSubsection;
const Module = @import("../Module.zig");
const Object = @import("Elf/Object.zig");
const InternPool = @import("../InternPool.zig");

View file

@ -64,6 +64,7 @@ pub fn parse(self: *Archive, elf_file: *Elf, path: []const u8, handle_index: Fil
.archive = .{
.path = try gpa.dupe(u8, path),
.offset = pos,
.size = obj_size,
},
.path = try gpa.dupe(u8, name),
.file_handle = handle_index,

View file

@ -1,5 +1,5 @@
/// Address allocated for this Atom.
value: u64 = 0,
value: i64 = 0,
/// Name of this Atom.
name_offset: u32 = 0,
@ -22,32 +22,19 @@ output_section_index: u32 = 0,
/// Index of the input section containing this atom's relocs.
relocs_section_index: u32 = 0,
/// Start index of the relocations belonging to this atom.
rel_index: u32 = 0,
/// Number of relocations belonging to this atom.
rel_num: u32 = 0,
/// Index of this atom in the linker's atoms table.
atom_index: Index = 0,
/// Index of the thunk for this atom.
thunk_index: Thunk.Index = 0,
/// Flags we use for state tracking.
flags: Flags = .{},
/// Start index of FDEs referencing this atom.
fde_start: u32 = 0,
/// End index of FDEs referencing this atom.
fde_end: u32 = 0,
/// Points to the previous and next neighbors, based on the `text_offset`.
/// This can be used to find, for example, the capacity of this `TextBlock`.
prev_index: Index = 0,
next_index: Index = 0,
/// Flags we use for state tracking.
flags: Flags = .{},
extra_index: u32 = 0,
pub const Alignment = @import("../../InternPool.zig").Alignment;
pub fn name(self: Atom, elf_file: *Elf) []const u8 {
@ -57,10 +44,22 @@ pub fn name(self: Atom, elf_file: *Elf) []const u8 {
};
}
pub fn address(self: Atom, elf_file: *Elf) u64 {
pub fn address(self: Atom, elf_file: *Elf) i64 {
const shndx = self.outputShndx() orelse return self.value;
const shdr = elf_file.shdrs.items[shndx];
return shdr.sh_addr + self.value;
return @as(i64, @intCast(shdr.sh_addr)) + self.value;
}
pub fn debugTombstoneValue(self: Atom, target: Symbol, elf_file: *Elf) ?u64 {
if (target.mergeSubsection(elf_file)) |msub| {
if (msub.alive) return null;
}
if (target.atom(elf_file)) |atom_ptr| {
if (atom_ptr.flags.alive) return null;
}
const atom_name = self.name(elf_file);
if (!mem.startsWith(u8, atom_name, ".debug")) return null;
return if (mem.eql(u8, atom_name, ".debug_loc") or mem.eql(u8, atom_name, ".debug_ranges")) 1 else 0;
}
pub fn file(self: Atom, elf_file: *Elf) ?File {
@ -68,7 +67,9 @@ pub fn file(self: Atom, elf_file: *Elf) ?File {
}
pub fn thunk(self: Atom, elf_file: *Elf) *Thunk {
return elf_file.thunk(self.thunk_index);
assert(self.flags.thunk);
const extras = self.extra(elf_file).?;
return elf_file.thunk(extras.thunk);
}
pub fn inputShdr(self: Atom, elf_file: *Elf) elf.Elf64_Shdr {
@ -102,13 +103,13 @@ pub fn capacity(self: Atom, elf_file: *Elf) u64 {
next.address(elf_file)
else
std.math.maxInt(u32);
return next_addr - self.address(elf_file);
return @intCast(next_addr - self.address(elf_file));
}
pub fn freeListEligible(self: Atom, elf_file: *Elf) bool {
// No need to keep a free list node for the last block.
const next = elf_file.atom(self.next_index) orelse return false;
const cap = next.address(elf_file) - self.address(elf_file);
const cap: u64 = @intCast(next.address(elf_file) - self.address(elf_file));
const ideal_cap = Elf.padToIdeal(self.size);
if (cap <= ideal_cap) return false;
const surplus = cap - ideal_cap;
@ -141,8 +142,8 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void {
// Is it enough that we could fit this new atom?
const cap = big_atom.capacity(elf_file);
const ideal_capacity = Elf.padToIdeal(cap);
const ideal_capacity_end_vaddr = std.math.add(u64, big_atom.value, ideal_capacity) catch ideal_capacity;
const capacity_end_vaddr = big_atom.value + cap;
const ideal_capacity_end_vaddr = std.math.add(u64, @intCast(big_atom.value), ideal_capacity) catch ideal_capacity;
const capacity_end_vaddr = @as(u64, @intCast(big_atom.value)) + cap;
const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity;
const new_start_vaddr = self.alignment.backward(new_start_vaddr_unaligned);
if (new_start_vaddr < ideal_capacity_end_vaddr) {
@ -167,14 +168,14 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void {
if (!keep_free_list_node) {
free_list_removal = i;
}
break :blk new_start_vaddr;
break :blk @intCast(new_start_vaddr);
} else if (elf_file.atom(last_atom_index.*)) |last| {
const ideal_capacity = Elf.padToIdeal(last.size);
const ideal_capacity_end_vaddr = last.value + ideal_capacity;
const ideal_capacity_end_vaddr = @as(u64, @intCast(last.value)) + ideal_capacity;
const new_start_vaddr = self.alignment.forward(ideal_capacity_end_vaddr);
// Set up the metadata to be updated, after errors are no longer possible.
atom_placement = last.atom_index;
break :blk new_start_vaddr;
break :blk @intCast(new_start_vaddr);
} else {
break :blk 0;
}
@ -184,7 +185,7 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void {
self.atom_index,
self.name(elf_file),
self.address(elf_file),
self.address(elf_file) + self.size,
self.address(elf_file) + @as(i64, @intCast(self.size)),
});
const expand_section = if (atom_placement) |placement_index|
@ -192,7 +193,7 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void {
else
true;
if (expand_section) {
const needed_size = self.value + self.size;
const needed_size: u64 = @intCast(self.value + @as(i64, @intCast(self.size)));
try elf_file.growAllocSection(self.outputShndx().?, needed_size);
last_atom_index.* = self.atom_index;
@ -242,7 +243,7 @@ pub fn shrink(self: *Atom, elf_file: *Elf) void {
}
pub fn grow(self: *Atom, elf_file: *Elf) !void {
if (!self.alignment.check(self.value) or self.size > self.capacity(elf_file))
if (!self.alignment.check(@intCast(self.value)) or self.size > self.capacity(elf_file))
try self.allocate(elf_file);
}
@ -309,11 +310,14 @@ pub fn free(self: *Atom, elf_file: *Elf) void {
pub fn relocs(self: Atom, elf_file: *Elf) []const elf.Elf64_Rela {
const shndx = self.relocsShndx() orelse return &[0]elf.Elf64_Rela{};
return switch (self.file(elf_file).?) {
.zig_object => |x| x.relocs.items[shndx].items,
.object => |x| x.relocs.items[self.rel_index..][0..self.rel_num],
switch (self.file(elf_file).?) {
.zig_object => |x| return x.relocs.items[shndx].items,
.object => |x| {
const extras = self.extra(elf_file).?;
return x.relocs.items[extras.rel_index..][0..extras.rel_count];
},
else => unreachable,
};
}
}
pub fn writeRelocs(self: Atom, elf_file: *Elf, out_relocs: *std.ArrayList(elf.Elf64_Rela)) !void {
@ -329,11 +333,14 @@ pub fn writeRelocs(self: Atom, elf_file: *Elf, out_relocs: *std.ArrayList(elf.El
};
const target = elf_file.symbol(target_index);
const r_type = rel.r_type();
const r_offset = self.value + rel.r_offset;
const r_offset: u64 = @intCast(self.value + @as(i64, @intCast(rel.r_offset)));
var r_addend = rel.r_addend;
var r_sym: u32 = 0;
switch (target.type(elf_file)) {
elf.STT_SECTION => {
elf.STT_SECTION => if (target.mergeSubsection(elf_file)) |msub| {
r_addend += @intCast(target.address(.{}, elf_file));
r_sym = elf_file.sectionSymbolOutputSymtabIndex(msub.mergeSection(elf_file).output_section_index);
} else {
r_addend += @intCast(target.address(.{}, elf_file));
r_sym = elf_file.sectionSymbolOutputSymtabIndex(target.outputShndx().?);
},
@ -359,9 +366,10 @@ pub fn writeRelocs(self: Atom, elf_file: *Elf, out_relocs: *std.ArrayList(elf.El
}
pub fn fdes(self: Atom, elf_file: *Elf) []Fde {
if (self.fde_start == self.fde_end) return &[0]Fde{};
if (!self.flags.fde) return &[0]Fde{};
const extras = self.extra(elf_file).?;
const object = self.file(elf_file).?.object;
return object.fdes.items[self.fde_start..self.fde_end];
return object.fdes.items[extras.fde_start..][0..extras.fde_count];
}
pub fn markFdesDead(self: Atom, elf_file: *Elf) void {
@ -419,6 +427,12 @@ pub fn scanRelocs(self: Atom, elf_file: *Elf, code: ?[]const u8, undefs: anytype
};
const symbol = elf_file.symbol(symbol_index);
const is_synthetic_symbol = switch (file_ptr) {
.zig_object => false, // TODO: implement this once we support merge sections in ZigObject
.object => |x| rel.r_sym() >= x.symtab.items.len,
else => unreachable,
};
// Check for violation of One Definition Rule for COMDATs.
if (symbol.file(elf_file) == null) {
// TODO convert into an error
@ -431,7 +445,8 @@ pub fn scanRelocs(self: Atom, elf_file: *Elf, code: ?[]const u8, undefs: anytype
}
// Report an undefined symbol.
if (try self.reportUndefined(elf_file, symbol, symbol_index, rel, undefs)) continue;
if (!is_synthetic_symbol and (try self.reportUndefined(elf_file, symbol, symbol_index, rel, undefs)))
continue;
if (symbol.isIFunc(elf_file)) {
symbol.flags.needs_got = true;
@ -601,7 +616,10 @@ fn outputType(elf_file: *Elf) u2 {
return switch (elf_file.base.comp.config.output_mode) {
.Obj => unreachable,
.Lib => 0,
.Exe => if (comp.config.pie) 1 else 2,
.Exe => switch (elf_file.getTarget().os.tag) {
.haiku => 0,
else => if (comp.config.pie) 1 else 2,
},
};
}
@ -740,21 +758,21 @@ pub fn resolveRelocsAlloc(self: Atom, elf_file: *Elf, code: []u8) RelocError!voi
// https://intezer.com/blog/malware-analysis/executable-and-linkable-format-101-part-3-relocations/
//
// Address of the source atom.
const P = @as(i64, @intCast(self.address(elf_file) + rel.r_offset));
const P = self.address(elf_file) + @as(i64, @intCast(rel.r_offset));
// Addend from the relocation.
const A = rel.r_addend;
// Address of the target symbol - can be address of the symbol within an atom or address of PLT stub.
const S = @as(i64, @intCast(target.address(.{}, elf_file)));
const S = target.address(.{}, elf_file);
// Address of the global offset table.
const GOT = @as(i64, @intCast(elf_file.gotAddress()));
const GOT = elf_file.gotAddress();
// Address of the .zig.got table entry if any.
const ZIG_GOT = @as(i64, @intCast(target.zigGotAddress(elf_file)));
const ZIG_GOT = target.zigGotAddress(elf_file);
// Relative offset to the start of the global offset table.
const G = @as(i64, @intCast(target.gotAddress(elf_file))) - GOT;
const G = target.gotAddress(elf_file) - GOT;
// // Address of the thread pointer.
const TP = @as(i64, @intCast(elf_file.tpAddress()));
const TP = elf_file.tpAddress();
// Address of the dynamic thread pointer.
const DTP = @as(i64, @intCast(elf_file.dtpAddress()));
const DTP = elf_file.dtpAddress();
relocs_log.debug(" {s}: {x}: [{x} => {x}] G({x}) ZG({x}) ({s})", .{
relocation.fmtRelocType(rel.r_type(), cpu_arch),
@ -811,9 +829,9 @@ fn resolveDynAbsReloc(
const comp = elf_file.base.comp;
const gpa = comp.gpa;
const cpu_arch = elf_file.getTarget().cpu.arch;
const P = self.address(elf_file) + rel.r_offset;
const P: u64 = @intCast(self.address(elf_file) + @as(i64, @intCast(rel.r_offset)));
const A = rel.r_addend;
const S = @as(i64, @intCast(target.address(.{}, elf_file)));
const S = target.address(.{}, elf_file);
const is_writeable = self.inputShdr(elf_file).sh_flags & elf.SHF_WRITE != 0;
const num_dynrelocs = switch (self.file(elf_file).?) {
@ -881,7 +899,7 @@ fn resolveDynAbsReloc(
},
.ifunc => {
const S_ = @as(i64, @intCast(target.address(.{ .plt = false }, elf_file)));
const S_ = target.address(.{ .plt = false }, elf_file);
elf_file.addRelaDynAssumeCapacity(.{
.offset = P,
.type = relocation.encode(.irel, cpu_arch),
@ -921,6 +939,11 @@ pub fn resolveRelocsNonAlloc(self: Atom, elf_file: *Elf, code: []u8, undefs: any
else => unreachable,
};
const target = elf_file.symbol(target_index);
const is_synthetic_symbol = switch (file_ptr) {
.zig_object => false, // TODO: implement this once we support merge sections in ZigObject
.object => |x| rel.r_sym() >= x.symtab.items.len,
else => unreachable,
};
// Check for violation of One Definition Rule for COMDATs.
if (target.file(elf_file) == null) {
@ -934,20 +957,21 @@ pub fn resolveRelocsNonAlloc(self: Atom, elf_file: *Elf, code: []u8, undefs: any
}
// Report an undefined symbol.
if (try self.reportUndefined(elf_file, target, target_index, rel, undefs)) continue;
if (!is_synthetic_symbol and (try self.reportUndefined(elf_file, target, target_index, rel, undefs)))
continue;
// We will use equation format to resolve relocations:
// https://intezer.com/blog/malware-analysis/executable-and-linkable-format-101-part-3-relocations/
//
const P = @as(i64, @intCast(self.address(elf_file) + rel.r_offset));
const P = self.address(elf_file) + @as(i64, @intCast(rel.r_offset));
// Addend from the relocation.
const A = rel.r_addend;
// Address of the target symbol - can be address of the symbol within an atom or address of PLT stub.
const S = @as(i64, @intCast(target.address(.{}, elf_file)));
const S = target.address(.{}, elf_file);
// Address of the global offset table.
const GOT = @as(i64, @intCast(elf_file.gotAddress()));
const GOT = elf_file.gotAddress();
// Address of the dynamic thread pointer.
const DTP = @as(i64, @intCast(elf_file.dtpAddress()));
const DTP = elf_file.dtpAddress();
const args = ResolveArgs{ P, A, S, GOT, 0, 0, DTP, 0 };
@ -981,6 +1005,35 @@ pub fn resolveRelocsNonAlloc(self: Atom, elf_file: *Elf, code: []u8, undefs: any
if (has_reloc_errors) return error.RelocFailure;
}
const AddExtraOpts = struct {
thunk: ?u32 = null,
fde_start: ?u32 = null,
fde_count: ?u32 = null,
rel_index: ?u32 = null,
rel_count: ?u32 = null,
};
pub fn addExtra(atom: *Atom, opts: AddExtraOpts, elf_file: *Elf) !void {
if (atom.extra(elf_file) == null) {
atom.extra_index = try elf_file.addAtomExtra(.{});
}
var extras = atom.extra(elf_file).?;
inline for (@typeInfo(@TypeOf(opts)).Struct.fields) |field| {
if (@field(opts, field.name)) |x| {
@field(extras, field.name) = x;
}
}
atom.setExtra(extras, elf_file);
}
pub inline fn extra(atom: Atom, elf_file: *Elf) ?Extra {
return elf_file.atomExtra(atom.extra_index);
}
pub inline fn setExtra(atom: Atom, extras: Extra, elf_file: *Elf) void {
elf_file.setAtomExtra(atom.extra_index, extras);
}
pub fn format(
atom: Atom,
comptime unused_fmt_string: []const u8,
@ -1020,12 +1073,13 @@ fn format2(
atom.atom_index, atom.name(elf_file), atom.address(elf_file),
atom.output_section_index, atom.alignment, atom.size,
});
if (atom.fde_start != atom.fde_end) {
if (atom.flags.fde) {
try writer.writeAll(" : fdes{ ");
for (atom.fdes(elf_file), atom.fde_start..) |fde, i| {
const extras = atom.extra(elf_file).?;
for (atom.fdes(elf_file), extras.fde_start..) |fde, i| {
try writer.print("{d}", .{i});
if (!fde.alive) try writer.writeAll("([*])");
if (i < atom.fde_end - 1) try writer.writeAll(", ");
if (i - extras.fde_start < extras.fde_count - 1) try writer.writeAll(", ");
}
try writer.writeAll(" }");
}
@ -1042,6 +1096,12 @@ pub const Flags = packed struct {
/// Specifies if the atom has been visited during garbage collection.
visited: bool = false,
/// Whether this atom has a range extension thunk.
thunk: bool = false,
/// Whether this atom has FDE records.
fde: bool = false,
};
const x86_64 = struct {
@ -1232,10 +1292,10 @@ const x86_64 = struct {
.TLSGD => {
if (target.flags.has_tlsgd) {
const S_ = @as(i64, @intCast(target.tlsGdAddress(elf_file)));
const S_ = target.tlsGdAddress(elf_file);
try cwriter.writeInt(i32, @as(i32, @intCast(S_ + A - P)), .little);
} else if (target.flags.has_gottp) {
const S_ = @as(i64, @intCast(target.gotTpAddress(elf_file)));
const S_ = target.gotTpAddress(elf_file);
try x86_64.relaxTlsGdToIe(atom, &.{ rel, it.next().? }, @intCast(S_ - P), elf_file, stream);
} else {
try x86_64.relaxTlsGdToLe(
@ -1251,13 +1311,13 @@ const x86_64 = struct {
.TLSLD => {
if (elf_file.got.tlsld_index) |entry_index| {
const tlsld_entry = elf_file.got.entries.items[entry_index];
const S_ = @as(i64, @intCast(tlsld_entry.address(elf_file)));
const S_ = tlsld_entry.address(elf_file);
try cwriter.writeInt(i32, @as(i32, @intCast(S_ + A - P)), .little);
} else {
try x86_64.relaxTlsLdToLe(
atom,
&.{ rel, it.next().? },
@as(i32, @intCast(TP - @as(i64, @intCast(elf_file.tlsAddress())))),
@as(i32, @intCast(TP - elf_file.tlsAddress())),
elf_file,
stream,
);
@ -1266,7 +1326,7 @@ const x86_64 = struct {
.GOTPC32_TLSDESC => {
if (target.flags.has_tlsdesc) {
const S_ = @as(i64, @intCast(target.tlsDescAddress(elf_file)));
const S_ = target.tlsDescAddress(elf_file);
try cwriter.writeInt(i32, @as(i32, @intCast(S_ + A - P)), .little);
} else {
x86_64.relaxGotPcTlsDesc(code[r_offset - 3 ..]) catch {
@ -1290,7 +1350,7 @@ const x86_64 = struct {
.GOTTPOFF => {
if (target.flags.has_gottp) {
const S_ = @as(i64, @intCast(target.gotTpAddress(elf_file)));
const S_ = target.gotTpAddress(elf_file);
try cwriter.writeInt(i32, @as(i32, @intCast(S_ + A - P)), .little);
} else {
x86_64.relaxGotTpOff(code[r_offset - 3 ..]);
@ -1333,9 +1393,18 @@ const x86_64 = struct {
.@"16" => try cwriter.writeInt(u16, @as(u16, @bitCast(@as(i16, @intCast(S + A)))), .little),
.@"32" => try cwriter.writeInt(u32, @as(u32, @bitCast(@as(i32, @intCast(S + A)))), .little),
.@"32S" => try cwriter.writeInt(i32, @as(i32, @intCast(S + A)), .little),
.@"64" => try cwriter.writeInt(i64, S + A, .little),
.DTPOFF32 => try cwriter.writeInt(i32, @as(i32, @intCast(S + A - DTP)), .little),
.DTPOFF64 => try cwriter.writeInt(i64, S + A - DTP, .little),
.@"64" => if (atom.debugTombstoneValue(target.*, elf_file)) |value|
try cwriter.writeInt(u64, value, .little)
else
try cwriter.writeInt(i64, S + A, .little),
.DTPOFF32 => if (atom.debugTombstoneValue(target.*, elf_file)) |value|
try cwriter.writeInt(u64, value, .little)
else
try cwriter.writeInt(i32, @as(i32, @intCast(S + A - DTP)), .little),
.DTPOFF64 => if (atom.debugTombstoneValue(target.*, elf_file)) |value|
try cwriter.writeInt(u64, value, .little)
else
try cwriter.writeInt(i64, S + A - DTP, .little),
.GOTOFF64 => try cwriter.writeInt(i64, S + A - GOT, .little),
.GOTPC64 => try cwriter.writeInt(i64, GOT + A, .little),
.SIZE32 => {
@ -1717,7 +1786,7 @@ const aarch64 = struct {
.object => |x| x.symbols.items[rel.r_sym()],
else => unreachable,
};
const S_: i64 = @intCast(th.targetAddress(target_index, elf_file));
const S_ = th.targetAddress(target_index, elf_file);
break :blk math.cast(i28, S_ + A - P) orelse return error.Overflow;
};
aarch64_util.writeBranchImm(disp, code);
@ -1735,16 +1804,12 @@ const aarch64 = struct {
.ADR_PREL_PG_HI21 => {
// TODO: check for relaxation of ADRP+ADD
const saddr = @as(u64, @intCast(P));
const taddr = @as(u64, @intCast(S + A));
const pages = @as(u21, @bitCast(try aarch64_util.calcNumberOfPages(saddr, taddr)));
const pages = @as(u21, @bitCast(try aarch64_util.calcNumberOfPages(P, S + A)));
aarch64_util.writeAdrpInst(pages, code);
},
.ADR_GOT_PAGE => if (target.flags.has_got) {
const saddr = @as(u64, @intCast(P));
const taddr = @as(u64, @intCast(G + GOT + A));
const pages = @as(u21, @bitCast(try aarch64_util.calcNumberOfPages(saddr, taddr)));
const pages = @as(u21, @bitCast(try aarch64_util.calcNumberOfPages(P, G + GOT + A)));
aarch64_util.writeAdrpInst(pages, code);
} else {
// TODO: relax
@ -1799,46 +1864,38 @@ const aarch64 = struct {
},
.TLSIE_ADR_GOTTPREL_PAGE21 => {
const S_: i64 = @intCast(target.gotTpAddress(elf_file));
const saddr: u64 = @intCast(P);
const taddr: u64 = @intCast(S_ + A);
relocs_log.debug(" [{x} => {x}]", .{ P, taddr });
const pages: u21 = @bitCast(try aarch64_util.calcNumberOfPages(saddr, taddr));
const S_ = target.gotTpAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
const pages: u21 = @bitCast(try aarch64_util.calcNumberOfPages(P, S_ + A));
aarch64_util.writeAdrpInst(pages, code);
},
.TLSIE_LD64_GOTTPREL_LO12_NC => {
const S_: i64 = @intCast(target.gotTpAddress(elf_file));
const taddr: u64 = @intCast(S_ + A);
relocs_log.debug(" [{x} => {x}]", .{ P, taddr });
const offset: u12 = try math.divExact(u12, @truncate(taddr), 8);
const S_ = target.gotTpAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
const offset: u12 = try math.divExact(u12, @truncate(@as(u64, @bitCast(S_ + A))), 8);
aarch64_util.writeLoadStoreRegInst(offset, code);
},
.TLSGD_ADR_PAGE21 => {
const S_: i64 = @intCast(target.tlsGdAddress(elf_file));
const saddr: u64 = @intCast(P);
const taddr: u64 = @intCast(S_ + A);
relocs_log.debug(" [{x} => {x}]", .{ P, taddr });
const pages: u21 = @bitCast(try aarch64_util.calcNumberOfPages(saddr, taddr));
const S_ = target.tlsGdAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
const pages: u21 = @bitCast(try aarch64_util.calcNumberOfPages(P, S_ + A));
aarch64_util.writeAdrpInst(pages, code);
},
.TLSGD_ADD_LO12_NC => {
const S_: i64 = @intCast(target.tlsGdAddress(elf_file));
const taddr: u64 = @intCast(S_ + A);
relocs_log.debug(" [{x} => {x}]", .{ P, taddr });
const offset: u12 = @truncate(taddr);
const S_ = target.tlsGdAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
const offset: u12 = @truncate(@as(u64, @bitCast(S_ + A)));
aarch64_util.writeAddImmInst(offset, code);
},
.TLSDESC_ADR_PAGE21 => {
if (target.flags.has_tlsdesc) {
const S_: i64 = @intCast(target.tlsDescAddress(elf_file));
const saddr: u64 = @intCast(P);
const taddr: u64 = @intCast(S_ + A);
relocs_log.debug(" [{x} => {x}]", .{ P, taddr });
const pages: u21 = @bitCast(try aarch64_util.calcNumberOfPages(saddr, taddr));
const S_ = target.tlsDescAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
const pages: u21 = @bitCast(try aarch64_util.calcNumberOfPages(P, S_ + A));
aarch64_util.writeAdrpInst(pages, code);
} else {
relocs_log.debug(" relaxing adrp => nop", .{});
@ -1848,10 +1905,9 @@ const aarch64 = struct {
.TLSDESC_LD64_LO12 => {
if (target.flags.has_tlsdesc) {
const S_: i64 = @intCast(target.tlsDescAddress(elf_file));
const taddr: u64 = @intCast(S_ + A);
relocs_log.debug(" [{x} => {x}]", .{ P, taddr });
const offset: u12 = try math.divExact(u12, @truncate(taddr), 8);
const S_ = target.tlsDescAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
const offset: u12 = try math.divExact(u12, @truncate(@as(u64, @bitCast(S_ + A))), 8);
aarch64_util.writeLoadStoreRegInst(offset, code);
} else {
relocs_log.debug(" relaxing ldr => nop", .{});
@ -1861,10 +1917,9 @@ const aarch64 = struct {
.TLSDESC_ADD_LO12 => {
if (target.flags.has_tlsdesc) {
const S_: i64 = @intCast(target.tlsDescAddress(elf_file));
const taddr: u64 = @intCast(S_ + A);
relocs_log.debug(" [{x} => {x}]", .{ P, taddr });
const offset: u12 = @truncate(taddr);
const S_ = target.tlsDescAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
const offset: u12 = @truncate(@as(u64, @bitCast(S_ + A)));
aarch64_util.writeAddImmInst(offset, code);
} else {
const old_inst = Instruction{
@ -1909,7 +1964,6 @@ const aarch64 = struct {
) !void {
_ = it;
_ = code;
_ = target;
const r_type: elf.R_AARCH64 = @enumFromInt(rel.r_type());
const cwriter = stream.writer();
@ -1919,7 +1973,10 @@ const aarch64 = struct {
switch (r_type) {
.NONE => unreachable,
.ABS32 => try cwriter.writeInt(i32, @as(i32, @intCast(S + A)), .little),
.ABS64 => try cwriter.writeInt(i64, S + A, .little),
.ABS64 => if (atom.debugTombstoneValue(target.*, elf_file)) |value|
try cwriter.writeInt(u64, value, .little)
else
try cwriter.writeInt(i64, S + A, .little),
else => try atom.reportUnhandledRelocError(rel, elf_file),
}
}
@ -2044,7 +2101,7 @@ const riscv = struct {
const atom_addr = atom.address(elf_file);
const pos = it.pos;
const pair = while (it.prev()) |pair| {
if (S == atom_addr + pair.r_offset) break pair;
if (S == atom_addr + @as(i64, @intCast(pair.r_offset))) break pair;
} else {
// TODO: implement searching forward
var err = try elf_file.addErrorWithNotes(1);
@ -2062,10 +2119,10 @@ const riscv = struct {
.object => |x| elf_file.symbol(x.symbols.items[pair.r_sym()]),
else => unreachable,
};
const S_ = @as(i64, @intCast(target_.address(.{}, elf_file)));
const S_ = target_.address(.{}, elf_file);
const A_ = pair.r_addend;
const P_ = @as(i64, @intCast(atom_addr + pair.r_offset));
const G_ = @as(i64, @intCast(target_.gotAddress(elf_file))) - GOT;
const P_ = atom_addr + @as(i64, @intCast(pair.r_offset));
const G_ = target_.gotAddress(elf_file) - GOT;
const disp = switch (@as(elf.R_RISCV, @enumFromInt(pair.r_type()))) {
.PCREL_HI20 => math.cast(i32, S_ + A_ - P_) orelse return error.Overflow,
.GOT_HI20 => math.cast(i32, G_ + GOT + A_ - P_) orelse return error.Overflow,
@ -2093,7 +2150,6 @@ const riscv = struct {
code: []u8,
stream: anytype,
) !void {
_ = target;
_ = it;
const r_type: elf.R_RISCV = @enumFromInt(rel.r_type());
@ -2108,7 +2164,10 @@ const riscv = struct {
.NONE => unreachable,
.@"32" => try cwriter.writeInt(i32, @as(i32, @intCast(S + A)), .little),
.@"64" => try cwriter.writeInt(i64, S + A, .little),
.@"64" => if (atom.debugTombstoneValue(target.*, elf_file)) |value|
try cwriter.writeInt(u64, value, .little)
else
try cwriter.writeInt(i64, S + A, .little),
.ADD8 => riscv_util.writeAddend(i8, .add, code[r_offset..][0..1], S + A),
.SUB8 => riscv_util.writeAddend(i8, .sub, code[r_offset..][0..1], S + A),
@ -2167,6 +2226,23 @@ const RelocsIterator = struct {
}
};
pub const Extra = struct {
/// Index of the range extension thunk of this atom.
thunk: u32 = 0,
/// Start index of FDEs referencing this atom.
fde_start: u32 = 0,
/// Count of FDEs referencing this atom.
fde_count: u32 = 0,
/// Start index of relocations belonging to this atom.
rel_index: u32 = 0,
/// Count of relocations belonging to this atom.
rel_count: u32 = 0,
};
const std = @import("std");
const assert = std.debug.assert;
const elf = std.elf;

View file

@ -60,10 +60,10 @@ pub fn updateSymtabSize(self: *LinkerDefined, elf_file: *Elf) !void {
if (file_ptr.index() != self.index) continue;
global.flags.output_symtab = true;
if (global.isLocal(elf_file)) {
try global.setOutputSymtabIndex(self.output_symtab_ctx.nlocals, elf_file);
try global.addExtra(.{ .symtab = self.output_symtab_ctx.nlocals }, elf_file);
self.output_symtab_ctx.nlocals += 1;
} else {
try global.setOutputSymtabIndex(self.output_symtab_ctx.nglobals, elf_file);
try global.addExtra(.{ .symtab = self.output_symtab_ctx.nglobals }, elf_file);
self.output_symtab_ctx.nglobals += 1;
}
self.output_symtab_ctx.strsize += @as(u32, @intCast(global.name(elf_file).len)) + 1;

View file

@ -15,6 +15,8 @@ comdat_groups: std.ArrayListUnmanaged(Elf.ComdatGroup.Index) = .{},
comdat_group_data: std.ArrayListUnmanaged(u32) = .{},
relocs: std.ArrayListUnmanaged(elf.Elf64_Rela) = .{},
merge_sections: std.ArrayListUnmanaged(InputMergeSection.Index) = .{},
fdes: std.ArrayListUnmanaged(Fde) = .{},
cies: std.ArrayListUnmanaged(Cie) = .{},
eh_frame_data: std.ArrayListUnmanaged(u8) = .{},
@ -51,6 +53,7 @@ pub fn deinit(self: *Object, allocator: Allocator) void {
self.fdes.deinit(allocator);
self.cies.deinit(allocator);
self.eh_frame_data.deinit(allocator);
self.merge_sections.deinit(allocator);
}
pub fn parse(self: *Object, elf_file: *Elf) !void {
@ -242,11 +245,12 @@ fn initAtoms(self: *Object, allocator: Allocator, handle: std.fs.File, elf_file:
const relocs = try self.preadRelocsAlloc(allocator, handle, @intCast(i));
defer allocator.free(relocs);
atom.relocs_section_index = @intCast(i);
atom.rel_index = @intCast(self.relocs.items.len);
atom.rel_num = @intCast(relocs.len);
const rel_index: u32 = @intCast(self.relocs.items.len);
const rel_count: u32 = @intCast(relocs.len);
try atom.addExtra(.{ .rel_index = rel_index, .rel_count = rel_count }, elf_file);
try self.relocs.appendUnalignedSlice(allocator, relocs);
if (elf_file.getTarget().cpu.arch == .riscv64) {
sortRelocs(self.relocs.items[atom.rel_index..][0..atom.rel_num]);
sortRelocs(self.relocs.items[rel_index..][0..rel_count]);
}
}
},
@ -279,8 +283,7 @@ fn initOutputSection(self: Object, elf_file: *Elf, shdr: elf.Elf64_Shdr) error{O
const name = blk: {
const name = self.getString(shdr.sh_name);
if (elf_file.base.isRelocatable()) break :blk name;
if (shdr.sh_flags & elf.SHF_MERGE != 0 and shdr.sh_flags & elf.SHF_STRINGS == 0)
break :blk name; // TODO: consider dropping SHF_STRINGS once ICF is implemented
if (shdr.sh_flags & elf.SHF_MERGE != 0) break :blk name;
const sh_name_prefixes: []const [:0]const u8 = &.{
".text", ".data.rel.ro", ".data", ".rodata", ".bss.rel.ro", ".bss",
".init_array", ".fini_array", ".tbss", ".tdata", ".gcc_except_table", ".ctors",
@ -334,7 +337,6 @@ fn skipShdr(self: *Object, index: u32, elf_file: *Elf) bool {
const name = self.getString(shdr.sh_name);
const ignore = blk: {
if (mem.startsWith(u8, name, ".note")) break :blk true;
if (mem.startsWith(u8, name, ".comment")) break :blk true;
if (mem.startsWith(u8, name, ".llvm_addrsig")) break :blk true;
if (mem.startsWith(u8, name, ".riscv.attributes")) break :blk true; // TODO: riscv attributes
if (comp.config.debug_format == .strip and shdr.sh_flags & elf.SHF_ALLOC == 0 and
@ -353,7 +355,7 @@ fn initSymtab(self: *Object, allocator: Allocator, elf_file: *Elf) !void {
const index = try elf_file.addSymbol();
self.symbols.appendAssumeCapacity(index);
const sym_ptr = elf_file.symbol(index);
sym_ptr.value = sym.st_value;
sym_ptr.value = @intCast(sym.st_value);
sym_ptr.name_offset = sym.st_name;
sym_ptr.esym_index = @as(u32, @intCast(i));
sym_ptr.atom_index = if (sym.st_shndx == elf.SHN_ABS) 0 else self.atoms.items[sym.st_shndx];
@ -445,13 +447,14 @@ fn parseEhFrame(self: *Object, allocator: Allocator, handle: std.fs.File, shndx:
while (i < self.fdes.items.len) {
const fde = self.fdes.items[i];
const atom = fde.atom(elf_file);
atom.fde_start = i;
const start = i;
i += 1;
while (i < self.fdes.items.len) : (i += 1) {
const next_fde = self.fdes.items[i];
if (atom.atom_index != next_fde.atom(elf_file).atom_index) break;
}
atom.fde_end = i;
try atom.addExtra(.{ .fde_start = start, .fde_count = i - start }, elf_file);
atom.flags.fde = true;
}
}
@ -545,7 +548,7 @@ pub fn resolveSymbols(self: *Object, elf_file: *Elf) void {
elf.SHN_ABS, elf.SHN_COMMON => 0,
else => self.atoms.items[esym.st_shndx],
};
global.value = esym.st_value;
global.value = @intCast(esym.st_value);
global.atom_index = atom_index;
global.esym_index = esym_index;
global.file_index = self.index;
@ -657,6 +660,178 @@ pub fn checkDuplicates(self: *Object, dupes: anytype, elf_file: *Elf) error{OutO
}
}
pub fn initMergeSections(self: *Object, elf_file: *Elf) !void {
const gpa = elf_file.base.comp.gpa;
try self.merge_sections.resize(gpa, self.shdrs.items.len);
@memset(self.merge_sections.items, 0);
for (self.shdrs.items, 0..) |shdr, shndx| {
if (shdr.sh_flags & elf.SHF_MERGE == 0) continue;
const atom_index = self.atoms.items[shndx];
const atom_ptr = elf_file.atom(atom_index) orelse continue;
if (!atom_ptr.flags.alive) continue;
if (atom_ptr.relocs(elf_file).len > 0) continue;
const imsec_idx = try elf_file.addInputMergeSection();
const imsec = elf_file.inputMergeSection(imsec_idx).?;
self.merge_sections.items[shndx] = imsec_idx;
imsec.merge_section_index = try elf_file.getOrCreateMergeSection(atom_ptr.name(elf_file), shdr.sh_flags, shdr.sh_type);
imsec.atom_index = atom_index;
const data = try self.codeDecompressAlloc(elf_file, atom_index);
defer gpa.free(data);
if (shdr.sh_flags & elf.SHF_STRINGS != 0) {
const sh_entsize: u32 = switch (shdr.sh_entsize) {
// According to mold's source code, GHC emits MS sections with sh_entsize = 0.
// This actually can also happen for output created with `-r` mode.
0 => 1,
else => |x| @intCast(x),
};
const isNull = struct {
fn isNull(slice: []u8) bool {
for (slice) |x| if (x != 0) return false;
return true;
}
}.isNull;
var start: u32 = 0;
while (start < data.len) {
var end = start;
while (end < data.len - sh_entsize and !isNull(data[end .. end + sh_entsize])) : (end += sh_entsize) {}
if (!isNull(data[end .. end + sh_entsize])) {
var err = try elf_file.addErrorWithNotes(1);
try err.addMsg(elf_file, "string not null terminated", .{});
try err.addNote(elf_file, "in {}:{s}", .{ self.fmtPath(), atom_ptr.name(elf_file) });
return error.MalformedObject;
}
end += sh_entsize;
const string = data[start..end];
try imsec.insert(gpa, string);
try imsec.offsets.append(gpa, start);
start = end;
}
} else {
const sh_entsize: u32 = @intCast(shdr.sh_entsize);
if (sh_entsize == 0) continue; // Malformed, don't split but don't error out
if (shdr.sh_size % sh_entsize != 0) {
var err = try elf_file.addErrorWithNotes(1);
try err.addMsg(elf_file, "size not a multiple of sh_entsize", .{});
try err.addNote(elf_file, "in {}:{s}", .{ self.fmtPath(), atom_ptr.name(elf_file) });
return error.MalformedObject;
}
var pos: u32 = 0;
while (pos < data.len) : (pos += sh_entsize) {
const string = data.ptr[pos..][0..sh_entsize];
try imsec.insert(gpa, string);
try imsec.offsets.append(gpa, pos);
}
}
atom_ptr.flags.alive = false;
}
}
pub fn resolveMergeSubsections(self: *Object, elf_file: *Elf) !void {
const gpa = elf_file.base.comp.gpa;
for (self.merge_sections.items) |index| {
const imsec = elf_file.inputMergeSection(index) orelse continue;
if (imsec.offsets.items.len == 0) continue;
const msec = elf_file.mergeSection(imsec.merge_section_index);
const atom_ptr = elf_file.atom(imsec.atom_index).?;
const isec = atom_ptr.inputShdr(elf_file);
try imsec.subsections.resize(gpa, imsec.strings.items.len);
for (imsec.strings.items, imsec.subsections.items) |str, *imsec_msub| {
const string = imsec.bytes.items[str.pos..][0..str.len];
const res = try msec.insert(gpa, string);
if (!res.found_existing) {
const msub_index = try elf_file.addMergeSubsection();
const msub = elf_file.mergeSubsection(msub_index);
msub.merge_section_index = imsec.merge_section_index;
msub.string_index = res.key.pos;
msub.alignment = atom_ptr.alignment;
msub.size = res.key.len;
msub.entsize = math.cast(u32, isec.sh_entsize) orelse return error.Overflow;
msub.alive = !elf_file.base.gc_sections or isec.sh_flags & elf.SHF_ALLOC == 0;
res.sub.* = msub_index;
}
imsec_msub.* = res.sub.*;
}
imsec.clearAndFree(gpa);
}
for (self.symtab.items, 0..) |*esym, idx| {
const sym_index = self.symbols.items[idx];
const sym = elf_file.symbol(sym_index);
if (esym.st_shndx == elf.SHN_COMMON or esym.st_shndx == elf.SHN_UNDEF or esym.st_shndx == elf.SHN_ABS) continue;
const imsec_index = self.merge_sections.items[esym.st_shndx];
const imsec = elf_file.inputMergeSection(imsec_index) orelse continue;
if (imsec.offsets.items.len == 0) continue;
const msub_index, const offset = imsec.findSubsection(@intCast(esym.st_value)) orelse {
var err = try elf_file.addErrorWithNotes(2);
try err.addMsg(elf_file, "invalid symbol value: {x}", .{esym.st_value});
try err.addNote(elf_file, "for symbol {s}", .{sym.name(elf_file)});
try err.addNote(elf_file, "in {}", .{self.fmtPath()});
return error.MalformedObject;
};
try sym.addExtra(.{ .subsection = msub_index }, elf_file);
sym.flags.merge_subsection = true;
sym.value = offset;
}
for (self.atoms.items) |atom_index| {
const atom_ptr = elf_file.atom(atom_index) orelse continue;
if (!atom_ptr.flags.alive) continue;
const extras = atom_ptr.extra(elf_file) orelse continue;
const relocs = self.relocs.items[extras.rel_index..][0..extras.rel_count];
for (relocs) |*rel| {
const esym = self.symtab.items[rel.r_sym()];
if (esym.st_type() != elf.STT_SECTION) continue;
const imsec_index = self.merge_sections.items[esym.st_shndx];
const imsec = elf_file.inputMergeSection(imsec_index) orelse continue;
if (imsec.offsets.items.len == 0) continue;
const msub_index, const offset = imsec.findSubsection(@intCast(@as(i64, @intCast(esym.st_value)) + rel.r_addend)) orelse {
var err = try elf_file.addErrorWithNotes(1);
try err.addMsg(elf_file, "invalid relocation at offset 0x{x}", .{rel.r_offset});
try err.addNote(elf_file, "in {}:{s}", .{ self.fmtPath(), atom_ptr.name(elf_file) });
return error.MalformedObject;
};
const msub = elf_file.mergeSubsection(msub_index);
const msec = msub.mergeSection(elf_file);
const out_sym_idx: u64 = @intCast(self.symbols.items.len);
try self.symbols.ensureUnusedCapacity(gpa, 1);
const name = try std.fmt.allocPrint(gpa, "{s}$subsection{d}", .{ msec.name(elf_file), msub_index });
defer gpa.free(name);
const sym_index = try elf_file.addSymbol();
const sym = elf_file.symbol(sym_index);
sym.* = .{
.value = @bitCast(@as(i64, @intCast(offset)) - rel.r_addend),
.name_offset = try self.addString(gpa, name),
.esym_index = rel.r_sym(),
.file_index = self.index,
};
try sym.addExtra(.{ .subsection = msub_index }, elf_file);
sym.flags.merge_subsection = true;
self.symbols.addOneAssumeCapacity().* = sym_index;
rel.r_info = (out_sym_idx << 32) | rel.r_type();
}
}
}
/// We will create dummy shdrs per each resolved common symbols to make it
/// play nicely with the rest of the system.
pub fn convertCommonSymbols(self: *Object, elf_file: *Elf) !void {
@ -747,6 +922,11 @@ pub fn addAtomsToOutputSections(self: *Object, elf_file: *Elf) !void {
for (self.locals()) |local_index| {
const local = elf_file.symbol(local_index);
if (local.mergeSubsection(elf_file)) |msub| {
if (!msub.alive) continue;
local.output_section_index = msub.mergeSection(elf_file).output_section_index;
continue;
}
const atom = local.atom(elf_file) orelse continue;
if (!atom.flags.alive) continue;
local.output_section_index = atom.output_section_index;
@ -754,11 +934,23 @@ pub fn addAtomsToOutputSections(self: *Object, elf_file: *Elf) !void {
for (self.globals()) |global_index| {
const global = elf_file.symbol(global_index);
if (global.file(elf_file).?.index() != self.index) continue;
if (global.mergeSubsection(elf_file)) |msub| {
if (!msub.alive) continue;
global.output_section_index = msub.mergeSection(elf_file).output_section_index;
continue;
}
const atom = global.atom(elf_file) orelse continue;
if (!atom.flags.alive) continue;
if (global.file(elf_file).?.index() != self.index) continue;
global.output_section_index = atom.output_section_index;
}
for (self.symbols.items[self.symtab.items.len..]) |local_index| {
const local = elf_file.symbol(local_index);
const msub = local.mergeSubsection(elf_file).?;
if (!msub.alive) continue;
local.output_section_index = msub.mergeSection(elf_file).output_section_index;
}
}
pub fn initRelaSections(self: Object, elf_file: *Elf) !void {
@ -817,13 +1009,15 @@ pub fn updateArSymtab(self: Object, ar_symtab: *Archive.ArSymtab, elf_file: *Elf
}
pub fn updateArSize(self: *Object, elf_file: *Elf) !void {
const handle = elf_file.fileHandle(self.file_handle);
const size = (try handle.stat()).size;
self.output_ar_state.size = size;
self.output_ar_state.size = if (self.archive) |ar| ar.size else size: {
const handle = elf_file.fileHandle(self.file_handle);
break :size (try handle.stat()).size;
};
}
pub fn writeAr(self: Object, elf_file: *Elf, writer: anytype) !void {
const size = std.math.cast(usize, self.output_ar_state.size) orelse return error.Overflow;
const offset: u64 = if (self.archive) |ar| ar.offset else 0;
const name = self.path;
const hdr = Archive.setArHdr(.{
.name = if (name.len <= Archive.max_member_name_len)
@ -837,15 +1031,23 @@ pub fn writeAr(self: Object, elf_file: *Elf, writer: anytype) !void {
const gpa = elf_file.base.comp.gpa;
const data = try gpa.alloc(u8, size);
defer gpa.free(data);
const amt = try handle.preadAll(data, 0);
const amt = try handle.preadAll(data, offset);
if (amt != size) return error.InputOutput;
try writer.writeAll(data);
}
pub fn updateSymtabSize(self: *Object, elf_file: *Elf) !void {
const isAlive = struct {
fn isAlive(sym: *const Symbol, ctx: *Elf) bool {
if (sym.mergeSubsection(ctx)) |msub| return msub.alive;
if (sym.atom(ctx)) |atom_ptr| return atom_ptr.flags.alive;
return true;
}
}.isAlive;
for (self.locals()) |local_index| {
const local = elf_file.symbol(local_index);
if (local.atom(elf_file)) |atom| if (!atom.flags.alive) continue;
if (!isAlive(local, elf_file)) continue;
const esym = local.elfSym(elf_file);
switch (esym.st_type()) {
elf.STT_SECTION => continue,
@ -853,7 +1055,7 @@ pub fn updateSymtabSize(self: *Object, elf_file: *Elf) !void {
else => {},
}
local.flags.output_symtab = true;
try local.setOutputSymtabIndex(self.output_symtab_ctx.nlocals, elf_file);
try local.addExtra(.{ .symtab = self.output_symtab_ctx.nlocals }, elf_file);
self.output_symtab_ctx.nlocals += 1;
self.output_symtab_ctx.strsize += @as(u32, @intCast(local.name(elf_file).len)) + 1;
}
@ -862,13 +1064,13 @@ pub fn updateSymtabSize(self: *Object, elf_file: *Elf) !void {
const global = elf_file.symbol(global_index);
const file_ptr = global.file(elf_file) orelse continue;
if (file_ptr.index() != self.index) continue;
if (global.atom(elf_file)) |atom| if (!atom.flags.alive) continue;
if (!isAlive(global, elf_file)) continue;
global.flags.output_symtab = true;
if (global.isLocal(elf_file)) {
try global.setOutputSymtabIndex(self.output_symtab_ctx.nlocals, elf_file);
try global.addExtra(.{ .symtab = self.output_symtab_ctx.nlocals }, elf_file);
self.output_symtab_ctx.nlocals += 1;
} else {
try global.setOutputSymtabIndex(self.output_symtab_ctx.nglobals, elf_file);
try global.addExtra(.{ .symtab = self.output_symtab_ctx.nglobals }, elf_file);
self.output_symtab_ctx.nglobals += 1;
}
self.output_symtab_ctx.strsize += @as(u32, @intCast(global.name(elf_file).len)) + 1;
@ -902,14 +1104,16 @@ pub fn writeSymtab(self: Object, elf_file: *Elf) void {
pub fn locals(self: Object) []const Symbol.Index {
if (self.symbols.items.len == 0) return &[0]Symbol.Index{};
const end = self.first_global orelse self.symbols.items.len;
assert(self.symbols.items.len >= self.symtab.items.len);
const end = self.first_global orelse self.symtab.items.len;
return self.symbols.items[0..end];
}
pub fn globals(self: Object) []const Symbol.Index {
if (self.symbols.items.len == 0) return &[0]Symbol.Index{};
const start = self.first_global orelse self.symbols.items.len;
return self.symbols.items[start..];
assert(self.symbols.items.len >= self.symtab.items.len);
const start = self.first_global orelse self.symtab.items.len;
return self.symbols.items[start..self.symtab.items.len];
}
/// Returns atom's code and optionally uncompresses data if required (for compressed sections).
@ -954,6 +1158,14 @@ pub fn getString(self: Object, off: u32) [:0]const u8 {
return mem.sliceTo(@as([*:0]const u8, @ptrCast(self.strtab.items.ptr + off)), 0);
}
fn addString(self: *Object, allocator: Allocator, str: []const u8) !u32 {
const off: u32 = @intCast(self.strtab.items.len);
try self.strtab.ensureUnusedCapacity(allocator, str.len + 1);
self.strtab.appendSliceAssumeCapacity(str);
self.strtab.appendAssumeCapacity(0);
return off;
}
/// Caller owns the memory.
fn preadShdrContentsAlloc(self: Object, allocator: Allocator, handle: std.fs.File, index: u32) ![]u8 {
assert(index < self.shdrs.items.len);
@ -1139,6 +1351,7 @@ fn formatPath(
const InArchive = struct {
path: []const u8,
offset: u64,
size: u32,
};
const Object = @This();
@ -1159,5 +1372,6 @@ const Cie = eh_frame.Cie;
const Elf = @import("../Elf.zig");
const Fde = eh_frame.Fde;
const File = @import("file.zig").File;
const InputMergeSection = @import("merge_section.zig").InputMergeSection;
const Symbol = @import("Symbol.zig");
const Alignment = Atom.Alignment;

View file

@ -231,7 +231,7 @@ pub fn resolveSymbols(self: *SharedObject, elf_file: *Elf) void {
const global = elf_file.symbol(index);
if (self.asFile().symbolRank(this_sym, false) < global.symbolRank(elf_file)) {
global.value = this_sym.st_value;
global.value = @intCast(this_sym.st_value);
global.atom_index = 0;
global.esym_index = esym_index;
global.version_index = self.versyms.items[esym_index];
@ -269,7 +269,7 @@ pub fn updateSymtabSize(self: *SharedObject, elf_file: *Elf) !void {
if (file_ptr.index() != self.index) continue;
if (global.isLocal(elf_file)) continue;
global.flags.output_symtab = true;
try global.setOutputSymtabIndex(self.output_symtab_ctx.nglobals, elf_file);
try global.addExtra(.{ .symtab = self.output_symtab_ctx.nglobals }, elf_file);
self.output_symtab_ctx.nglobals += 1;
self.output_symtab_ctx.strsize += @as(u32, @intCast(global.name(elf_file).len)) + 1;
}

View file

@ -1,7 +1,7 @@
//! Represents a defined symbol.
/// Allocated address value of this symbol.
value: u64 = 0,
value: i64 = 0,
/// Offset into the linker's string table.
name_offset: u32 = 0,
@ -14,7 +14,7 @@ file_index: File.Index = 0,
/// Use `atom` to get the pointer to the atom.
atom_index: Atom.Index = 0,
/// Assigned output section index for this atom.
/// Assigned output section index for this symbol.
output_section_index: u32 = 0,
/// Index of the source symbol this symbol references.
@ -33,7 +33,8 @@ extra_index: u32 = 0,
pub fn isAbs(symbol: Symbol, elf_file: *Elf) bool {
const file_ptr = symbol.file(elf_file).?;
if (file_ptr == .shared_object) return symbol.elfSym(elf_file).st_shndx == elf.SHN_ABS;
return !symbol.flags.import and symbol.atom(elf_file) == null and symbol.outputShndx() == null and
return !symbol.flags.import and symbol.atom(elf_file) == null and
symbol.mergeSubsection(elf_file) == null and symbol.outputShndx() == null and
file_ptr != .linker_defined;
}
@ -70,6 +71,12 @@ pub fn atom(symbol: Symbol, elf_file: *Elf) ?*Atom {
return elf_file.atom(symbol.atom_index);
}
pub fn mergeSubsection(symbol: Symbol, elf_file: *Elf) ?*MergeSubsection {
if (!symbol.flags.merge_subsection) return null;
const extras = symbol.extra(elf_file).?;
return elf_file.mergeSubsection(extras.subsection);
}
pub fn file(symbol: Symbol, elf_file: *Elf) ?File {
return elf_file.file(symbol.file_index);
}
@ -92,7 +99,11 @@ pub fn symbolRank(symbol: Symbol, elf_file: *Elf) u32 {
return file_ptr.symbolRank(sym, in_archive);
}
pub fn address(symbol: Symbol, opts: struct { plt: bool = true }, elf_file: *Elf) u64 {
pub fn address(symbol: Symbol, opts: struct { plt: bool = true }, elf_file: *Elf) i64 {
if (symbol.mergeSubsection(elf_file)) |msub| {
if (!msub.alive) return 0;
return msub.address(elf_file) + symbol.value;
}
if (symbol.flags.has_copy_rel) {
return symbol.copyRelAddress(elf_file);
}
@ -108,19 +119,23 @@ pub fn address(symbol: Symbol, opts: struct { plt: bool = true }, elf_file: *Elf
if (!atom_ptr.flags.alive) {
if (mem.eql(u8, atom_ptr.name(elf_file), ".eh_frame")) {
const sym_name = symbol.name(elf_file);
const sh_addr, const sh_size = blk: {
const shndx = elf_file.eh_frame_section_index orelse break :blk .{ 0, 0 };
const shdr = elf_file.shdrs.items[shndx];
break :blk .{ shdr.sh_addr, shdr.sh_size };
};
if (mem.startsWith(u8, sym_name, "__EH_FRAME_BEGIN__") or
mem.startsWith(u8, sym_name, "__EH_FRAME_LIST__") or
mem.startsWith(u8, sym_name, ".eh_frame_seg") or
symbol.elfSym(elf_file).st_type() == elf.STT_SECTION)
{
return elf_file.shdrs.items[elf_file.eh_frame_section_index.?].sh_addr;
return @intCast(sh_addr);
}
if (mem.startsWith(u8, sym_name, "__FRAME_END__") or
mem.startsWith(u8, sym_name, "__EH_FRAME_LIST_END__"))
{
const shdr = elf_file.shdrs.items[elf_file.eh_frame_section_index.?];
return shdr.sh_addr + shdr.sh_size;
return @intCast(sh_addr + sh_size);
}
// TODO I think we potentially should error here
@ -143,65 +158,57 @@ pub fn outputSymtabIndex(symbol: Symbol, elf_file: *Elf) ?u32 {
return if (symbol.isLocal(elf_file)) idx + symtab_ctx.ilocal else idx + symtab_ctx.iglobal;
}
pub fn setOutputSymtabIndex(symbol: *Symbol, index: u32, elf_file: *Elf) !void {
if (symbol.extra(elf_file)) |extras| {
var new_extras = extras;
new_extras.symtab = index;
symbol.setExtra(new_extras, elf_file);
} else try symbol.addExtra(.{ .symtab = index }, elf_file);
}
pub fn gotAddress(symbol: Symbol, elf_file: *Elf) u64 {
pub fn gotAddress(symbol: Symbol, elf_file: *Elf) i64 {
if (!symbol.flags.has_got) return 0;
const extras = symbol.extra(elf_file).?;
const entry = elf_file.got.entries.items[extras.got];
return entry.address(elf_file);
}
pub fn pltGotAddress(symbol: Symbol, elf_file: *Elf) u64 {
pub fn pltGotAddress(symbol: Symbol, elf_file: *Elf) i64 {
if (!(symbol.flags.has_plt and symbol.flags.has_got)) return 0;
const extras = symbol.extra(elf_file).?;
const shdr = elf_file.shdrs.items[elf_file.plt_got_section_index.?];
const cpu_arch = elf_file.getTarget().cpu.arch;
return shdr.sh_addr + extras.plt_got * PltGotSection.entrySize(cpu_arch);
return @intCast(shdr.sh_addr + extras.plt_got * PltGotSection.entrySize(cpu_arch));
}
pub fn pltAddress(symbol: Symbol, elf_file: *Elf) u64 {
pub fn pltAddress(symbol: Symbol, elf_file: *Elf) i64 {
if (!symbol.flags.has_plt) return 0;
const extras = symbol.extra(elf_file).?;
const shdr = elf_file.shdrs.items[elf_file.plt_section_index.?];
const cpu_arch = elf_file.getTarget().cpu.arch;
return shdr.sh_addr + extras.plt * PltSection.entrySize(cpu_arch) + PltSection.preambleSize(cpu_arch);
return @intCast(shdr.sh_addr + extras.plt * PltSection.entrySize(cpu_arch) + PltSection.preambleSize(cpu_arch));
}
pub fn gotPltAddress(symbol: Symbol, elf_file: *Elf) u64 {
pub fn gotPltAddress(symbol: Symbol, elf_file: *Elf) i64 {
if (!symbol.flags.has_plt) return 0;
const extras = symbol.extra(elf_file).?;
const shdr = elf_file.shdrs.items[elf_file.got_plt_section_index.?];
return shdr.sh_addr + extras.plt * 8 + GotPltSection.preamble_size;
return @intCast(shdr.sh_addr + extras.plt * 8 + GotPltSection.preamble_size);
}
pub fn copyRelAddress(symbol: Symbol, elf_file: *Elf) u64 {
pub fn copyRelAddress(symbol: Symbol, elf_file: *Elf) i64 {
if (!symbol.flags.has_copy_rel) return 0;
const shdr = elf_file.shdrs.items[elf_file.copy_rel_section_index.?];
return shdr.sh_addr + symbol.value;
return @as(i64, @intCast(shdr.sh_addr)) + symbol.value;
}
pub fn tlsGdAddress(symbol: Symbol, elf_file: *Elf) u64 {
pub fn tlsGdAddress(symbol: Symbol, elf_file: *Elf) i64 {
if (!symbol.flags.has_tlsgd) return 0;
const extras = symbol.extra(elf_file).?;
const entry = elf_file.got.entries.items[extras.tlsgd];
return entry.address(elf_file);
}
pub fn gotTpAddress(symbol: Symbol, elf_file: *Elf) u64 {
pub fn gotTpAddress(symbol: Symbol, elf_file: *Elf) i64 {
if (!symbol.flags.has_gottp) return 0;
const extras = symbol.extra(elf_file).?;
const entry = elf_file.got.entries.items[extras.gottp];
return entry.address(elf_file);
}
pub fn tlsDescAddress(symbol: Symbol, elf_file: *Elf) u64 {
pub fn tlsDescAddress(symbol: Symbol, elf_file: *Elf) i64 {
if (!symbol.flags.has_tlsdesc) return 0;
const extras = symbol.extra(elf_file).?;
const entry = elf_file.got.entries.items[extras.tlsdesc];
@ -221,7 +228,7 @@ pub fn getOrCreateZigGotEntry(symbol: *Symbol, symbol_index: Index, elf_file: *E
return .{ .found_existing = false, .index = index };
}
pub fn zigGotAddress(symbol: Symbol, elf_file: *Elf) u64 {
pub fn zigGotAddress(symbol: Symbol, elf_file: *Elf) i64 {
if (!symbol.flags.has_zig_got) return 0;
const extras = symbol.extra(elf_file).?;
return elf_file.zig_got.entryAddress(extras.zig_got, elf_file);
@ -240,8 +247,31 @@ pub fn dsoAlignment(symbol: Symbol, elf_file: *Elf) !u64 {
@min(alignment, try std.math.powi(u64, 2, @ctz(esym.st_value)));
}
pub fn addExtra(symbol: *Symbol, extras: Extra, elf_file: *Elf) !void {
symbol.extra_index = try elf_file.addSymbolExtra(extras);
const AddExtraOpts = struct {
got: ?u32 = null,
plt: ?u32 = null,
plt_got: ?u32 = null,
dynamic: ?u32 = null,
symtab: ?u32 = null,
copy_rel: ?u32 = null,
tlsgd: ?u32 = null,
gottp: ?u32 = null,
tlsdesc: ?u32 = null,
zig_got: ?u32 = null,
subsection: ?u32 = null,
};
pub fn addExtra(symbol: *Symbol, opts: AddExtraOpts, elf_file: *Elf) !void {
if (symbol.extra(elf_file) == null) {
symbol.extra_index = try elf_file.addSymbolExtra(.{});
}
var extras = symbol.extra(elf_file).?;
inline for (@typeInfo(@TypeOf(opts)).Struct.fields) |field| {
if (@field(opts, field.name)) |x| {
@field(extras, field.name) = x;
}
}
symbol.setExtra(extras, elf_file);
}
pub fn extra(symbol: Symbol, elf_file: *Elf) ?Extra {
@ -266,6 +296,7 @@ pub fn setOutputSym(symbol: Symbol, elf_file: *Elf, out: *elf.Elf64_Sym) void {
if (symbol.flags.has_copy_rel) break :blk @intCast(elf_file.copy_rel_section_index.?);
if (file_ptr == .shared_object or esym.st_shndx == elf.SHN_UNDEF) break :blk elf.SHN_UNDEF;
if (elf_file.base.isRelocatable() and esym.st_shndx == elf.SHN_COMMON) break :blk elf.SHN_COMMON;
if (symbol.mergeSubsection(elf_file)) |msub| break :blk @intCast(msub.mergeSection(elf_file).output_section_index);
if (symbol.atom(elf_file) == null and file_ptr != .linker_defined) break :blk elf.SHN_ABS;
break :blk @intCast(symbol.outputShndx() orelse elf.SHN_UNDEF);
};
@ -284,7 +315,7 @@ pub fn setOutputSym(symbol: Symbol, elf_file: *Elf, out: *elf.Elf64_Sym) void {
out.st_info = (st_bind << 4) | st_type;
out.st_other = esym.st_other;
out.st_shndx = st_shndx;
out.st_value = st_value;
out.st_value = @intCast(st_value);
out.st_size = esym.st_size;
}
@ -436,6 +467,9 @@ pub const Flags = packed struct {
/// TODO this is really not needed if only we operated on esyms between
/// codegen and ZigObject.
is_tls: bool = false,
/// Whether the symbol is a merge subsection.
merge_subsection: bool = false,
};
pub const Extra = struct {
@ -449,6 +483,7 @@ pub const Extra = struct {
gottp: u32 = 0,
tlsdesc: u32 = 0,
zig_got: u32 = 0,
subsection: u32 = 0,
};
pub const Index = u32;
@ -465,6 +500,7 @@ const File = @import("file.zig").File;
const GotSection = synthetic_sections.GotSection;
const GotPltSection = synthetic_sections.GotPltSection;
const LinkerDefined = @import("LinkerDefined.zig");
const MergeSubsection = @import("merge_section.zig").MergeSubsection;
const Object = @import("Object.zig");
const PltSection = synthetic_sections.PltSection;
const PltGotSection = synthetic_sections.PltGotSection;

View file

@ -343,7 +343,7 @@ pub fn resolveSymbols(self: *ZigObject, elf_file: *Elf) void {
atom.outputShndx().?
else
elf.SHN_UNDEF;
global.value = esym.st_value;
global.value = @intCast(esym.st_value);
global.atom_index = atom_index;
global.esym_index = esym_index;
global.file_index = self.index;
@ -566,7 +566,7 @@ pub fn updateSymtabSize(self: *ZigObject, elf_file: *Elf) !void {
else => {},
}
local.flags.output_symtab = true;
try local.setOutputSymtabIndex(self.output_symtab_ctx.nlocals, elf_file);
try local.addExtra(.{ .symtab = self.output_symtab_ctx.nlocals }, elf_file);
self.output_symtab_ctx.nlocals += 1;
self.output_symtab_ctx.strsize += @as(u32, @intCast(local.name(elf_file).len)) + 1;
}
@ -578,10 +578,10 @@ pub fn updateSymtabSize(self: *ZigObject, elf_file: *Elf) !void {
if (global.atom(elf_file)) |atom| if (!atom.flags.alive) continue;
global.flags.output_symtab = true;
if (global.isLocal(elf_file)) {
try global.setOutputSymtabIndex(self.output_symtab_ctx.nlocals, elf_file);
try global.addExtra(.{ .symtab = self.output_symtab_ctx.nlocals }, elf_file);
self.output_symtab_ctx.nlocals += 1;
} else {
try global.setOutputSymtabIndex(self.output_symtab_ctx.nglobals, elf_file);
try global.addExtra(.{ .symtab = self.output_symtab_ctx.nglobals }, elf_file);
self.output_symtab_ctx.nglobals += 1;
}
self.output_symtab_ctx.strsize += @as(u32, @intCast(global.name(elf_file).len)) + 1;
@ -631,7 +631,7 @@ pub fn codeAlloc(self: ZigObject, elf_file: *Elf, atom_index: Atom.Index) ![]u8
return code;
}
const file_offset = shdr.sh_offset + atom.value;
const file_offset = shdr.sh_offset + @as(u64, @intCast(atom.value));
const size = std.math.cast(usize, atom.size) orelse return error.Overflow;
const code = try gpa.alloc(u8, size);
errdefer gpa.free(code);
@ -659,7 +659,7 @@ pub fn getDeclVAddr(
.r_info = (@as(u64, @intCast(this_sym.esym_index)) << 32) | r_type,
.r_addend = reloc_info.addend,
});
return vaddr;
return @intCast(vaddr);
}
pub fn getAnonDeclVAddr(
@ -678,7 +678,7 @@ pub fn getAnonDeclVAddr(
.r_info = (@as(u64, @intCast(sym.esym_index)) << 32) | r_type,
.r_addend = reloc_info.addend,
});
return vaddr;
return @intCast(vaddr);
}
pub fn lowerAnonDecl(
@ -929,7 +929,7 @@ fn updateDeclCode(
if (old_size > 0 and elf_file.base.child_pid == null) {
const capacity = atom_ptr.capacity(elf_file);
const need_realloc = code.len > capacity or !required_alignment.check(atom_ptr.value);
const need_realloc = code.len > capacity or !required_alignment.check(@intCast(atom_ptr.value));
if (need_realloc) {
try atom_ptr.grow(elf_file);
log.debug("growing {} from 0x{x} to 0x{x}", .{ decl_name.fmt(&mod.intern_pool), old_vaddr, atom_ptr.value });
@ -984,7 +984,7 @@ fn updateDeclCode(
const shdr = elf_file.shdrs.items[shdr_index];
if (shdr.sh_type != elf.SHT_NOBITS) {
const file_offset = shdr.sh_offset + atom_ptr.value;
const file_offset = shdr.sh_offset + @as(u64, @intCast(atom_ptr.value));
try elf_file.base.file.?.pwriteAll(code, file_offset);
}
}
@ -1107,7 +1107,7 @@ pub fn updateFunc(
try self.dwarf.?.commitDeclState(
mod,
decl_index,
sym.address(.{}, elf_file),
@intCast(sym.address(.{}, elf_file)),
sym.atom(elf_file).?.size,
ds,
);
@ -1186,7 +1186,7 @@ pub fn updateDecl(
try self.dwarf.?.commitDeclState(
mod,
decl_index,
sym.address(.{}, elf_file),
@intCast(sym.address(.{}, elf_file)),
sym.atom(elf_file).?.size,
ds,
);
@ -1275,7 +1275,7 @@ fn updateLazySymbol(
}
const shdr = elf_file.shdrs.items[output_section_index];
const file_offset = shdr.sh_offset + atom_ptr.value;
const file_offset = shdr.sh_offset + @as(u64, @intCast(atom_ptr.value));
try elf_file.base.file.?.pwriteAll(code, file_offset);
}
@ -1373,7 +1373,7 @@ fn lowerConst(
local_esym.st_value = 0;
const shdr = elf_file.shdrs.items[output_section_index];
const file_offset = shdr.sh_offset + atom_ptr.value;
const file_offset = shdr.sh_offset + @as(u64, @intCast(atom_ptr.value));
try elf_file.base.file.?.pwriteAll(code, file_offset);
return .{ .ok = sym_index };
@ -1457,7 +1457,7 @@ pub fn updateExports(
const actual_esym_index = global_esym_index & symbol_mask;
const global_esym = &self.global_esyms.items(.elf_sym)[actual_esym_index];
global_esym.st_value = elf_file.symbol(sym_index).value;
global_esym.st_value = @intCast(elf_file.symbol(sym_index).value);
global_esym.st_shndx = esym.st_shndx;
global_esym.st_info = (stb_bits << 4) | stt_bits;
global_esym.st_name = name_off;

View file

@ -68,6 +68,10 @@ fn collectRoots(roots: *std.ArrayList(*Atom), files: []const File.Index, elf_fil
}
fn markSymbol(sym: *Symbol, roots: *std.ArrayList(*Atom), elf_file: *Elf) !void {
if (sym.mergeSubsection(elf_file)) |msub| {
msub.alive = true;
return;
}
const atom = sym.atom(elf_file) orelse return;
if (markAtom(atom)) try roots.append(atom);
}
@ -96,6 +100,10 @@ fn markLive(atom: *Atom, elf_file: *Elf) void {
for (atom.relocs(elf_file)) |rel| {
const target_sym = elf_file.symbol(file.symbol(rel.r_sym()));
if (target_sym.mergeSubsection(elf_file)) |msub| {
msub.alive = true;
continue;
}
const target_atom = target_sym.atom(elf_file) orelse continue;
target_atom.flags.alive = true;
gc_track_live_log.debug("{}marking live atom({d})", .{ track_live_level, target_atom.atom_index });

View file

@ -0,0 +1,285 @@
/// An output SHF_MERGE section: deduplicates constants/strings contributed by
/// input sections into a shared byte pool, one `MergeSubsection` per unique value.
pub const MergeSection = struct {
    /// Offset of this section's name in the linker's string table.
    name_offset: u32 = 0,
    /// ELF section type (sh_type) of the output section.
    type: u32 = 0,
    /// ELF section flags (sh_flags) of the output section.
    flags: u64 = 0,
    /// Index of the output section header this merge section is assigned to.
    output_section_index: u32 = 0,
    /// Byte pool holding every unique interned value, back to back.
    bytes: std.ArrayListUnmanaged(u8) = .{},
    /// Deduplication table: interned value (as a `String` slice into `bytes`)
    /// -> its subsection. Cleared and freed by `finalize`.
    table: std.HashMapUnmanaged(
        String,
        MergeSubsection.Index,
        IndexContext,
        std.hash_map.default_max_load_percentage,
    ) = .{},
    /// Alive subsections in final (sorted) order; populated by `finalize`.
    subsections: std.ArrayListUnmanaged(MergeSubsection.Index) = .{},

    /// Frees all owned storage. The section is unusable afterwards.
    pub fn deinit(msec: *MergeSection, allocator: Allocator) void {
        msec.bytes.deinit(allocator);
        msec.table.deinit(allocator);
        msec.subsections.deinit(allocator);
    }

    /// Section name, resolved from the linker-global string table.
    pub fn name(msec: MergeSection, elf_file: *Elf) [:0]const u8 {
        return elf_file.strings.getAssumeExists(msec.name_offset);
    }

    /// Virtual address of the output section this merge section lives in.
    pub fn address(msec: MergeSection, elf_file: *Elf) i64 {
        const shdr = elf_file.shdrs.items[msec.output_section_index];
        return @intCast(shdr.sh_addr);
    }

    const InsertResult = struct {
        /// True if the value was already interned; `sub` then points at the
        /// existing subsection index slot.
        found_existing: bool,
        /// Location of the (possibly newly appended) value in `bytes`.
        key: String,
        /// Pointer into the table's value slot; caller fills in the
        /// subsection index for newly inserted values.
        sub: *MergeSubsection.Index,
    };

    /// Interns `string` into the byte pool, deduplicating by content.
    /// Lookup hashes the raw bytes (see `IndexAdapter`); only on a miss is the
    /// value appended to `bytes`.
    pub fn insert(msec: *MergeSection, allocator: Allocator, string: []const u8) !InsertResult {
        const gop = try msec.table.getOrPutContextAdapted(
            allocator,
            string,
            IndexAdapter{ .bytes = msec.bytes.items },
            IndexContext{ .bytes = msec.bytes.items },
        );
        if (!gop.found_existing) {
            // Append after the lookup so the adapter saw a stable `bytes` slice.
            const index: u32 = @intCast(msec.bytes.items.len);
            try msec.bytes.appendSlice(allocator, string);
            gop.key_ptr.* = .{ .pos = index, .len = @intCast(string.len) };
        }
        return .{ .found_existing = gop.found_existing, .key = gop.key_ptr.*, .sub = gop.value_ptr };
    }

    /// Like `insert` but interns `string` with a trailing NUL byte appended
    /// (for SHF_STRINGS-style sections). The temporary copy is freed.
    pub fn insertZ(msec: *MergeSection, allocator: Allocator, string: []const u8) !InsertResult {
        const with_null = try allocator.alloc(u8, string.len + 1);
        defer allocator.free(with_null);
        @memcpy(with_null[0..string.len], string);
        with_null[string.len] = 0;
        return msec.insert(allocator, with_null);
    }

    /// Finalizes the merge section and clears hash table.
    /// Sorts all owned subsections.
    /// Only subsections marked alive survive into `subsections`; order is by
    /// ascending alignment, then size, then lexicographic content.
    pub fn finalize(msec: *MergeSection, elf_file: *Elf) !void {
        const gpa = elf_file.base.comp.gpa;
        try msec.subsections.ensureTotalCapacityPrecise(gpa, msec.table.count());

        var it = msec.table.iterator();
        while (it.next()) |entry| {
            const msub = elf_file.mergeSubsection(entry.value_ptr.*);
            if (!msub.alive) continue;
            msec.subsections.appendAssumeCapacity(entry.value_ptr.*);
        }
        // The dedup table is no longer needed once subsections are collected.
        msec.table.clearAndFree(gpa);

        const sortFn = struct {
            pub fn sortFn(ctx: *Elf, lhs: MergeSubsection.Index, rhs: MergeSubsection.Index) bool {
                const lhs_msub = ctx.mergeSubsection(lhs);
                const rhs_msub = ctx.mergeSubsection(rhs);
                if (lhs_msub.alignment.compareStrict(.eq, rhs_msub.alignment)) {
                    if (lhs_msub.size == rhs_msub.size) {
                        return mem.order(u8, lhs_msub.getString(ctx), rhs_msub.getString(ctx)) == .lt;
                    }
                    return lhs_msub.size < rhs_msub.size;
                }
                return lhs_msub.alignment.compareStrict(.lt, rhs_msub.alignment);
            }
        }.sortFn;

        std.mem.sort(MergeSubsection.Index, msec.subsections.items, elf_file, sortFn);
    }

    /// Hash-map context for stored keys: hashes the referenced bytes, but
    /// compares by position only — two stored keys are equal iff they point at
    /// the same pool offset (content equality is handled at insert time).
    pub const IndexContext = struct {
        bytes: []const u8,

        pub fn eql(_: @This(), a: String, b: String) bool {
            return a.pos == b.pos;
        }

        pub fn hash(ctx: @This(), key: String) u64 {
            const str = ctx.bytes[key.pos..][0..key.len];
            return std.hash_map.hashString(str);
        }
    };

    /// Adapter for looking up a raw `[]const u8` against stored `String` keys:
    /// compares and hashes by content, consistent with `IndexContext.hash`.
    pub const IndexAdapter = struct {
        bytes: []const u8,

        pub fn eql(ctx: @This(), a: []const u8, b: String) bool {
            const str = ctx.bytes[b.pos..][0..b.len];
            return mem.eql(u8, a, str);
        }

        pub fn hash(_: @This(), adapted_key: []const u8) u64 {
            return std.hash_map.hashString(adapted_key);
        }
    };

    /// Deliberately unusable: formatting requires an `*Elf`; use `fmt` instead.
    pub fn format(
        msec: MergeSection,
        comptime unused_fmt_string: []const u8,
        options: std.fmt.FormatOptions,
        writer: anytype,
    ) !void {
        _ = msec;
        _ = unused_fmt_string;
        _ = options;
        _ = writer;
        @compileError("do not format MergeSection directly");
    }

    /// Returns a formatter bound to `elf_file` for debug/state dumps.
    pub fn fmt(msec: MergeSection, elf_file: *Elf) std.fmt.Formatter(format2) {
        return .{ .data = .{
            .msec = msec,
            .elf_file = elf_file,
        } };
    }

    const FormatContext = struct {
        msec: MergeSection,
        elf_file: *Elf,
    };

    /// Prints the section header line followed by one line per subsection.
    pub fn format2(
        ctx: FormatContext,
        comptime unused_fmt_string: []const u8,
        options: std.fmt.FormatOptions,
        writer: anytype,
    ) !void {
        _ = options;
        _ = unused_fmt_string;
        const msec = ctx.msec;
        const elf_file = ctx.elf_file;
        try writer.print("{s} : @{x} : type({x}) : flags({x})\n", .{
            msec.name(elf_file),
            msec.address(elf_file),
            msec.type,
            msec.flags,
        });
        for (msec.subsections.items) |index| {
            try writer.print("  {}\n", .{elf_file.mergeSubsection(index).fmt(elf_file)});
        }
    }

    pub const Index = u32;
};
/// One unique interned value within a `MergeSection`: a slice of the owning
/// section's byte pool plus its placement (offset, alignment) in the output.
pub const MergeSubsection = struct {
    /// Offset of this subsection within the output merge section.
    value: i64 = 0,
    /// Owning merge section.
    merge_section_index: MergeSection.Index = 0,
    /// Start of this subsection's bytes in the owning section's pool.
    string_index: u32 = 0,
    /// Number of bytes.
    size: u32 = 0,
    alignment: Atom.Alignment = .@"1",
    entsize: u32 = 0,
    /// Set during garbage collection; dead subsections are dropped at finalize.
    alive: bool = false,

    /// Virtual address: owning section's base address plus this offset.
    pub fn address(msub: MergeSubsection, elf_file: *Elf) i64 {
        const base = msub.mergeSection(elf_file).address(elf_file);
        return base + msub.value;
    }

    /// Resolves the owning `MergeSection`.
    pub fn mergeSection(msub: MergeSubsection, elf_file: *Elf) *MergeSection {
        return elf_file.mergeSection(msub.merge_section_index);
    }

    /// The interned bytes backing this subsection.
    pub fn getString(msub: MergeSubsection, elf_file: *Elf) []const u8 {
        const pool = msub.mergeSection(elf_file).bytes.items;
        return pool[msub.string_index .. msub.string_index + msub.size];
    }

    /// Deliberately unusable: formatting requires an `*Elf`; use `fmt` instead.
    pub fn format(
        msub: MergeSubsection,
        comptime unused_fmt_string: []const u8,
        options: std.fmt.FormatOptions,
        writer: anytype,
    ) !void {
        _ = msub;
        _ = unused_fmt_string;
        _ = options;
        _ = writer;
        @compileError("do not format MergeSubsection directly");
    }

    /// Returns a formatter bound to `elf_file` for debug/state dumps.
    pub fn fmt(msub: MergeSubsection, elf_file: *Elf) std.fmt.Formatter(format2) {
        return .{ .data = .{ .msub = msub, .elf_file = elf_file } };
    }

    const FormatContext = struct {
        msub: MergeSubsection,
        elf_file: *Elf,
    };

    /// Prints address, alignment and size; dead subsections get a trailing tag.
    pub fn format2(
        ctx: FormatContext,
        comptime unused_fmt_string: []const u8,
        options: std.fmt.FormatOptions,
        writer: anytype,
    ) !void {
        _ = unused_fmt_string;
        _ = options;
        const sub = ctx.msub;
        const ef = ctx.elf_file;
        try writer.print("@{x} : align({x}) : size({x})", .{
            sub.address(ef),
            sub.alignment,
            sub.size,
        });
        if (!sub.alive) try writer.writeAll(" : [*]");
    }

    pub const Index = u32;
};
/// Per-input-section bookkeeping for an SHF_MERGE input section: the original
/// bytes split into values, their offsets, and the output subsection each
/// value was mapped to.
pub const InputMergeSection = struct {
    /// Output merge section this input section feeds into.
    merge_section_index: MergeSection.Index = 0,
    /// Atom this input section originated from.
    atom_index: Atom.Index = 0,
    /// Start offset of each value within the input section (ascending).
    offsets: std.ArrayListUnmanaged(u32) = .{},
    /// Output subsection for each value; parallel to `offsets`.
    subsections: std.ArrayListUnmanaged(MergeSubsection.Index) = .{},
    /// Copy of the input section's raw bytes (freed via `clearAndFree`).
    bytes: std.ArrayListUnmanaged(u8) = .{},
    /// Position/length of each value within `bytes`; parallel to `offsets`.
    strings: std.ArrayListUnmanaged(String) = .{},

    /// Frees all owned storage.
    pub fn deinit(imsec: *InputMergeSection, allocator: Allocator) void {
        imsec.offsets.deinit(allocator);
        imsec.subsections.deinit(allocator);
        imsec.bytes.deinit(allocator);
        imsec.strings.deinit(allocator);
    }

    /// Releases the raw byte copy once values have been interned; the
    /// offset/subsection maps are kept for relocation lookups.
    pub fn clearAndFree(imsec: *InputMergeSection, allocator: Allocator) void {
        imsec.bytes.clearAndFree(allocator);
        // TODO: imsec.strings.clearAndFree(allocator);
    }

    /// Maps `offset` within the input section to the subsection containing it,
    /// returning the subsection index and the offset relative to its start.
    /// Returns null when no recorded value covers `offset` (including the
    /// previously-crashing cases of an empty section or an offset before the
    /// first value). Uses binary search over the ascending `offsets` list.
    pub fn findSubsection(imsec: InputMergeSection, offset: u32) ?struct { MergeSubsection.Index, u32 } {
        const offs = imsec.offsets.items;
        if (offs.len == 0) return null;
        // Binary search: find the first entry strictly greater than `offset`;
        // the candidate subsection is the one just before it.
        var lo: usize = 0;
        var hi: usize = offs.len;
        while (lo < hi) {
            const mid = lo + (hi - lo) / 2;
            if (offs[mid] <= offset) lo = mid + 1 else hi = mid;
        }
        if (lo == 0) return null; // `offset` precedes the first value.
        const index = lo - 1;
        const rel = offset - offs[index];
        // For the last value there is no following offset to bound it, so
        // bound by the value's own length (matches the original linear scan).
        if (index == offs.len - 1 and rel >= imsec.strings.items[index].len) return null;
        return .{ imsec.subsections.items[index], rel };
    }

    /// Records one value: appends its bytes to the local pool and remembers
    /// its position/length. Offsets/subsections are filled in by the caller.
    pub fn insert(imsec: *InputMergeSection, allocator: Allocator, string: []const u8) !void {
        const index: u32 = @intCast(imsec.bytes.items.len);
        try imsec.bytes.appendSlice(allocator, string);
        try imsec.strings.append(allocator, .{ .pos = index, .len = @intCast(string.len) });
    }

    pub const Index = u32;
};
const String = struct { pos: u32, len: u32 };
const assert = std.debug.assert;
const mem = std.mem;
const std = @import("std");
const Allocator = mem.Allocator;
const Atom = @import("Atom.zig");
const Elf = @import("../Elf.zig");

View file

@ -34,12 +34,16 @@ pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation, module_obj_path: ?[]co
// First, we flush relocatable object file generated with our backends.
if (elf_file.zigObjectPtr()) |zig_object| {
zig_object.resolveSymbols(elf_file);
try elf_file.addCommentString();
try elf_file.finalizeMergeSections();
zig_object.claimUnresolvedObject(elf_file);
try elf_file.initMergeSections();
try elf_file.initSymtab();
try elf_file.initShStrtab();
try elf_file.sortShdrs();
try zig_object.addAtomsToRelaSections(elf_file);
try elf_file.updateMergeSectionSizes();
try updateSectionSizes(elf_file);
try allocateAllocSections(elf_file);
@ -49,6 +53,7 @@ pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation, module_obj_path: ?[]co
state_log.debug("{}", .{elf_file.dumpState()});
}
try elf_file.writeMergeSections();
try writeSyntheticSections(elf_file);
try elf_file.writeShdrTable();
try elf_file.writeElfHeader();
@ -179,9 +184,13 @@ pub fn flushObject(elf_file: *Elf, comp: *Compilation, module_obj_path: ?[]const
// input Object files.
elf_file.resolveSymbols();
elf_file.markEhFrameAtomsDead();
try elf_file.resolveMergeSections();
try elf_file.addCommentString();
try elf_file.finalizeMergeSections();
claimUnresolved(elf_file);
try initSections(elf_file);
try elf_file.initMergeSections();
try elf_file.sortShdrs();
if (elf_file.zigObjectPtr()) |zig_object| {
try zig_object.addAtomsToRelaSections(elf_file);
@ -191,6 +200,7 @@ pub fn flushObject(elf_file: *Elf, comp: *Compilation, module_obj_path: ?[]const
try object.addAtomsToOutputSections(elf_file);
try object.addAtomsToRelaSections(elf_file);
}
try elf_file.updateMergeSectionSizes();
try updateSectionSizes(elf_file);
try allocateAllocSections(elf_file);
@ -201,6 +211,7 @@ pub fn flushObject(elf_file: *Elf, comp: *Compilation, module_obj_path: ?[]const
}
try writeAtoms(elf_file);
try elf_file.writeMergeSections();
try writeSyntheticSections(elf_file);
try elf_file.writeShdrTable();
try elf_file.writeElfHeader();
@ -328,7 +339,7 @@ fn updateSectionSizes(elf_file: *Elf) !void {
if (!atom_ptr.flags.alive) continue;
const offset = atom_ptr.alignment.forward(shdr.sh_size);
const padding = offset - shdr.sh_size;
atom_ptr.value = offset;
atom_ptr.value = @intCast(offset);
shdr.sh_size += padding + atom_ptr.size;
shdr.sh_addralign = @max(shdr.sh_addralign, atom_ptr.alignment.toByteUnits() orelse 1);
}
@ -434,7 +445,7 @@ fn writeAtoms(elf_file: *Elf) !void {
const atom_ptr = elf_file.atom(atom_index).?;
assert(atom_ptr.flags.alive);
const offset = math.cast(usize, atom_ptr.value - shdr.sh_addr - base_offset) orelse
const offset = math.cast(usize, atom_ptr.value - @as(i64, @intCast(shdr.sh_addr - base_offset))) orelse
return error.Overflow;
const size = math.cast(usize, atom_ptr.size) orelse return error.Overflow;

View file

@ -259,11 +259,7 @@ pub const ZigGotSection = struct {
if (elf_file.isEffectivelyDynLib() or (elf_file.base.isExe() and comp.config.pie)) {
zig_got.flags.needs_rela = true;
}
if (symbol.extra(elf_file)) |extra| {
var new_extra = extra;
new_extra.zig_got = index;
symbol.setExtra(new_extra, elf_file);
} else try symbol.addExtra(.{ .zig_got = index }, elf_file);
try symbol.addExtra(.{ .zig_got = index }, elf_file);
return index;
}
@ -274,11 +270,11 @@ pub const ZigGotSection = struct {
return shdr.sh_offset + @as(u64, entry_size) * index;
}
pub fn entryAddress(zig_got: ZigGotSection, index: Index, elf_file: *Elf) u64 {
pub fn entryAddress(zig_got: ZigGotSection, index: Index, elf_file: *Elf) i64 {
_ = zig_got;
const entry_size = elf_file.archPtrWidthBytes();
const shdr = elf_file.shdrs.items[elf_file.zig_got_section_index.?];
return shdr.sh_addr + @as(u64, entry_size) * index;
return @as(i64, @intCast(shdr.sh_addr)) + entry_size * index;
}
pub fn size(zig_got: ZigGotSection, elf_file: *Elf) usize {
@ -295,23 +291,23 @@ pub const ZigGotSection = struct {
const target = elf_file.getTarget();
const endian = target.cpu.arch.endian();
const off = zig_got.entryOffset(index, elf_file);
const vaddr = zig_got.entryAddress(index, elf_file);
const vaddr: u64 = @intCast(zig_got.entryAddress(index, elf_file));
const entry = zig_got.entries.items[index];
const value = elf_file.symbol(entry).address(.{}, elf_file);
switch (entry_size) {
2 => {
var buf: [2]u8 = undefined;
std.mem.writeInt(u16, &buf, @as(u16, @intCast(value)), endian);
std.mem.writeInt(u16, &buf, @intCast(value), endian);
try elf_file.base.file.?.pwriteAll(&buf, off);
},
4 => {
var buf: [4]u8 = undefined;
std.mem.writeInt(u32, &buf, @as(u32, @intCast(value)), endian);
std.mem.writeInt(u32, &buf, @intCast(value), endian);
try elf_file.base.file.?.pwriteAll(&buf, off);
},
8 => {
var buf: [8]u8 = undefined;
std.mem.writeInt(u64, &buf, value, endian);
std.mem.writeInt(u64, &buf, @intCast(value), endian);
try elf_file.base.file.?.pwriteAll(&buf, off);
if (elf_file.base.child_pid) |pid| {
@ -360,9 +356,9 @@ pub const ZigGotSection = struct {
const symbol = elf_file.symbol(entry);
const offset = symbol.zigGotAddress(elf_file);
elf_file.addRelaDynAssumeCapacity(.{
.offset = offset,
.offset = @intCast(offset),
.type = relocation.encode(.rel, cpu_arch),
.addend = @intCast(symbol.address(.{ .plt = false }, elf_file)),
.addend = symbol.address(.{ .plt = false }, elf_file),
});
}
}
@ -390,7 +386,7 @@ pub const ZigGotSection = struct {
.st_info = elf.STT_OBJECT,
.st_other = 0,
.st_shndx = @intCast(elf_file.zig_got_section_index.?),
.st_value = st_value,
.st_value = @intCast(st_value),
.st_size = st_size,
};
}
@ -461,10 +457,10 @@ pub const GotSection = struct {
};
}
pub fn address(entry: Entry, elf_file: *Elf) u64 {
const ptr_bytes = @as(u64, elf_file.archPtrWidthBytes());
pub fn address(entry: Entry, elf_file: *Elf) i64 {
const ptr_bytes = elf_file.archPtrWidthBytes();
const shdr = &elf_file.shdrs.items[elf_file.got_section_index.?];
return shdr.sh_addr + @as(u64, entry.cell_index) * ptr_bytes;
return @as(i64, @intCast(shdr.sh_addr)) + entry.cell_index * ptr_bytes;
}
};
@ -499,11 +495,7 @@ pub const GotSection = struct {
{
got.flags.needs_rela = true;
}
if (symbol.extra(elf_file)) |extra| {
var new_extra = extra;
new_extra.got = index;
symbol.setExtra(new_extra, elf_file);
} else try symbol.addExtra(.{ .got = index }, elf_file);
try symbol.addExtra(.{ .got = index }, elf_file);
return index;
}
@ -529,11 +521,7 @@ pub const GotSection = struct {
const symbol = elf_file.symbol(sym_index);
symbol.flags.has_tlsgd = true;
if (symbol.flags.import or elf_file.isEffectivelyDynLib()) got.flags.needs_rela = true;
if (symbol.extra(elf_file)) |extra| {
var new_extra = extra;
new_extra.tlsgd = index;
symbol.setExtra(new_extra, elf_file);
} else try symbol.addExtra(.{ .tlsgd = index }, elf_file);
try symbol.addExtra(.{ .tlsgd = index }, elf_file);
}
pub fn addGotTpSymbol(got: *GotSection, sym_index: Symbol.Index, elf_file: *Elf) !void {
@ -546,11 +534,7 @@ pub const GotSection = struct {
const symbol = elf_file.symbol(sym_index);
symbol.flags.has_gottp = true;
if (symbol.flags.import or elf_file.isEffectivelyDynLib()) got.flags.needs_rela = true;
if (symbol.extra(elf_file)) |extra| {
var new_extra = extra;
new_extra.gottp = index;
symbol.setExtra(new_extra, elf_file);
} else try symbol.addExtra(.{ .gottp = index }, elf_file);
try symbol.addExtra(.{ .gottp = index }, elf_file);
}
pub fn addTlsDescSymbol(got: *GotSection, sym_index: Symbol.Index, elf_file: *Elf) !void {
@ -563,11 +547,7 @@ pub const GotSection = struct {
const symbol = elf_file.symbol(sym_index);
symbol.flags.has_tlsdesc = true;
got.flags.needs_rela = true;
if (symbol.extra(elf_file)) |extra| {
var new_extra = extra;
new_extra.tlsdesc = index;
symbol.setExtra(new_extra, elf_file);
} else try symbol.addExtra(.{ .tlsdesc = index }, elf_file);
try symbol.addExtra(.{ .tlsdesc = index }, elf_file);
}
pub fn size(got: GotSection, elf_file: *Elf) usize {
@ -628,8 +608,7 @@ pub const GotSection = struct {
0;
try writeInt(offset, elf_file, writer);
} else {
const offset = @as(i64, @intCast(symbol.?.address(.{}, elf_file))) -
@as(i64, @intCast(elf_file.tpAddress()));
const offset = symbol.?.address(.{}, elf_file) - elf_file.tpAddress();
try writeInt(offset, elf_file, writer);
}
},
@ -640,7 +619,7 @@ pub const GotSection = struct {
} else {
try writeInt(0, elf_file, writer);
const offset = if (apply_relocs)
@as(i64, @intCast(symbol.?.address(.{}, elf_file))) - @as(i64, @intCast(elf_file.tlsAddress()))
symbol.?.address(.{}, elf_file) - elf_file.tlsAddress()
else
0;
try writeInt(offset, elf_file, writer);
@ -666,7 +645,7 @@ pub const GotSection = struct {
switch (entry.tag) {
.got => {
const offset = symbol.?.gotAddress(elf_file);
const offset: u64 = @intCast(symbol.?.gotAddress(elf_file));
if (symbol.?.flags.import) {
elf_file.addRelaDynAssumeCapacity(.{
.offset = offset,
@ -679,7 +658,7 @@ pub const GotSection = struct {
elf_file.addRelaDynAssumeCapacity(.{
.offset = offset,
.type = relocation.encode(.irel, cpu_arch),
.addend = @intCast(symbol.?.address(.{ .plt = false }, elf_file)),
.addend = symbol.?.address(.{ .plt = false }, elf_file),
});
continue;
}
@ -689,14 +668,14 @@ pub const GotSection = struct {
elf_file.addRelaDynAssumeCapacity(.{
.offset = offset,
.type = relocation.encode(.rel, cpu_arch),
.addend = @intCast(symbol.?.address(.{ .plt = false }, elf_file)),
.addend = symbol.?.address(.{ .plt = false }, elf_file),
});
}
},
.tlsld => {
if (is_dyn_lib) {
const offset = entry.address(elf_file);
const offset: u64 = @intCast(entry.address(elf_file));
elf_file.addRelaDynAssumeCapacity(.{
.offset = offset,
.type = relocation.encode(.dtpmod, cpu_arch),
@ -705,7 +684,7 @@ pub const GotSection = struct {
},
.tlsgd => {
const offset = symbol.?.tlsGdAddress(elf_file);
const offset: u64 = @intCast(symbol.?.tlsGdAddress(elf_file));
if (symbol.?.flags.import) {
elf_file.addRelaDynAssumeCapacity(.{
.offset = offset,
@ -727,7 +706,7 @@ pub const GotSection = struct {
},
.gottp => {
const offset = symbol.?.gotTpAddress(elf_file);
const offset: u64 = @intCast(symbol.?.gotTpAddress(elf_file));
if (symbol.?.flags.import) {
elf_file.addRelaDynAssumeCapacity(.{
.offset = offset,
@ -738,18 +717,18 @@ pub const GotSection = struct {
elf_file.addRelaDynAssumeCapacity(.{
.offset = offset,
.type = relocation.encode(.tpoff, cpu_arch),
.addend = @intCast(symbol.?.address(.{}, elf_file) - elf_file.tlsAddress()),
.addend = symbol.?.address(.{}, elf_file) - elf_file.tlsAddress(),
});
}
},
.tlsdesc => {
const offset = symbol.?.tlsDescAddress(elf_file);
const offset: u64 = @intCast(symbol.?.tlsDescAddress(elf_file));
elf_file.addRelaDynAssumeCapacity(.{
.offset = offset,
.sym = if (symbol.?.flags.import) extra.?.dynamic else 0,
.type = relocation.encode(.tlsdesc, cpu_arch),
.addend = if (symbol.?.flags.import) 0 else @intCast(symbol.?.address(.{}, elf_file) - elf_file.tlsAddress()),
.addend = if (symbol.?.flags.import) 0 else symbol.?.address(.{}, elf_file) - elf_file.tlsAddress(),
});
},
}
@ -826,7 +805,7 @@ pub const GotSection = struct {
.st_info = elf.STT_OBJECT,
.st_other = 0,
.st_shndx = @intCast(elf_file.got_section_index.?),
.st_value = st_value,
.st_value = @intCast(st_value),
.st_size = st_size,
};
}
@ -877,11 +856,7 @@ pub const PltSection = struct {
const index = @as(u32, @intCast(plt.symbols.items.len));
const symbol = elf_file.symbol(sym_index);
symbol.flags.has_plt = true;
if (symbol.extra(elf_file)) |extra| {
var new_extra = extra;
new_extra.plt = index;
symbol.setExtra(new_extra, elf_file);
} else try symbol.addExtra(.{ .plt = index }, elf_file);
try symbol.addExtra(.{ .plt = index }, elf_file);
try plt.symbols.append(gpa, sym_index);
}
@ -924,7 +899,7 @@ pub const PltSection = struct {
const sym = elf_file.symbol(sym_index);
assert(sym.flags.import);
const extra = sym.extra(elf_file).?;
const r_offset = sym.gotPltAddress(elf_file);
const r_offset: u64 = @intCast(sym.gotPltAddress(elf_file));
const r_sym: u64 = extra.dynamic;
const r_type = relocation.encode(.jump_slot, cpu_arch);
elf_file.rela_plt.appendAssumeCapacity(.{
@ -960,7 +935,7 @@ pub const PltSection = struct {
.st_info = elf.STT_FUNC,
.st_other = 0,
.st_shndx = @intCast(elf_file.plt_section_index.?),
.st_value = sym.pltAddress(elf_file),
.st_value = @intCast(sym.pltAddress(elf_file)),
.st_size = entrySize(cpu_arch),
};
}
@ -1033,13 +1008,13 @@ pub const PltSection = struct {
const aarch64 = struct {
fn write(plt: PltSection, elf_file: *Elf, writer: anytype) !void {
{
const plt_addr = elf_file.shdrs.items[elf_file.plt_section_index.?].sh_addr;
const got_plt_addr = elf_file.shdrs.items[elf_file.got_plt_section_index.?].sh_addr;
const plt_addr: i64 = @intCast(elf_file.shdrs.items[elf_file.plt_section_index.?].sh_addr);
const got_plt_addr: i64 = @intCast(elf_file.shdrs.items[elf_file.got_plt_section_index.?].sh_addr);
// TODO: relax if possible
// .got.plt[2]
const pages = try aarch64_util.calcNumberOfPages(plt_addr + 4, got_plt_addr + 16);
const ldr_off = try math.divExact(u12, @truncate(got_plt_addr + 16), 8);
const add_off: u12 = @truncate(got_plt_addr + 16);
const ldr_off = try math.divExact(u12, @truncate(@as(u64, @bitCast(got_plt_addr + 16))), 8);
const add_off: u12 = @truncate(@as(u64, @bitCast(got_plt_addr + 16)));
const preamble = &[_]Instruction{
Instruction.stp(
@ -1067,8 +1042,8 @@ pub const PltSection = struct {
const target_addr = sym.gotPltAddress(elf_file);
const source_addr = sym.pltAddress(elf_file);
const pages = try aarch64_util.calcNumberOfPages(source_addr, target_addr);
const ldr_off = try math.divExact(u12, @truncate(target_addr), 8);
const add_off: u12 = @truncate(target_addr);
const ldr_off = try math.divExact(u12, @truncate(@as(u64, @bitCast(target_addr))), 8);
const add_off: u12 = @truncate(@as(u64, @bitCast(target_addr)));
const insts = &[_]Instruction{
Instruction.adrp(.x16, pages),
Instruction.ldr(.x17, .x16, Instruction.LoadStoreOffset.imm(ldr_off)),
@ -1101,7 +1076,7 @@ pub const GotPltSection = struct {
{
// [0]: _DYNAMIC
const symbol = elf_file.symbol(elf_file.dynamic_index.?);
try writer.writeInt(u64, symbol.address(.{}, elf_file), .little);
try writer.writeInt(u64, @intCast(symbol.address(.{}, elf_file)), .little);
}
// [1]: 0x0
// [2]: 0x0
@ -1132,11 +1107,7 @@ pub const PltGotSection = struct {
const symbol = elf_file.symbol(sym_index);
symbol.flags.has_plt = true;
symbol.flags.has_got = true;
if (symbol.extra(elf_file)) |extra| {
var new_extra = extra;
new_extra.plt_got = index;
symbol.setExtra(new_extra, elf_file);
} else try symbol.addExtra(.{ .plt_got = index }, elf_file);
try symbol.addExtra(.{ .plt_got = index }, elf_file);
try plt_got.symbols.append(gpa, sym_index);
}
@ -1181,7 +1152,7 @@ pub const PltGotSection = struct {
.st_info = elf.STT_FUNC,
.st_other = 0,
.st_shndx = @intCast(elf_file.plt_got_section_index.?),
.st_value = sym.pltGotAddress(elf_file),
.st_value = @intCast(sym.pltGotAddress(elf_file)),
.st_size = 16,
};
}
@ -1212,7 +1183,7 @@ pub const PltGotSection = struct {
const target_addr = sym.gotAddress(elf_file);
const source_addr = sym.pltGotAddress(elf_file);
const pages = try aarch64_util.calcNumberOfPages(source_addr, target_addr);
const off = try math.divExact(u12, @truncate(target_addr), 8);
const off = try math.divExact(u12, @truncate(@as(u64, @bitCast(target_addr))), 8);
const insts = &[_]Instruction{
Instruction.adrp(.x16, pages),
Instruction.ldr(.x17, .x16, Instruction.LoadStoreOffset.imm(off)),
@ -1248,12 +1219,7 @@ pub const CopyRelSection = struct {
symbol.flags.@"export" = true;
symbol.flags.has_copy_rel = true;
symbol.flags.weak = false;
if (symbol.extra(elf_file)) |extra| {
var new_extra = extra;
new_extra.copy_rel = index;
symbol.setExtra(new_extra, elf_file);
} else try symbol.addExtra(.{ .copy_rel = index }, elf_file);
try symbol.addExtra(.{ .copy_rel = index }, elf_file);
try copy_rel.symbols.append(gpa, sym_index);
const shared_object = symbol.file(elf_file).?.shared_object;
@ -1280,9 +1246,9 @@ pub const CopyRelSection = struct {
const symbol = elf_file.symbol(sym_index);
const shared_object = symbol.file(elf_file).?.shared_object;
const alignment = try symbol.dsoAlignment(elf_file);
symbol.value = mem.alignForward(u64, shdr.sh_size, alignment);
symbol.value = @intCast(mem.alignForward(u64, shdr.sh_size, alignment));
shdr.sh_addralign = @max(shdr.sh_addralign, alignment);
shdr.sh_size = symbol.value + symbol.elfSym(elf_file).st_size;
shdr.sh_size = @as(u64, @intCast(symbol.value)) + symbol.elfSym(elf_file).st_size;
const aliases = shared_object.symbolAliases(sym_index, elf_file);
for (aliases) |alias| {
@ -1303,7 +1269,7 @@ pub const CopyRelSection = struct {
assert(sym.flags.import and sym.flags.has_copy_rel);
const extra = sym.extra(elf_file).?;
elf_file.addRelaDynAssumeCapacity(.{
.offset = sym.address(.{}, elf_file),
.offset = @intCast(sym.address(.{}, elf_file)),
.sym = extra.dynamic,
.type = relocation.encode(.copy, cpu_arch),
});
@ -1335,11 +1301,7 @@ pub const DynsymSection = struct {
const index = @as(u32, @intCast(dynsym.entries.items.len + 1));
const sym = elf_file.symbol(sym_index);
sym.flags.has_dynamic = true;
if (sym.extra(elf_file)) |extra| {
var new_extra = extra;
new_extra.dynamic = index;
sym.setExtra(new_extra, elf_file);
} else try sym.addExtra(.{ .dynamic = index }, elf_file);
try sym.addExtra(.{ .dynamic = index }, elf_file);
const off = try elf_file.insertDynString(sym.name(elf_file));
try dynsym.entries.append(gpa, .{ .symbol_index = sym_index, .off = off });
}

View file

@ -7,7 +7,7 @@ pub fn createThunks(shndx: u32, elf_file: *Elf) !void {
assert(atoms.len > 0);
for (atoms) |atom_index| {
elf_file.atom(atom_index).?.value = @bitCast(@as(i64, -1));
elf_file.atom(atom_index).?.value = -1;
}
var i: usize = 0;
@ -22,7 +22,8 @@ pub fn createThunks(shndx: u32, elf_file: *Elf) !void {
const atom_index = atoms[i];
const atom = elf_file.atom(atom_index).?;
assert(atom.flags.alive);
if (atom.alignment.forward(shdr.sh_size) - start_atom.value >= max_distance) break;
if (@as(i64, @intCast(atom.alignment.forward(shdr.sh_size))) - start_atom.value >= max_distance)
break;
atom.value = try advance(shdr, atom.size, atom.alignment);
}
@ -50,7 +51,8 @@ pub fn createThunks(shndx: u32, elf_file: *Elf) !void {
};
try thunk.symbols.put(gpa, target, {});
}
atom.thunk_index = thunk_index;
try atom.addExtra(.{ .thunk = thunk_index }, elf_file);
atom.flags.thunk = true;
}
thunk.value = try advance(shdr, thunk.size(elf_file), Atom.Alignment.fromNonzeroByteUnits(2));
@ -59,12 +61,12 @@ pub fn createThunks(shndx: u32, elf_file: *Elf) !void {
}
}
fn advance(shdr: *elf.Elf64_Shdr, size: u64, alignment: Atom.Alignment) !u64 {
fn advance(shdr: *elf.Elf64_Shdr, size: u64, alignment: Atom.Alignment) !i64 {
const offset = alignment.forward(shdr.sh_size);
const padding = offset - shdr.sh_size;
shdr.sh_size += padding + size;
shdr.sh_addralign = @max(shdr.sh_addralign, alignment.toByteUnits() orelse 1);
return offset;
return @intCast(offset);
}
/// A branch will need an extender if its target is larger than
@ -78,7 +80,7 @@ fn maxAllowedDistance(cpu_arch: std.Target.Cpu.Arch) u32 {
}
pub const Thunk = struct {
value: u64 = 0,
value: i64 = 0,
output_section_index: u32 = 0,
symbols: std.AutoArrayHashMapUnmanaged(Symbol.Index, void) = .{},
output_symtab_ctx: Elf.SymtabCtx = .{},
@ -92,14 +94,14 @@ pub const Thunk = struct {
return thunk.symbols.keys().len * trampolineSize(cpu_arch);
}
pub fn address(thunk: Thunk, elf_file: *Elf) u64 {
pub fn address(thunk: Thunk, elf_file: *Elf) i64 {
const shdr = elf_file.shdrs.items[thunk.output_section_index];
return shdr.sh_addr + thunk.value;
return @as(i64, @intCast(shdr.sh_addr)) + thunk.value;
}
pub fn targetAddress(thunk: Thunk, sym_index: Symbol.Index, elf_file: *Elf) u64 {
pub fn targetAddress(thunk: Thunk, sym_index: Symbol.Index, elf_file: *Elf) i64 {
const cpu_arch = elf_file.getTarget().cpu.arch;
return thunk.address(elf_file) + thunk.symbols.getIndex(sym_index).? * trampolineSize(cpu_arch);
return thunk.address(elf_file) + @as(i64, @intCast(thunk.symbols.getIndex(sym_index).? * trampolineSize(cpu_arch)));
}
pub fn write(thunk: Thunk, elf_file: *Elf, writer: anytype) !void {
@ -131,7 +133,7 @@ pub const Thunk = struct {
.st_info = elf.STT_FUNC,
.st_other = 0,
.st_shndx = @intCast(thunk.output_section_index),
.st_value = thunk.targetAddress(sym_index, elf_file),
.st_value = @intCast(thunk.targetAddress(sym_index, elf_file)),
.st_size = trampolineSize(cpu_arch),
};
}
@ -204,9 +206,9 @@ const aarch64 = struct {
if (target.flags.has_plt) return false;
if (atom.output_section_index != target.output_section_index) return false;
const target_atom = target.atom(elf_file).?;
if (target_atom.value == @as(u64, @bitCast(@as(i64, -1)))) return false;
const saddr = @as(i64, @intCast(atom.address(elf_file) + rel.r_offset));
const taddr: i64 = @intCast(target.address(.{}, elf_file));
if (target_atom.value == -1) return false;
const saddr = atom.address(elf_file) + @as(i64, @intCast(rel.r_offset));
const taddr = target.address(.{}, elf_file);
_ = math.cast(i28, taddr + rel.r_addend - saddr) orelse return false;
return true;
}
@ -214,11 +216,11 @@ const aarch64 = struct {
fn write(thunk: Thunk, elf_file: *Elf, writer: anytype) !void {
for (thunk.symbols.keys(), 0..) |sym_index, i| {
const sym = elf_file.symbol(sym_index);
const saddr = thunk.address(elf_file) + i * trampoline_size;
const saddr = thunk.address(elf_file) + @as(i64, @intCast(i * trampoline_size));
const taddr = sym.address(.{}, elf_file);
const pages = try util.calcNumberOfPages(saddr, taddr);
try writer.writeInt(u32, Instruction.adrp(.x16, pages).toU32(), .little);
const off: u12 = @truncate(taddr);
const off: u12 = @truncate(@as(u64, @bitCast(taddr)));
try writer.writeInt(u32, Instruction.add(.x16, .x16, off, false).toU32(), .little);
try writer.writeInt(u32, Instruction.br(.x16).toU32(), .little);
}

View file

@ -797,6 +797,14 @@ fn dumpArgv(self: *MachO, comp: *Compilation) !void {
try argv.appendSlice(&.{ "-e", entry_name });
}
try argv.append("-o");
try argv.append(full_out_path);
if (self.base.isDynLib() and self.base.allow_shlib_undefined) {
try argv.append("-undefined");
try argv.append("dynamic_lookup");
}
for (comp.objects) |obj| {
// TODO: verify this
if (obj.must_link) {
@ -813,19 +821,11 @@ fn dumpArgv(self: *MachO, comp: *Compilation) !void {
try argv.append(p);
}
if (comp.compiler_rt_lib) |lib| try argv.append(lib.full_object_path);
if (comp.compiler_rt_obj) |obj| try argv.append(obj.full_object_path);
if (comp.config.link_libcpp) {
try argv.append(comp.libcxxabi_static_lib.?.full_object_path);
try argv.append(comp.libcxx_static_lib.?.full_object_path);
for (self.lib_dirs) |lib_dir| {
const arg = try std.fmt.allocPrint(arena, "-L{s}", .{lib_dir});
try argv.append(arg);
}
try argv.append("-o");
try argv.append(full_out_path);
try argv.append("-lSystem");
for (comp.system_libs.keys()) |l_name| {
const info = comp.system_libs.get(l_name).?;
const arg = if (info.needed)
@ -837,9 +837,9 @@ fn dumpArgv(self: *MachO, comp: *Compilation) !void {
try argv.append(arg);
}
for (self.lib_dirs) |lib_dir| {
const arg = try std.fmt.allocPrint(arena, "-L{s}", .{lib_dir});
try argv.append(arg);
for (self.framework_dirs) |f_dir| {
try argv.append("-F");
try argv.append(f_dir);
}
for (self.frameworks) |framework| {
@ -853,15 +853,15 @@ fn dumpArgv(self: *MachO, comp: *Compilation) !void {
try argv.append(arg);
}
for (self.framework_dirs) |f_dir| {
try argv.append("-F");
try argv.append(f_dir);
if (comp.config.link_libcpp) {
try argv.append(comp.libcxxabi_static_lib.?.full_object_path);
try argv.append(comp.libcxx_static_lib.?.full_object_path);
}
if (self.base.isDynLib() and self.base.allow_shlib_undefined) {
try argv.append("-undefined");
try argv.append("dynamic_lookup");
}
try argv.append("-lSystem");
if (comp.compiler_rt_lib) |lib| try argv.append(lib.full_object_path);
if (comp.compiler_rt_obj) |obj| try argv.append(obj.full_object_path);
}
Compilation.dump_argv(argv.items);

View file

@ -70,6 +70,7 @@ pub fn parse(self: *Archive, macho_file: *MachO, path: []const u8, handle_index:
.archive = .{
.path = try gpa.dupe(u8, path),
.offset = pos,
.size = hdr_size,
},
.path = try gpa.dupe(u8, name),
.file_handle = handle_index,

View file

@ -770,7 +770,7 @@ fn resolveRelocInner(
};
break :target math.cast(u64, target) orelse return error.Overflow;
};
const pages = @as(u21, @bitCast(try aarch64.calcNumberOfPages(source, target)));
const pages = @as(u21, @bitCast(try aarch64.calcNumberOfPages(@intCast(source), @intCast(target))));
aarch64.writeAdrpInst(pages, code[rel_offset..][0..4]);
},

View file

@ -34,6 +34,7 @@ output_ar_state: Archive.ArState = .{},
const InArchive = struct {
path: []const u8,
offset: u64,
size: u32,
};
pub fn isObject(path: []const u8) !bool {
@ -1330,14 +1331,16 @@ pub fn updateArSymtab(self: Object, ar_symtab: *Archive.ArSymtab, macho_file: *M
}
pub fn updateArSize(self: *Object, macho_file: *MachO) !void {
const file = macho_file.getFileHandle(self.file_handle);
const size = (try file.stat()).size;
self.output_ar_state.size = size;
self.output_ar_state.size = if (self.archive) |ar| ar.size else size: {
const file = macho_file.getFileHandle(self.file_handle);
break :size (try file.stat()).size;
};
}
pub fn writeAr(self: Object, ar_format: Archive.Format, macho_file: *MachO, writer: anytype) !void {
// Header
const size = std.math.cast(usize, self.output_ar_state.size) orelse return error.Overflow;
const offset: u64 = if (self.archive) |ar| ar.offset else 0;
try Archive.writeHeader(self.path, size, ar_format, writer);
// Data
const file = macho_file.getFileHandle(self.file_handle);
@ -1345,7 +1348,7 @@ pub fn writeAr(self: Object, ar_format: Archive.Format, macho_file: *MachO, writ
const gpa = macho_file.base.comp.gpa;
const data = try gpa.alloc(u8, size);
defer gpa.free(data);
const amt = try file.preadAll(data, 0);
const amt = try file.preadAll(data, offset);
if (amt != size) return error.InputOutput;
try writer.writeAll(data);
}
@ -1385,7 +1388,6 @@ pub fn calcSymtabSize(self: *Object, macho_file: *MachO) !void {
pub fn calcStabsSize(self: *Object, macho_file: *MachO) error{Overflow}!void {
if (self.dwarf_info) |dw| {
// TODO handle multiple CUs
const cu = dw.compile_units.items[0];
const comp_dir = try cu.getCompileDir(dw) orelse return;
const tu_name = try cu.getSourceFile(dw) orelse return;
@ -1504,7 +1506,6 @@ pub fn writeStabs(self: *const Object, macho_file: *MachO, ctx: anytype) error{O
var index = self.output_symtab_ctx.istab;
if (self.dwarf_info) |dw| {
// TODO handle multiple CUs
const cu = dw.compile_units.items[0];
const comp_dir = try cu.getCompileDir(dw) orelse return;
const tu_name = try cu.getSourceFile(dw) orelse return;
@ -1750,7 +1751,6 @@ pub fn hasEhFrameRecords(self: Object) bool {
return self.cies.items.len > 0;
}
/// TODO handle multiple CUs
pub fn hasDebugInfo(self: Object) bool {
if (self.dwarf_info) |dw| {
return dw.compile_units.items.len > 0;

View file

@ -267,7 +267,7 @@ pub const StubsSection = struct {
},
.aarch64 => {
// TODO relax if possible
const pages = try aarch64.calcNumberOfPages(source, target);
const pages = try aarch64.calcNumberOfPages(@intCast(source), @intCast(target));
try writer.writeInt(u32, aarch64.Instruction.adrp(.x16, pages).toU32(), .little);
const off = try math.divExact(u12, @truncate(target), 8);
try writer.writeInt(
@ -411,7 +411,7 @@ pub const StubsHelperSection = struct {
.aarch64 => {
{
// TODO relax if possible
const pages = try aarch64.calcNumberOfPages(sect.addr, dyld_private_addr);
const pages = try aarch64.calcNumberOfPages(@intCast(sect.addr), @intCast(dyld_private_addr));
try writer.writeInt(u32, aarch64.Instruction.adrp(.x17, pages).toU32(), .little);
const off: u12 = @truncate(dyld_private_addr);
try writer.writeInt(u32, aarch64.Instruction.add(.x17, .x17, off, false).toU32(), .little);
@ -424,7 +424,7 @@ pub const StubsHelperSection = struct {
).toU32(), .little);
{
// TODO relax if possible
const pages = try aarch64.calcNumberOfPages(sect.addr + 12, dyld_stub_binder_addr);
const pages = try aarch64.calcNumberOfPages(@intCast(sect.addr + 12), @intCast(dyld_stub_binder_addr));
try writer.writeInt(u32, aarch64.Instruction.adrp(.x16, pages).toU32(), .little);
const off = try math.divExact(u12, @truncate(dyld_stub_binder_addr), 8);
try writer.writeInt(u32, aarch64.Instruction.ldr(
@ -679,7 +679,7 @@ pub const ObjcStubsSection = struct {
{
const target = sym.getObjcSelrefsAddress(macho_file);
const source = addr;
const pages = try aarch64.calcNumberOfPages(source, target);
const pages = try aarch64.calcNumberOfPages(@intCast(source), @intCast(target));
try writer.writeInt(u32, aarch64.Instruction.adrp(.x1, pages).toU32(), .little);
const off = try math.divExact(u12, @truncate(target), 8);
try writer.writeInt(
@ -692,7 +692,7 @@ pub const ObjcStubsSection = struct {
const target_sym = macho_file.getSymbol(macho_file.objc_msg_send_index.?);
const target = target_sym.getGotAddress(macho_file);
const source = addr + 2 * @sizeOf(u32);
const pages = try aarch64.calcNumberOfPages(source, target);
const pages = try aarch64.calcNumberOfPages(@intCast(source), @intCast(target));
try writer.writeInt(u32, aarch64.Instruction.adrp(.x16, pages).toU32(), .little);
const off = try math.divExact(u12, @truncate(target), 8);
try writer.writeInt(

View file

@ -99,7 +99,7 @@ pub const Thunk = struct {
const sym = macho_file.getSymbol(sym_index);
const saddr = thunk.getAddress(macho_file) + i * trampoline_size;
const taddr = sym.getAddress(.{}, macho_file);
const pages = try aarch64.calcNumberOfPages(saddr, taddr);
const pages = try aarch64.calcNumberOfPages(@intCast(saddr), @intCast(taddr));
try writer.writeInt(u32, aarch64.Instruction.adrp(.x16, pages).toU32(), .little);
const off: u12 = @truncate(taddr);
try writer.writeInt(u32, aarch64.Instruction.add(.x16, .x16, off, false).toU32(), .little);

View file

@ -25,7 +25,7 @@ pub fn writeLoadStoreRegInst(value: u12, code: *[4]u8) void {
mem.writeInt(u32, code, inst.toU32(), .little);
}
pub fn calcNumberOfPages(saddr: u64, taddr: u64) error{Overflow}!i21 {
pub fn calcNumberOfPages(saddr: i64, taddr: i64) error{Overflow}!i21 {
const spage = math.cast(i32, saddr >> 12) orelse return error.Overflow;
const tpage = math.cast(i32, taddr >> 12) orelse return error.Overflow;
const pages = math.cast(i21, tpage - spage) orelse return error.Overflow;

View file

@ -89,6 +89,7 @@ test {
_ = @import("behavior/switch.zig");
_ = @import("behavior/switch_prong_err_enum.zig");
_ = @import("behavior/switch_prong_implicit_cast.zig");
_ = @import("behavior/switch_on_captured_error.zig");
_ = @import("behavior/this.zig");
_ = @import("behavior/threadlocal.zig");
_ = @import("behavior/truncate.zig");

View file

@ -3,9 +3,11 @@ const assert = std.debug.assert;
const expect = std.testing.expect;
const expectError = std.testing.expectError;
const expectEqual = std.testing.expectEqual;
const builtin = @import("builtin");
test "switch on error union catch capture" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
const S = struct {
const Error = error{ A, B, C };
@ -16,6 +18,7 @@ test "switch on error union catch capture" {
try testCapture();
try testInline();
try testEmptyErrSet();
try testAddressOf();
}
fn testScalar() !void {
@ -252,6 +255,44 @@ test "switch on error union catch capture" {
try expectEqual(@as(u64, 0), b);
}
}
fn testAddressOf() !void {
{
const a: anyerror!usize = 0;
const ptr = &(a catch |e| switch (e) {
else => 3,
});
comptime assert(@TypeOf(ptr) == *const usize);
try expectEqual(ptr, &(a catch unreachable));
}
{
const a: anyerror!usize = error.A;
const ptr = &(a catch |e| switch (e) {
else => 3,
});
comptime assert(@TypeOf(ptr) == *const comptime_int);
try expectEqual(3, ptr.*);
}
{
var a: anyerror!usize = 0;
_ = &a;
const ptr = &(a catch |e| switch (e) {
else => return,
});
comptime assert(@TypeOf(ptr) == *usize);
ptr.* += 1;
try expectEqual(@as(usize, 1), a catch unreachable);
}
{
var a: anyerror!usize = error.A;
_ = &a;
const ptr = &(a catch |e| switch (e) {
else => return,
});
comptime assert(@TypeOf(ptr) == *usize);
unreachable;
}
}
};
try comptime S.doTheTest();
@ -260,6 +301,7 @@ test "switch on error union catch capture" {
test "switch on error union if else capture" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
const S = struct {
const Error = error{ A, B, C };
@ -276,6 +318,7 @@ test "switch on error union if else capture" {
try testInlinePtr();
try testEmptyErrSet();
try testEmptyErrSetPtr();
try testAddressOf();
}
fn testScalar() !void {
@ -747,6 +790,45 @@ test "switch on error union if else capture" {
try expectEqual(@as(u64, 0), b);
}
}
fn testAddressOf() !void {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
{
const a: anyerror!usize = 0;
const ptr = &(if (a) |*v| v.* else |e| switch (e) {
else => 3,
});
comptime assert(@TypeOf(ptr) == *const usize);
try expectEqual(ptr, &(a catch unreachable));
}
{
const a: anyerror!usize = error.A;
const ptr = &(if (a) |*v| v.* else |e| switch (e) {
else => 3,
});
comptime assert(@TypeOf(ptr) == *const comptime_int);
try expectEqual(3, ptr.*);
}
{
var a: anyerror!usize = 0;
_ = &a;
const ptr = &(if (a) |*v| v.* else |e| switch (e) {
else => return,
});
comptime assert(@TypeOf(ptr) == *usize);
ptr.* += 1;
try expectEqual(@as(usize, 1), a catch unreachable);
}
{
var a: anyerror!usize = error.A;
_ = &a;
const ptr = &(if (a) |*v| v.* else |e| switch (e) {
else => return,
});
comptime assert(@TypeOf(ptr) == *usize);
unreachable;
}
}
};
try comptime S.doTheTest();

View file

@ -2301,3 +2301,25 @@ test "matching captures causes union equivalence" {
comptime assert(@TypeOf(a) == @TypeOf(b));
try expect(a.u == b.u);
}
test "signed enum tag with negative value" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
const Enum = enum(i8) {
a = -1,
};
const Union = union(Enum) {
a: i32,
};
var i: i32 = 0;
i = i;
const e = Union{ .a = i };
try expect(e.a == i);
}

View file

@ -269,6 +269,33 @@ void c_struct_f32_f32f32(struct Struct_f32_f32f32 s) {
assert_or_panic(s.b.d == 3.0f);
}
struct Struct_u32_Union_u32_u32u32 {
uint32_t a;
union {
struct {
uint32_t d, e;
} c;
} b;
};
struct Struct_u32_Union_u32_u32u32 zig_ret_struct_u32_union_u32_u32u32(void);
void zig_struct_u32_union_u32_u32u32(struct Struct_u32_Union_u32_u32u32);
struct Struct_u32_Union_u32_u32u32 c_ret_struct_u32_union_u32_u32u32(void) {
struct Struct_u32_Union_u32_u32u32 s;
s.a = 1;
s.b.c.d = 2;
s.b.c.e = 3;
return s;
}
void c_struct_u32_union_u32_u32u32(struct Struct_u32_Union_u32_u32u32 s) {
assert_or_panic(s.a == 1);
assert_or_panic(s.b.c.d == 2);
assert_or_panic(s.b.c.e == 3);
}
struct BigStruct {
uint64_t a;
uint64_t b;
@ -2664,6 +2691,16 @@ void run_c_tests(void) {
}
#endif
#if !defined(__powerpc__)
{
struct Struct_u32_Union_u32_u32u32 s = zig_ret_struct_u32_union_u32_u32u32();
assert_or_panic(s.a == 1);
assert_or_panic(s.b.c.d == 2);
assert_or_panic(s.b.c.e == 3);
zig_struct_u32_union_u32_u32u32(s);
}
#endif
{
struct BigStruct s = {1, 2, 3, 4, 5};
zig_big_struct(s);
@ -2678,7 +2715,7 @@ void run_c_tests(void) {
}
#endif
#if !defined __i386__ && !defined __arm__ && !defined __aarch64__ && \
#if !defined __arm__ && !defined __aarch64__ && \
!defined __mips__ && !defined __powerpc__ && !defined ZIG_RISCV64
{
struct MedStructInts s = {1, 2, 3};

View file

@ -10,11 +10,11 @@ const builtin = @import("builtin");
const print = std.debug.print;
const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
const has_i128 = builtin.cpu.arch != .x86 and !builtin.cpu.arch.isARM() and
const have_i128 = builtin.cpu.arch != .x86 and !builtin.cpu.arch.isARM() and
!builtin.cpu.arch.isMIPS() and !builtin.cpu.arch.isPPC();
const has_f128 = builtin.cpu.arch.isX86() and !builtin.os.tag.isDarwin();
const has_f80 = builtin.cpu.arch.isX86();
const have_f128 = builtin.cpu.arch.isX86() and !builtin.os.tag.isDarwin();
const have_f80 = builtin.cpu.arch.isX86();
extern fn run_c_tests() void;
@ -53,13 +53,13 @@ test "C ABI integers" {
c_u16(0xfffe);
c_u32(0xfffffffd);
c_u64(0xfffffffffffffffc);
if (has_i128) c_struct_u128(.{ .value = 0xfffffffffffffffc });
if (have_i128) c_struct_u128(.{ .value = 0xfffffffffffffffc });
c_i8(-1);
c_i16(-2);
c_i32(-3);
c_i64(-4);
if (has_i128) c_struct_i128(.{ .value = -6 });
if (have_i128) c_struct_i128(.{ .value = -6 });
c_five_integers(12, 34, 56, 78, 90);
}
@ -186,7 +186,6 @@ const complex_abi_compatible = builtin.cpu.arch != .x86 and !builtin.cpu.arch.is
test "C ABI complex float" {
if (!complex_abi_compatible) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .x86_64) return error.SkipZigTest; // See https://github.com/ziglang/zig/issues/8465
const a = ComplexFloat{ .real = 1.25, .imag = 2.6 };
const b = ComplexFloat{ .real = 11.3, .imag = -1.5 };
@ -401,6 +400,42 @@ test "C ABI struct f32 {f32,f32}" {
c_struct_f32_f32f32(.{ .a = 1.0, .b = .{ .c = 2.0, .d = 3.0 } });
}
const Struct_u32_Union_u32_u32u32 = extern struct {
a: u32,
b: extern union {
c: extern struct {
d: u32,
e: u32,
},
},
};
export fn zig_ret_struct_u32_union_u32_u32u32() Struct_u32_Union_u32_u32u32 {
return .{ .a = 1, .b = .{ .c = .{ .d = 2, .e = 3 } } };
}
export fn zig_struct_u32_union_u32_u32u32(s: Struct_u32_Union_u32_u32u32) void {
expect(s.a == 1) catch @panic("test failure");
expect(s.b.c.d == 2) catch @panic("test failure");
expect(s.b.c.e == 3) catch @panic("test failure");
}
extern fn c_ret_struct_u32_union_u32_u32u32() Struct_u32_Union_u32_u32u32;
extern fn c_struct_u32_union_u32_u32u32(Struct_u32_Union_u32_u32u32) void;
test "C ABI struct{u32,union{u32,struct{u32,u32}}}" {
if (builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
if (builtin.cpu.arch.isPPC()) return error.SkipZigTest;
if (builtin.cpu.arch.isPPC64()) return error.SkipZigTest;
const s = c_ret_struct_u32_union_u32_u32u32();
try expect(s.a == 1);
try expect(s.b.c.d == 2);
try expect(s.b.c.e == 3);
c_struct_u32_union_u32_u32u32(.{ .a = 1, .b = .{ .c = .{ .d = 2, .e = 3 } } });
}
const BigStruct = extern struct {
a: u64,
b: u64,
@ -470,7 +505,6 @@ extern fn c_med_struct_mixed(MedStructMixed) void;
extern fn c_ret_med_struct_mixed() MedStructMixed;
test "C ABI medium struct of ints and floats" {
if (builtin.cpu.arch == .x86) return error.SkipZigTest;
if (builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
if (builtin.cpu.arch.isPPC()) return error.SkipZigTest;
if (builtin.cpu.arch.isPPC64()) return error.SkipZigTest;
@ -538,7 +572,6 @@ extern fn c_med_struct_ints(MedStructInts) void;
extern fn c_ret_med_struct_ints() MedStructInts;
test "C ABI medium struct of ints" {
if (builtin.cpu.arch == .x86) return error.SkipZigTest;
if (builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
if (builtin.cpu.arch.isPPC()) return error.SkipZigTest;
if (builtin.cpu.arch.isPPC64()) return error.SkipZigTest;
@ -600,7 +633,7 @@ export fn zig_big_packed_struct(x: BigPackedStruct) void {
}
test "C ABI big packed struct" {
if (!has_i128) return error.SkipZigTest;
if (!have_i128) return error.SkipZigTest;
const s = BigPackedStruct{ .a = 1, .b = 2 };
c_big_packed_struct(s);
@ -943,7 +976,6 @@ extern fn c_float_array_struct(FloatArrayStruct) void;
extern fn c_ret_float_array_struct() FloatArrayStruct;
test "Float array like struct" {
if (builtin.cpu.arch == .x86 and builtin.mode != .Debug) return error.SkipZigTest;
if (builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
if (builtin.cpu.arch.isPPC()) return error.SkipZigTest;
@ -5318,7 +5350,6 @@ extern fn c_ptr_size_float_struct(Vector2) void;
extern fn c_ret_ptr_size_float_struct() Vector2;
test "C ABI pointer sized float struct" {
if (builtin.cpu.arch == .x86) return error.SkipZigTest;
if (builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
if (builtin.cpu.arch.isRISCV()) return error.SkipZigTest;
if (builtin.cpu.arch.isPPC()) return error.SkipZigTest;
@ -5348,7 +5379,6 @@ test "DC: Zig passes to C" {
try expectOk(c_assert_DC(.{ .v1 = -0.25, .v2 = 15 }));
}
test "DC: Zig returns to C" {
if (builtin.cpu.arch == .x86 and builtin.mode != .Debug) return error.SkipZigTest;
if (builtin.cpu.arch.isMIPS() and builtin.mode != .Debug) return error.SkipZigTest;
if (builtin.cpu.arch.isRISCV()) return error.SkipZigTest;
if (builtin.cpu.arch.isPPC()) return error.SkipZigTest;
@ -5363,7 +5393,6 @@ test "DC: C passes to Zig" {
try expectOk(c_send_DC());
}
test "DC: C returns to Zig" {
if (builtin.cpu.arch == .x86 and builtin.mode != .Debug) return error.SkipZigTest;
if (builtin.cpu.arch.isMIPS() and builtin.mode != .Debug) return error.SkipZigTest;
if (builtin.cpu.arch.isRISCV()) return error.SkipZigTest;
if (builtin.cpu.arch.isPPC()) return error.SkipZigTest;
@ -5397,7 +5426,6 @@ test "CFF: Zig passes to C" {
try expectOk(c_assert_CFF(.{ .v1 = 39, .v2 = 0.875, .v3 = 1.0 }));
}
test "CFF: Zig returns to C" {
if (builtin.cpu.arch == .x86 and builtin.mode != .Debug) return error.SkipZigTest;
if (builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
if (builtin.cpu.arch.isPPC()) return error.SkipZigTest;
if (builtin.cpu.arch.isPPC64()) return error.SkipZigTest;
@ -5414,7 +5442,6 @@ test "CFF: C passes to Zig" {
try expectOk(c_send_CFF());
}
test "CFF: C returns to Zig" {
if (builtin.cpu.arch == .x86 and builtin.mode != .Debug) return error.SkipZigTest;
if (builtin.cpu.arch == .aarch64 and builtin.mode != .Debug) return error.SkipZigTest;
if (builtin.cpu.arch.isRISCV() and builtin.mode != .Debug) return error.SkipZigTest;
if (builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
@ -5442,28 +5469,24 @@ pub export fn zig_ret_CFF() CFF {
const PD = extern struct { v1: ?*anyopaque, v2: f64 };
test "PD: Zig passes to C" {
if (builtin.target.cpu.arch == .x86) return error.SkipZigTest;
if (builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
if (builtin.cpu.arch.isPPC()) return error.SkipZigTest;
if (builtin.cpu.arch.isPPC64()) return error.SkipZigTest;
try expectOk(c_assert_PD(.{ .v1 = null, .v2 = 0.5 }));
}
test "PD: Zig returns to C" {
if (builtin.target.cpu.arch == .x86) return error.SkipZigTest;
if (builtin.cpu.arch.isMIPS() and builtin.mode != .Debug) return error.SkipZigTest;
if (builtin.cpu.arch.isPPC()) return error.SkipZigTest;
if (builtin.cpu.arch.isPPC64()) return error.SkipZigTest;
try expectOk(c_assert_ret_PD());
}
test "PD: C passes to Zig" {
if (builtin.target.cpu.arch == .x86) return error.SkipZigTest;
if (builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
if (builtin.cpu.arch.isPPC()) return error.SkipZigTest;
if (builtin.cpu.arch.isPPC64()) return error.SkipZigTest;
try expectOk(c_send_PD());
}
test "PD: C returns to Zig" {
if (builtin.target.cpu.arch == .x86) return error.SkipZigTest;
if (builtin.cpu.arch.isMIPS() and builtin.mode != .Debug) return error.SkipZigTest;
if (builtin.cpu.arch.isPPC()) return error.SkipZigTest;
if (builtin.cpu.arch.isPPC64()) return error.SkipZigTest;
@ -5519,7 +5542,6 @@ const ByVal = extern struct {
extern fn c_func_ptr_byval(*anyopaque, *anyopaque, ByVal, c_ulong, *anyopaque, c_ulong) void;
test "C function that takes byval struct called via function pointer" {
if (builtin.cpu.arch == .x86 and builtin.mode != .Debug) return error.SkipZigTest;
if (builtin.cpu.arch.isMIPS() and builtin.mode != .Debug) return error.SkipZigTest;
if (builtin.cpu.arch.isPPC()) return error.SkipZigTest;
@ -5551,7 +5573,6 @@ const f16_struct = extern struct {
};
extern fn c_f16_struct(f16_struct) f16_struct;
test "f16 struct" {
if (builtin.target.cpu.arch == .x86) return error.SkipZigTest;
if (builtin.target.cpu.arch.isMIPS()) return error.SkipZigTest;
if (builtin.target.cpu.arch.isPPC()) return error.SkipZigTest;
if (builtin.target.cpu.arch.isPPC()) return error.SkipZigTest;
@ -5563,7 +5584,7 @@ test "f16 struct" {
extern fn c_f80(f80) f80;
test "f80 bare" {
if (!has_f80) return error.SkipZigTest;
if (!have_f80) return error.SkipZigTest;
const a = c_f80(12.34);
try expect(@as(f64, @floatCast(a)) == 56.78);
@ -5574,9 +5595,7 @@ const f80_struct = extern struct {
};
extern fn c_f80_struct(f80_struct) f80_struct;
test "f80 struct" {
if (!has_f80) return error.SkipZigTest;
if (builtin.target.cpu.arch == .x86) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.mode != .Debug) return error.SkipZigTest;
if (!have_f80) return error.SkipZigTest;
const a = c_f80_struct(.{ .a = 12.34 });
try expect(@as(f64, @floatCast(a.a)) == 56.78);
@ -5588,8 +5607,7 @@ const f80_extra_struct = extern struct {
};
extern fn c_f80_extra_struct(f80_extra_struct) f80_extra_struct;
test "f80 extra struct" {
if (!has_f80) return error.SkipZigTest;
if (builtin.target.cpu.arch == .x86) return error.SkipZigTest;
if (!have_f80) return error.SkipZigTest;
const a = c_f80_extra_struct(.{ .a = 12.34, .b = 42 });
try expect(@as(f64, @floatCast(a.a)) == 56.78);
@ -5598,7 +5616,7 @@ test "f80 extra struct" {
extern fn c_f128(f128) f128;
test "f128 bare" {
if (!has_f128) return error.SkipZigTest;
if (!have_f128) return error.SkipZigTest;
const a = c_f128(12.34);
try expect(@as(f64, @floatCast(a)) == 56.78);
@ -5609,7 +5627,7 @@ const f128_struct = extern struct {
};
extern fn c_f128_struct(f128_struct) f128_struct;
test "f128 struct" {
if (!has_f128) return error.SkipZigTest;
if (!have_f128) return error.SkipZigTest;
const a = c_f128_struct(.{ .a = 12.34 });
try expect(@as(f64, @floatCast(a.a)) == 56.78);

View file

@ -0,0 +1,20 @@
export fn interrupt_param1(_: u32) callconv(.Interrupt) void {}
export fn interrupt_param2(_: *anyopaque, _: u32) callconv(.Interrupt) void {}
export fn interrupt_param3(_: *anyopaque, _: u64, _: u32) callconv(.Interrupt) void {}
export fn interrupt_ret(_: *anyopaque, _: u64) callconv(.Interrupt) u32 {
return 0;
}
export fn signal_param(_: u32) callconv(.Signal) void {}
export fn signal_ret() callconv(.Signal) noreturn {}
// error
// backend=stage2
// target=x86_64-linux
//
// :1:28: error: first parameter of function with 'Interrupt' calling convention must be a pointer type
// :2:43: error: second parameter of function with 'Interrupt' calling convention must be a 64-bit integer
// :3:51: error: 'Interrupt' calling convention supports up to 2 parameters, found 3
// :4:69: error: function with calling convention 'Interrupt' must return 'void' or 'noreturn'
// :8:24: error: parameters are not allowed with 'Signal' calling convention
// :9:34: error: callconv 'Signal' is only available on AVR, not x86_64

View file

@ -0,0 +1,16 @@
const GuSettings = struct {
fin: ?fn (c_int) callconv(.C) void,
};
pub export fn callbackFin(id: c_int, arg: ?*anyopaque) void {
const settings: ?*GuSettings = @as(?*GuSettings, @ptrFromInt(@intFromPtr(arg)));
if (settings.?.fin != null) {
settings.?.fin.?(id & 0xffff);
}
}
// error
// target=native
//
// :5:54: error: pointer to comptime-only type '?*tmp.GuSettings' must be comptime-known, but operand is runtime-known
// :2:10: note: struct requires comptime because of this field
// :2:10: note: use '*const fn (c_int) callconv(.C) void' for a function pointer type

View file

@ -0,0 +1,10 @@
export fn a() void {
var array: [0]void = undefined;
_ = array[0..undefined];
}
// error
// backend=stage2
// target=native
//
// :3:18: error: use of undefined value here causes undefined behavior

View file

@ -61,6 +61,7 @@ pub fn testAll(b: *Build, build_opts: BuildOptions) *Step {
elf_step.dependOn(testAbsSymbols(b, .{ .target = musl_target }));
elf_step.dependOn(testCommonSymbols(b, .{ .target = musl_target }));
elf_step.dependOn(testCommonSymbolsInArchive(b, .{ .target = musl_target }));
elf_step.dependOn(testCommentString(b, .{ .target = musl_target }));
elf_step.dependOn(testEmptyObject(b, .{ .target = musl_target }));
elf_step.dependOn(testEntryPoint(b, .{ .target = musl_target }));
elf_step.dependOn(testGcSections(b, .{ .target = musl_target }));
@ -72,6 +73,8 @@ pub fn testAll(b: *Build, build_opts: BuildOptions) *Step {
elf_step.dependOn(testLinkingC(b, .{ .target = musl_target }));
elf_step.dependOn(testLinkingCpp(b, .{ .target = musl_target }));
elf_step.dependOn(testLinkingZig(b, .{ .target = musl_target }));
elf_step.dependOn(testMergeStrings(b, .{ .target = musl_target }));
elf_step.dependOn(testMergeStrings2(b, .{ .target = musl_target }));
// https://github.com/ziglang/zig/issues/17451
// elf_step.dependOn(testNoEhFrameHdr(b, .{ .target = musl_target }));
elf_step.dependOn(testTlsStatic(b, .{ .target = musl_target }));
@ -81,6 +84,7 @@ pub fn testAll(b: *Build, build_opts: BuildOptions) *Step {
elf_step.dependOn(testAsNeeded(b, .{ .target = gnu_target }));
// https://github.com/ziglang/zig/issues/17430
// elf_step.dependOn(testCanonicalPlt(b, .{ .target = gnu_target }));
elf_step.dependOn(testCommentString(b, .{ .target = gnu_target }));
elf_step.dependOn(testCopyrel(b, .{ .target = gnu_target }));
// https://github.com/ziglang/zig/issues/17430
// elf_step.dependOn(testCopyrelAlias(b, .{ .target = gnu_target }));
@ -152,6 +156,8 @@ pub fn testAll(b: *Build, build_opts: BuildOptions) *Step {
elf_step.dependOn(testThunks(b, .{ .target = aarch64_musl }));
// x86_64 self-hosted backend
elf_step.dependOn(testCommentString(b, .{ .use_llvm = false, .target = default_target }));
elf_step.dependOn(testCommentStringStaticLib(b, .{ .use_llvm = false, .target = default_target }));
elf_step.dependOn(testEmitRelocatable(b, .{ .use_llvm = false, .target = x86_64_musl }));
elf_step.dependOn(testEmitStaticLibZig(b, .{ .use_llvm = false, .target = x86_64_musl }));
elf_step.dependOn(testGcSectionsZig(b, .{ .use_llvm = false, .target = default_target }));
@ -362,6 +368,36 @@ fn testCanonicalPlt(b: *Build, opts: Options) *Step {
return test_step;
}
// Verifies that a linked executable ends up with a ".comment" section that
// mentions "zig" (i.e. the linker emitted its identification string).
fn testCommentString(b: *Build, opts: Options) *Step {
    const step = addTestStep(b, "comment-string", opts);

    const main_exe = addExecutable(b, opts, .{
        .name = "main",
        .zig_source_bytes =
        \\pub fn main() void {}
        ,
    });

    const object_check = main_exe.checkObject();
    object_check.dumpSection(".comment");
    object_check.checkContains("zig");
    step.dependOn(&object_check.step);

    return step;
}
// Same check as testCommentString, but for the ".comment" section of a
// static library rather than an executable.
fn testCommentStringStaticLib(b: *Build, opts: Options) *Step {
    const step = addTestStep(b, "comment-string-static-lib", opts);

    const static_lib = addStaticLibrary(b, opts, .{
        .name = "lib",
        .zig_source_bytes =
        \\export fn foo() void {}
        ,
    });

    const object_check = static_lib.checkObject();
    object_check.dumpSection(".comment");
    object_check.checkContains("zig");
    step.dependOn(&object_check.step);

    return step;
}
fn testCommonSymbols(b: *Build, opts: Options) *Step {
const test_step = addTestStep(b, "common-symbols", opts);
@ -2267,6 +2303,125 @@ fn testLinkingZig(b: *Build, opts: Options) *Step {
return test_step;
}
// Adapted from https://github.com/rui314/mold/blob/main/test/elf/mergeable-strings.sh
// Two C objects define the same "foo" literal as char/wchar_t/char16_t/char32_t
// strings. The asserts in b.o's main() check at run time that duplicates with
// identical byte contents were merged to a single address (including L"foo"
// with U"foo", whose representations coincide on these targets) while literals
// with different encodings remain at distinct addresses.
fn testMergeStrings(b: *Build, opts: Options) *Step {
const test_step = addTestStep(b, "merge-strings", opts);
// First TU: one "foo" literal per encoding. -O2 so the compiler places them
// in mergeable string sections.
const obj1 = addObject(b, opts, .{ .name = "a.o" });
addCSourceBytes(obj1,
\\#include <uchar.h>
\\#include <wchar.h>
\\char *cstr1 = "foo";
\\wchar_t *wide1 = L"foo";
\\char16_t *utf16_1 = u"foo";
\\char32_t *utf32_1 = U"foo";
, &.{"-O2"});
obj1.linkLibC();
// Second TU: same literals again, plus the pointer-equality asserts.
const obj2 = addObject(b, opts, .{ .name = "b.o" });
addCSourceBytes(obj2,
\\#include <stdio.h>
\\#include <assert.h>
\\#include <uchar.h>
\\#include <wchar.h>
\\extern char *cstr1;
\\extern wchar_t *wide1;
\\extern char16_t *utf16_1;
\\extern char32_t *utf32_1;
\\char *cstr2 = "foo";
\\wchar_t *wide2 = L"foo";
\\char16_t *utf16_2 = u"foo";
\\char32_t *utf32_2 = U"foo";
\\int main() {
\\ printf("%p %p %p %p %p %p %p %p\n",
\\ cstr1, cstr2, wide1, wide2, utf16_1, utf16_2, utf32_1, utf32_2);
\\ assert((void*)cstr1 == (void*)cstr2);
\\ assert((void*)wide1 == (void*)wide2);
\\ assert((void*)utf16_1 == (void*)utf16_2);
\\ assert((void*)utf32_1 == (void*)utf32_2);
\\ assert((void*)wide1 == (void*)utf32_1);
\\ assert((void*)cstr1 != (void*)wide1);
\\ assert((void*)cstr1 != (void*)utf32_1);
\\ assert((void*)wide1 != (void*)utf16_1);
\\}
, &.{"-O2"});
obj2.linkLibC();
const exe = addExecutable(b, opts, .{ .name = "main" });
exe.addObject(obj1);
exe.addObject(obj2);
exe.linkLibC();
// Success is the asserts not firing: exit code 0.
const run = addRunArtifact(exe);
run.expectExitCode(0);
test_step.dependOn(&run.step);
return test_step;
}
// Zig-source variant of the string-merging test: two objects each embed a
// sentinel-terminated [5:0]u16 array (one ascending, one descending). After
// linking, the ".rodata.str" dump must contain both byte patterns (shown
// here little-endian; these runs target x86_64). The second sub-test repeats
// the check after first combining the two objects into one intermediate
// object.
fn testMergeStrings2(b: *Build, opts: Options) *Step {
const test_step = addTestStep(b, "merge-strings2", opts);
const obj1 = addObject(b, opts, .{ .name = "a", .zig_source_bytes =
\\const std = @import("std");
\\export fn foo() void {
\\ var arr: [5:0]u16 = [_:0]u16{ 1, 2, 3, 4, 5 };
\\ const slice = std.mem.sliceTo(&arr, 3);
\\ std.testing.expectEqualSlices(u16, arr[0..2], slice) catch unreachable;
\\}
});
const obj2 = addObject(b, opts, .{ .name = "b", .zig_source_bytes =
\\const std = @import("std");
\\extern fn foo() void;
\\pub fn main() void {
\\ foo();
\\ var arr: [5:0]u16 = [_:0]u16{ 5, 4, 3, 2, 1 };
\\ const slice = std.mem.sliceTo(&arr, 3);
\\ std.testing.expectEqualSlices(u16, arr[0..2], slice) catch unreachable;
\\}
});
// Case 1: link the two objects directly into an executable.
{
const exe = addExecutable(b, opts, .{ .name = "main1" });
exe.addObject(obj1);
exe.addObject(obj2);
const run = addRunArtifact(exe);
run.expectExitCode(0);
test_step.dependOn(&run.step);
const check = exe.checkObject();
check.dumpSection(".rodata.str");
check.checkContains("\x01\x00\x02\x00\x03\x00\x04\x00\x05\x00\x00\x00");
check.dumpSection(".rodata.str");
check.checkContains("\x05\x00\x04\x00\x03\x00\x02\x00\x01\x00\x00\x00");
test_step.dependOn(&check.step);
}
// Case 2: first combine both objects into a single intermediate object,
// then link that into the executable.
{
const obj3 = addObject(b, opts, .{ .name = "c" });
obj3.addObject(obj1);
obj3.addObject(obj2);
const exe = addExecutable(b, opts, .{ .name = "main2" });
exe.addObject(obj3);
const run = addRunArtifact(exe);
run.expectExitCode(0);
test_step.dependOn(&run.step);
const check = exe.checkObject();
check.dumpSection(".rodata.str");
check.checkContains("\x01\x00\x02\x00\x03\x00\x04\x00\x05\x00\x00\x00");
check.dumpSection(".rodata.str");
check.checkContains("\x05\x00\x04\x00\x03\x00\x02\x00\x01\x00\x00\x00");
test_step.dependOn(&check.step);
}
return test_step;
}
fn testNoEhFrameHdr(b: *Build, opts: Options) *Step {
const test_step = addTestStep(b, "no-eh-frame-hdr", opts);
@ -2528,6 +2683,33 @@ fn testRelocatableEhFrame(b: *Build, opts: Options) *Step {
return test_step;
}
// Adapted from https://github.com/rui314/mold/blob/main/test/elf/relocatable-mergeable-sections.sh
// Assembles three entries into a mergeable string section ("aMS" = alloc,
// merge, strings; entsize 1) with "Hello \0" appearing twice, performs a
// relocatable link into obj2, and expects the output section to contain
// exactly the two distinct strings — i.e. the duplicate was merged even in
// a relocatable link.
fn testRelocatableMergeStrings(b: *Build, opts: Options) *Step {
const test_step = addTestStep(b, "relocatable-merge-strings", opts);
const obj1 = addObject(b, opts, .{ .name = "a", .asm_source_bytes =
\\.section .rodata.str1.1,"aMS",@progbits,1
\\val1:
\\.ascii "Hello \0"
\\.section .rodata.str1.1,"aMS",@progbits,1
\\val5:
\\.ascii "World \0"
\\.section .rodata.str1.1,"aMS",@progbits,1
\\val7:
\\.ascii "Hello \0"
});
const obj2 = addObject(b, opts, .{ .name = "b" });
obj2.addObject(obj1);
const check = obj2.checkObject();
check.dumpSection(".rodata.str1.1");
check.checkExact("Hello \x00World \x00");
test_step.dependOn(&check.step);
return test_step;
}
fn testRelocatableNoEhFrame(b: *Build, opts: Options) *Step {
const test_step = addTestStep(b, "relocatable-no-eh-frame", opts);

View file

@ -91,7 +91,7 @@ fn checkStrlcpy() !void {
fn checkStrlcpy_v2_38() !void {
var buf: [99]u8 = undefined;
const used = c_string.strlcpy(&buf, "strlcpy works!", buf.len);
assert(used == 15);
assert(used == 14);
}
// atexit is part of libc_nonshared, so ensure its linked in correctly

View file

@ -1,21 +1,3 @@
pub fn build(b: *Build) void {
const test_step = b.step("test-link", "Run link tests");
b.default_step = test_step;
const has_macos_sdk = b.option(bool, "has_macos_sdk", "whether the host provides a macOS SDK in system path");
const has_ios_sdk = b.option(bool, "has_ios_sdk", "whether the host provides a iOS SDK in system path");
const has_symlinks_windows = b.option(bool, "has_symlinks_windows", "whether the host is windows and has symlinks enabled");
const build_opts: BuildOptions = .{
.has_macos_sdk = has_macos_sdk orelse false,
.has_ios_sdk = has_ios_sdk orelse false,
.has_symlinks_windows = has_symlinks_windows orelse false,
};
test_step.dependOn(@import("elf.zig").testAll(b, build_opts));
test_step.dependOn(@import("macho.zig").testAll(b, build_opts));
}
pub const BuildOptions = struct {
has_macos_sdk: bool,
has_ios_sdk: bool,

View file

@ -107,6 +107,9 @@
.windows_argv = .{
.path = "windows_argv",
},
.windows_bat_args = .{
.path = "windows_bat_args",
},
.self_exe_symlink = .{
.path = "self_exe_symlink",
},

View file

@ -0,0 +1,58 @@
const std = @import("std");
const builtin = @import("builtin");
/// Standalone test for argument passing through Windows .bat files.
/// Builds a child "echo-args" binary plus two drivers that run it: a
/// fixed-case test and a randomized fuzzer.
pub fn build(b: *std.Build) !void {
    const test_step = b.step("test", "Test it");
    b.default_step = test_step;

    const optimize: std.builtin.OptimizeMode = .Debug;
    const target = b.host;

    // cmd.exe batch-file semantics only exist on Windows; elsewhere this
    // build produces no steps.
    if (builtin.os.tag != .windows) return;

    // Child binary that echoes its argv back, NUL-separated.
    const echo_args = b.addExecutable(.{
        .name = "echo-args",
        .root_source_file = b.path("echo-args.zig"),
        .optimize = optimize,
        .target = target,
    });

    // Fixed-case driver.
    const test_exe = b.addExecutable(.{
        .name = "test",
        .root_source_file = b.path("test.zig"),
        .optimize = optimize,
        .target = target,
    });

    const run_cases = b.addRunArtifact(test_exe);
    run_cases.addArtifactArg(echo_args);
    run_cases.expectExitCode(0);
    run_cases.skip_foreign_checks = true;
    test_step.dependOn(&run_cases.step);

    // Randomized driver.
    const fuzz = b.addExecutable(.{
        .name = "fuzz",
        .root_source_file = b.path("fuzz.zig"),
        .optimize = optimize,
        .target = target,
    });

    const fuzz_max_iterations = b.option(u64, "iterations", "The max fuzz iterations (default: 100)") orelse 100;
    const fuzz_iterations_arg = std.fmt.allocPrint(b.allocator, "{}", .{fuzz_max_iterations}) catch @panic("oom");

    // Take the caller-provided seed, or draw one from OS entropy so a
    // failing run can still be reproduced from the seed the fuzzer prints.
    const fuzz_seed = b.option(u64, "seed", "Seed to use for the PRNG (default: random)") orelse seed: {
        var entropy: [8]u8 = undefined;
        try std.posix.getrandom(&entropy);
        break :seed std.mem.readInt(u64, &entropy, builtin.cpu.arch.endian());
    };
    const fuzz_seed_arg = std.fmt.allocPrint(b.allocator, "{}", .{fuzz_seed}) catch @panic("oom");

    const run_fuzz = b.addRunArtifact(fuzz);
    run_fuzz.addArtifactArg(echo_args);
    run_fuzz.addArgs(&.{ fuzz_iterations_arg, fuzz_seed_arg });
    run_fuzz.expectExitCode(0);
    run_fuzz.skip_foreign_checks = true;
    test_step.dependOn(&run_fuzz.step);
}

View file

@ -0,0 +1,14 @@
const std = @import("std");
/// Echoes every argument (after the program name) to stdout, separated by
/// NUL bytes, with no trailing separator. The parent test parses this back
/// to verify argument round-tripping.
pub fn main() !void {
    var arena_state = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena_state.deinit();
    const arena = arena_state.allocator();

    const out = std.io.getStdOut().writer();
    const argv = try std.process.argsAlloc(arena);

    var idx: usize = 1; // argv[0] is the program name
    while (idx < argv.len) : (idx += 1) {
        try out.writeAll(argv[idx]);
        // NUL after every argument except the last.
        if (idx + 1 != argv.len) try out.writeByte('\x00');
    }
}

View file

@ -0,0 +1,160 @@
const std = @import("std");
const builtin = @import("builtin");
const Allocator = std.mem.Allocator;
/// Fuzz driver. CLI: `fuzz <child_exe_path> [iterations] [seed]`.
/// Writes three .bat wrappers around the child executable into a temp dir,
/// then feeds each one randomly generated arguments and checks that they
/// round-trip unchanged. `iterations == 0` means run until killed.
pub fn main() anyerror!void {
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
defer if (gpa.deinit() == .leak) @panic("found memory leaks");
const allocator = gpa.allocator();
var it = try std.process.argsWithAllocator(allocator);
defer it.deinit();
_ = it.next() orelse unreachable; // skip binary name
const child_exe_path = it.next() orelse unreachable;
// Optional second arg: max iterations (defaults to "0" = unlimited).
const iterations: u64 = iterations: {
const arg = it.next() orelse "0";
break :iterations try std.fmt.parseUnsigned(u64, arg, 10);
};
// Optional third arg: PRNG seed; when absent one is drawn from the OS.
var rand_seed = false;
const seed: u64 = seed: {
const seed_arg = it.next() orelse {
rand_seed = true;
var buf: [8]u8 = undefined;
try std.posix.getrandom(&buf);
break :seed std.mem.readInt(u64, &buf, builtin.cpu.arch.endian());
};
break :seed try std.fmt.parseUnsigned(u64, seed_arg, 10);
};
var random = std.rand.DefaultPrng.init(seed);
const rand = random.random();
// If the seed was not given via the CLI, then output the
// randomly chosen seed so that this run can be reproduced
if (rand_seed) {
std.debug.print("rand seed: {}\n", .{seed});
}
// The .bat files (and any stray redirection output) live in a temp dir,
// which is also made the cwd for the duration of the run.
var tmp = std.testing.tmpDir(.{});
defer tmp.cleanup();
try tmp.dir.setAsCwd();
defer tmp.parent_dir.setAsCwd() catch {};
// Shared preamble: `@echo off` plus the quoted child exe path; each script
// re-appends its own argument-forwarding suffix after the preamble.
var buf = try std.ArrayList(u8).initCapacity(allocator, 128);
defer buf.deinit();
try buf.appendSlice("@echo off\n");
try buf.append('"');
try buf.appendSlice(child_exe_path);
try buf.append('"');
const preamble_len = buf.items.len;
// args1.bat forwards everything via %*.
try buf.appendSlice(" %*");
try tmp.dir.writeFile("args1.bat", buf.items);
buf.shrinkRetainingCapacity(preamble_len);
// args2.bat forwards the first nine positional parameters individually.
try buf.appendSlice(" %1 %2 %3 %4 %5 %6 %7 %8 %9");
try tmp.dir.writeFile("args2.bat", buf.items);
buf.shrinkRetainingCapacity(preamble_len);
// args3.bat re-quotes each parameter using %~N expansion.
try buf.appendSlice(" \"%~1\" \"%~2\" \"%~3\" \"%~4\" \"%~5\" \"%~6\" \"%~7\" \"%~8\" \"%~9\"");
try tmp.dir.writeFile("args3.bat", buf.items);
buf.shrinkRetainingCapacity(preamble_len);
// One random single-argument round-trip per iteration.
var i: u64 = 0;
while (iterations == 0 or i < iterations) {
const rand_arg = try randomArg(allocator, rand);
defer allocator.free(rand_arg);
try testExec(allocator, &.{rand_arg}, null);
i += 1;
}
}
// Runs one argument vector through every generated wrapper script.
fn testExec(allocator: std.mem.Allocator, args: []const []const u8, env: ?*std.process.EnvMap) !void {
    const wrappers = [_][]const u8{ "args1.bat", "args2.bat", "args3.bat" };
    for (wrappers) |wrapper| {
        try testExecBat(allocator, wrapper, args, env);
    }
}
// Spawns `bat` with `args` appended and verifies the child echoed every
// argument back unchanged: NUL-separated on stdout, nothing on stderr.
fn testExecBat(allocator: std.mem.Allocator, bat: []const u8, args: []const []const u8, env: ?*std.process.EnvMap) !void {
    var argv_list = try std.ArrayList([]const u8).initCapacity(allocator, 1 + args.len);
    defer argv_list.deinit();
    argv_list.appendAssumeCapacity(bat);
    argv_list.appendSliceAssumeCapacity(args);

    // args3.bat always quotes "%~1".."%~9", so unused trailing positions
    // come back as empty strings and are tolerated below.
    const allow_trailing_empty = std.mem.eql(u8, bat, "args3.bat");

    const result = try std.ChildProcess.run(.{
        .allocator = allocator,
        .env_map = env,
        .argv = argv_list.items,
    });
    defer allocator.free(result.stdout);
    defer allocator.free(result.stderr);

    try std.testing.expectEqualStrings("", result.stderr);

    var echoed = std.mem.splitScalar(u8, result.stdout, '\x00');
    var arg_index: usize = 0;
    while (echoed.next()) |echoed_arg| {
        if (arg_index >= args.len and allow_trailing_empty) {
            try std.testing.expectEqualStrings("", echoed_arg);
            continue;
        }
        try std.testing.expectEqualSlices(u8, args[arg_index], echoed_arg);
        arg_index += 1;
    }
}
/// Builds a random argument string of 0-256 codepoints, drawn from
/// categories chosen to stress cmd.exe quoting: backslashes, quotes,
/// spaces, control characters, printable ASCII, unpaired surrogate halves,
/// and arbitrary non-ASCII codepoints. The result is always well-formed
/// WTF-8. Caller owns the returned slice.
fn randomArg(allocator: Allocator, rand: std.rand.Random) ![]const u8 {
const Choice = enum {
backslash,
quote,
space,
control,
printable,
surrogate_half,
non_ascii,
};
const choices = rand.uintAtMostBiased(u16, 256);
var buf = try std.ArrayList(u8).initCapacity(allocator, choices);
errdefer buf.deinit();
var last_codepoint: u21 = 0;
for (0..choices) |_| {
const choice = rand.enumValue(Choice);
const codepoint: u21 = switch (choice) {
.backslash => '\\',
.quote => '"',
.space => ' ',
.control => switch (rand.uintAtMostBiased(u8, 0x21)) {
// NUL/CR/LF can't roundtrip
'\x00', '\r', '\n' => ' ',
0x21 => '\x7F', // 0x21 itself is '!' (printable); remap that draw to DEL so it stays a control char
else => |b| b,
},
.printable => '!' + rand.uintAtMostBiased(u8, '~' - '!'),
.surrogate_half => rand.intRangeAtMostBiased(u16, 0xD800, 0xDFFF),
.non_ascii => rand.intRangeAtMostBiased(u21, 0x80, 0x10FFFF),
};
// Ensure that we always return well-formed WTF-8.
// Instead of concatenating to ensure well-formed WTF-8,
// we just skip encoding the low surrogate.
if (std.unicode.isSurrogateCodepoint(last_codepoint) and std.unicode.isSurrogateCodepoint(codepoint)) {
if (std.unicode.utf16IsHighSurrogate(@intCast(last_codepoint)) and std.unicode.utf16IsLowSurrogate(@intCast(codepoint))) {
continue;
}
}
try buf.ensureUnusedCapacity(4);
const unused_slice = buf.unusedCapacitySlice();
const len = std.unicode.wtf8Encode(codepoint, unused_slice) catch unreachable;
buf.items.len += len;
last_codepoint = codepoint;
}
return buf.toOwnedSlice();
}

View file

@ -0,0 +1,132 @@
const std = @import("std");
/// Fixed-case driver. CLI: `test <child_exe_path>`. Writes three .bat
/// wrappers around the child echo executable into a temp dir, then runs a
/// fixed list of argument-escaping edge cases through each of them.
pub fn main() anyerror!void {
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
defer if (gpa.deinit() == .leak) @panic("found memory leaks");
const allocator = gpa.allocator();
var it = try std.process.argsWithAllocator(allocator);
defer it.deinit();
_ = it.next() orelse unreachable; // skip binary name
const child_exe_path = it.next() orelse unreachable;
var tmp = std.testing.tmpDir(.{});
defer tmp.cleanup();
try tmp.dir.setAsCwd();
defer tmp.parent_dir.setAsCwd() catch {};
// Shared preamble: `@echo off` plus the quoted child exe path.
var buf = try std.ArrayList(u8).initCapacity(allocator, 128);
defer buf.deinit();
try buf.appendSlice("@echo off\n");
try buf.append('"');
try buf.appendSlice(child_exe_path);
try buf.append('"');
const preamble_len = buf.items.len;
// args1.bat: %*; args2.bat: %1..%9; args3.bat: quoted "%~1".."%~9".
try buf.appendSlice(" %*");
try tmp.dir.writeFile("args1.bat", buf.items);
buf.shrinkRetainingCapacity(preamble_len);
try buf.appendSlice(" %1 %2 %3 %4 %5 %6 %7 %8 %9");
try tmp.dir.writeFile("args2.bat", buf.items);
buf.shrinkRetainingCapacity(preamble_len);
try buf.appendSlice(" \"%~1\" \"%~2\" \"%~3\" \"%~4\" \"%~5\" \"%~6\" \"%~7\" \"%~8\" \"%~9\"");
try tmp.dir.writeFile("args3.bat", buf.items);
buf.shrinkRetainingCapacity(preamble_len);
// Test cases are from https://github.com/rust-lang/rust/blob/master/tests/ui/std/windows-bat-args.rs
// NUL/CR/LF cannot be passed through a batch script and must be rejected.
try testExecError(error.InvalidBatchScriptArg, allocator, &.{"\x00"});
try testExecError(error.InvalidBatchScriptArg, allocator, &.{"\n"});
try testExecError(error.InvalidBatchScriptArg, allocator, &.{"\r"});
try testExec(allocator, &.{ "a", "b" }, null);
try testExec(allocator, &.{ "c is for cat", "d is for dog" }, null);
try testExec(allocator, &.{ "\"", " \"" }, null);
try testExec(allocator, &.{ "\\", "\\" }, null);
try testExec(allocator, &.{">file.txt"}, null);
try testExec(allocator, &.{"whoami.exe"}, null);
try testExec(allocator, &.{"&a.exe"}, null);
try testExec(allocator, &.{"&echo hello "}, null);
try testExec(allocator, &.{ "&echo hello", "&whoami", ">file.txt" }, null);
try testExec(allocator, &.{"!TMP!"}, null);
try testExec(allocator, &.{"key=value"}, null);
try testExec(allocator, &.{"\"key=value\""}, null);
try testExec(allocator, &.{"key = value"}, null);
try testExec(allocator, &.{"key=[\"value\"]"}, null);
try testExec(allocator, &.{ "", "a=b" }, null);
try testExec(allocator, &.{"key=\"foo bar\""}, null);
try testExec(allocator, &.{"key=[\"my_value]"}, null);
try testExec(allocator, &.{"key=[\"my_value\",\"other-value\"]"}, null);
try testExec(allocator, &.{"key\\=value"}, null);
try testExec(allocator, &.{"key=\"&whoami\""}, null);
try testExec(allocator, &.{"key=\"value\"=5"}, null);
try testExec(allocator, &.{"key=[\">file.txt\"]"}, null);
try testExec(allocator, &.{"%hello"}, null);
try testExec(allocator, &.{"%PATH%"}, null);
try testExec(allocator, &.{"%%cd:~,%"}, null);
try testExec(allocator, &.{"%PATH%PATH%"}, null);
try testExec(allocator, &.{"\">file.txt"}, null);
try testExec(allocator, &.{"abc\"&echo hello"}, null);
try testExec(allocator, &.{"123\">file.txt"}, null);
try testExec(allocator, &.{"\"&echo hello&whoami.exe"}, null);
try testExec(allocator, &.{ "\"hello^\"world\"", "hello &echo oh no >file.txt" }, null);
try testExec(allocator, &.{"&whoami.exe"}, null);
// Variable-expansion cases run with an env map that defines %FOO% under
// several names a ^-based escaper might accidentally produce.
var env = env: {
var env = try std.process.getEnvMap(allocator);
errdefer env.deinit();
// No escaping
try env.put("FOO", "123");
// Some possible escaping of %FOO% that could be expanded
// when escaping cmd.exe meta characters with ^
try env.put("FOO^", "123"); // only escaping %
try env.put("^F^O^O^", "123"); // escaping every char
break :env env;
};
defer env.deinit();
try testExec(allocator, &.{"%FOO%"}, &env);
// Ensure that none of the `>file.txt`s have caused file.txt to be created
try std.testing.expectError(error.FileNotFound, tmp.dir.access("file.txt", .{}));
}
// Asserts that running `args` through all three .bat wrappers fails with
// exactly `err`.
fn testExecError(err: anyerror, allocator: std.mem.Allocator, args: []const []const u8) !void {
    const outcome = testExec(allocator, args, null);
    return std.testing.expectError(err, outcome);
}
// Runs the same argument vector through each of the three wrapper scripts.
fn testExec(allocator: std.mem.Allocator, args: []const []const u8, env: ?*std.process.EnvMap) !void {
    inline for (.{ "args1.bat", "args2.bat", "args3.bat" }) |script_name| {
        try testExecBat(allocator, script_name, args, env);
    }
}
// Spawns `bat` with `args` appended and checks that the child echoed every
// argument back byte-for-byte: NUL-separated on stdout, empty stderr.
fn testExecBat(allocator: std.mem.Allocator, bat: []const u8, args: []const []const u8, env: ?*std.process.EnvMap) !void {
var argv = try std.ArrayList([]const u8).initCapacity(allocator, 1 + args.len);
defer argv.deinit();
argv.appendAssumeCapacity(bat);
argv.appendSliceAssumeCapacity(args);
// args3.bat always quotes "%~1".."%~9", so unused trailing positions come
// back as empty strings and are tolerated below.
const can_have_trailing_empty_args = std.mem.eql(u8, bat, "args3.bat");
const result = try std.ChildProcess.run(.{
.allocator = allocator,
.env_map = env,
.argv = argv.items,
});
defer allocator.free(result.stdout);
defer allocator.free(result.stderr);
try std.testing.expectEqualStrings("", result.stderr);
var it = std.mem.splitScalar(u8, result.stdout, '\x00');
var i: usize = 0;
while (it.next()) |actual_arg| {
if (i >= args.len and can_have_trailing_empty_args) {
try std.testing.expectEqualStrings("", actual_arg);
continue;
}
const expected_arg = args[i];
try std.testing.expectEqualStrings(expected_arg, actual_arg);
i += 1;
}
}

View file

@ -10,11 +10,13 @@ pub fn build(b: *std.Build) void {
.abi = .gnu,
});
add(b, b.host, .any, test_step);
add(b, target, .any, test_step);
const generated_h_step = b.addWriteFile("generated.h", "#define GENERATED_DEFINE \"foo\"");
add(b, b.host, .gnu, test_step);
add(b, target, .gnu, test_step);
add(b, b.host, .any, test_step, generated_h_step);
add(b, target, .any, test_step, generated_h_step);
add(b, b.host, .gnu, test_step, generated_h_step);
add(b, target, .gnu, test_step, generated_h_step);
}
fn add(
@ -22,6 +24,7 @@ fn add(
target: std.Build.ResolvedTarget,
rc_includes: enum { any, gnu },
test_step: *std.Build.Step,
generated_h_step: *std.Build.Step.WriteFile,
) void {
const exe = b.addExecutable(.{
.name = "zig_resource_test",
@ -32,6 +35,9 @@ fn add(
exe.addWin32ResourceFile(.{
.file = b.path("res/zig.rc"),
.flags = &.{"/c65001"}, // UTF-8 code page
.include_paths = &.{
.{ .generated = &generated_h_step.generated_directory },
},
});
exe.rc_includes = switch (rc_includes) {
.any => .any,

View file

@ -1,3 +1,7 @@
// This include file is generated via build.zig, and it #defines GENERATED_DEFINE
#include "generated.h"
FOO RCDATA { GENERATED_DEFINE }
#define ICO_ID 1
// Nothing from windows.h is used in this .rc file,