Merge remote-tracking branch 'origin/master' into llvm11

master
Andrew Kelley 2020-09-03 23:52:19 -07:00
commit 338f155a02
56 changed files with 4775 additions and 2232 deletions

View File

@ -25,6 +25,8 @@ if(ZIG_PREFER_CLANG_CPP_DYLIB)
clang-cpp
PATHS
${CLANG_LIBDIRS}
/usr/lib/llvm/11/lib
/usr/lib/llvm/11/lib64
/usr/lib/llvm-11/lib
/usr/local/llvm110/lib
/usr/local/llvm11/lib

View File

@ -26,6 +26,8 @@ if(ZIG_PREFER_CLANG_CPP_DYLIB)
LLVM
PATHS
${LLVM_LIBDIRS}
/usr/lib/llvm/11/lib
/usr/lib/llvm/11/lib64
/usr/lib/llvm-11/lib
/usr/local/llvm11/lib
/usr/local/llvm110/lib

lib/std/array_hash_map.zig (1087 changed lines)

File diff suppressed because it is too large

View File

@ -20,7 +20,8 @@ pub const BufSet = struct {
}
pub fn deinit(self: *BufSet) void {
for (self.hash_map.items()) |entry| {
var it = self.hash_map.iterator();
while (it.next()) |entry| {
self.free(entry.key);
}
self.hash_map.deinit();
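Editor's note: the `items()` slice API is gone from the regular hash map, so cleanup loops like this one now go through an explicit iterator. A minimal sketch of the new pattern, assuming a managed `std.StringHashMap` and an illustrative helper name:

const std = @import("std");

fn freeKeysAndDeinit(allocator: *std.mem.Allocator, map: *std.StringHashMap(void)) void {
    // Entries are no longer exposed as a contiguous slice; walk them
    // through the iterator instead, freeing each owned key.
    var it = map.iterator();
    while (it.next()) |entry| {
        allocator.free(entry.key);
    }
    map.deinit();
}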

View File

@ -261,6 +261,7 @@ pub const TypeInfo = union(enum) {
name: []const u8,
field_type: type,
default_value: anytype,
is_comptime: bool,
};
/// This data structure is used by the Zig language code generation and
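Editor's note: with `is_comptime` added to `std.builtin.TypeInfo.StructField`, reflection code can query it directly. A small sketch (plain runtime fields report `false`; names here are illustrative):

const std = @import("std");

const Point = struct {
    x: i32,
    y: i32,
};

test "StructField.is_comptime is visible through @typeInfo" {
    const fields = @typeInfo(Point).Struct.fields;
    inline for (fields) |field| {
        // Ordinary runtime fields are never comptime fields.
        std.testing.expect(!field.is_comptime);
    }
}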

View File

@ -330,3 +330,8 @@ pub const FILE = @Type(.Opaque);
pub extern "c" fn dlopen(path: [*:0]const u8, mode: c_int) ?*c_void;
pub extern "c" fn dlclose(handle: *c_void) c_int;
pub extern "c" fn dlsym(handle: ?*c_void, symbol: [*:0]const u8) ?*c_void;
pub extern "c" fn sync() void;
pub extern "c" fn syncfs(fd: c_int) c_int;
pub extern "c" fn fsync(fd: c_int) c_int;
pub extern "c" fn fdatasync(fd: c_int) c_int;

View File

@ -44,10 +44,10 @@ pub const ChildProcess = struct {
stderr_behavior: StdIo,
/// Set to change the user id when spawning the child process.
uid: if (builtin.os.tag == .windows) void else ?u32,
uid: if (builtin.os.tag == .windows or builtin.os.tag == .wasi) void else ?os.uid_t,
/// Set to change the group id when spawning the child process.
gid: if (builtin.os.tag == .windows) void else ?u32,
gid: if (builtin.os.tag == .windows or builtin.os.tag == .wasi) void else ?os.gid_t,
/// Set to change the current working directory when spawning the child process.
cwd: ?[]const u8,

View File

@ -66,6 +66,7 @@ fn peekIsAlign(comptime fmt: []const u8) bool {
/// - output numeric value in hexadecimal notation
/// - `s`: print a pointer-to-many as a c-string, use zero-termination
/// - `B` and `Bi`: output a memory size in either metric (1000) or power-of-two (1024) based notation. works for both float and integer values.
/// - `e` and `E`: if printing a string, escape non-printable characters
/// - `e`: output floating point value in scientific notation
/// - `d`: output numeric value in decimal notation
/// - `b`: output integer value in binary notation
@ -599,6 +600,16 @@ pub fn formatText(
try formatInt(c, 16, fmt[0] == 'X', FormatOptions{ .width = 2, .fill = '0' }, writer);
}
return;
} else if (comptime (std.mem.eql(u8, fmt, "e") or std.mem.eql(u8, fmt, "E"))) {
for (bytes) |c| {
if (std.ascii.isPrint(c)) {
try writer.writeByte(c);
} else {
try writer.writeAll("\\x");
try formatInt(c, 16, fmt[0] == 'E', FormatOptions{ .width = 2, .fill = '0' }, writer);
}
}
return;
} else {
@compileError("Unknown format string: '" ++ fmt ++ "'");
}
@ -1319,6 +1330,12 @@ test "slice" {
try testFmt("buf: Test\n Other text", "buf: {s}\n Other text", .{"Test"});
}
test "escape non-printable" {
try testFmt("abc", "{e}", .{"abc"});
try testFmt("ab\\xffc", "{e}", .{"ab\xffc"});
try testFmt("ab\\xFFc", "{E}", .{"ab\xffc"});
}
test "pointer" {
{
const value = @intToPtr(*align(1) i32, 0xdeadbeef);
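Editor's note: beyond the `testFmt` cases above, the new `{e}`/`{E}` specifier can be exercised through `std.fmt.bufPrint`; a minimal sketch:

const std = @import("std");

test "escape non-printable bytes with {e}" {
    var buf: [32]u8 = undefined;
    // 0xff is not printable, so it comes out as the escape sequence \xff.
    const out = try std.fmt.bufPrint(&buf, "{e}", .{"ab\xffc"});
    std.testing.expectEqualSlices(u8, "ab\\xffc", out);
}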

View File

@ -37,7 +37,9 @@
const std = @import("../std.zig");
const ascii = std.ascii;
const max_digits = 25;
// The mantissa field in FloatRepr is 64 bits wide and can hold only 19
// decimal digits without overflowing
const max_digits = 19;
const f64_plus_zero: u64 = 0x0000000000000000;
const f64_minus_zero: u64 = 0x8000000000000000;
@ -409,6 +411,7 @@ test "fmt.parseFloat" {
expect(approxEq(T, try parseFloat(T, "123142.1"), 123142.1, epsilon));
expect(approxEq(T, try parseFloat(T, "-123142.1124"), @as(T, -123142.1124), epsilon));
expect(approxEq(T, try parseFloat(T, "0.7062146892655368"), @as(T, 0.7062146892655368), epsilon));
expect(approxEq(T, try parseFloat(T, "2.71828182845904523536"), @as(T, 2.718281828459045), epsilon));
}
}
}

File diff suppressed because it is too large

View File

@ -325,7 +325,8 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
break;
}
}
for (self.large_allocations.items()) |*large_alloc| {
var it = self.large_allocations.iterator();
while (it.next()) |large_alloc| {
log.err("Memory leak detected: {}", .{large_alloc.value.getStackTrace()});
leaks = true;
}
@ -584,7 +585,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
if (new_aligned_size > largest_bucket_object_size) {
try self.large_allocations.ensureCapacity(
self.backing_allocator,
self.large_allocations.entries.items.len + 1,
self.large_allocations.count() + 1,
);
const slice = try self.backing_allocator.allocFn(self.backing_allocator, len, ptr_align, len_align, ret_addr);

View File

@ -123,9 +123,9 @@ pub const Headers = struct {
pub fn deinit(self: *Self) void {
{
for (self.index.items()) |*entry| {
const dex = &entry.value;
dex.deinit(self.allocator);
var it = self.index.iterator();
while (it.next()) |entry| {
entry.value.deinit(self.allocator);
self.allocator.free(entry.key);
}
self.index.deinit(self.allocator);
@ -333,7 +333,8 @@ pub const Headers = struct {
fn rebuildIndex(self: *Self) void {
// clear out the indexes
for (self.index.items()) |*entry| {
var it = self.index.iterator();
while (it.next()) |entry| {
entry.value.shrinkRetainingCapacity(0);
}
// fill up indexes again; we know capacity is fine from before

View File

@ -169,6 +169,15 @@ pub const BitOutStream = BitWriter;
/// Deprecated: use `bitWriter`
pub const bitOutStream = bitWriter;
pub const AutoIndentingStream = @import("io/auto_indenting_stream.zig").AutoIndentingStream;
pub const autoIndentingStream = @import("io/auto_indenting_stream.zig").autoIndentingStream;
pub const ChangeDetectionStream = @import("io/change_detection_stream.zig").ChangeDetectionStream;
pub const changeDetectionStream = @import("io/change_detection_stream.zig").changeDetectionStream;
pub const FindByteOutStream = @import("io/find_byte_out_stream.zig").FindByteOutStream;
pub const findByteOutStream = @import("io/find_byte_out_stream.zig").findByteOutStream;
pub const Packing = @import("io/serialization.zig").Packing;
pub const Serializer = @import("io/serialization.zig").Serializer;

View File

@ -0,0 +1,148 @@
const std = @import("../std.zig");
const io = std.io;
const mem = std.mem;
const assert = std.debug.assert;
/// Automatically inserts indentation of written data by keeping
/// track of the current indentation level
pub fn AutoIndentingStream(comptime UnderlyingWriter: type) type {
return struct {
const Self = @This();
pub const Error = UnderlyingWriter.Error;
pub const Writer = io.Writer(*Self, Error, write);
underlying_writer: UnderlyingWriter,
indent_count: usize = 0,
indent_delta: usize,
current_line_empty: bool = true,
indent_one_shot_count: usize = 0, // automatically popped when applied
applied_indent: usize = 0, // the most recently applied indent
indent_next_line: usize = 0, // not used until the next line
pub fn writer(self: *Self) Writer {
return .{ .context = self };
}
pub fn write(self: *Self, bytes: []const u8) Error!usize {
if (bytes.len == 0)
return @as(usize, 0);
try self.applyIndent();
return self.writeNoIndent(bytes);
}
// Change the indent delta without changing the final indentation level
pub fn setIndentDelta(self: *Self, indent_delta: usize) void {
if (self.indent_delta == indent_delta) {
return;
} else if (self.indent_delta > indent_delta) {
assert(self.indent_delta % indent_delta == 0);
self.indent_count = self.indent_count * (self.indent_delta / indent_delta);
} else {
// assert that the current indentation (in spaces) is a multiple of the new delta
assert((self.indent_count * self.indent_delta) % indent_delta == 0);
self.indent_count = self.indent_count / (indent_delta / self.indent_delta);
}
self.indent_delta = indent_delta;
}
fn writeNoIndent(self: *Self, bytes: []const u8) Error!usize {
if (bytes.len == 0)
return @as(usize, 0);
try self.underlying_writer.writeAll(bytes);
if (bytes[bytes.len - 1] == '\n')
self.resetLine();
return bytes.len;
}
pub fn insertNewline(self: *Self) Error!void {
_ = try self.writeNoIndent("\n");
}
fn resetLine(self: *Self) void {
self.current_line_empty = true;
self.indent_next_line = 0;
}
/// Insert a newline unless the current line is blank
pub fn maybeInsertNewline(self: *Self) Error!void {
if (!self.current_line_empty)
try self.insertNewline();
}
/// Push default indentation
pub fn pushIndent(self: *Self) void {
// Doesn't actually write any indentation.
// Just primes the stream to be able to write the correct indentation if it needs to.
self.indent_count += 1;
}
/// Push an indent that is automatically popped after being applied
pub fn pushIndentOneShot(self: *Self) void {
self.indent_one_shot_count += 1;
self.pushIndent();
}
/// Turns all one-shot indents into regular indents
/// Returns number of indents that must now be manually popped
pub fn lockOneShotIndent(self: *Self) usize {
var locked_count = self.indent_one_shot_count;
self.indent_one_shot_count = 0;
return locked_count;
}
/// Push an indent that should not take effect until the next line
pub fn pushIndentNextLine(self: *Self) void {
self.indent_next_line += 1;
self.pushIndent();
}
pub fn popIndent(self: *Self) void {
assert(self.indent_count != 0);
self.indent_count -= 1;
if (self.indent_next_line > 0)
self.indent_next_line -= 1;
}
/// Writes ' ' bytes if the current line is empty
fn applyIndent(self: *Self) Error!void {
const current_indent = self.currentIndent();
if (self.current_line_empty and current_indent > 0) {
try self.underlying_writer.writeByteNTimes(' ', current_indent);
self.applied_indent = current_indent;
}
self.indent_count -= self.indent_one_shot_count;
self.indent_one_shot_count = 0;
self.current_line_empty = false;
}
/// Checks to see if the most recent indentation exceeds the currently pushed indents
pub fn isLineOverIndented(self: *Self) bool {
if (self.current_line_empty) return false;
return self.applied_indent > self.currentIndent();
}
fn currentIndent(self: *Self) usize {
var indent_current: usize = 0;
if (self.indent_count > 0) {
const indent_count = self.indent_count - self.indent_next_line;
indent_current = indent_count * self.indent_delta;
}
return indent_current;
}
};
}
pub fn autoIndentingStream(
indent_delta: usize,
underlying_writer: anytype,
) AutoIndentingStream(@TypeOf(underlying_writer)) {
return AutoIndentingStream(@TypeOf(underlying_writer)){
.underlying_writer = underlying_writer,
.indent_delta = indent_delta,
};
}
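Editor's note: a short usage sketch for the new stream, writing into an `ArrayList` with a four-space delta (names illustrative):

const std = @import("std");

test "auto-indenting writer" {
    var buf = std.ArrayList(u8).init(std.testing.allocator);
    defer buf.deinit();

    var ais = std.io.autoIndentingStream(4, buf.outStream());
    const w = ais.writer();

    try w.writeAll("fn main() void {\n");
    ais.pushIndent(); // primes the stream; nothing is written yet
    try w.writeAll("foo();\n"); // indentation is applied lazily here
    ais.popIndent();
    try w.writeAll("}\n");

    std.testing.expectEqualSlices(u8, "fn main() void {\n    foo();\n}\n", buf.items);
}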

View File

@ -0,0 +1,55 @@
const std = @import("../std.zig");
const io = std.io;
const mem = std.mem;
const assert = std.debug.assert;
/// Used to detect if the data written to a stream differs from a source buffer
pub fn ChangeDetectionStream(comptime WriterType: type) type {
return struct {
const Self = @This();
pub const Error = WriterType.Error;
pub const Writer = io.Writer(*Self, Error, write);
anything_changed: bool,
underlying_writer: WriterType,
source_index: usize,
source: []const u8,
pub fn writer(self: *Self) Writer {
return .{ .context = self };
}
fn write(self: *Self, bytes: []const u8) Error!usize {
if (!self.anything_changed) {
const end = self.source_index + bytes.len;
if (end > self.source.len) {
self.anything_changed = true;
} else {
const src_slice = self.source[self.source_index..end];
self.source_index += bytes.len;
if (!mem.eql(u8, bytes, src_slice)) {
self.anything_changed = true;
}
}
}
return self.underlying_writer.write(bytes);
}
pub fn changeDetected(self: *Self) bool {
return self.anything_changed or (self.source_index != self.source.len);
}
};
}
pub fn changeDetectionStream(
source: []const u8,
underlying_writer: anytype,
) ChangeDetectionStream(@TypeOf(underlying_writer)) {
return ChangeDetectionStream(@TypeOf(underlying_writer)){
.anything_changed = false,
.underlying_writer = underlying_writer,
.source_index = 0,
.source = source,
};
}
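Editor's note: this underpins zig fmt's change detection (the `anything_changed` result of `std.zig.render` seen later in this diff). A minimal sketch:

const std = @import("std");

test "detect a difference between source and rendered output" {
    var buf = std.ArrayList(u8).init(std.testing.allocator);
    defer buf.deinit();

    var cds = std.io.changeDetectionStream("hello", buf.outStream());
    try cds.writer().writeAll("hellp"); // differs in the last byte
    std.testing.expect(cds.changeDetected());
}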

View File

@ -0,0 +1,40 @@
const std = @import("../std.zig");
const io = std.io;
const assert = std.debug.assert;
/// A writer that records whether a given byte has been written to it,
/// while passing all data through to the underlying writer.
pub fn FindByteOutStream(comptime UnderlyingWriter: type) type {
return struct {
const Self = @This();
pub const Error = UnderlyingWriter.Error;
pub const Writer = io.Writer(*Self, Error, write);
underlying_writer: UnderlyingWriter,
byte_found: bool,
byte: u8,
pub fn writer(self: *Self) Writer {
return .{ .context = self };
}
fn write(self: *Self, bytes: []const u8) Error!usize {
if (!self.byte_found) {
self.byte_found = blk: {
for (bytes) |b|
if (b == self.byte) break :blk true;
break :blk false;
};
}
return self.underlying_writer.write(bytes);
}
};
}
pub fn findByteOutStream(byte: u8, underlying_writer: anytype) FindByteOutStream(@TypeOf(underlying_writer)) {
return FindByteOutStream(@TypeOf(underlying_writer)){
.underlying_writer = underlying_writer,
.byte = byte,
.byte_found = false,
};
}
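Editor's note: usage sketch; wrap any writer and poll `byte_found` while the data passes through untouched:

const std = @import("std");

test "find a byte while writing" {
    var buf = std.ArrayList(u8).init(std.testing.allocator);
    defer buf.deinit();

    var fbs = std.io.findByteOutStream('\n', buf.outStream());
    try fbs.writer().writeAll("no newline yet");
    std.testing.expect(!fbs.byte_found);
    try fbs.writer().writeAll("...\n");
    std.testing.expect(fbs.byte_found);
}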

View File

@ -705,34 +705,34 @@ pub fn Vector(comptime len: u32, comptime child: type) type {
pub fn cast(comptime DestType: type, target: anytype) DestType {
const TargetType = @TypeOf(target);
switch (@typeInfo(DestType)) {
.Pointer => {
.Pointer => |dest_ptr| {
switch (@typeInfo(TargetType)) {
.Int, .ComptimeInt => {
return @intToPtr(DestType, target);
},
.Pointer => |ptr| {
return @ptrCast(DestType, @alignCast(ptr.alignment, target));
return @ptrCast(DestType, @alignCast(dest_ptr.alignment, target));
},
.Optional => |opt| {
if (@typeInfo(opt.child) == .Pointer) {
return @ptrCast(DestType, @alignCast(@alignOf(opt.child.Child), target));
return @ptrCast(DestType, @alignCast(dest_ptr.alignment, target));
}
},
else => {},
}
},
.Optional => |opt| {
if (@typeInfo(opt.child) == .Pointer) {
.Optional => |dest_opt| {
if (@typeInfo(dest_opt.child) == .Pointer) {
switch (@typeInfo(TargetType)) {
.Int, .ComptimeInt => {
return @intToPtr(DestType, target);
},
.Pointer => |ptr| {
return @ptrCast(DestType, @alignCast(ptr.alignment, target));
.Pointer => {
return @ptrCast(DestType, @alignCast(@alignOf(dest_opt.child.Child), target));
},
.Optional => |target_opt| {
if (@typeInfo(target_opt.child) == .Pointer) {
return @ptrCast(DestType, @alignCast(@alignOf(target_opt.child.Child), target));
return @ptrCast(DestType, @alignCast(@alignOf(dest_opt.child.Child), target));
}
},
else => {},

View File

@ -46,6 +46,7 @@ pub fn TrailerFlags(comptime Fields: type) type {
??struct_field.field_type,
@as(?struct_field.field_type, null),
),
.is_comptime = false,
};
}
break :blk @Type(.{

View File

@ -1164,7 +1164,7 @@ fn linuxLookupNameFromDnsSearch(
}
const search = if (rc.search.isNull() or dots >= rc.ndots or mem.endsWith(u8, name, "."))
&[_]u8{}
""
else
rc.search.span();
@ -1641,6 +1641,9 @@ pub const StreamServer = struct {
/// by the socket buffer limits, not by the system memory.
SystemResources,
/// Socket is not listening for new connections.
SocketNotListening,
ProtocolFailure,
/// Firewall rules forbid connection.

View File

@ -2512,13 +2512,14 @@ pub fn readlinkatZ(dirfd: fd_t, file_path: [*:0]const u8, out_buffer: []u8) Read
}
}
pub const SetIdError = error{
ResourceLimitReached,
pub const SetEidError = error{
InvalidUserId,
PermissionDenied,
} || UnexpectedError;
};
pub fn setuid(uid: u32) SetIdError!void {
pub const SetIdError = error{ResourceLimitReached} || SetEidError || UnexpectedError;
pub fn setuid(uid: uid_t) SetIdError!void {
switch (errno(system.setuid(uid))) {
0 => return,
EAGAIN => return error.ResourceLimitReached,
@ -2528,7 +2529,16 @@ pub fn setuid(uid: u32) SetIdError!void {
}
}
pub fn setreuid(ruid: u32, euid: u32) SetIdError!void {
pub fn seteuid(uid: uid_t) SetEidError!void {
switch (errno(system.seteuid(uid))) {
0 => return,
EINVAL => return error.InvalidUserId,
EPERM => return error.PermissionDenied,
else => |err| return unexpectedErrno(err),
}
}
pub fn setreuid(ruid: uid_t, euid: uid_t) SetIdError!void {
switch (errno(system.setreuid(ruid, euid))) {
0 => return,
EAGAIN => return error.ResourceLimitReached,
@ -2538,7 +2548,7 @@ pub fn setreuid(ruid: u32, euid: u32) SetIdError!void {
}
}
pub fn setgid(gid: u32) SetIdError!void {
pub fn setgid(gid: gid_t) SetIdError!void {
switch (errno(system.setgid(gid))) {
0 => return,
EAGAIN => return error.ResourceLimitReached,
@ -2548,7 +2558,16 @@ pub fn setgid(gid: u32) SetIdError!void {
}
}
pub fn setregid(rgid: u32, egid: u32) SetIdError!void {
pub fn setegid(gid: gid_t) SetEidError!void {
switch (errno(system.setegid(gid))) {
0 => return,
EINVAL => return error.InvalidUserId,
EPERM => return error.PermissionDenied,
else => |err| return unexpectedErrno(err),
}
}
pub fn setregid(rgid: gid_t, egid: gid_t) SetIdError!void {
switch (errno(system.setregid(rgid, egid))) {
0 => return,
EAGAIN => return error.ResourceLimitReached,
@ -2802,6 +2821,9 @@ pub const AcceptError = error{
/// by the socket buffer limits, not by the system memory.
SystemResources,
/// Socket is not listening for new connections.
SocketNotListening,
ProtocolFailure,
/// Firewall rules forbid connection.
@ -2870,7 +2892,7 @@ pub fn accept(
EBADF => unreachable, // always a race condition
ECONNABORTED => return error.ConnectionAborted,
EFAULT => unreachable,
EINVAL => unreachable,
EINVAL => return error.SocketNotListening,
ENOTSOCK => unreachable,
EMFILE => return error.ProcessFdQuotaExceeded,
ENFILE => return error.SystemFdQuotaExceeded,
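Editor's note: with EINVAL mapped to `error.SocketNotListening` instead of `unreachable`, callers can treat a shut-down listener as a recoverable condition. A hedged sketch (the exact four-argument `os.accept` signature is assumed from this era of the API; the helper name is hypothetical):

const std = @import("std");
const os = std.os;

// Hypothetical helper: returns null when the listening socket was closed
// out from under us, instead of crashing on what used to be unreachable.
fn acceptOrNull(listener: os.fd_t) !?os.fd_t {
    return os.accept(listener, null, null, 0) catch |err| switch (err) {
        error.SocketNotListening => null, // was `unreachable` before this change
        else => |e| return e,
    };
}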
@ -5328,3 +5350,71 @@ pub fn signalfd(fd: fd_t, mask: *const sigset_t, flags: u32) !fd_t {
else => |err| return std.os.unexpectedErrno(err),
}
}
pub const SyncError = error{
InputOutput,
NoSpaceLeft,
DiskQuota,
AccessDenied,
} || UnexpectedError;
/// Write all pending file contents and metadata modifications to all filesystems.
pub fn sync() void {
system.sync();
}
/// Write all pending file contents and metadata modifications to the filesystem which contains the specified file.
pub fn syncfs(fd: fd_t) SyncError!void {
const rc = system.syncfs(fd);
switch (errno(rc)) {
0 => return,
EBADF, EINVAL, EROFS => unreachable,
EIO => return error.InputOutput,
ENOSPC => return error.NoSpaceLeft,
EDQUOT => return error.DiskQuota,
else => |err| return std.os.unexpectedErrno(err),
}
}
/// Write all pending file contents and metadata modifications for the specified file descriptor to the underlying filesystem.
pub fn fsync(fd: fd_t) SyncError!void {
if (std.Target.current.os.tag == .windows) {
if (windows.kernel32.FlushFileBuffers(fd) != 0)
return;
switch (windows.kernel32.GetLastError()) {
.SUCCESS => return,
.INVALID_HANDLE => unreachable,
.ACCESS_DENIED => return error.AccessDenied, // a sync was performed but the system couldn't update the access time
.UNEXP_NET_ERR => return error.InputOutput,
else => return error.InputOutput,
}
}
const rc = system.fsync(fd);
switch (errno(rc)) {
0 => return,
EBADF, EINVAL, EROFS => unreachable,
EIO => return error.InputOutput,
ENOSPC => return error.NoSpaceLeft,
EDQUOT => return error.DiskQuota,
else => |err| return std.os.unexpectedErrno(err),
}
}
/// Write all pending file contents for the specified file descriptor to the underlying filesystem, but not necessarily the metadata.
pub fn fdatasync(fd: fd_t) SyncError!void {
if (std.Target.current.os.tag == .windows) {
return fsync(fd) catch |err| switch (err) {
SyncError.AccessDenied => return, // fdatasync doesn't promise that the access time was synced
else => return err,
};
}
const rc = system.fdatasync(fd);
switch (errno(rc)) {
0 => return,
EBADF, EINVAL, EROFS => unreachable,
EIO => return error.InputOutput,
ENOSPC => return error.NoSpaceLeft,
EDQUOT => return error.DiskQuota,
else => |err| return std.os.unexpectedErrno(err),
}
}

View File

@ -7,9 +7,13 @@ const std = @import("../../std.zig");
const assert = std.debug.assert;
const maxInt = std.math.maxInt;
// See: https://opensource.apple.com/source/xnu/xnu-6153.141.1/bsd/sys/_types.h.auto.html
// TODO: audit mode_t/pid_t, should likely be u16/i32
pub const fd_t = c_int;
pub const pid_t = c_int;
pub const mode_t = c_uint;
pub const uid_t = u32;
pub const gid_t = u32;
pub const in_port_t = u16;
pub const sa_family_t = u8;
@ -79,8 +83,8 @@ pub const Stat = extern struct {
mode: u16,
nlink: u16,
ino: ino_t,
uid: u32,
gid: u32,
uid: uid_t,
gid: gid_t,
rdev: i32,
atimesec: isize,
atimensec: isize,

View File

@ -9,10 +9,17 @@ const maxInt = std.math.maxInt;
pub fn S_ISCHR(m: u32) bool {
return m & S_IFMT == S_IFCHR;
}
// See:
// - https://gitweb.dragonflybsd.org/dragonfly.git/blob/HEAD:/include/unistd.h
// - https://gitweb.dragonflybsd.org/dragonfly.git/blob/HEAD:/sys/sys/types.h
// TODO: mode_t should probably be changed to a u16, audit pid_t/off_t as well
pub const fd_t = c_int;
pub const pid_t = c_int;
pub const off_t = c_long;
pub const mode_t = c_uint;
pub const uid_t = u32;
pub const gid_t = u32;
pub const ENOTSUP = EOPNOTSUPP;
pub const EWOULDBLOCK = EAGAIN;
@ -151,8 +158,8 @@ pub const Stat = extern struct {
dev: c_uint,
mode: c_ushort,
padding1: u16,
uid: c_uint,
gid: c_uint,
uid: uid_t,
gid: gid_t,
rdev: c_uint,
atim: timespec,
mtim: timespec,
@ -511,7 +518,7 @@ pub const siginfo_t = extern struct {
si_errno: c_int,
si_code: c_int,
si_pid: c_int,
si_uid: c_uint,
si_uid: uid_t,
si_status: c_int,
si_addr: ?*c_void,
si_value: union_sigval,

View File

@ -6,8 +6,12 @@
const std = @import("../../std.zig");
const maxInt = std.math.maxInt;
// See https://svnweb.freebsd.org/base/head/sys/sys/_types.h?view=co
// TODO: audit pid_t/mode_t. They should likely be i32 and u16, respectively
pub const fd_t = c_int;
pub const pid_t = c_int;
pub const uid_t = u32;
pub const gid_t = u32;
pub const mode_t = c_uint;
pub const socklen_t = u32;
@ -128,8 +132,8 @@ pub const Stat = extern struct {
mode: u16,
__pad0: u16,
uid: u32,
gid: u32,
uid: uid_t,
gid: gid_t,
__pad1: u32,
rdev: u64,

View File

@ -29,7 +29,7 @@ const is_mips = builtin.arch.isMIPS();
pub const pid_t = i32;
pub const fd_t = i32;
pub const uid_t = i32;
pub const uid_t = u32;
pub const gid_t = u32;
pub const clock_t = isize;
@ -853,7 +853,7 @@ pub const signalfd_siginfo = extern struct {
errno: i32,
code: i32,
pid: u32,
uid: u32,
uid: uid_t,
fd: i32,
tid: u32,
band: u32,
@ -1491,10 +1491,10 @@ pub const Statx = extern struct {
nlink: u32,
/// User ID of owner
uid: u32,
uid: uid_t,
/// Group ID of owner
gid: u32,
gid: gid_t,
/// File type and mode
mode: u16,

View File

@ -7,6 +7,7 @@
const std = @import("../../../std.zig");
const pid_t = linux.pid_t;
const uid_t = linux.uid_t;
const gid_t = linux.gid_t;
const clock_t = linux.clock_t;
const stack_t = linux.stack_t;
const sigset_t = linux.sigset_t;
@ -523,8 +524,8 @@ pub const Stat = extern struct {
nlink: usize,
mode: u32,
uid: u32,
gid: u32,
uid: uid_t,
gid: gid_t,
__pad0: u32,
rdev: u64,
size: off_t,

View File

@ -655,7 +655,7 @@ pub fn nanosleep(req: *const timespec, rem: ?*timespec) usize {
return syscall2(.nanosleep, @ptrToInt(req), @ptrToInt(rem));
}
pub fn setuid(uid: u32) usize {
pub fn setuid(uid: uid_t) usize {
if (@hasField(SYS, "setuid32")) {
return syscall1(.setuid32, uid);
} else {
@ -663,7 +663,7 @@ pub fn setuid(uid: u32) usize {
}
}
pub fn setgid(gid: u32) usize {
pub fn setgid(gid: gid_t) usize {
if (@hasField(SYS, "setgid32")) {
return syscall1(.setgid32, gid);
} else {
@ -671,7 +671,7 @@ pub fn setgid(gid: u32) usize {
}
}
pub fn setreuid(ruid: u32, euid: u32) usize {
pub fn setreuid(ruid: uid_t, euid: uid_t) usize {
if (@hasField(SYS, "setreuid32")) {
return syscall2(.setreuid32, ruid, euid);
} else {
@ -679,7 +679,7 @@ pub fn setreuid(ruid: u32, euid: u32) usize {
}
}
pub fn setregid(rgid: u32, egid: u32) usize {
pub fn setregid(rgid: gid_t, egid: gid_t) usize {
if (@hasField(SYS, "setregid32")) {
return syscall2(.setregid32, rgid, egid);
} else {
@ -687,47 +687,61 @@ pub fn setregid(rgid: u32, egid: u32) usize {
}
}
pub fn getuid() u32 {
pub fn getuid() uid_t {
if (@hasField(SYS, "getuid32")) {
return @as(u32, syscall0(.getuid32));
return @as(uid_t, syscall0(.getuid32));
} else {
return @as(u32, syscall0(.getuid));
return @as(uid_t, syscall0(.getuid));
}
}
pub fn getgid() u32 {
pub fn getgid() gid_t {
if (@hasField(SYS, "getgid32")) {
return @as(u32, syscall0(.getgid32));
return @as(gid_t, syscall0(.getgid32));
} else {
return @as(u32, syscall0(.getgid));
return @as(gid_t, syscall0(.getgid));
}
}
pub fn geteuid() u32 {
pub fn geteuid() uid_t {
if (@hasField(SYS, "geteuid32")) {
return @as(u32, syscall0(.geteuid32));
return @as(uid_t, syscall0(.geteuid32));
} else {
return @as(u32, syscall0(.geteuid));
return @as(uid_t, syscall0(.geteuid));
}
}
pub fn getegid() u32 {
pub fn getegid() gid_t {
if (@hasField(SYS, "getegid32")) {
return @as(u32, syscall0(.getegid32));
return @as(gid_t, syscall0(.getegid32));
} else {
return @as(u32, syscall0(.getegid));
return @as(gid_t, syscall0(.getegid));
}
}
pub fn seteuid(euid: u32) usize {
return setreuid(std.math.maxInt(u32), euid);
pub fn seteuid(euid: uid_t) usize {
// We use setresuid here instead of setreuid to ensure that the saved uid
// is not changed. This is what musl and recent glibc versions do as well.
//
// The setresuid(2) man page says that if -1 is passed the corresponding
// id will not be changed. Since uid_t is unsigned, this wraps around to the
// max value in C.
comptime assert(@typeInfo(uid_t) == .Int and !@typeInfo(uid_t).Int.is_signed);
return setresuid(std.math.maxInt(uid_t), euid, std.math.maxInt(uid_t));
}
pub fn setegid(egid: u32) usize {
return setregid(std.math.maxInt(u32), egid);
pub fn setegid(egid: gid_t) usize {
// We use setresgid here instead of setregid to ensure that the saved gid
// is not changed. This is what musl and recent glibc versions do as well.
//
// The setresgid(2) man page says that if -1 is passed the corresponding
// id will not be changed. Since gid_t is unsigned, this wraps around to the
// max value in C.
comptime assert(@typeInfo(gid_t) == .Int and !@typeInfo(gid_t).Int.is_signed);
return setresgid(std.math.maxInt(gid_t), egid, std.math.maxInt(gid_t));
}
pub fn getresuid(ruid: *u32, euid: *u32, suid: *u32) usize {
pub fn getresuid(ruid: *uid_t, euid: *uid_t, suid: *uid_t) usize {
if (@hasField(SYS, "getresuid32")) {
return syscall3(.getresuid32, @ptrToInt(ruid), @ptrToInt(euid), @ptrToInt(suid));
} else {
@ -735,7 +749,7 @@ pub fn getresuid(ruid: *u32, euid: *u32, suid: *u32) usize {
}
}
pub fn getresgid(rgid: *u32, egid: *u32, sgid: *u32) usize {
pub fn getresgid(rgid: *gid_t, egid: *gid_t, sgid: *gid_t) usize {
if (@hasField(SYS, "getresgid32")) {
return syscall3(.getresgid32, @ptrToInt(rgid), @ptrToInt(egid), @ptrToInt(sgid));
} else {
@ -743,7 +757,7 @@ pub fn getresgid(rgid: *u32, egid: *u32, sgid: *u32) usize {
}
}
pub fn setresuid(ruid: u32, euid: u32, suid: u32) usize {
pub fn setresuid(ruid: uid_t, euid: uid_t, suid: uid_t) usize {
if (@hasField(SYS, "setresuid32")) {
return syscall3(.setresuid32, ruid, euid, suid);
} else {
@ -751,7 +765,7 @@ pub fn setresuid(ruid: u32, euid: u32, suid: u32) usize {
}
}
pub fn setresgid(rgid: u32, egid: u32, sgid: u32) usize {
pub fn setresgid(rgid: gid_t, egid: gid_t, sgid: gid_t) usize {
if (@hasField(SYS, "setresgid32")) {
return syscall3(.setresgid32, rgid, egid, sgid);
} else {
@ -759,7 +773,7 @@ pub fn setresgid(rgid: u32, egid: u32, sgid: u32) usize {
}
}
pub fn getgroups(size: usize, list: *u32) usize {
pub fn getgroups(size: usize, list: *gid_t) usize {
if (@hasField(SYS, "getgroups32")) {
return syscall2(.getgroups32, size, @ptrToInt(list));
} else {
@ -767,7 +781,7 @@ pub fn getgroups(size: usize, list: *u32) usize {
}
}
pub fn setgroups(size: usize, list: *const u32) usize {
pub fn setgroups(size: usize, list: *const gid_t) usize {
if (@hasField(SYS, "setgroups32")) {
return syscall2(.setgroups32, size, @ptrToInt(list));
} else {
@ -1226,6 +1240,22 @@ pub fn bpf(cmd: BPF.Cmd, attr: *BPF.Attr, size: u32) usize {
return syscall3(.bpf, @enumToInt(cmd), @ptrToInt(attr), size);
}
pub fn sync() void {
_ = syscall0(.sync);
}
pub fn syncfs(fd: fd_t) usize {
return syscall1(.syncfs, @bitCast(usize, @as(isize, fd)));
}
pub fn fsync(fd: fd_t) usize {
return syscall1(.fsync, @bitCast(usize, @as(isize, fd)));
}
pub fn fdatasync(fd: fd_t) usize {
return syscall1(.fdatasync, @bitCast(usize, @as(isize, fd)));
}
test "" {
if (builtin.os.tag == .linux) {
_ = @import("linux/test.zig");

View File

@ -555,3 +555,39 @@ test "signalfd" {
return error.SkipZigTest;
_ = std.os.signalfd;
}
test "sync" {
if (builtin.os.tag != .linux)
return error.SkipZigTest;
var tmp = tmpDir(.{});
defer tmp.cleanup();
const test_out_file = "os_tmp_test";
const file = try tmp.dir.createFile(test_out_file, .{});
defer {
file.close();
tmp.dir.deleteFile(test_out_file) catch {};
}
os.sync();
try os.syncfs(file.handle);
}
test "fsync" {
if (builtin.os.tag != .linux and builtin.os.tag != .windows)
return error.SkipZigTest;
var tmp = tmpDir(.{});
defer tmp.cleanup();
const test_out_file = "os_tmp_test";
const file = try tmp.dir.createFile(test_out_file, .{});
defer {
file.close();
tmp.dir.deleteFile(test_out_file) catch {};
}
try os.fsync(file.handle);
try os.fdatasync(file.handle);
}

View File

@ -287,3 +287,5 @@ pub extern "kernel32" fn K32GetWsChangesEx(hProcess: HANDLE, lpWatchInfoEx: PPSA
pub extern "kernel32" fn K32InitializeProcessForWsWatch(hProcess: HANDLE) callconv(.Stdcall) BOOL;
pub extern "kernel32" fn K32QueryWorkingSet(hProcess: HANDLE, pv: PVOID, cb: DWORD) callconv(.Stdcall) BOOL;
pub extern "kernel32" fn K32QueryWorkingSetEx(hProcess: HANDLE, pv: PVOID, cb: DWORD) callconv(.Stdcall) BOOL;
pub extern "kernel32" fn FlushFileBuffers(hFile: HANDLE) callconv(.Stdcall) BOOL;

View File

@ -578,8 +578,8 @@ fn testWindowsCmdLine(input_cmd_line: [*]const u8, expected_args: []const []cons
}
pub const UserInfo = struct {
uid: u32,
gid: u32,
uid: os.uid_t,
gid: os.gid_t,
};
/// POSIX function which gets a uid from username.
@ -607,8 +607,8 @@ pub fn posixGetUserInfo(name: []const u8) !UserInfo {
var buf: [std.mem.page_size]u8 = undefined;
var name_index: usize = 0;
var state = State.Start;
var uid: u32 = 0;
var gid: u32 = 0;
var uid: os.uid_t = 0;
var gid: os.gid_t = 0;
while (true) {
const amt_read = try reader.read(buf[0..]);

View File

@ -197,7 +197,7 @@ pub const Progress = struct {
var maybe_node: ?*Node = &self.root;
while (maybe_node) |node| {
if (need_ellipse) {
self.bufWrite(&end, "...", .{});
self.bufWrite(&end, "... ", .{});
}
need_ellipse = false;
if (node.name.len != 0 or node.estimated_total_items != null) {
@ -218,7 +218,7 @@ pub const Progress = struct {
maybe_node = node.recently_updated_child;
}
if (need_ellipse) {
self.bufWrite(&end, "...", .{});
self.bufWrite(&end, "... ", .{});
}
}
@ -253,7 +253,7 @@ pub const Progress = struct {
const bytes_needed_for_esc_codes_at_end = if (std.builtin.os.tag == .windows) 0 else 11;
const max_end = self.output_buffer.len - bytes_needed_for_esc_codes_at_end;
if (end.* > max_end) {
const suffix = "...";
const suffix = "... ";
self.columns_written = self.columns_written - (end.* - max_end) + suffix.len;
std.mem.copy(u8, self.output_buffer[max_end..], suffix);
end.* = max_end + suffix.len;

View File

@ -40,7 +40,7 @@ pub fn main() anyerror!void {
test_node.activate();
progress.refresh();
if (progress.terminal == null) {
std.debug.print("{}/{} {}...", .{ i + 1, test_fn_list.len, test_fn.name });
std.debug.print("{}/{} {}... ", .{ i + 1, test_fn_list.len, test_fn.name });
}
const result = if (test_fn.async_frame_size) |size| switch (io_mode) {
.evented => blk: {

View File

@ -3,11 +3,15 @@
// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
// The MIT license requires this copyright notice to be included in all copies
// and substantial portions of the software.
pub const ArrayHashMap = array_hash_map.ArrayHashMap;
pub const ArrayHashMapUnmanaged = array_hash_map.ArrayHashMapUnmanaged;
pub const ArrayList = @import("array_list.zig").ArrayList;
pub const ArrayListAligned = @import("array_list.zig").ArrayListAligned;
pub const ArrayListAlignedUnmanaged = @import("array_list.zig").ArrayListAlignedUnmanaged;
pub const ArrayListSentineled = @import("array_list_sentineled.zig").ArrayListSentineled;
pub const ArrayListUnmanaged = @import("array_list.zig").ArrayListUnmanaged;
pub const AutoArrayHashMap = array_hash_map.AutoArrayHashMap;
pub const AutoArrayHashMapUnmanaged = array_hash_map.AutoArrayHashMapUnmanaged;
pub const AutoHashMap = hash_map.AutoHashMap;
pub const AutoHashMapUnmanaged = hash_map.AutoHashMapUnmanaged;
pub const BloomFilter = @import("bloom_filter.zig").BloomFilter;
@ -32,10 +36,13 @@ pub const SinglyLinkedList = @import("linked_list.zig").SinglyLinkedList;
pub const SpinLock = @import("spinlock.zig").SpinLock;
pub const StringHashMap = hash_map.StringHashMap;
pub const StringHashMapUnmanaged = hash_map.StringHashMapUnmanaged;
pub const StringArrayHashMap = array_hash_map.StringArrayHashMap;
pub const StringArrayHashMapUnmanaged = array_hash_map.StringArrayHashMapUnmanaged;
pub const TailQueue = @import("linked_list.zig").TailQueue;
pub const Target = @import("target.zig").Target;
pub const Thread = @import("thread.zig").Thread;
pub const array_hash_map = @import("array_hash_map.zig");
pub const atomic = @import("atomic.zig");
pub const base64 = @import("base64.zig");
pub const build = @import("build.zig");

View File

@ -101,7 +101,7 @@ pub const Target = struct {
/// Latest Windows version that the Zig Standard Library is aware of
pub const latest = WindowsVersion.win10_20h1;
pub const Range = struct {
min: WindowsVersion,
max: WindowsVersion,

View File

@ -615,6 +615,17 @@ test "zig fmt: infix operator and then multiline string literal" {
);
}
test "zig fmt: infix operator and then multiline string literal" {
try testCanonical(
\\const x = "" ++
\\ \\ hi0
\\ \\ hi1
\\ \\ hi2
\\;
\\
);
}
test "zig fmt: C pointers" {
try testCanonical(
\\const Ptr = [*c]i32;
@ -885,6 +896,28 @@ test "zig fmt: 2nd arg multiline string" {
);
}
test "zig fmt: 2nd arg multiline string many args" {
try testCanonical(
\\comptime {
\\ cases.addAsm("hello world linux x86_64",
\\ \\.text
\\ , "Hello, world!\n", "Hello, world!\n");
\\}
\\
);
}
test "zig fmt: final arg multiline string" {
try testCanonical(
\\comptime {
\\ cases.addAsm("hello world linux x86_64", "Hello, world!\n",
\\ \\.text
\\ );
\\}
\\
);
}
test "zig fmt: if condition wraps" {
try testTransform(
\\comptime {
@ -915,6 +948,11 @@ test "zig fmt: if condition wraps" {
\\ var a = if (a) |*f| x: {
\\ break :x &a.b;
\\ } else |err| err;
\\ var a = if (cond and
\\ cond) |*f|
\\ x: {
\\ break :x &a.b;
\\ } else |err| err;
\\}
,
\\comptime {
@ -951,6 +989,35 @@ test "zig fmt: if condition wraps" {
\\ var a = if (a) |*f| x: {
\\ break :x &a.b;
\\ } else |err| err;
\\ var a = if (cond and
\\ cond) |*f|
\\ x: {
\\ break :x &a.b;
\\ } else |err| err;
\\}
\\
);
}
test "zig fmt: if condition has line break but must not wrap" {
try testCanonical(
\\comptime {
\\ if (self.user_input_options.put(
\\ name,
\\ UserInputOption{
\\ .name = name,
\\ .used = false,
\\ },
\\ ) catch unreachable) |*prev_value| {
\\ foo();
\\ bar();
\\ }
\\ if (put(
\\ a,
\\ b,
\\ )) {
\\ foo();
\\ }
\\}
\\
);
@ -977,6 +1044,18 @@ test "zig fmt: if condition has line break but must not wrap" {
);
}
test "zig fmt: function call with multiline argument" {
try testCanonical(
\\comptime {
\\ self.user_input_options.put(name, UserInputOption{
\\ .name = name,
\\ .used = false,
\\ });
\\}
\\
);
}
test "zig fmt: same-line doc comment on variable declaration" {
try testTransform(
\\pub const MAP_ANONYMOUS = 0x1000; /// allocated from memory, swap space
@ -1228,7 +1307,7 @@ test "zig fmt: array literal with hint" {
\\const a = []u8{
\\ 1, 2,
\\ 3, //
\\ 4,
\\ 4,
\\ 5, 6,
\\ 7,
\\};
@ -1293,7 +1372,7 @@ test "zig fmt: multiline string parameter in fn call with trailing comma" {
\\ \\ZIG_C_HEADER_FILES {}
\\ \\ZIG_DIA_GUIDS_LIB {}
\\ \\
\\ ,
\\ ,
\\ std.cstr.toSliceConst(c.ZIG_CMAKE_BINARY_DIR),
\\ std.cstr.toSliceConst(c.ZIG_CXX_COMPILER),
\\ std.cstr.toSliceConst(c.ZIG_DIA_GUIDS_LIB),
@ -2885,20 +2964,20 @@ test "zig fmt: multiline string in array" {
try testCanonical(
\\const Foo = [][]const u8{
\\ \\aaa
\\,
\\ ,
\\ \\bbb
\\};
\\
\\fn bar() void {
\\ const Foo = [][]const u8{
\\ \\aaa
\\ ,
\\ ,
\\ \\bbb
\\ };
\\ const Bar = [][]const u8{ // comment here
\\ \\aaa
\\ \\
\\ , // and another comment can go here
\\ , // and another comment can go here
\\ \\bbb
\\ };
\\}
@ -3214,6 +3293,34 @@ test "zig fmt: C var args" {
);
}
test "zig fmt: Only indent multiline string literals in function calls" {
try testCanonical(
\\test "zig fmt:" {
\\ try testTransform(
\\ \\const X = struct {
\\ \\ foo: i32, bar: i8 };
\\ ,
\\ \\const X = struct {
\\ \\ foo: i32, bar: i8
\\ \\};
\\ \\
\\ );
\\}
\\
);
}
test "zig fmt: Don't add extra newline after if" {
try testCanonical(
\\pub fn atomicSymLink(allocator: *Allocator, existing_path: []const u8, new_path: []const u8) !void {
\\ if (cwd().symLink(existing_path, new_path, .{})) {
\\ return;
\\ }
\\}
\\
);
}
const std = @import("std");
const mem = std.mem;
const warn = std.debug.warn;
@ -3256,7 +3363,8 @@ fn testParse(source: []const u8, allocator: *mem.Allocator, anything_changed: *b
var buffer = std.ArrayList(u8).init(allocator);
errdefer buffer.deinit();
anything_changed.* = try std.zig.render(allocator, buffer.outStream(), tree);
const outStream = buffer.outStream();
anything_changed.* = try std.zig.render(allocator, outStream, tree);
return buffer.toOwnedSlice();
}
fn testTransform(source: []const u8, expected_source: []const u8) !void {

File diff suppressed because it is too large

View File

@ -1175,6 +1175,7 @@ pub const Tokenizer = struct {
},
.num_dot_dec => switch (c) {
'.' => {
result.id = .IntegerLiteral;
self.index -= 1;
state = .start;
break;
@ -1183,7 +1184,6 @@ pub const Tokenizer = struct {
state = .float_exponent_unsigned;
},
'0'...'9' => {
result.id = .FloatLiteral;
state = .float_fraction_dec;
},
else => {
@ -1769,6 +1769,7 @@ test "tokenizer - number literals decimal" {
testTokenize("7", &[_]Token.Id{.IntegerLiteral});
testTokenize("8", &[_]Token.Id{.IntegerLiteral});
testTokenize("9", &[_]Token.Id{.IntegerLiteral});
testTokenize("1..", &[_]Token.Id{ .IntegerLiteral, .Ellipsis2 });
testTokenize("0a", &[_]Token.Id{ .Invalid, .Identifier });
testTokenize("9b", &[_]Token.Id{ .Invalid, .Identifier });
testTokenize("1z", &[_]Token.Id{ .Invalid, .Identifier });

View File

@ -36,17 +36,17 @@ bin_file_path: []const u8,
/// It's rare for a decl to be exported, so we save memory by having a sparse map of
/// Decl pointers to details about them being exported.
/// The Export memory is owned by the `export_owners` table; the slice itself is owned by this table.
decl_exports: std.AutoHashMapUnmanaged(*Decl, []*Export) = .{},
decl_exports: std.AutoArrayHashMapUnmanaged(*Decl, []*Export) = .{},
/// We track which export is associated with the given symbol name for quick
/// detection of symbol collisions.
symbol_exports: std.StringHashMapUnmanaged(*Export) = .{},
symbol_exports: std.StringArrayHashMapUnmanaged(*Export) = .{},
/// This models the Decls that perform exports, so that `decl_exports` can be updated when a Decl
/// is modified. Note that the key of this table is not the Decl being exported, but the Decl that
/// is performing the export of another Decl.
/// This table owns the Export memory.
export_owners: std.AutoHashMapUnmanaged(*Decl, []*Export) = .{},
export_owners: std.AutoArrayHashMapUnmanaged(*Decl, []*Export) = .{},
/// Maps fully qualified namespaced names to the Decl struct for them.
decl_table: std.HashMapUnmanaged(Scope.NameHash, *Decl, Scope.name_hash_hash, Scope.name_hash_eql, false) = .{},
decl_table: std.ArrayHashMapUnmanaged(Scope.NameHash, *Decl, Scope.name_hash_hash, Scope.name_hash_eql, false) = .{},
link_error_flags: link.File.ErrorFlags = .{},
@ -57,13 +57,13 @@ work_queue: std.fifo.LinearFifo(WorkItem, .Dynamic),
/// The ErrorMsg memory is owned by the decl, using Module's allocator.
/// Note that a Decl can succeed but the Fn it represents can fail. In this case,
/// a Decl can have a failed_decls entry but have analysis status of success.
failed_decls: std.AutoHashMapUnmanaged(*Decl, *ErrorMsg) = .{},
failed_decls: std.AutoArrayHashMapUnmanaged(*Decl, *ErrorMsg) = .{},
/// Using a map here for consistency with the other fields here.
/// The ErrorMsg memory is owned by the `Scope`, using Module's allocator.
failed_files: std.AutoHashMapUnmanaged(*Scope, *ErrorMsg) = .{},
failed_files: std.AutoArrayHashMapUnmanaged(*Scope, *ErrorMsg) = .{},
/// Using a map here for consistency with the other fields here.
/// The ErrorMsg memory is owned by the `Export`, using Module's allocator.
failed_exports: std.AutoHashMapUnmanaged(*Export, *ErrorMsg) = .{},
failed_exports: std.AutoArrayHashMapUnmanaged(*Export, *ErrorMsg) = .{},
/// Incrementing integer used to compare against the corresponding Decl
/// field to determine whether a Decl's status applies to an ongoing update, or a
@ -125,7 +125,7 @@ pub const Decl = struct {
/// mapping them to an address in the output file.
/// Memory owned by this decl, using Module's allocator.
name: [*:0]const u8,
/// The direct parent container of the Decl. This is either a `Scope.File` or `Scope.ZIRModule`.
/// The direct parent container of the Decl. This is either a `Scope.Container` or `Scope.ZIRModule`.
/// Reference to externally owned memory.
scope: *Scope,
/// The AST Node decl index or ZIR Inst index that contains this declaration.
@ -201,9 +201,9 @@ pub const Decl = struct {
/// typed_value may need to be regenerated.
dependencies: DepsTable = .{},
/// The reason this is not `std.AutoHashMapUnmanaged` is a workaround for
/// The reason this is not `std.AutoArrayHashMapUnmanaged` is a workaround for
/// stage1 compiler giving me: `error: struct 'Module.Decl' depends on itself`
pub const DepsTable = std.HashMapUnmanaged(*Decl, void, std.hash_map.getAutoHashFn(*Decl), std.hash_map.getAutoEqlFn(*Decl), false);
pub const DepsTable = std.ArrayHashMapUnmanaged(*Decl, void, std.array_hash_map.getAutoHashFn(*Decl), std.array_hash_map.getAutoEqlFn(*Decl), false);
pub fn destroy(self: *Decl, gpa: *Allocator) void {
gpa.free(mem.spanZ(self.name));
@ -217,9 +217,10 @@ pub const Decl = struct {
pub fn src(self: Decl) usize {
switch (self.scope.tag) {
.file => {
const file = @fieldParentPtr(Scope.File, "base", self.scope);
const tree = file.contents.tree;
.container => {
const container = @fieldParentPtr(Scope.Container, "base", self.scope);
const tree = container.file_scope.contents.tree;
// TODO Container should have its own decls()
const decl_node = tree.root_node.decls()[self.src_index];
return tree.token_locs[decl_node.firstToken()].start;
},
@ -229,7 +230,7 @@ pub const Decl = struct {
const src_decl = module.decls[self.src_index];
return src_decl.inst.src;
},
.block => unreachable,
.file, .block => unreachable,
.gen_zir => unreachable,
.local_val => unreachable,
.local_ptr => unreachable,
@ -359,6 +360,7 @@ pub const Scope = struct {
.local_ptr => return self.cast(LocalPtr).?.gen_zir.arena,
.zir_module => return &self.cast(ZIRModule).?.contents.module.arena.allocator,
.file => unreachable,
.container => unreachable,
}
}
@ -368,15 +370,16 @@ pub const Scope = struct {
return switch (self.tag) {
.block => self.cast(Block).?.decl,
.gen_zir => self.cast(GenZIR).?.decl,
.local_val => return self.cast(LocalVal).?.gen_zir.decl,
.local_ptr => return self.cast(LocalPtr).?.gen_zir.decl,
.local_val => self.cast(LocalVal).?.gen_zir.decl,
.local_ptr => self.cast(LocalPtr).?.gen_zir.decl,
.decl => self.cast(DeclAnalysis).?.decl,
.zir_module => null,
.file => null,
.container => null,
};
}
/// Asserts the scope has a parent which is a ZIRModule or File and
/// Asserts the scope has a parent which is a ZIRModule or Container and
/// returns it.
pub fn namespace(self: *Scope) *Scope {
switch (self.tag) {
@ -385,7 +388,8 @@ pub const Scope = struct {
.local_val => return self.cast(LocalVal).?.gen_zir.decl.scope,
.local_ptr => return self.cast(LocalPtr).?.gen_zir.decl.scope,
.decl => return self.cast(DeclAnalysis).?.decl.scope,
.zir_module, .file => return self,
.file => return &self.cast(File).?.root_container.base,
.zir_module, .container => return self,
}
}
@ -399,8 +403,9 @@ pub const Scope = struct {
.local_val => unreachable,
.local_ptr => unreachable,
.decl => unreachable,
.file => unreachable,
.zir_module => return self.cast(ZIRModule).?.fullyQualifiedNameHash(name),
.file => return self.cast(File).?.fullyQualifiedNameHash(name),
.container => return self.cast(Container).?.fullyQualifiedNameHash(name),
}
}
@ -409,11 +414,12 @@ pub const Scope = struct {
switch (self.tag) {
.file => return self.cast(File).?.contents.tree,
.zir_module => unreachable,
.decl => return self.cast(DeclAnalysis).?.decl.scope.cast(File).?.contents.tree,
.block => return self.cast(Block).?.decl.scope.cast(File).?.contents.tree,
.gen_zir => return self.cast(GenZIR).?.decl.scope.cast(File).?.contents.tree,
.local_val => return self.cast(LocalVal).?.gen_zir.decl.scope.cast(File).?.contents.tree,
.local_ptr => return self.cast(LocalPtr).?.gen_zir.decl.scope.cast(File).?.contents.tree,
.decl => return self.cast(DeclAnalysis).?.decl.scope.cast(Container).?.file_scope.contents.tree,
.block => return self.cast(Block).?.decl.scope.cast(Container).?.file_scope.contents.tree,
.gen_zir => return self.cast(GenZIR).?.decl.scope.cast(Container).?.file_scope.contents.tree,
.local_val => return self.cast(LocalVal).?.gen_zir.decl.scope.cast(Container).?.file_scope.contents.tree,
.local_ptr => return self.cast(LocalPtr).?.gen_zir.decl.scope.cast(Container).?.file_scope.contents.tree,
.container => return self.cast(Container).?.file_scope.contents.tree,
}
}
@ -427,13 +433,15 @@ pub const Scope = struct {
.decl => unreachable,
.zir_module => unreachable,
.file => unreachable,
.container => unreachable,
};
}
/// Asserts the scope has a parent which is a ZIRModule or File and
/// Asserts the scope has a parent which is a ZIRModule, Container or File and
/// returns the sub_file_path field.
pub fn subFilePath(base: *Scope) []const u8 {
switch (base.tag) {
.container => return @fieldParentPtr(Container, "base", base).file_scope.sub_file_path,
.file => return @fieldParentPtr(File, "base", base).sub_file_path,
.zir_module => return @fieldParentPtr(ZIRModule, "base", base).sub_file_path,
.block => unreachable,
@ -453,11 +461,13 @@ pub const Scope = struct {
.local_val => unreachable,
.local_ptr => unreachable,
.decl => unreachable,
.container => unreachable,
}
}
pub fn getSource(base: *Scope, module: *Module) ![:0]const u8 {
switch (base.tag) {
.container => return @fieldParentPtr(Container, "base", base).file_scope.getSource(module),
.file => return @fieldParentPtr(File, "base", base).getSource(module),
.zir_module => return @fieldParentPtr(ZIRModule, "base", base).getSource(module),
.gen_zir => unreachable,
@ -471,8 +481,9 @@ pub const Scope = struct {
/// Asserts the scope is a namespace Scope and removes the Decl from the namespace.
pub fn removeDecl(base: *Scope, child: *Decl) void {
switch (base.tag) {
.file => return @fieldParentPtr(File, "base", base).removeDecl(child),
.container => return @fieldParentPtr(Container, "base", base).removeDecl(child),
.zir_module => return @fieldParentPtr(ZIRModule, "base", base).removeDecl(child),
.file => unreachable,
.block => unreachable,
.gen_zir => unreachable,
.local_val => unreachable,
@ -499,6 +510,7 @@ pub const Scope = struct {
.local_val => unreachable,
.local_ptr => unreachable,
.decl => unreachable,
.container => unreachable,
}
}
@ -515,6 +527,8 @@ pub const Scope = struct {
zir_module,
/// .zig source code.
file,
/// Struct, enum or union; every .file contains one of these.
container,
block,
decl,
gen_zir,
@ -522,6 +536,33 @@ pub const Scope = struct {
local_ptr,
};
pub const Container = struct {
pub const base_tag: Tag = .container;
base: Scope = Scope{ .tag = base_tag },
file_scope: *Scope.File,
/// Direct children of the file.
decls: std.AutoArrayHashMapUnmanaged(*Decl, void),
// TODO implement container types and put this in a status union
// ty: Type
pub fn deinit(self: *Container, gpa: *Allocator) void {
self.decls.deinit(gpa);
self.* = undefined;
}
pub fn removeDecl(self: *Container, child: *Decl) void {
_ = self.decls.remove(child);
}
pub fn fullyQualifiedNameHash(self: *Container, name: []const u8) NameHash {
// TODO container scope qualified names.
return std.zig.hashSrc(name);
}
};
pub const File = struct {
pub const base_tag: Tag = .file;
base: Scope = Scope{ .tag = base_tag },
@ -544,8 +585,7 @@ pub const Scope = struct {
loaded_success,
},
/// Direct children of the file.
decls: ArrayListUnmanaged(*Decl),
root_container: Container,
pub fn unload(self: *File, gpa: *Allocator) void {
switch (self.status) {
@ -569,20 +609,11 @@ pub const Scope = struct {
}
pub fn deinit(self: *File, gpa: *Allocator) void {
self.decls.deinit(gpa);
self.root_container.deinit(gpa);
self.unload(gpa);
self.* = undefined;
}
pub fn removeDecl(self: *File, child: *Decl) void {
for (self.decls.items) |item, i| {
if (item == child) {
_ = self.decls.swapRemove(i);
return;
}
}
}
pub fn dumpSrc(self: *File, src: usize) void {
const loc = std.zig.findLineColumn(self.source.bytes, src);
std.debug.print("{}:{}:{}\n", .{ self.sub_file_path, loc.line + 1, loc.column + 1 });
@ -604,11 +635,6 @@ pub const Scope = struct {
.bytes => |bytes| return bytes,
}
}
pub fn fullyQualifiedNameHash(self: *File, name: []const u8) NameHash {
// We don't have struct scopes yet so this is currently just a simple name hash.
return std.zig.hashSrc(name);
}
};
pub const ZIRModule = struct {
@ -725,6 +751,7 @@ pub const Scope = struct {
/// Points to the arena allocator of DeclAnalysis
arena: *Allocator,
label: ?Label = null,
is_comptime: bool,
pub const Label = struct {
zir_block: *zir.Inst.Block,
@ -860,7 +887,10 @@ pub fn init(gpa: *Allocator, options: InitOptions) !Module {
.source = .{ .unloaded = {} },
.contents = .{ .not_available = {} },
.status = .never_loaded,
.decls = .{},
.root_container = .{
.file_scope = root_scope,
.decls = .{},
},
};
break :blk &root_scope.base;
} else if (mem.endsWith(u8, options.root_pkg.root_src_path, ".zir")) {
@ -932,7 +962,8 @@ pub fn deinit(self: *Module) void {
self.symbol_exports.deinit(gpa);
self.root_scope.destroy(gpa);
for (self.global_error_set.items()) |entry| {
var it = self.global_error_set.iterator();
while (it.next()) |entry| {
gpa.free(entry.key);
}
self.global_error_set.deinit(gpa);
@ -967,7 +998,7 @@ pub fn update(self: *Module) !void {
// to force a refresh we unload now.
if (self.root_scope.cast(Scope.File)) |zig_file| {
zig_file.unload(self.gpa);
self.analyzeRootSrcFile(zig_file) catch |err| switch (err) {
self.analyzeContainer(&zig_file.root_container) catch |err| switch (err) {
error.AnalysisFail => {
assert(self.totalErrorCount() != 0);
},
@ -1235,8 +1266,8 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool {
const tracy = trace(@src());
defer tracy.end();
const file_scope = decl.scope.cast(Scope.File).?;
const tree = try self.getAstTree(file_scope);
const container_scope = decl.scope.cast(Scope.Container).?;
const tree = try self.getAstTree(container_scope);
const ast_node = tree.root_node.decls()[decl.src_index];
switch (ast_node.tag) {
.FnProto => {
@ -1307,7 +1338,6 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool {
.return_type = return_type_inst,
.param_types = param_types,
}, .{});
_ = try astgen.addZIRUnOp(self, &fn_type_scope.base, fn_src, .@"return", fn_type_inst);
// We need the memory for the Type to go into the arena for the Decl
var decl_arena = std.heap.ArenaAllocator.init(self.gpa);
@ -1320,10 +1350,11 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool {
.decl = decl,
.instructions = .{},
.arena = &decl_arena.allocator,
.is_comptime = false,
};
defer block_scope.instructions.deinit(self.gpa);
const fn_type = try zir_sema.analyzeBodyValueAsType(self, &block_scope, .{
const fn_type = try zir_sema.analyzeBodyValueAsType(self, &block_scope, fn_type_inst, .{
.instructions = fn_type_scope.instructions.items,
});
const new_func = try decl_arena.allocator.create(Fn);
@ -1457,6 +1488,7 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool {
.decl = decl,
.instructions = .{},
.arena = &decl_arena.allocator,
.is_comptime = true,
};
defer block_scope.instructions.deinit(self.gpa);
@ -1489,10 +1521,53 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool {
return self.failNode(&block_scope.base, sect_expr, "TODO implement function section expression", .{});
}
const explicit_type = blk: {
const type_node = var_decl.getTypeNode() orelse
break :blk null;
const var_info: struct { ty: Type, val: ?Value } = if (var_decl.getInitNode()) |init_node| vi: {
var gen_scope_arena = std.heap.ArenaAllocator.init(self.gpa);
defer gen_scope_arena.deinit();
var gen_scope: Scope.GenZIR = .{
.decl = decl,
.arena = &gen_scope_arena.allocator,
.parent = decl.scope,
};
defer gen_scope.instructions.deinit(self.gpa);
const init_result_loc: astgen.ResultLoc = if (var_decl.getTypeNode()) |type_node| rl: {
const src = tree.token_locs[type_node.firstToken()].start;
const type_type = try astgen.addZIRInstConst(self, &gen_scope.base, src, .{
.ty = Type.initTag(.type),
.val = Value.initTag(.type_type),
});
const var_type = try astgen.expr(self, &gen_scope.base, .{ .ty = type_type }, type_node);
break :rl .{ .ty = var_type };
} else .none;
const src = tree.token_locs[init_node.firstToken()].start;
const init_inst = try astgen.expr(self, &gen_scope.base, init_result_loc, init_node);
var inner_block: Scope.Block = .{
.parent = null,
.func = null,
.decl = decl,
.instructions = .{},
.arena = &gen_scope_arena.allocator,
.is_comptime = true,
};
defer inner_block.instructions.deinit(self.gpa);
try zir_sema.analyzeBody(self, &inner_block.base, .{ .instructions = gen_scope.instructions.items });
// The result location guarantees the type coercion.
const analyzed_init_inst = init_inst.analyzed_inst.?;
// The is_comptime in the Scope.Block guarantees the result is comptime-known.
const val = analyzed_init_inst.value().?;
const ty = try analyzed_init_inst.ty.copy(block_scope.arena);
break :vi .{
.ty = ty,
.val = try val.copy(block_scope.arena),
};
} else if (!is_extern) {
return self.failTok(&block_scope.base, var_decl.firstToken(), "variables must be initialized", .{});
} else if (var_decl.getTypeNode()) |type_node| vi: {
// Temporary arena for the zir instructions.
var type_scope_arena = std.heap.ArenaAllocator.init(self.gpa);
defer type_scope_arena.deinit();
@ -1509,71 +1584,24 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool {
.val = Value.initTag(.type_type),
});
const var_type = try astgen.expr(self, &type_scope.base, .{ .ty = type_type }, type_node);
_ = try astgen.addZIRUnOp(self, &type_scope.base, src, .@"return", var_type);
break :blk try zir_sema.analyzeBodyValueAsType(self, &block_scope, .{
const ty = try zir_sema.analyzeBodyValueAsType(self, &block_scope, var_type, .{
.instructions = type_scope.instructions.items,
});
};
var var_type: Type = undefined;
const value: ?Value = if (var_decl.getInitNode()) |init_node| blk: {
var gen_scope_arena = std.heap.ArenaAllocator.init(self.gpa);
defer gen_scope_arena.deinit();
var gen_scope: Scope.GenZIR = .{
.decl = decl,
.arena = &gen_scope_arena.allocator,
.parent = decl.scope,
break :vi .{
.ty = ty,
.val = null,
};
defer gen_scope.instructions.deinit(self.gpa);
const src = tree.token_locs[init_node.firstToken()].start;
// TODO comptime scope here
const init_inst = try astgen.expr(self, &gen_scope.base, .none, init_node);
_ = try astgen.addZIRUnOp(self, &gen_scope.base, src, .@"return", init_inst);
var inner_block: Scope.Block = .{
.parent = null,
.func = null,
.decl = decl,
.instructions = .{},
.arena = &gen_scope_arena.allocator,
};
defer inner_block.instructions.deinit(self.gpa);
try zir_sema.analyzeBody(self, &inner_block.base, .{ .instructions = gen_scope.instructions.items });
for (inner_block.instructions.items) |inst| {
if (inst.castTag(.ret)) |ret| {
const coerced = if (explicit_type) |some|
try self.coerce(&inner_block.base, some, ret.operand)
else
ret.operand;
const val = coerced.value() orelse
return self.fail(&block_scope.base, inst.src, "unable to resolve comptime value", .{});
var_type = explicit_type orelse try ret.operand.ty.copy(block_scope.arena);
break :blk try val.copy(block_scope.arena);
} else {
return self.fail(&block_scope.base, inst.src, "unable to resolve comptime value", .{});
}
}
unreachable;
} else if (!is_extern) {
return self.failTok(&block_scope.base, var_decl.firstToken(), "variables must be initialized", .{});
} else if (explicit_type) |some| blk: {
var_type = some;
break :blk null;
} else {
return self.failTok(&block_scope.base, var_decl.firstToken(), "unable to infer variable type", .{});
};
if (is_mutable and !var_type.isValidVarType(is_extern)) {
return self.failTok(&block_scope.base, var_decl.firstToken(), "variable of type '{}' must be const", .{var_type});
if (is_mutable and !var_info.ty.isValidVarType(is_extern)) {
return self.failTok(&block_scope.base, var_decl.firstToken(), "variable of type '{}' must be const", .{var_info.ty});
}
var type_changed = true;
if (decl.typedValueManaged()) |tvm| {
type_changed = !tvm.typed_value.ty.eql(var_type);
type_changed = !tvm.typed_value.ty.eql(var_info.ty);
tvm.deinit(self.gpa);
}
@ -1582,7 +1610,7 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool {
const var_payload = try decl_arena.allocator.create(Value.Payload.Variable);
new_variable.* = .{
.owner_decl = decl,
.init = value orelse undefined,
.init = var_info.val orelse undefined,
.is_extern = is_extern,
.is_mutable = is_mutable,
.is_threadlocal = is_threadlocal,
@ -1593,7 +1621,7 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool {
decl.typed_value = .{
.most_recent = .{
.typed_value = .{
.ty = var_type,
.ty = var_info.ty,
.val = Value.initPayload(&var_payload.base),
},
.arena = decl_arena_state,
@ -1628,8 +1656,7 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool {
};
defer gen_scope.instructions.deinit(self.gpa);
// TODO comptime scope here
_ = try astgen.expr(self, &gen_scope.base, .none, comptime_decl.expr);
_ = try astgen.comptimeExpr(self, &gen_scope.base, .none, comptime_decl.expr);
var block_scope: Scope.Block = .{
.parent = null,
@ -1637,6 +1664,7 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool {
.decl = decl,
.instructions = .{},
.arena = &analysis_arena.allocator,
.is_comptime = true,
};
defer block_scope.instructions.deinit(self.gpa);
@ -1699,10 +1727,12 @@ fn getSrcModule(self: *Module, root_scope: *Scope.ZIRModule) !*zir.Module {
}
}
fn getAstTree(self: *Module, root_scope: *Scope.File) !*ast.Tree {
fn getAstTree(self: *Module, container_scope: *Scope.Container) !*ast.Tree {
const tracy = trace(@src());
defer tracy.end();
const root_scope = container_scope.file_scope;
switch (root_scope.status) {
.never_loaded, .unloaded_success => {
try self.failed_files.ensureCapacity(self.gpa, self.failed_files.items().len + 1);
@ -1744,25 +1774,25 @@ fn getAstTree(self: *Module, root_scope: *Scope.File) !*ast.Tree {
}
}
fn analyzeRootSrcFile(self: *Module, root_scope: *Scope.File) !void {
fn analyzeContainer(self: *Module, container_scope: *Scope.Container) !void {
const tracy = trace(@src());
defer tracy.end();
// We may be analyzing it for the first time, or this may be
// an incremental update. This code handles both cases.
const tree = try self.getAstTree(root_scope);
const tree = try self.getAstTree(container_scope);
const decls = tree.root_node.decls();
try self.work_queue.ensureUnusedCapacity(decls.len);
try root_scope.decls.ensureCapacity(self.gpa, decls.len);
try container_scope.decls.ensureCapacity(self.gpa, decls.len);
// Keep track of the decls that we expect to see in this file so that
// we know which ones have been deleted.
var deleted_decls = std.AutoHashMap(*Decl, void).init(self.gpa);
var deleted_decls = std.AutoArrayHashMap(*Decl, void).init(self.gpa);
defer deleted_decls.deinit();
try deleted_decls.ensureCapacity(root_scope.decls.items.len);
for (root_scope.decls.items) |file_decl| {
deleted_decls.putAssumeCapacityNoClobber(file_decl, {});
try deleted_decls.ensureCapacity(container_scope.decls.items().len);
for (container_scope.decls.items()) |entry| {
deleted_decls.putAssumeCapacityNoClobber(entry.key, {});
}
for (decls) |src_decl, decl_i| {
@ -1774,7 +1804,7 @@ fn analyzeRootSrcFile(self: *Module, root_scope: *Scope.File) !void {
const name_loc = tree.token_locs[name_tok];
const name = tree.tokenSliceLoc(name_loc);
const name_hash = root_scope.fullyQualifiedNameHash(name);
const name_hash = container_scope.fullyQualifiedNameHash(name);
const contents_hash = std.zig.hashSrc(tree.getNodeSource(src_decl));
if (self.decl_table.get(name_hash)) |decl| {
// Update the AST Node index of the decl, even if its contents are unchanged, it may
@ -1802,8 +1832,8 @@ fn analyzeRootSrcFile(self: *Module, root_scope: *Scope.File) !void {
}
}
} else {
const new_decl = try self.createNewDecl(&root_scope.base, name, decl_i, name_hash, contents_hash);
root_scope.decls.appendAssumeCapacity(new_decl);
const new_decl = try self.createNewDecl(&container_scope.base, name, decl_i, name_hash, contents_hash);
container_scope.decls.putAssumeCapacity(new_decl, {});
if (fn_proto.getExternExportInlineToken()) |maybe_export_token| {
if (tree.token_ids[maybe_export_token] == .Keyword_export) {
self.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl });
@ -1813,7 +1843,7 @@ fn analyzeRootSrcFile(self: *Module, root_scope: *Scope.File) !void {
} else if (src_decl.castTag(.VarDecl)) |var_decl| {
const name_loc = tree.token_locs[var_decl.name_token];
const name = tree.tokenSliceLoc(name_loc);
const name_hash = root_scope.fullyQualifiedNameHash(name);
const name_hash = container_scope.fullyQualifiedNameHash(name);
const contents_hash = std.zig.hashSrc(tree.getNodeSource(src_decl));
if (self.decl_table.get(name_hash)) |decl| {
// Update the AST Node index of the decl, even if its contents are unchanged, it may
@ -1829,8 +1859,8 @@ fn analyzeRootSrcFile(self: *Module, root_scope: *Scope.File) !void {
decl.contents_hash = contents_hash;
}
} else {
const new_decl = try self.createNewDecl(&root_scope.base, name, decl_i, name_hash, contents_hash);
root_scope.decls.appendAssumeCapacity(new_decl);
const new_decl = try self.createNewDecl(&container_scope.base, name, decl_i, name_hash, contents_hash);
container_scope.decls.putAssumeCapacity(new_decl, {});
if (var_decl.getExternExportToken()) |maybe_export_token| {
if (tree.token_ids[maybe_export_token] == .Keyword_export) {
self.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl });
@ -1842,11 +1872,11 @@ fn analyzeRootSrcFile(self: *Module, root_scope: *Scope.File) !void {
const name = try std.fmt.allocPrint(self.gpa, "__comptime_{}", .{name_index});
defer self.gpa.free(name);
const name_hash = root_scope.fullyQualifiedNameHash(name);
const name_hash = container_scope.fullyQualifiedNameHash(name);
const contents_hash = std.zig.hashSrc(tree.getNodeSource(src_decl));
const new_decl = try self.createNewDecl(&root_scope.base, name, decl_i, name_hash, contents_hash);
root_scope.decls.appendAssumeCapacity(new_decl);
const new_decl = try self.createNewDecl(&container_scope.base, name, decl_i, name_hash, contents_hash);
container_scope.decls.putAssumeCapacity(new_decl, {});
self.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl });
} else if (src_decl.castTag(.ContainerField)) |container_field| {
log.err("TODO: analyze container field", .{});
@ -1879,7 +1909,7 @@ fn analyzeRootZIRModule(self: *Module, root_scope: *Scope.ZIRModule) !void {
// Keep track of the decls that we expect to see in this file so that
// we know which ones have been deleted.
var deleted_decls = std.AutoHashMap(*Decl, void).init(self.gpa);
var deleted_decls = std.AutoArrayHashMap(*Decl, void).init(self.gpa);
defer deleted_decls.deinit();
try deleted_decls.ensureCapacity(self.decl_table.items().len);
for (self.decl_table.items()) |entry| {
@ -2007,6 +2037,7 @@ fn analyzeFnBody(self: *Module, decl: *Decl, func: *Fn) !void {
.decl = decl,
.instructions = .{},
.arena = &arena.allocator,
.is_comptime = false,
};
defer inner_block.instructions.deinit(self.gpa);
@ -2088,16 +2119,23 @@ pub fn getErrorValue(self: *Module, name: []const u8) !std.StringHashMapUnmanage
errdefer self.global_error_set.removeAssertDiscard(name);
gop.entry.key = try self.gpa.dupe(u8, name);
gop.entry.value = @intCast(u16, self.global_error_set.items().len - 1);
gop.entry.value = @intCast(u16, self.global_error_set.count() - 1);
return gop.entry.*;
}
/// TODO split this into `requireRuntimeBlock` and `requireFunctionBlock` and audit callsites.
pub fn requireRuntimeBlock(self: *Module, scope: *Scope, src: usize) !*Scope.Block {
pub fn requireFunctionBlock(self: *Module, scope: *Scope, src: usize) !*Scope.Block {
return scope.cast(Scope.Block) orelse
return self.fail(scope, src, "instruction illegal outside function body", .{});
}
pub fn requireRuntimeBlock(self: *Module, scope: *Scope, src: usize) !*Scope.Block {
const block = try self.requireFunctionBlock(scope, src);
if (block.is_comptime) {
return self.fail(scope, src, "unable to resolve comptime value", .{});
}
return block;
}
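// Illustrative sketch: inside `comptime { ... }` the surrounding Scope.Block
// has is_comptime == true, so any instruction that requires a runtime block
// lands in the error branch above, e.g.:
//
//     comptime {
//         var x: u32 = runtimeOnly(); // error: unable to resolve comptime value
//     }
//
// where `runtimeOnly` is a hypothetical stand-in for any call that cannot be
// evaluated at compile time.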
pub fn resolveConstValue(self: *Module, scope: *Scope, base: *Inst) !Value {
return (try self.resolveDefinedValue(scope, base)) orelse
return self.fail(scope, base.src, "unable to resolve comptime value", .{});
@ -2584,6 +2622,72 @@ pub fn analyzeIsErr(self: *Module, scope: *Scope, src: usize, operand: *Inst) In
return self.fail(scope, src, "TODO implement analysis of iserr", .{});
}
pub fn analyzeSlice(self: *Module, scope: *Scope, src: usize, array_ptr: *Inst, start: *Inst, end_opt: ?*Inst, sentinel_opt: ?*Inst) InnerError!*Inst {
const ptr_child = switch (array_ptr.ty.zigTypeTag()) {
.Pointer => array_ptr.ty.elemType(),
else => return self.fail(scope, src, "expected pointer, found '{}'", .{array_ptr.ty}),
};
var array_type = ptr_child;
const elem_type = switch (ptr_child.zigTypeTag()) {
.Array => ptr_child.elemType(),
.Pointer => blk: {
if (ptr_child.isSinglePointer()) {
if (ptr_child.elemType().zigTypeTag() == .Array) {
array_type = ptr_child.elemType();
break :blk ptr_child.elemType().elemType();
}
return self.fail(scope, src, "slice of single-item pointer", .{});
}
break :blk ptr_child.elemType();
},
else => return self.fail(scope, src, "slice of non-array type '{}'", .{ptr_child}),
};
const slice_sentinel = if (sentinel_opt) |sentinel| blk: {
const casted = try self.coerce(scope, elem_type, sentinel);
break :blk try self.resolveConstValue(scope, casted);
} else null;
var return_ptr_size: std.builtin.TypeInfo.Pointer.Size = .Slice;
var return_elem_type = elem_type;
if (end_opt) |end| {
if (end.value()) |end_val| {
if (start.value()) |start_val| {
const start_u64 = start_val.toUnsignedInt();
const end_u64 = end_val.toUnsignedInt();
if (start_u64 > end_u64) {
return self.fail(scope, src, "out of bounds slice", .{});
}
const len = end_u64 - start_u64;
const array_sentinel = if (array_type.zigTypeTag() == .Array and end_u64 == array_type.arrayLen())
array_type.sentinel()
else
slice_sentinel;
return_elem_type = try self.arrayType(scope, len, array_sentinel, elem_type);
return_ptr_size = .One;
}
}
}
const return_type = try self.ptrType(
scope,
src,
return_elem_type,
if (end_opt == null) slice_sentinel else null,
0, // TODO alignment
0,
0,
!ptr_child.isConstPtr(),
ptr_child.isAllowzeroPtr(),
ptr_child.isVolatilePtr(),
return_ptr_size,
);
return self.fail(scope, src, "TODO implement analysis of slice", .{});
}
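// A sketch of the typing rules the return_type computation above encodes,
// assuming standard Zig slicing semantics (illustrative only):
//
//     var arr: [10]u8 = undefined;
//     const a = arr[2..5]; // start and end comptime-known:
//                          // len = 5 - 2 = 3, result type *[3]u8
//     var i: usize = someRuntimeIndex(); // hypothetical runtime value
//     const b = arr[i..5]; // runtime start: result type []u8 (.Slice ptr size)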
/// Asserts that lhs and rhs types are both numeric.
pub fn cmpNumeric(
self: *Module,
@ -2794,6 +2898,12 @@ pub fn resolvePeerTypes(self: *Module, scope: *Scope, instructions: []*Inst) !Ty
prev_inst = next_inst;
continue;
}
if (next_inst.ty.zigTypeTag() == .Undefined)
continue;
if (prev_inst.ty.zigTypeTag() == .Undefined) {
prev_inst = next_inst;
continue;
}
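// Sketch of the intent (illustrative): peer resolution of {undefined, u32}
// yields u32, since `undefined` coerces to any type, e.g.
// @TypeOf(undefined, @as(u32, 0)) == u32.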
if (prev_inst.ty.isInt() and
next_inst.ty.isInt() and
prev_inst.ty.isSignedInt() == next_inst.ty.isSignedInt())
@ -3045,6 +3155,7 @@ fn failWithOwnedErrorMsg(self: *Module, scope: *Scope, src: usize, err_msg: *Err
self.failed_files.putAssumeCapacityNoClobber(scope, err_msg);
},
.file => unreachable,
.container => unreachable,
}
return error.AnalysisFail;
}
@ -3432,6 +3543,7 @@ pub fn addSafetyCheck(mod: *Module, parent_block: *Scope.Block, ok: *Inst, panic
.decl = parent_block.decl,
.instructions = .{},
.arena = parent_block.arena,
.is_comptime = parent_block.is_comptime,
};
defer fail_block.instructions.deinit(mod.gpa);

View File

@ -258,7 +258,7 @@ pub fn expr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node) InnerEr
.OptionalType => return rlWrap(mod, scope, rl, try optionalType(mod, scope, node.castTag(.OptionalType).?)),
.UnwrapOptional => return unwrapOptional(mod, scope, rl, node.castTag(.UnwrapOptional).?),
.Block => return rlWrapVoid(mod, scope, rl, node, try blockExpr(mod, scope, node.castTag(.Block).?)),
.LabeledBlock => return labeledBlockExpr(mod, scope, rl, node.castTag(.LabeledBlock).?),
.LabeledBlock => return labeledBlockExpr(mod, scope, rl, node.castTag(.LabeledBlock).?, .block),
.Break => return rlWrap(mod, scope, rl, try breakExpr(mod, scope, node.castTag(.Break).?)),
.PtrType => return rlWrap(mod, scope, rl, try ptrType(mod, scope, node.castTag(.PtrType).?)),
.GroupedExpression => return expr(mod, scope, rl, node.castTag(.GroupedExpression).?.expr),
@ -275,15 +275,16 @@ pub fn expr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node) InnerEr
.ErrorType => return rlWrap(mod, scope, rl, try errorType(mod, scope, node.castTag(.ErrorType).?)),
.For => return forExpr(mod, scope, rl, node.castTag(.For).?),
.ArrayAccess => return arrayAccess(mod, scope, rl, node.castTag(.ArrayAccess).?),
.Slice => return rlWrap(mod, scope, rl, try sliceExpr(mod, scope, node.castTag(.Slice).?)),
.Catch => return catchExpr(mod, scope, rl, node.castTag(.Catch).?),
.Comptime => return comptimeKeyword(mod, scope, rl, node.castTag(.Comptime).?),
.OrElse => return orelseExpr(mod, scope, rl, node.castTag(.OrElse).?),
.Defer => return mod.failNode(scope, node, "TODO implement astgen.expr for .Defer", .{}),
.Range => return mod.failNode(scope, node, "TODO implement astgen.expr for .Range", .{}),
.OrElse => return mod.failNode(scope, node, "TODO implement astgen.expr for .OrElse", .{}),
.Await => return mod.failNode(scope, node, "TODO implement astgen.expr for .Await", .{}),
.Resume => return mod.failNode(scope, node, "TODO implement astgen.expr for .Resume", .{}),
.Try => return mod.failNode(scope, node, "TODO implement astgen.expr for .Try", .{}),
.Slice => return mod.failNode(scope, node, "TODO implement astgen.expr for .Slice", .{}),
.ArrayInitializer => return mod.failNode(scope, node, "TODO implement astgen.expr for .ArrayInitializer", .{}),
.ArrayInitializerDot => return mod.failNode(scope, node, "TODO implement astgen.expr for .ArrayInitializerDot", .{}),
.StructInitializer => return mod.failNode(scope, node, "TODO implement astgen.expr for .StructInitializer", .{}),
@ -294,11 +295,46 @@ pub fn expr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node) InnerEr
.AnyType => return mod.failNode(scope, node, "TODO implement astgen.expr for .AnyType", .{}),
.FnProto => return mod.failNode(scope, node, "TODO implement astgen.expr for .FnProto", .{}),
.ContainerDecl => return mod.failNode(scope, node, "TODO implement astgen.expr for .ContainerDecl", .{}),
.Comptime => return mod.failNode(scope, node, "TODO implement astgen.expr for .Comptime", .{}),
.Nosuspend => return mod.failNode(scope, node, "TODO implement astgen.expr for .Nosuspend", .{}),
}
}
fn comptimeKeyword(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.Comptime) InnerError!*zir.Inst {
const tracy = trace(@src());
defer tracy.end();
return comptimeExpr(mod, scope, rl, node.expr);
}
pub fn comptimeExpr(mod: *Module, parent_scope: *Scope, rl: ResultLoc, node: *ast.Node) InnerError!*zir.Inst {
const tree = parent_scope.tree();
const src = tree.token_locs[node.firstToken()].start;
// Optimization for labeled blocks: don't need to have 2 layers of blocks, we can reuse the existing one.
if (node.castTag(.LabeledBlock)) |block_node| {
return labeledBlockExpr(mod, parent_scope, rl, block_node, .block_comptime);
}
// Make a scope to collect generated instructions in the sub-expression.
var block_scope: Scope.GenZIR = .{
.parent = parent_scope,
.decl = parent_scope.decl().?,
.arena = parent_scope.arena(),
.instructions = .{},
};
defer block_scope.instructions.deinit(mod.gpa);
// No need to capture the result here because block_comptime_flat implies that the final
// instruction is the block's result value.
_ = try expr(mod, &block_scope.base, rl, node);
const block = try addZIRInstBlock(mod, parent_scope, src, .block_comptime_flat, .{
.instructions = try block_scope.arena.dupe(*zir.Inst, block_scope.instructions.items),
});
return &block.base;
}
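// Sketch (illustrative): for `comptime x + y` the generated add lands inside a
// block_comptime_flat whose last instruction is the result; a labeled block
// such as `comptime blk: { ... break :blk v; }` instead reuses its own block,
// retagged as block_comptime by labeledBlockExpr above.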
fn breakExpr(mod: *Module, parent_scope: *Scope, node: *ast.Node.ControlFlowExpression) InnerError!*zir.Inst {
const tree = parent_scope.tree();
const src = tree.token_locs[node.ltoken].start;
@ -360,10 +396,13 @@ fn labeledBlockExpr(
parent_scope: *Scope,
rl: ResultLoc,
block_node: *ast.Node.LabeledBlock,
zir_tag: zir.Inst.Tag,
) InnerError!*zir.Inst {
const tracy = trace(@src());
defer tracy.end();
assert(zir_tag == .block or zir_tag == .block_comptime);
const tree = parent_scope.tree();
const src = tree.token_locs[block_node.lbrace].start;
@ -373,7 +412,7 @@ fn labeledBlockExpr(
const block_inst = try gen_zir.arena.create(zir.Inst.Block);
block_inst.* = .{
.base = .{
.tag = .block,
.tag = zir_tag,
.src = src,
},
.positionals = .{
@ -751,13 +790,31 @@ fn errorType(mod: *Module, scope: *Scope, node: *ast.Node.OneToken) InnerError!*
}
fn catchExpr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.Catch) InnerError!*zir.Inst {
const tree = scope.tree();
const src = tree.token_locs[node.op_token].start;
return orelseCatchExpr(mod, scope, rl, node.lhs, node.op_token, .iserr, .unwrap_err_unsafe, node.rhs, node.payload);
}
const err_union_ptr = try expr(mod, scope, .ref, node.lhs);
// TODO we could avoid an unnecessary copy if .iserr took a pointer
const err_union = try addZIRUnOp(mod, scope, src, .deref, err_union_ptr);
const cond = try addZIRUnOp(mod, scope, src, .iserr, err_union);
fn orelseExpr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.SimpleInfixOp) InnerError!*zir.Inst {
return orelseCatchExpr(mod, scope, rl, node.lhs, node.op_token, .isnull, .unwrap_optional_unsafe, node.rhs, null);
}
fn orelseCatchExpr(
mod: *Module,
scope: *Scope,
rl: ResultLoc,
lhs: *ast.Node,
op_token: ast.TokenIndex,
cond_op: zir.Inst.Tag,
unwrap_op: zir.Inst.Tag,
rhs: *ast.Node,
payload_node: ?*ast.Node,
) InnerError!*zir.Inst {
const tree = scope.tree();
const src = tree.token_locs[op_token].start;
const operand_ptr = try expr(mod, scope, .ref, lhs);
// TODO we could avoid an unnecessary copy if .iserr, .isnull took a pointer
const err_union = try addZIRUnOp(mod, scope, src, .deref, operand_ptr);
const cond = try addZIRUnOp(mod, scope, src, cond_op, err_union);
var block_scope: Scope.GenZIR = .{
.parent = scope,
@ -773,7 +830,7 @@ fn catchExpr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.Catch)
.else_body = undefined, // populated below
}, .{});
const block = try addZIRInstBlock(mod, scope, src, .{
const block = try addZIRInstBlock(mod, scope, src, .block, .{
.instructions = try block_scope.arena.dupe(*zir.Inst, block_scope.instructions.items),
});
@ -786,55 +843,55 @@ fn catchExpr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.Catch)
.inferred_ptr, .bitcasted_ptr, .block_ptr => .{ .block_ptr = block },
};
var err_scope: Scope.GenZIR = .{
var then_scope: Scope.GenZIR = .{
.parent = scope,
.decl = block_scope.decl,
.arena = block_scope.arena,
.instructions = .{},
};
defer err_scope.instructions.deinit(mod.gpa);
defer then_scope.instructions.deinit(mod.gpa);
var err_val_scope: Scope.LocalVal = undefined;
const err_sub_scope = blk: {
const payload = node.payload orelse
break :blk &err_scope.base;
const then_sub_scope = blk: {
const payload = payload_node orelse
break :blk &then_scope.base;
const err_name = tree.tokenSlice(payload.castTag(.Payload).?.error_symbol.firstToken());
if (mem.eql(u8, err_name, "_"))
break :blk &err_scope.base;
break :blk &then_scope.base;
const unwrapped_err_ptr = try addZIRUnOp(mod, &err_scope.base, src, .unwrap_err_code, err_union_ptr);
const unwrapped_err_ptr = try addZIRUnOp(mod, &then_scope.base, src, .unwrap_err_code, operand_ptr);
err_val_scope = .{
.parent = &err_scope.base,
.gen_zir = &err_scope,
.parent = &then_scope.base,
.gen_zir = &then_scope,
.name = err_name,
.inst = try addZIRUnOp(mod, &err_scope.base, src, .deref, unwrapped_err_ptr),
.inst = try addZIRUnOp(mod, &then_scope.base, src, .deref, unwrapped_err_ptr),
};
break :blk &err_val_scope.base;
};
_ = try addZIRInst(mod, &err_scope.base, src, zir.Inst.Break, .{
_ = try addZIRInst(mod, &then_scope.base, src, zir.Inst.Break, .{
.block = block,
.operand = try expr(mod, err_sub_scope, branch_rl, node.rhs),
.operand = try expr(mod, then_sub_scope, branch_rl, rhs),
}, .{});
var not_err_scope: Scope.GenZIR = .{
var else_scope: Scope.GenZIR = .{
.parent = scope,
.decl = block_scope.decl,
.arena = block_scope.arena,
.instructions = .{},
};
defer not_err_scope.instructions.deinit(mod.gpa);
defer else_scope.instructions.deinit(mod.gpa);
const unwrapped_payload = try addZIRUnOp(mod, &not_err_scope.base, src, .unwrap_err_unsafe, err_union_ptr);
_ = try addZIRInst(mod, &not_err_scope.base, src, zir.Inst.Break, .{
const unwrapped_payload = try addZIRUnOp(mod, &else_scope.base, src, unwrap_op, operand_ptr);
_ = try addZIRInst(mod, &else_scope.base, src, zir.Inst.Break, .{
.block = block,
.operand = unwrapped_payload,
}, .{});
condbr.positionals.then_body = .{ .instructions = try err_scope.arena.dupe(*zir.Inst, err_scope.instructions.items) };
condbr.positionals.else_body = .{ .instructions = try not_err_scope.arena.dupe(*zir.Inst, not_err_scope.instructions.items) };
return rlWrap(mod, scope, rl, &block.base);
condbr.positionals.then_body = .{ .instructions = try then_scope.arena.dupe(*zir.Inst, then_scope.instructions.items) };
condbr.positionals.else_body = .{ .instructions = try else_scope.arena.dupe(*zir.Inst, else_scope.instructions.items) };
return rlWrapPtr(mod, scope, rl, &block.base);
}
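// Hedged sketch of the shared desugaring (illustrative only):
//
//     lhs catch |err| rhs  =>  block: { condbr iserr(lhs),
//                                  then: break :block rhs (err bound via unwrap_err_code),
//                                  else: break :block unwrap_err_unsafe(lhs) }
//
// `orelse` produces the same shape with isnull / unwrap_optional_unsafe and
// no payload binding.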
/// Return whether the identifier names of two tokens are equal. Resolves @"" tokens without allocating.
@ -894,6 +951,36 @@ fn arrayAccess(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.Array
return rlWrapPtr(mod, scope, rl, try addZIRInst(mod, scope, src, zir.Inst.ElemPtr, .{ .array_ptr = array_ptr, .index = index }, .{}));
}
fn sliceExpr(mod: *Module, scope: *Scope, node: *ast.Node.Slice) InnerError!*zir.Inst {
const tree = scope.tree();
const src = tree.token_locs[node.rtoken].start;
const usize_type = try addZIRInstConst(mod, scope, src, .{
.ty = Type.initTag(.type),
.val = Value.initTag(.usize_type),
});
const array_ptr = try expr(mod, scope, .ref, node.lhs);
const start = try expr(mod, scope, .{ .ty = usize_type }, node.start);
if (node.end == null and node.sentinel == null) {
return try addZIRBinOp(mod, scope, src, .slice_start, array_ptr, start);
}
const end = if (node.end) |end| try expr(mod, scope, .{ .ty = usize_type }, end) else null;
// We could get the child type here, but it is easier to do it in semantic analysis.
const sentinel = if (node.sentinel) |sentinel| try expr(mod, scope, .none, sentinel) else null;
return try addZIRInst(
mod,
scope,
src,
zir.Inst.Slice,
.{ .array_ptr = array_ptr, .start = start },
.{ .end = end, .sentinel = sentinel },
);
}
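// E.g. (sketch): `p[1..]` lowers to a single slice_start ZIR op, while
// `p[1..2]` or a sentinel-terminated slice lowers to the full zir.Inst.Slice
// with the optional end/sentinel carried in kw_args.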
fn deref(mod: *Module, scope: *Scope, node: *ast.Node.SimpleSuffixOp) InnerError!*zir.Inst {
const tree = scope.tree();
const src = tree.token_locs[node.rtoken].start;
@ -946,7 +1033,7 @@ fn boolBinOp(
.else_body = undefined, // populated below
}, .{});
const block = try addZIRInstBlock(mod, scope, src, .{
const block = try addZIRInstBlock(mod, scope, src, .block, .{
.instructions = try block_scope.arena.dupe(*zir.Inst, block_scope.instructions.items),
});
@ -1095,7 +1182,7 @@ fn ifExpr(mod: *Module, scope: *Scope, rl: ResultLoc, if_node: *ast.Node.If) Inn
.else_body = undefined, // populated below
}, .{});
const block = try addZIRInstBlock(mod, scope, if_src, .{
const block = try addZIRInstBlock(mod, scope, if_src, .block, .{
.instructions = try block_scope.arena.dupe(*zir.Inst, block_scope.instructions.items),
});
@ -1218,7 +1305,7 @@ fn whileExpr(mod: *Module, scope: *Scope, rl: ResultLoc, while_node: *ast.Node.W
.then_body = undefined, // populated below
.else_body = undefined, // populated below
}, .{});
const cond_block = try addZIRInstBlock(mod, &loop_scope.base, while_src, .{
const cond_block = try addZIRInstBlock(mod, &loop_scope.base, while_src, .block, .{
.instructions = try loop_scope.arena.dupe(*zir.Inst, continue_scope.instructions.items),
});
// TODO avoid emitting the continue expr when there
@ -1231,7 +1318,7 @@ fn whileExpr(mod: *Module, scope: *Scope, rl: ResultLoc, while_node: *ast.Node.W
const loop = try addZIRInstLoop(mod, &expr_scope.base, while_src, .{
.instructions = try expr_scope.arena.dupe(*zir.Inst, loop_scope.instructions.items),
});
const while_block = try addZIRInstBlock(mod, scope, while_src, .{
const while_block = try addZIRInstBlock(mod, scope, while_src, .block, .{
.instructions = try expr_scope.arena.dupe(*zir.Inst, expr_scope.instructions.items),
});
@ -1365,7 +1452,7 @@ fn forExpr(mod: *Module, scope: *Scope, rl: ResultLoc, for_node: *ast.Node.For)
.then_body = undefined, // populated below
.else_body = undefined, // populated below
}, .{});
const cond_block = try addZIRInstBlock(mod, &loop_scope.base, for_src, .{
const cond_block = try addZIRInstBlock(mod, &loop_scope.base, for_src, .block, .{
.instructions = try loop_scope.arena.dupe(*zir.Inst, cond_scope.instructions.items),
});
@ -1382,7 +1469,7 @@ fn forExpr(mod: *Module, scope: *Scope, rl: ResultLoc, for_node: *ast.Node.For)
const loop = try addZIRInstLoop(mod, &for_scope.base, for_src, .{
.instructions = try for_scope.arena.dupe(*zir.Inst, loop_scope.instructions.items),
});
const for_block = try addZIRInstBlock(mod, scope, for_src, .{
const for_block = try addZIRInstBlock(mod, scope, for_src, .block, .{
.instructions = try for_scope.arena.dupe(*zir.Inst, for_scope.instructions.items),
});
@ -2260,6 +2347,30 @@ pub fn addZIRBinOp(
return &inst.base;
}
pub fn addZIRInstBlock(
mod: *Module,
scope: *Scope,
src: usize,
tag: zir.Inst.Tag,
body: zir.Module.Body,
) !*zir.Inst.Block {
const gen_zir = scope.getGenZIR();
try gen_zir.instructions.ensureCapacity(mod.gpa, gen_zir.instructions.items.len + 1);
const inst = try gen_zir.arena.create(zir.Inst.Block);
inst.* = .{
.base = .{
.tag = tag,
.src = src,
},
.positionals = .{
.body = body,
},
.kw_args = .{},
};
gen_zir.instructions.appendAssumeCapacity(&inst.base);
return inst;
}
pub fn addZIRInst(
mod: *Module,
scope: *Scope,
@ -2278,12 +2389,6 @@ pub fn addZIRInstConst(mod: *Module, scope: *Scope, src: usize, typed_value: Typ
return addZIRInst(mod, scope, src, zir.Inst.Const, P{ .typed_value = typed_value }, .{});
}
/// TODO The existence of this function is a workaround for a bug in stage1.
pub fn addZIRInstBlock(mod: *Module, scope: *Scope, src: usize, body: zir.Module.Body) !*zir.Inst.Block {
const P = std.meta.fieldInfo(zir.Inst.Block, "positionals").field_type;
return addZIRInstSpecial(mod, scope, src, zir.Inst.Block, P{ .body = body }, .{});
}
/// TODO The existence of this function is a workaround for a bug in stage1.
pub fn addZIRInstLoop(mod: *Module, scope: *Scope, src: usize, body: zir.Module.Body) !*zir.Inst.Loop {
const P = std.meta.fieldInfo(zir.Inst.Loop, "positionals").field_type;

View File

@ -132,7 +132,7 @@ pub fn generateSymbol(
.Array => {
// TODO populate .debug_info for the array
if (typed_value.val.cast(Value.Payload.Bytes)) |payload| {
if (typed_value.ty.arraySentinel()) |sentinel| {
if (typed_value.ty.sentinel()) |sentinel| {
try code.ensureCapacity(code.items.len + payload.data.len + 1);
code.appendSliceAssumeCapacity(payload.data);
const prev_len = code.items.len;
@ -359,7 +359,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
};
const Branch = struct {
inst_table: std.AutoHashMapUnmanaged(*ir.Inst, MCValue) = .{},
inst_table: std.AutoArrayHashMapUnmanaged(*ir.Inst, MCValue) = .{},
fn deinit(self: *Branch, gpa: *Allocator) void {
self.inst_table.deinit(gpa);
@ -436,8 +436,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
try branch_stack.append(.{});
const src_data: struct {lbrace_src: usize, rbrace_src: usize, source: []const u8} = blk: {
if (module_fn.owner_decl.scope.cast(Module.Scope.File)) |scope_file| {
const tree = scope_file.contents.tree;
if (module_fn.owner_decl.scope.cast(Module.Scope.Container)) |container_scope| {
const tree = container_scope.file_scope.contents.tree;
const fn_proto = tree.root_node.decls()[module_fn.owner_decl.src_index].castTag(.FnProto).?;
const block = fn_proto.getBodyNode().?.castTag(.Block).?;
const lbrace_src = tree.token_locs[block.lbrace].start;
@ -750,7 +750,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
const ptr_bits = arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
if (abi_size <= ptr_bytes) {
try self.registers.ensureCapacity(self.gpa, self.registers.items().len + 1);
try self.registers.ensureCapacity(self.gpa, self.registers.count() + 1);
if (self.allocReg(inst)) |reg| {
return MCValue{ .register = registerAlias(reg, abi_size) };
}
@ -788,7 +788,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
/// `reg_owner` is the instruction that gets associated with the register in the register table.
/// This can have a side effect of spilling instructions to the stack to free up a register.
fn copyToNewRegister(self: *Self, reg_owner: *ir.Inst, mcv: MCValue) !MCValue {
try self.registers.ensureCapacity(self.gpa, self.registers.items().len + 1);
try self.registers.ensureCapacity(self.gpa, @intCast(u32, self.registers.count() + 1));
const reg = self.allocReg(reg_owner) orelse b: {
// We'll take over the first register. Move the instruction that was previously
@ -1247,7 +1247,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
if (inst.base.isUnused())
return MCValue.dead;
try self.registers.ensureCapacity(self.gpa, self.registers.items().len + 1);
try self.registers.ensureCapacity(self.gpa, self.registers.count() + 1);
const result = self.args[self.arg_index];
self.arg_index += 1;
@ -1443,7 +1443,57 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
switch (arch) {
.x86_64 => return self.fail(inst.base.src, "TODO implement codegen for call when linking with MachO for x86_64 arch", .{}),
.x86_64 => {
for (info.args) |mc_arg, arg_i| {
const arg = inst.args[arg_i];
const arg_mcv = try self.resolveInst(inst.args[arg_i]);
// Here we do not use setRegOrMem even though the logic is similar, because
// the function call will move the stack pointer, so the offsets are different.
switch (mc_arg) {
.none => continue,
.register => |reg| {
try self.genSetReg(arg.src, reg, arg_mcv);
// TODO interact with the register allocator to mark the instruction as moved.
},
.stack_offset => {
// Here we need to emit instructions like this:
// mov qword ptr [rsp + stack_offset], x
return self.fail(inst.base.src, "TODO implement calling with parameters in memory", .{});
},
.ptr_stack_offset => {
return self.fail(inst.base.src, "TODO implement calling with MCValue.ptr_stack_offset arg", .{});
},
.ptr_embedded_in_code => {
return self.fail(inst.base.src, "TODO implement calling with MCValue.ptr_embedded_in_code arg", .{});
},
.undef => unreachable,
.immediate => unreachable,
.unreach => unreachable,
.dead => unreachable,
.embedded_in_code => unreachable,
.memory => unreachable,
.compare_flags_signed => unreachable,
.compare_flags_unsigned => unreachable,
}
}
if (inst.func.cast(ir.Inst.Constant)) |func_inst| {
if (func_inst.val.cast(Value.Payload.Function)) |func_val| {
const func = func_val.func;
const got = &macho_file.sections.items[macho_file.got_section_index.?];
const ptr_bytes = 8;
const got_addr = @intCast(u32, got.addr + func.owner_decl.link.macho.offset_table_index.? * ptr_bytes);
// ff 14 25 xx xx xx xx call [addr]
try self.code.ensureCapacity(self.code.items.len + 7);
self.code.appendSliceAssumeCapacity(&[3]u8{ 0xff, 0x14, 0x25 });
mem.writeIntLittle(u32, self.code.addManyAsArrayAssumeCapacity(4), got_addr);
} else {
return self.fail(inst.base.src, "TODO implement calling bitcasted functions", .{});
}
} else {
return self.fail(inst.base.src, "TODO implement calling runtime known function pointer", .{});
}
},
.aarch64 => return self.fail(inst.base.src, "TODO implement codegen for call when linking with MachO for aarch64 arch", .{}),
else => unreachable,
}
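// Note (sketch, standard x86_64 encoding): `ff 14 25 imm32` is
// `call qword ptr [imm32]`, an absolute indirect call through the GOT slot
// whose address is written as the imm32 above.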
@ -2486,6 +2536,11 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
const decl = payload.decl;
const got = &macho_file.sections.items[macho_file.got_section_index.?];
const got_addr = got.addr + decl.link.macho.offset_table_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
return self.fail(src, "TODO codegen non-ELF const Decl pointer", .{});
}

View File

@ -85,7 +85,7 @@ fn genArray(file: *C, decl: *Decl) !void {
const name = try map(file.base.allocator, mem.span(decl.name));
defer file.base.allocator.free(name);
if (tv.val.cast(Value.Payload.Bytes)) |payload|
if (tv.ty.arraySentinel()) |sentinel|
if (tv.ty.sentinel()) |sentinel|
if (sentinel.toUnsignedInt() == 0)
try file.constants.writer().print("const char *const {} = \"{}\";\n", .{ name, payload.data })
else
@ -110,7 +110,8 @@ const Context = struct {
}
fn deinit(self: *Context) void {
for (self.inst_map.items()) |kv| {
var it = self.inst_map.iterator();
while (it.next()) |kv| {
self.file.base.allocator.free(kv.value);
}
self.inst_map.deinit();

View File

@ -189,7 +189,7 @@ pub const Inst = struct {
}
pub fn cmpOperator(base: *Inst) ?std.math.CompareOperator {
return switch (self.base.tag) {
return switch (base.tag) {
.cmp_lt => .lt,
.cmp_lte => .lte,
.cmp_eq => .eq,
@ -220,6 +220,14 @@ pub const Inst = struct {
unreachable;
}
pub fn breakBlock(base: *Inst) ?*Block {
return switch (base.tag) {
.br => base.castTag(.br).?.block,
.brvoid => base.castTag(.brvoid).?.block,
else => null,
};
}
pub const NoOp = struct {
base: Inst,

View File

@ -47,7 +47,7 @@ pub const File = struct {
};
/// For DWARF .debug_info.
pub const DbgInfoTypeRelocsTable = std.HashMapUnmanaged(Type, DbgInfoTypeReloc, Type.hash, Type.eql, true);
pub const DbgInfoTypeRelocsTable = std.HashMapUnmanaged(Type, DbgInfoTypeReloc, Type.hash, Type.eql, std.hash_map.DefaultMaxLoadPercentage);
/// For DWARF .debug_info.
pub const DbgInfoTypeReloc = struct {

View File

@ -1629,7 +1629,8 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void {
var dbg_info_type_relocs: File.DbgInfoTypeRelocsTable = .{};
defer {
for (dbg_info_type_relocs.items()) |*entry| {
var it = dbg_info_type_relocs.iterator();
while (it.next()) |entry| {
entry.value.relocs.deinit(self.base.allocator);
}
dbg_info_type_relocs.deinit(self.base.allocator);
@ -1655,8 +1656,8 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void {
try dbg_line_buffer.ensureCapacity(26);
const line_off: u28 = blk: {
if (decl.scope.cast(Module.Scope.File)) |scope_file| {
const tree = scope_file.contents.tree;
if (decl.scope.cast(Module.Scope.Container)) |container_scope| {
const tree = container_scope.file_scope.contents.tree;
const file_ast_decls = tree.root_node.decls();
// TODO Look into improving the performance here by adding a token-index-to-line
// lookup table. Currently this involves scanning over the source code for newlines.
@ -1917,7 +1918,8 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void {
// Now we emit the .debug_info types of the Decl. These will count towards the size of
// the buffer, so we have to do it before computing the offset, and we can't perform the actual
// relocations yet.
for (dbg_info_type_relocs.items()) |*entry| {
var it = dbg_info_type_relocs.iterator();
while (it.next()) |entry| {
entry.value.off = @intCast(u32, dbg_info_buffer.items.len);
try self.addDbgInfoType(entry.key, &dbg_info_buffer);
}
@ -1925,7 +1927,8 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void {
try self.updateDeclDebugInfoAllocation(text_block, @intCast(u32, dbg_info_buffer.items.len));
// Now that we have the offset assigned we can finally perform type relocations.
for (dbg_info_type_relocs.items()) |entry| {
it = dbg_info_type_relocs.iterator();
while (it.next()) |entry| {
for (entry.value.relocs.items) |off| {
mem.writeInt(
u32,
@ -2154,8 +2157,8 @@ pub fn updateDeclLineNumber(self: *Elf, module: *Module, decl: *const Module.Dec
const tracy = trace(@src());
defer tracy.end();
const scope_file = decl.scope.cast(Module.Scope.File).?;
const tree = scope_file.contents.tree;
const container_scope = decl.scope.cast(Module.Scope.Container).?;
const tree = container_scope.file_scope.contents.tree;
const file_ast_decls = tree.root_node.decls();
// TODO Look into improving the performance here by adding a token-index-to-line
// lookup table. Currently this involves scanning over the source code for newlines.

View File

@ -18,36 +18,66 @@ const File = link.File;
pub const base_tag: File.Tag = File.Tag.macho;
const LoadCommand = union(enum) {
Segment: macho.segment_command_64,
LinkeditData: macho.linkedit_data_command,
Symtab: macho.symtab_command,
Dysymtab: macho.dysymtab_command,
pub fn cmdsize(self: LoadCommand) u32 {
return switch (self) {
.Segment => |x| x.cmdsize,
.LinkeditData => |x| x.cmdsize,
.Symtab => |x| x.cmdsize,
.Dysymtab => |x| x.cmdsize,
};
}
};
base: File,
/// List of all load command headers that are in the file.
/// We use it to track number and size of all commands needed by the header.
commands: std.ArrayListUnmanaged(macho.load_command) = std.ArrayListUnmanaged(macho.load_command){},
command_file_offset: ?u64 = null,
/// Table of all load commands
load_commands: std.ArrayListUnmanaged(LoadCommand) = .{},
segment_cmd_index: ?u16 = null,
symtab_cmd_index: ?u16 = null,
dysymtab_cmd_index: ?u16 = null,
data_in_code_cmd_index: ?u16 = null,
/// Stored in native-endian format, depending on target endianness needs to be bswapped on read/write.
/// Same order as in the file.
segments: std.ArrayListUnmanaged(macho.segment_command_64) = std.ArrayListUnmanaged(macho.segment_command_64){},
/// Section (headers) *always* follow segment (load commands) directly!
sections: std.ArrayListUnmanaged(macho.section_64) = std.ArrayListUnmanaged(macho.section_64){},
/// Table of all sections
sections: std.ArrayListUnmanaged(macho.section_64) = .{},
/// Offset (index) into __TEXT segment load command.
text_segment_offset: ?u64 = null,
/// Offset (index) into __LINKEDIT segment load command.
linkedit_segment_offset: ?u64 = null,
/// __TEXT segment sections
text_section_index: ?u16 = null,
cstring_section_index: ?u16 = null,
const_text_section_index: ?u16 = null,
stubs_section_index: ?u16 = null,
stub_helper_section_index: ?u16 = null,
/// __DATA segment sections
got_section_index: ?u16 = null,
const_data_section_index: ?u16 = null,
/// Entry point load command
entry_point_cmd: ?macho.entry_point_command = null,
entry_addr: ?u64 = null,
/// The first 4GB of the process's memory is reserved for the null (__PAGEZERO) segment.
/// This is also the start address for our binary.
vm_start_address: u64 = 0x100000000,
/// Table of all symbols used.
/// Internally references string table for names (which are optional).
symbol_table: std.ArrayListUnmanaged(macho.nlist_64) = .{},
seg_table_dirty: bool = false,
/// Table of symbol names aka the string table.
string_table: std.ArrayListUnmanaged(u8) = .{},
/// Table of symbol vaddr values. Each value is an absolute vaddr.
/// If the vaddr of the executable __TEXT segment changes, the entire offset
/// table needs to be rewritten.
offset_table: std.ArrayListUnmanaged(u64) = .{},
error_flags: File.ErrorFlags = File.ErrorFlags{},
cmd_table_dirty: bool = false,
/// Pointer to the last allocated text block
last_text_block: ?*TextBlock = null,
/// `alloc_num / alloc_den` is the factor of padding when allocating.
const alloc_num = 4;
const alloc_den = 3;
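// E.g. (sketch of the policy): a block of size 300 is ideally granted
// 300 * 4 / 3 = 400 bytes, leaving ~100 bytes of padding so neighboring
// blocks can grow without immediately being relocated.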
@ -67,7 +97,23 @@ const LIB_SYSTEM_NAME: [*:0]const u8 = "System";
const LIB_SYSTEM_PATH: [*:0]const u8 = DEFAULT_LIB_SEARCH_PATH ++ "/libSystem.B.dylib";
pub const TextBlock = struct {
pub const empty = TextBlock{};
/// Index into the symbol table
symbol_table_index: ?u32,
/// Index into offset table
offset_table_index: ?u32,
/// Size of this text block
size: u64,
/// Points to the previous and next neighbours
prev: ?*TextBlock,
next: ?*TextBlock,
pub const empty = TextBlock{
.symbol_table_index = null,
.offset_table_index = null,
.size = 0,
.prev = null,
.next = null,
};
};
pub const SrcFn = struct {
@ -117,6 +163,12 @@ fn openFile(allocator: *Allocator, file: fs.File, options: link.Options) !MachO
/// Truncates the existing file contents and overwrites the contents.
/// Returns an error if `file` is not already open with +read +write +seek abilities.
fn createFile(allocator: *Allocator, file: fs.File, options: link.Options) !MachO {
switch (options.output_mode) {
.Exe => {},
.Obj => {},
.Lib => return error.TODOImplementWritingLibFiles,
}
var self: MachO = .{
.base = .{
.file = file,
@ -127,37 +179,518 @@ fn createFile(allocator: *Allocator, file: fs.File, options: link.Options) !Mach
};
errdefer self.deinit();
switch (options.output_mode) {
.Exe => {
// The first segment command for executables is always a __PAGEZERO segment.
const pagezero = .{
.cmd = macho.LC_SEGMENT_64,
.cmdsize = commandSize(@sizeOf(macho.segment_command_64)),
.segname = makeString("__PAGEZERO"),
.vmaddr = 0,
.vmsize = self.vm_start_address,
.fileoff = 0,
.filesize = 0,
.maxprot = macho.VM_PROT_NONE,
.initprot = macho.VM_PROT_NONE,
.nsects = 0,
.flags = 0,
};
try self.commands.append(allocator, .{
.cmd = pagezero.cmd,
.cmdsize = pagezero.cmdsize,
});
try self.segments.append(allocator, pagezero);
},
.Obj => return error.TODOImplementWritingObjFiles,
.Lib => return error.TODOImplementWritingLibFiles,
}
try self.populateMissingMetadata();
return self;
}
pub fn flush(self: *MachO, module: *Module) !void {
switch (self.base.options.output_mode) {
.Exe => {
var last_cmd_offset: usize = @sizeOf(macho.mach_header_64);
{
// Specify path to dynamic linker dyld
const cmdsize = commandSize(@sizeOf(macho.dylinker_command) + mem.lenZ(DEFAULT_DYLD_PATH));
const load_dylinker = [1]macho.dylinker_command{
.{
.cmd = macho.LC_LOAD_DYLINKER,
.cmdsize = cmdsize,
.name = @sizeOf(macho.dylinker_command),
},
};
try self.base.file.?.pwriteAll(mem.sliceAsBytes(load_dylinker[0..1]), last_cmd_offset);
const file_offset = last_cmd_offset + @sizeOf(macho.dylinker_command);
try self.addPadding(cmdsize - @sizeOf(macho.dylinker_command), file_offset);
try self.base.file.?.pwriteAll(mem.spanZ(DEFAULT_DYLD_PATH), file_offset);
last_cmd_offset += cmdsize;
}
{
// Link against libSystem
const cmdsize = commandSize(@sizeOf(macho.dylib_command) + mem.lenZ(LIB_SYSTEM_PATH));
// TODO Find a way to work out runtime version from the OS version triple stored in std.Target.
// In the meantime, we're gonna hardcode to the minimum compatibility version of 1.0.0.
const min_version = 0x10000;
const dylib = .{
.name = @sizeOf(macho.dylib_command),
.timestamp = 2, // not sure why not simply 0; this is reverse engineered from Mach-O files
.current_version = min_version,
.compatibility_version = min_version,
};
const load_dylib = [1]macho.dylib_command{
.{
.cmd = macho.LC_LOAD_DYLIB,
.cmdsize = cmdsize,
.dylib = dylib,
},
};
try self.base.file.?.pwriteAll(mem.sliceAsBytes(load_dylib[0..1]), last_cmd_offset);
const file_offset = last_cmd_offset + @sizeOf(macho.dylib_command);
try self.addPadding(cmdsize - @sizeOf(macho.dylib_command), file_offset);
try self.base.file.?.pwriteAll(mem.spanZ(LIB_SYSTEM_PATH), file_offset);
last_cmd_offset += cmdsize;
}
},
.Obj => {
{
const symtab = &self.load_commands.items[self.symtab_cmd_index.?].Symtab;
symtab.nsyms = @intCast(u32, self.symbol_table.items.len);
const allocated_size = self.allocatedSize(symtab.stroff);
const needed_size = self.string_table.items.len;
log.debug("allocated_size = 0x{x}, needed_size = 0x{x}\n", .{ allocated_size, needed_size });
if (needed_size > allocated_size) {
symtab.strsize = 0;
symtab.stroff = @intCast(u32, self.findFreeSpace(needed_size, 1));
}
symtab.strsize = @intCast(u32, needed_size);
log.debug("writing string table from 0x{x} to 0x{x}\n", .{ symtab.stroff, symtab.stroff + symtab.strsize });
try self.base.file.?.pwriteAll(self.string_table.items, symtab.stroff);
}
var last_cmd_offset: usize = @sizeOf(macho.mach_header_64);
for (self.load_commands.items) |cmd| {
const cmd_to_write = [1]@TypeOf(cmd){cmd};
try self.base.file.?.pwriteAll(mem.sliceAsBytes(cmd_to_write[0..1]), last_cmd_offset);
last_cmd_offset += cmd.cmdsize();
}
const off = @sizeOf(macho.mach_header_64) + @sizeOf(macho.segment_command_64);
try self.base.file.?.pwriteAll(mem.sliceAsBytes(self.sections.items), off);
},
.Lib => return error.TODOImplementWritingLibFiles,
}
if (self.entry_addr == null and self.base.options.output_mode == .Exe) {
log.debug("flushing. no_entry_point_found = true\n", .{});
self.error_flags.no_entry_point_found = true;
} else {
log.debug("flushing. no_entry_point_found = false\n", .{});
self.error_flags.no_entry_point_found = false;
try self.writeMachOHeader();
}
}
pub fn deinit(self: *MachO) void {
self.offset_table.deinit(self.base.allocator);
self.string_table.deinit(self.base.allocator);
self.symbol_table.deinit(self.base.allocator);
self.sections.deinit(self.base.allocator);
self.load_commands.deinit(self.base.allocator);
}
pub fn allocateDeclIndexes(self: *MachO, decl: *Module.Decl) !void {
if (decl.link.macho.symbol_table_index) |_| return;
try self.symbol_table.ensureCapacity(self.base.allocator, self.symbol_table.items.len + 1);
try self.offset_table.ensureCapacity(self.base.allocator, self.offset_table.items.len + 1);
log.debug("allocating symbol index {} for {}\n", .{ self.symbol_table.items.len, decl.name });
decl.link.macho.symbol_table_index = @intCast(u32, self.symbol_table.items.len);
_ = self.symbol_table.addOneAssumeCapacity();
decl.link.macho.offset_table_index = @intCast(u32, self.offset_table.items.len);
_ = self.offset_table.addOneAssumeCapacity();
self.symbol_table.items[decl.link.macho.symbol_table_index.?] = .{
.n_strx = 0,
.n_type = 0,
.n_sect = 0,
.n_desc = 0,
.n_value = 0,
};
self.offset_table.items[decl.link.macho.offset_table_index.?] = 0;
}
pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {
const tracy = trace(@src());
defer tracy.end();
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
var dbg_line_buffer = std.ArrayList(u8).init(self.base.allocator);
defer dbg_line_buffer.deinit();
var dbg_info_buffer = std.ArrayList(u8).init(self.base.allocator);
defer dbg_info_buffer.deinit();
var dbg_info_type_relocs: File.DbgInfoTypeRelocsTable = .{};
defer {
var it = dbg_info_type_relocs.iterator();
while (it.next()) |entry| {
entry.value.relocs.deinit(self.base.allocator);
}
dbg_info_type_relocs.deinit(self.base.allocator);
}
const typed_value = decl.typed_value.most_recent.typed_value;
const res = try codegen.generateSymbol(
&self.base,
decl.src(),
typed_value,
&code_buffer,
&dbg_line_buffer,
&dbg_info_buffer,
&dbg_info_type_relocs,
);
const code = switch (res) {
.externally_managed => |x| x,
.appended => code_buffer.items,
.fail => |em| {
decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl, em);
return;
},
};
log.debug("generated code {}\n", .{code});
const required_alignment = typed_value.ty.abiAlignment(self.base.options.target);
const symbol = &self.symbol_table.items[decl.link.macho.symbol_table_index.?];
const decl_name = mem.spanZ(decl.name);
const name_str_index = try self.makeString(decl_name);
const addr = try self.allocateTextBlock(&decl.link.macho, code.len, required_alignment);
log.debug("allocated text block for {} at 0x{x}\n", .{ decl_name, addr });
log.debug("updated text section {}\n", .{self.sections.items[self.text_section_index.?]});
symbol.* = .{
.n_strx = name_str_index,
.n_type = macho.N_SECT,
.n_sect = @intCast(u8, self.text_section_index.?) + 1,
.n_desc = 0,
.n_value = addr,
};
self.offset_table.items[decl.link.macho.offset_table_index.?] = addr;
try self.writeSymbol(decl.link.macho.symbol_table_index.?);
const text_section = self.sections.items[self.text_section_index.?];
const section_offset = symbol.n_value - text_section.addr;
const file_offset = text_section.offset + section_offset;
log.debug("file_offset 0x{x}\n", .{file_offset});
try self.base.file.?.pwriteAll(code, file_offset);
// Since we updated the vaddr and the size, each corresponding export symbol also needs to be updated.
const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{};
return self.updateDeclExports(module, decl, decl_exports);
}
pub fn updateDeclLineNumber(self: *MachO, module: *Module, decl: *const Module.Decl) !void {}
pub fn updateDeclExports(
self: *MachO,
module: *Module,
decl: *const Module.Decl,
exports: []const *Module.Export,
) !void {
const tracy = trace(@src());
defer tracy.end();
if (decl.link.macho.symbol_table_index == null) return;
var decl_sym = self.symbol_table.items[decl.link.macho.symbol_table_index.?];
// TODO implement
if (exports.len == 0) return;
const exp = exports[0];
self.entry_addr = decl_sym.n_value;
decl_sym.n_type |= macho.N_EXT;
exp.link.sym_index = 0;
}
pub fn freeDecl(self: *MachO, decl: *Module.Decl) void {}
pub fn getDeclVAddr(self: *MachO, decl: *const Module.Decl) u64 {
return self.symbol_table.items[decl.link.macho.symbol_table_index.?].n_value;
}
pub fn populateMissingMetadata(self: *MachO) !void {
if (self.segment_cmd_index == null) {
self.segment_cmd_index = @intCast(u16, self.load_commands.items.len);
try self.load_commands.append(self.base.allocator, .{
.Segment = .{
.cmd = macho.LC_SEGMENT_64,
.cmdsize = @sizeOf(macho.segment_command_64),
.segname = makeStaticString(""),
.vmaddr = 0,
.vmsize = 0,
.fileoff = 0,
.filesize = 0,
.maxprot = 0,
.initprot = 0,
.nsects = 0,
.flags = 0,
},
});
self.cmd_table_dirty = true;
}
if (self.symtab_cmd_index == null) {
self.symtab_cmd_index = @intCast(u16, self.load_commands.items.len);
try self.load_commands.append(self.base.allocator, .{
.Symtab = .{
.cmd = macho.LC_SYMTAB,
.cmdsize = @sizeOf(macho.symtab_command),
.symoff = 0,
.nsyms = 0,
.stroff = 0,
.strsize = 0,
},
});
self.cmd_table_dirty = true;
}
if (self.text_section_index == null) {
self.text_section_index = @intCast(u16, self.sections.items.len);
const segment = &self.load_commands.items[self.segment_cmd_index.?].Segment;
segment.cmdsize += @sizeOf(macho.section_64);
segment.nsects += 1;
const file_size = self.base.options.program_code_size_hint;
const off = @intCast(u32, self.findFreeSpace(file_size, 1));
const flags = macho.S_REGULAR | macho.S_ATTR_PURE_INSTRUCTIONS | macho.S_ATTR_SOME_INSTRUCTIONS;
log.debug("found __text section free space 0x{x} to 0x{x}\n", .{ off, off + file_size });
try self.sections.append(self.base.allocator, .{
.sectname = makeStaticString("__text"),
.segname = makeStaticString("__TEXT"),
.addr = 0,
.size = file_size,
.offset = off,
.@"align" = 0x1000,
.reloff = 0,
.nreloc = 0,
.flags = flags,
.reserved1 = 0,
.reserved2 = 0,
.reserved3 = 0,
});
segment.vmsize += file_size;
segment.filesize += file_size;
segment.fileoff = off;
log.debug("initial text section {}\n", .{self.sections.items[self.text_section_index.?]});
}
{
const symtab = &self.load_commands.items[self.symtab_cmd_index.?].Symtab;
if (symtab.symoff == 0) {
const p_align = @sizeOf(macho.nlist_64);
const nsyms = self.base.options.symbol_count_hint;
const file_size = p_align * nsyms;
const off = @intCast(u32, self.findFreeSpace(file_size, p_align));
log.debug("found symbol table free space 0x{x} to 0x{x}\n", .{ off, off + file_size });
symtab.symoff = off;
symtab.nsyms = @intCast(u32, nsyms);
}
if (symtab.stroff == 0) {
try self.string_table.append(self.base.allocator, 0);
const file_size = @intCast(u32, self.string_table.items.len);
const off = @intCast(u32, self.findFreeSpace(file_size, 1));
log.debug("found string table free space 0x{x} to 0x{x}\n", .{ off, off + file_size });
symtab.stroff = off;
symtab.strsize = file_size;
}
}
}
fn allocateTextBlock(self: *MachO, text_block: *TextBlock, new_block_size: u64, alignment: u64) !u64 {
const segment = &self.load_commands.items[self.segment_cmd_index.?].Segment;
const text_section = &self.sections.items[self.text_section_index.?];
const new_block_ideal_capacity = new_block_size * alloc_num / alloc_den;
var block_placement: ?*TextBlock = null;
const addr = blk: {
if (self.last_text_block) |last| {
const last_symbol = self.symbol_table.items[last.symbol_table_index.?];
const ideal_capacity = last.size * alloc_num / alloc_den;
const ideal_capacity_end_addr = last_symbol.n_value + ideal_capacity;
const new_start_addr = mem.alignForwardGeneric(u64, ideal_capacity_end_addr, alignment);
block_placement = last;
break :blk new_start_addr;
} else {
break :blk text_section.addr;
}
};
log.debug("computed symbol address 0x{x}\n", .{addr});
const expand_text_section = block_placement == null or block_placement.?.next == null;
if (expand_text_section) {
const text_capacity = self.allocatedSize(text_section.offset);
const needed_size = (addr + new_block_size) - text_section.addr;
log.debug("text capacity 0x{x}, needed size 0x{x}\n", .{ text_capacity, needed_size });
if (needed_size > text_capacity) {
// TODO handle growth
}
self.last_text_block = text_block;
text_section.size = needed_size;
segment.vmsize = needed_size;
segment.filesize = needed_size;
if (alignment < text_section.@"align") {
text_section.@"align" = @intCast(u32, alignment);
}
}
text_block.size = new_block_size;
if (text_block.prev) |prev| {
prev.next = text_block.next;
}
if (text_block.next) |next| {
next.prev = text_block.prev;
}
if (block_placement) |big_block| {
text_block.prev = big_block;
text_block.next = big_block.next;
big_block.next = text_block;
} else {
text_block.prev = null;
text_block.next = null;
}
return addr;
}
fn makeStaticString(comptime bytes: []const u8) [16]u8 {
var buf = [_]u8{0} ** 16;
if (bytes.len > buf.len) @compileError("string too long; max 16 bytes");
mem.copy(u8, buf[0..], bytes);
return buf;
}
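// Usage sketch: makeStaticString("__TEXT") yields "__TEXT" followed by ten
// zero bytes, matching the fixed 16-byte segname/sectname fields of
// macho.segment_command_64 and macho.section_64.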
fn makeString(self: *MachO, bytes: []const u8) !u32 {
try self.string_table.ensureCapacity(self.base.allocator, self.string_table.items.len + bytes.len + 1);
const result = self.string_table.items.len;
self.string_table.appendSliceAssumeCapacity(bytes);
self.string_table.appendAssumeCapacity(0);
return @intCast(u32, result);
}
fn alignSize(comptime Int: type, min_size: anytype, alignment: Int) Int {
const size = @intCast(Int, min_size);
if (size % alignment == 0) return size;
const div = size / alignment;
return (div + 1) * alignment;
}
fn commandSize(min_size: anytype) u32 {
return alignSize(u32, min_size, @sizeOf(u64));
}
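// Worked example (sketch): load command sizes must be 8-byte aligned, so
// alignSize(u32, 37, 8) == 40, while commandSize(32) == 32 (already aligned).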
fn addPadding(self: *MachO, size: u64, file_offset: u64) !void {
if (size == 0) return;
const buf = try self.base.allocator.alloc(u8, size);
defer self.base.allocator.free(buf);
mem.set(u8, buf[0..], 0);
try self.base.file.?.pwriteAll(buf, file_offset);
}
fn detectAllocCollision(self: *MachO, start: u64, size: u64) ?u64 {
const hdr_size: u64 = @sizeOf(macho.mach_header_64);
if (start < hdr_size)
return hdr_size;
const end = start + satMul(size, alloc_num) / alloc_den;
{
const off = @sizeOf(macho.mach_header_64);
var tight_size: u64 = 0;
for (self.load_commands.items) |cmd| {
tight_size += cmd.cmdsize();
}
const increased_size = satMul(tight_size, alloc_num) / alloc_den;
const test_end = off + increased_size;
if (end > off and start < test_end) {
return test_end;
}
}
for (self.sections.items) |section| {
const increased_size = satMul(section.size, alloc_num) / alloc_den;
const test_end = section.offset + increased_size;
if (end > section.offset and start < test_end) {
return test_end;
}
}
if (self.symtab_cmd_index) |symtab_index| {
const symtab = self.load_commands.items[symtab_index].Symtab;
{
const tight_size = @sizeOf(macho.nlist_64) * symtab.nsyms;
const increased_size = satMul(tight_size, alloc_num) / alloc_den;
const test_end = symtab.symoff + increased_size;
if (end > symtab.symoff and start < test_end) {
return test_end;
}
}
{
const increased_size = satMul(symtab.strsize, alloc_num) / alloc_den;
const test_end = symtab.stroff + increased_size;
if (end > symtab.stroff and start < test_end) {
return test_end;
}
}
}
return null;
}
fn allocatedSize(self: *MachO, start: u64) u64 {
if (start == 0)
return 0;
var min_pos: u64 = std.math.maxInt(u64);
{
const off = @sizeOf(macho.mach_header_64);
if (off > start and off < min_pos) min_pos = off;
}
for (self.sections.items) |section| {
if (section.offset <= start) continue;
if (section.offset < min_pos) min_pos = section.offset;
}
if (self.symtab_cmd_index) |symtab_index| {
const symtab = self.load_commands.items[symtab_index].Symtab;
if (symtab.symoff > start and symtab.symoff < min_pos) min_pos = symtab.symoff;
if (symtab.stroff > start and symtab.stroff < min_pos) min_pos = symtab.stroff;
}
return min_pos - start;
}
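/// Finds a file offset, aligned to `min_alignment`, where `object_size`
/// bytes fit without colliding with any existing range.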
fn findFreeSpace(self: *MachO, object_size: u64, min_alignment: u16) u64 {
var start: u64 = 0;
while (self.detectAllocCollision(start, object_size)) |item_end| {
start = mem.alignForwardGeneric(u64, item_end, min_alignment);
}
return start;
}
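/// Writes the `nlist_64` entry at `index` in the in-memory symbol table to
/// its slot in the file's symtab.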
fn writeSymbol(self: *MachO, index: usize) !void {
const tracy = trace(@src());
defer tracy.end();
const symtab = &self.load_commands.items[self.symtab_cmd_index.?].Symtab;
var sym = [1]macho.nlist_64{self.symbol_table.items[index]};
const off = symtab.symoff + @sizeOf(macho.nlist_64) * index;
log.debug("writing symbol {} at 0x{x}\n", .{ sym[0], off });
try self.base.file.?.pwriteAll(mem.sliceAsBytes(sym[0..1]), off);
}
/// Writes Mach-O file header.
/// Should be invoked last as it needs up-to-date values of ncmds and sizeof_cmds bookkeeping
/// variables.
fn writeMachOHeader(self: *MachO) !void {
var hdr: macho.mach_header_64 = undefined;
hdr.magic = macho.MH_MAGIC_64;
@ -190,193 +723,26 @@ fn writeMachOHeader(self: *MachO) !void {
},
};
hdr.filetype = filetype;
hdr.ncmds = @intCast(u32, self.load_commands.items.len);
const ncmds = try math.cast(u32, self.commands.items.len);
hdr.ncmds = ncmds;
var sizeof_cmds: u32 = 0;
for (self.commands.items) |cmd| {
sizeof_cmds += cmd.cmdsize;
var sizeofcmds: u32 = 0;
for (self.load_commands.items) |cmd| {
sizeofcmds += cmd.cmdsize();
}
hdr.sizeofcmds = sizeof_cmds;
hdr.sizeofcmds = sizeofcmds;
// TODO should these be set to something else?
hdr.flags = 0;
hdr.reserved = 0;
log.debug("writing Mach-O header {}\n", .{hdr});
try self.base.file.?.pwriteAll(@ptrCast([*]const u8, &hdr)[0..@sizeOf(macho.mach_header_64)], 0);
}
pub fn flush(self: *MachO, module: *Module) !void {
// Save segments first
{
const buf = try self.base.allocator.alloc(macho.segment_command_64, self.segments.items.len);
defer self.base.allocator.free(buf);
self.command_file_offset = @sizeOf(macho.mach_header_64);
for (buf) |*seg, i| {
seg.* = self.segments.items[i];
self.command_file_offset.? += self.segments.items[i].cmdsize;
}
try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), @sizeOf(macho.mach_header_64));
}
switch (self.base.options.output_mode) {
.Exe => {
{
// Specify path to dynamic linker dyld
const cmdsize = commandSize(@sizeOf(macho.dylinker_command) + mem.lenZ(DEFAULT_DYLD_PATH));
const load_dylinker = [1]macho.dylinker_command{
.{
.cmd = macho.LC_LOAD_DYLINKER,
.cmdsize = cmdsize,
.name = @sizeOf(macho.dylinker_command),
},
};
try self.commands.append(self.base.allocator, .{
.cmd = macho.LC_LOAD_DYLINKER,
.cmdsize = cmdsize,
});
try self.base.file.?.pwriteAll(mem.sliceAsBytes(load_dylinker[0..1]), self.command_file_offset.?);
const file_offset = self.command_file_offset.? + @sizeOf(macho.dylinker_command);
try self.addPadding(cmdsize - @sizeOf(macho.dylinker_command), file_offset);
try self.base.file.?.pwriteAll(mem.spanZ(DEFAULT_DYLD_PATH), file_offset);
self.command_file_offset.? += cmdsize;
}
{
// Link against libSystem
const cmdsize = commandSize(@sizeOf(macho.dylib_command) + mem.lenZ(LIB_SYSTEM_PATH));
// TODO Find a way to work out the runtime version from the OS version triple stored in std.Target.
// In the meantime, we hardcode the minimum compatibility version of 1.0.0.
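// Mach-O packs these 32-bit version fields as (major << 16) | (minor << 8) | patch,
// so 0x10000 decodes to 1.0.0.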
const min_version = 0x10000;
const dylib = .{
.name = @sizeOf(macho.dylib_command),
.timestamp = 2, // unclear why this is not simply 0; the value 2 was reverse engineered from existing Mach-O files
.current_version = min_version,
.compatibility_version = min_version,
};
const load_dylib = [1]macho.dylib_command{
.{
.cmd = macho.LC_LOAD_DYLIB,
.cmdsize = cmdsize,
.dylib = dylib,
},
};
try self.commands.append(self.base.allocator, .{
.cmd = macho.LC_LOAD_DYLIB,
.cmdsize = cmdsize,
});
try self.base.file.?.pwriteAll(mem.sliceAsBytes(load_dylib[0..1]), self.command_file_offset.?);
const file_offset = self.command_file_offset.? + @sizeOf(macho.dylib_command);
try self.addPadding(cmdsize - @sizeOf(macho.dylib_command), file_offset);
try self.base.file.?.pwriteAll(mem.spanZ(LIB_SYSTEM_PATH), file_offset);
self.command_file_offset.? += cmdsize;
}
},
.Obj => return error.TODOImplementWritingObjFiles,
.Lib => return error.TODOImplementWritingLibFiles,
}
if (self.entry_addr == null and self.base.options.output_mode == .Exe) {
log.debug("flushing. no_entry_point_found = true\n", .{});
self.error_flags.no_entry_point_found = true;
} else {
log.debug("flushing. no_entry_point_found = false\n", .{});
self.error_flags.no_entry_point_found = false;
try self.writeMachOHeader();
}
}
pub fn deinit(self: *MachO) void {
self.commands.deinit(self.base.allocator);
self.segments.deinit(self.base.allocator);
self.sections.deinit(self.base.allocator);
}
pub fn allocateDeclIndexes(self: *MachO, decl: *Module.Decl) !void {}
pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {}
pub fn updateDeclLineNumber(self: *MachO, module: *Module, decl: *const Module.Decl) !void {}
pub fn updateDeclExports(
self: *MachO,
module: *Module,
decl: *const Module.Decl,
exports: []const *Module.Export,
) !void {}
pub fn freeDecl(self: *MachO, decl: *Module.Decl) void {}
pub fn getDeclVAddr(self: *MachO, decl: *const Module.Decl) u64 {
@panic("TODO implement getDeclVAddr for MachO");
}
pub fn populateMissingMetadata(self: *MachO) !void {
if (self.text_segment_offset == null) {
self.text_segment_offset = @intCast(u64, self.segments.items.len);
const file_size = alignSize(u64, self.base.options.program_code_size_hint, 0x1000);
log.debug("vmsize/filesize = {}", .{file_size});
const file_offset = 0;
const vm_address = self.vm_start_address; // the end of __PAGEZERO segment in VM
const protection = macho.VM_PROT_READ | macho.VM_PROT_EXECUTE;
const cmdsize = commandSize(@sizeOf(macho.segment_command_64));
const text_segment = .{
.cmd = macho.LC_SEGMENT_64,
.cmdsize = cmdsize,
.segname = makeString("__TEXT"),
.vmaddr = vm_address,
.vmsize = file_size,
.fileoff = 0, // __TEXT segment *always* starts at 0 file offset
.filesize = 0, //file_size,
.maxprot = protection,
.initprot = protection,
.nsects = 0,
.flags = 0,
};
try self.commands.append(self.base.allocator, .{
.cmd = macho.LC_SEGMENT_64,
.cmdsize = cmdsize,
});
try self.segments.append(self.base.allocator, text_segment);
}
}
fn makeString(comptime bytes: []const u8) [16]u8 {
var buf = [_]u8{0} ** 16;
if (bytes.len > buf.len) @compileError("MachO segment/section name too long");
mem.copy(u8, buf[0..], bytes);
return buf;
}
fn alignSize(comptime Int: type, min_size: anytype, alignment: Int) Int {
const size = @intCast(Int, min_size);
if (size % alignment == 0) return size;
const div = size / alignment;
return (div + 1) * alignment;
}
fn commandSize(min_size: anytype) u32 {
return alignSize(u32, min_size, @sizeOf(u64));
}
fn addPadding(self: *MachO, size: u32, file_offset: u64) !void {
if (size == 0) return;
const buf = try self.base.allocator.alloc(u8, size);
defer self.base.allocator.free(buf);
mem.set(u8, buf[0..], 0);
try self.base.file.?.pwriteAll(buf, file_offset);
}
/// Saturating multiplication
fn satMul(a: anytype, b: anytype) @TypeOf(a, b) {
const T = @TypeOf(a, b);
return std.math.mul(T, a, b) catch std.math.maxInt(T);
}
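// A small illustrative test (hypothetical, not from the original sources) of
// the saturation: on overflow the product clamps to maxInt instead of
// tripping a safety check.
test "satMul saturates at the integer's max value" {
    std.testing.expect(satMul(@as(u8, 16), @as(u8, 32)) == 255);
    std.testing.expect(satMul(@as(u8, 8), @as(u8, 8)) == 64);
}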

View File

@ -15,7 +15,7 @@ pub fn analyze(
var table = std.AutoHashMap(*ir.Inst, void).init(gpa);
defer table.deinit();
try table.ensureCapacity(body.instructions.len);
try table.ensureCapacity(@intCast(u32, body.instructions.len));
try analyzeWithTable(arena, &table, null, body);
}
@ -84,8 +84,11 @@ fn analyzeInst(
try analyzeWithTable(arena, table, &then_table, inst.then_body);
// Reset the table back to its state from before the branch.
for (then_table.items()) |entry| {
table.removeAssertDiscard(entry.key);
{
var it = then_table.iterator();
while (it.next()) |entry| {
table.removeAssertDiscard(entry.key);
}
}
var else_table = std.AutoHashMap(*ir.Inst, void).init(table.allocator);
@ -97,28 +100,36 @@ fn analyzeInst(
var else_entry_deaths = std.ArrayList(*ir.Inst).init(table.allocator);
defer else_entry_deaths.deinit();
for (else_table.items()) |entry| {
const else_death = entry.key;
if (!then_table.contains(else_death)) {
try then_entry_deaths.append(else_death);
{
var it = else_table.iterator();
while (it.next()) |entry| {
const else_death = entry.key;
if (!then_table.contains(else_death)) {
try then_entry_deaths.append(else_death);
}
}
}
// This loop is the same, except it's for the then branch, and it additionally
// has to put its items back into the table to undo the reset.
for (then_table.items()) |entry| {
const then_death = entry.key;
if (!else_table.contains(then_death)) {
try else_entry_deaths.append(then_death);
{
var it = then_table.iterator();
while (it.next()) |entry| {
const then_death = entry.key;
if (!else_table.contains(then_death)) {
try else_entry_deaths.append(then_death);
}
_ = try table.put(then_death, {});
}
_ = try table.put(then_death, {});
}
// Now we have to correctly populate new_set.
if (new_set) |ns| {
try ns.ensureCapacity(ns.items().len + then_table.items().len + else_table.items().len);
for (then_table.items()) |entry| {
try ns.ensureCapacity(@intCast(u32, ns.count() + then_table.count() + else_table.count()));
var it = then_table.iterator();
while (it.next()) |entry| {
_ = ns.putAssumeCapacity(entry.key, {});
}
for (else_table.items()) |entry| {
it = else_table.iterator();
while (it.next()) |entry| {
_ = ns.putAssumeCapacity(entry.key, {});
}
}

View File

@ -839,7 +839,8 @@ fn fmtPathFile(
// As a heuristic, we reserve capacity equal to the length of the input source.
try fmt.out_buffer.ensureCapacity(source_code.len);
fmt.out_buffer.items.len = 0;
const anything_changed = try std.zig.render(fmt.gpa, fmt.out_buffer.writer(), tree);
const writer = fmt.out_buffer.writer();
const anything_changed = try std.zig.render(fmt.gpa, writer, tree);
if (!anything_changed)
return; // Good thing we didn't waste any file system access on this.

View File

@ -474,15 +474,15 @@ pub const TestContext = struct {
var all_errors = try module.getAllErrorsAlloc();
defer all_errors.deinit(allocator);
if (all_errors.list.len != 0) {
std.debug.warn("\nErrors occurred updating the module:\n================\n", .{});
std.debug.print("\nErrors occurred updating the module:\n================\n", .{});
for (all_errors.list) |err| {
std.debug.warn(":{}:{}: error: {}\n================\n", .{ err.line + 1, err.column + 1, err.msg });
std.debug.print(":{}:{}: error: {}\n================\n", .{ err.line + 1, err.column + 1, err.msg });
}
if (case.cbe) {
const C = module.bin_file.cast(link.File.C).?;
std.debug.warn("Generated C: \n===============\n{}\n\n===========\n\n", .{C.main.items});
std.debug.print("Generated C: \n===============\n{}\n\n===========\n\n", .{C.main.items});
}
std.debug.warn("Test failed.\n", .{});
std.debug.print("Test failed.\n", .{});
std.process.exit(1);
}
}
@ -497,12 +497,12 @@ pub const TestContext = struct {
var out = file.reader().readAllAlloc(arena, 1024 * 1024) catch @panic("Unable to read C output!");
if (expected_output.len != out.len) {
std.debug.warn("\nTransformed C length differs:\n================\nExpected:\n================\n{}\n================\nFound:\n================\n{}\n================\nTest failed.\n", .{ expected_output, out });
std.debug.print("\nTransformed C length differs:\n================\nExpected:\n================\n{}\n================\nFound:\n================\n{}\n================\nTest failed.\n", .{ expected_output, out });
std.process.exit(1);
}
for (expected_output) |e, i| {
if (out[i] != e) {
std.debug.warn("\nTransformed C differs:\n================\nExpected:\n================\n{}\n================\nFound:\n================\n{}\n================\nTest failed.\n", .{ expected_output, out });
std.debug.print("\nTransformed C differs:\n================\nExpected:\n================\n{}\n================\nFound:\n================\n{}\n================\nTest failed.\n", .{ expected_output, out });
std.process.exit(1);
}
}
@ -526,12 +526,12 @@ pub const TestContext = struct {
defer test_node.end();
if (expected_output.len != out_zir.items.len) {
std.debug.warn("{}\nTransformed ZIR length differs:\n================\nExpected:\n================\n{}\n================\nFound:\n================\n{}\n================\nTest failed.\n", .{ case.name, expected_output, out_zir.items });
std.debug.print("{}\nTransformed ZIR length differs:\n================\nExpected:\n================\n{}\n================\nFound:\n================\n{}\n================\nTest failed.\n", .{ case.name, expected_output, out_zir.items });
std.process.exit(1);
}
for (expected_output) |e, i| {
if (out_zir.items[i] != e) {
std.debug.warn("{}\nTransformed ZIR differs:\n================\nExpected:\n================\n{}\n================\nFound:\n================\n{}\n================\nTest failed.\n", .{ case.name, expected_output, out_zir.items });
std.debug.print("{}\nTransformed ZIR differs:\n================\nExpected:\n================\n{}\n================\nFound:\n================\n{}\n================\nTest failed.\n", .{ case.name, expected_output, out_zir.items });
std.process.exit(1);
}
}
@ -554,7 +554,7 @@ pub const TestContext = struct {
break;
}
} else {
std.debug.warn("{}\nUnexpected error:\n================\n:{}:{}: error: {}\n================\nTest failed.\n", .{ case.name, a.line + 1, a.column + 1, a.msg });
std.debug.print("{}\nUnexpected error:\n================\n:{}:{}: error: {}\n================\nTest failed.\n", .{ case.name, a.line + 1, a.column + 1, a.msg });
std.process.exit(1);
}
}
@ -562,7 +562,7 @@ pub const TestContext = struct {
for (handled_errors) |h, i| {
if (!h) {
const er = e[i];
std.debug.warn("{}\nDid not receive error:\n================\n{}:{}: {}\n================\nTest failed.\n", .{ case.name, er.line, er.column, er.msg });
std.debug.print("{}\nDid not receive error:\n================\n{}:{}: {}\n================\nTest failed.\n", .{ case.name, er.line, er.column, er.msg });
std.process.exit(1);
}
}
@ -643,7 +643,7 @@ pub const TestContext = struct {
switch (exec_result.term) {
.Exited => |code| {
if (code != 0) {
std.debug.warn("elf file exited with code {}\n", .{code});
std.debug.print("elf file exited with code {}\n", .{code});
return error.BinaryBadExitCode;
}
},

View File

@ -19,23 +19,9 @@ pub const Error = error{OutOfMemory};
const TypeError = Error || error{UnsupportedType};
const TransError = TypeError || error{UnsupportedTranslation};
const DeclTable = std.HashMap(usize, []const u8, addrHash, addrEql, false);
const DeclTable = std.AutoArrayHashMap(usize, []const u8);
fn addrHash(x: usize) u32 {
switch (@typeInfo(usize).Int.bits) {
32 => return x,
// pointers are usually aligned so we ignore the bits that are probably all 0 anyway
// usually the larger bits of addr space are unused so we just chop em off
64 => return @truncate(u32, x >> 4),
else => @compileError("unreachable"),
}
}
fn addrEql(a: usize, b: usize) bool {
return a == b;
}
const SymbolTable = std.StringHashMap(*ast.Node);
const SymbolTable = std.StringArrayHashMap(*ast.Node);
const AliasList = std.ArrayList(struct {
alias: []const u8,
name: []const u8,
@ -285,7 +271,7 @@ pub const Context = struct {
/// a list of names that we found by visiting all the top level decls without
/// translating them. The other maps are updated as we translate; this one is updated
/// up front in a pre-processing step.
global_names: std.StringHashMap(void),
global_names: std.StringArrayHashMap(void),
fn getMangle(c: *Context) u32 {
c.mangle_count += 1;
@ -380,7 +366,7 @@ pub fn translate(
.alias_list = AliasList.init(gpa),
.global_scope = try arena.allocator.create(Scope.Root),
.clang_context = ZigClangASTUnit_getASTContext(ast_unit).?,
.global_names = std.StringHashMap(void).init(gpa),
.global_names = std.StringArrayHashMap(void).init(gpa),
.token_ids = .{},
.token_locs = .{},
.errors = .{},
@ -6424,7 +6410,8 @@ fn getFnProto(c: *Context, ref: *ast.Node) ?*ast.Node.FnProto {
}
fn addMacros(c: *Context) !void {
for (c.global_scope.macro_table.items()) |kv| {
var it = c.global_scope.macro_table.iterator();
while (it.next()) |kv| {
if (getFnProto(c, kv.value)) |proto_node| {
// If a macro aliases a global variable which is a function pointer, we conclude that
// the macro is intended to represent a function that assumes the function pointer

View File

@ -163,7 +163,7 @@ pub const Type = extern union {
// Hot path for common case:
if (a.castPointer()) |a_payload| {
if (b.castPointer()) |b_payload| {
return eql(a_payload.pointee_type, b_payload.pointee_type);
return a.tag() == b.tag() and eql(a_payload.pointee_type, b_payload.pointee_type);
}
}
const is_slice_a = isSlice(a);
@ -189,10 +189,10 @@ pub const Type = extern union {
.Array => {
if (a.arrayLen() != b.arrayLen())
return false;
if (a.elemType().eql(b.elemType()))
if (!a.elemType().eql(b.elemType()))
return false;
const sentinel_a = a.arraySentinel();
const sentinel_b = b.arraySentinel();
const sentinel_a = a.sentinel();
const sentinel_b = b.sentinel();
if (sentinel_a) |sa| {
if (sentinel_b) |sb| {
return sa.eql(sb);
@ -238,7 +238,7 @@ pub const Type = extern union {
}
}
pub fn hash(self: Type) u32 {
pub fn hash(self: Type) u64 {
var hasher = std.hash.Wyhash.init(0);
const zig_type_tag = self.zigTypeTag();
std.hash.autoHash(&hasher, zig_type_tag);
@ -303,7 +303,7 @@ pub const Type = extern union {
// TODO implement more type hashing
},
}
return @truncate(u32, hasher.final());
return hasher.final();
}
pub fn copy(self: Type, allocator: *Allocator) error{OutOfMemory}!Type {
@ -501,9 +501,9 @@ pub const Type = extern union {
.noreturn,
=> return out_stream.writeAll(@tagName(t)),
.enum_literal => return out_stream.writeAll("@TypeOf(.EnumLiteral)"),
.@"null" => return out_stream.writeAll("@TypeOf(null)"),
.@"undefined" => return out_stream.writeAll("@TypeOf(undefined)"),
.enum_literal => return out_stream.writeAll("@Type(.EnumLiteral)"),
.@"null" => return out_stream.writeAll("@Type(.Null)"),
.@"undefined" => return out_stream.writeAll("@Type(.Undefined)"),
.@"anyframe" => return out_stream.writeAll("anyframe"),
.anyerror_void_error_union => return out_stream.writeAll("anyerror!void"),
@ -630,8 +630,8 @@ pub const Type = extern union {
const payload = @fieldParentPtr(Payload.Pointer, "base", ty.ptr_otherwise);
if (payload.sentinel) |some| switch (payload.size) {
.One, .C => unreachable,
.Many => try out_stream.writeAll("[*:{}]"),
.Slice => try out_stream.writeAll("[:{}]"),
.Many => try out_stream.print("[*:{}]", .{some}),
.Slice => try out_stream.print("[:{}]", .{some}),
} else switch (payload.size) {
.One => try out_stream.writeAll("*"),
.Many => try out_stream.writeAll("[*]"),
@ -1341,6 +1341,81 @@ pub const Type = extern union {
};
}
pub fn isAllowzeroPtr(self: Type) bool {
return switch (self.tag()) {
.u8,
.i8,
.u16,
.i16,
.u32,
.i32,
.u64,
.i64,
.usize,
.isize,
.c_short,
.c_ushort,
.c_int,
.c_uint,
.c_long,
.c_ulong,
.c_longlong,
.c_ulonglong,
.c_longdouble,
.f16,
.f32,
.f64,
.f128,
.c_void,
.bool,
.void,
.type,
.anyerror,
.comptime_int,
.comptime_float,
.noreturn,
.@"null",
.@"undefined",
.array,
.array_sentinel,
.array_u8,
.array_u8_sentinel_0,
.fn_noreturn_no_args,
.fn_void_no_args,
.fn_naked_noreturn_no_args,
.fn_ccc_void_no_args,
.function,
.int_unsigned,
.int_signed,
.single_mut_pointer,
.single_const_pointer,
.many_const_pointer,
.many_mut_pointer,
.c_const_pointer,
.c_mut_pointer,
.const_slice,
.mut_slice,
.single_const_pointer_to_comptime_int,
.const_slice_u8,
.optional,
.optional_single_mut_pointer,
.optional_single_const_pointer,
.enum_literal,
.error_union,
.@"anyframe",
.anyframe_T,
.anyerror_void_error_union,
.error_set,
.error_set_single,
=> false,
.pointer => {
const payload = @fieldParentPtr(Payload.Pointer, "base", self.ptr_otherwise);
return payload.@"allowzero";
},
};
}
/// Asserts that the type is an optional
pub fn isPtrLikeOptional(self: Type) bool {
switch (self.tag()) {
@ -1585,8 +1660,8 @@ pub const Type = extern union {
};
}
/// Asserts the type is an array or vector.
pub fn arraySentinel(self: Type) ?Value {
/// Asserts the type is an array, pointer or vector.
pub fn sentinel(self: Type) ?Value {
return switch (self.tag()) {
.u8,
.i8,
@ -1626,16 +1701,8 @@ pub const Type = extern union {
.fn_naked_noreturn_no_args,
.fn_ccc_void_no_args,
.function,
.pointer,
.single_const_pointer,
.single_mut_pointer,
.many_const_pointer,
.many_mut_pointer,
.c_const_pointer,
.c_mut_pointer,
.const_slice,
.mut_slice,
.single_const_pointer_to_comptime_int,
.const_slice_u8,
.int_unsigned,
.int_signed,
@ -1651,7 +1718,18 @@ pub const Type = extern union {
.error_set_single,
=> unreachable,
.array, .array_u8 => return null,
.single_const_pointer,
.single_mut_pointer,
.many_const_pointer,
.many_mut_pointer,
.c_const_pointer,
.c_mut_pointer,
.single_const_pointer_to_comptime_int,
.array,
.array_u8,
=> return null,
.pointer => return self.cast(Payload.Pointer).?.sentinel,
.array_sentinel => return self.cast(Payload.ArraySentinel).?.sentinel,
.array_u8_sentinel_0 => return Value.initTag(.zero),
};

View File

@ -301,15 +301,15 @@ pub const Value = extern union {
.comptime_int_type => return out_stream.writeAll("comptime_int"),
.comptime_float_type => return out_stream.writeAll("comptime_float"),
.noreturn_type => return out_stream.writeAll("noreturn"),
.null_type => return out_stream.writeAll("@TypeOf(null)"),
.undefined_type => return out_stream.writeAll("@TypeOf(undefined)"),
.null_type => return out_stream.writeAll("@Type(.Null)"),
.undefined_type => return out_stream.writeAll("@Type(.Undefined)"),
.fn_noreturn_no_args_type => return out_stream.writeAll("fn() noreturn"),
.fn_void_no_args_type => return out_stream.writeAll("fn() void"),
.fn_naked_noreturn_no_args_type => return out_stream.writeAll("fn() callconv(.Naked) noreturn"),
.fn_ccc_void_no_args_type => return out_stream.writeAll("fn() callconv(.C) void"),
.single_const_pointer_to_comptime_int_type => return out_stream.writeAll("*const comptime_int"),
.const_slice_u8_type => return out_stream.writeAll("[]const u8"),
.enum_literal_type => return out_stream.writeAll("@TypeOf(.EnumLiteral)"),
.enum_literal_type => return out_stream.writeAll("@Type(.EnumLiteral)"),
.anyframe_type => return out_stream.writeAll("anyframe"),
.null_value => return out_stream.writeAll("null"),
@ -358,7 +358,8 @@ pub const Value = extern union {
.error_set => {
const error_set = val.cast(Payload.ErrorSet).?;
try out_stream.writeAll("error{");
for (error_set.fields.items()) |entry| {
var it = error_set.fields.iterator();
while (it.next()) |entry| {
try out_stream.print("{},", .{entry.value});
}
return out_stream.writeAll("}");

View File

@ -78,6 +78,13 @@ pub const Inst = struct {
bitor,
/// A labeled block of code, which can return a value.
block,
/// A block of code, which can return a value. There are no instructions that break out of
/// this block; it is implied that the final instruction is the result.
block_flat,
/// Same as `block` but additionally makes the inner instructions execute at comptime.
block_comptime,
/// Same as `block_flat` but additionally makes the inner instructions execute at comptime.
block_comptime_flat,
/// Boolean NOT. See also `bitnot`.
boolnot,
/// Return a value from a `Block`.
@ -224,6 +231,10 @@ pub const Inst = struct {
const_slice_type,
/// Create a pointer type with attributes
ptr_type,
/// Slice operation `array_ptr[start..end:sentinel]`
slice,
/// Slice operation with just start `lhs[rhs..]`
slice_start,
/// Write a value to a pointer. For loading, see `deref`.
store,
/// String Literal. Makes an anonymous Decl and then takes a pointer to it.
@ -336,11 +347,17 @@ pub const Inst = struct {
.xor,
.error_union_type,
.merge_error_sets,
.slice_start,
=> BinOp,
.block,
.block_flat,
.block_comptime,
.block_comptime_flat,
=> Block,
.arg => Arg,
.array_type_sentinel => ArrayTypeSentinel,
.block => Block,
.@"break" => Break,
.breakvoid => BreakVoid,
.call => Call,
@ -368,6 +385,7 @@ pub const Inst = struct {
.ptr_type => PtrType,
.enum_literal => EnumLiteral,
.error_set => ErrorSet,
.slice => Slice,
};
}
@ -392,6 +410,9 @@ pub const Inst = struct {
.bitcast_result_ptr,
.bitor,
.block,
.block_flat,
.block_comptime,
.block_comptime_flat,
.boolnot,
.breakpoint,
.call,
@ -466,6 +487,8 @@ pub const Inst = struct {
.error_union_type,
.bitnot,
.error_set,
.slice,
.slice_start,
=> false,
.@"break",
@ -946,6 +969,20 @@ pub const Inst = struct {
},
kw_args: struct {},
};
pub const Slice = struct {
pub const base_tag = Tag.slice;
base: Inst,
positionals: struct {
array_ptr: *Inst,
start: *Inst,
},
kw_args: struct {
end: ?*Inst = null,
sentinel: ?*Inst = null,
},
};
};
pub const ErrorMsg = struct {
@ -1034,7 +1071,7 @@ pub const Module = struct {
defer write.loop_table.deinit();
// First, build a map of *Inst to @ or % indexes
try write.inst_table.ensureCapacity(self.decls.len);
try write.inst_table.ensureCapacity(@intCast(u32, self.decls.len));
for (self.decls) |decl, decl_i| {
try write.inst_table.putNoClobber(decl.inst, .{ .inst = decl.inst, .index = null, .name = decl.name });
@ -1670,7 +1707,7 @@ pub fn emit(allocator: *Allocator, old_module: IrModule) !Module {
.arena = std.heap.ArenaAllocator.init(allocator),
.old_module = &old_module,
.next_auto_name = 0,
.names = std.StringHashMap(void).init(allocator),
.names = std.StringArrayHashMap(void).init(allocator),
.primitive_table = std.AutoHashMap(Inst.Primitive.Builtin, *Decl).init(allocator),
.indent = 0,
.block_table = std.AutoHashMap(*ir.Inst.Block, *Inst.Block).init(allocator),
@ -1743,7 +1780,7 @@ const EmitZIR = struct {
arena: std.heap.ArenaAllocator,
old_module: *const IrModule,
decls: std.ArrayListUnmanaged(*Decl),
names: std.StringHashMap(void),
names: std.StringArrayHashMap(void),
next_auto_name: usize,
primitive_table: std.AutoHashMap(Inst.Primitive.Builtin, *Decl),
indent: usize,
@ -2559,7 +2596,7 @@ const EmitZIR = struct {
var len_pl = Value.Payload.Int_u64{ .int = ty.arrayLen() };
const len = Value.initPayload(&len_pl.base);
const inst = if (ty.arraySentinel()) |sentinel| blk: {
const inst = if (ty.sentinel()) |sentinel| blk: {
const inst = try self.arena.allocator.create(Inst.ArrayTypeSentinel);
inst.* = .{
.base = .{

View File

@ -31,7 +31,10 @@ pub fn analyzeInst(mod: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError!
.arg => return analyzeInstArg(mod, scope, old_inst.castTag(.arg).?),
.bitcast_ref => return analyzeInstBitCastRef(mod, scope, old_inst.castTag(.bitcast_ref).?),
.bitcast_result_ptr => return analyzeInstBitCastResultPtr(mod, scope, old_inst.castTag(.bitcast_result_ptr).?),
.block => return analyzeInstBlock(mod, scope, old_inst.castTag(.block).?),
.block => return analyzeInstBlock(mod, scope, old_inst.castTag(.block).?, false),
.block_comptime => return analyzeInstBlock(mod, scope, old_inst.castTag(.block_comptime).?, true),
.block_flat => return analyzeInstBlockFlat(mod, scope, old_inst.castTag(.block_flat).?, false),
.block_comptime_flat => return analyzeInstBlockFlat(mod, scope, old_inst.castTag(.block_comptime_flat).?, true),
.@"break" => return analyzeInstBreak(mod, scope, old_inst.castTag(.@"break").?),
.breakpoint => return analyzeInstBreakpoint(mod, scope, old_inst.castTag(.breakpoint).?),
.breakvoid => return analyzeInstBreakVoid(mod, scope, old_inst.castTag(.breakvoid).?),
@ -129,6 +132,8 @@ pub fn analyzeInst(mod: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError!
.error_union_type => return analyzeInstErrorUnionType(mod, scope, old_inst.castTag(.error_union_type).?),
.anyframe_type => return analyzeInstAnyframeType(mod, scope, old_inst.castTag(.anyframe_type).?),
.error_set => return analyzeInstErrorSet(mod, scope, old_inst.castTag(.error_set).?),
.slice => return analyzeInstSlice(mod, scope, old_inst.castTag(.slice).?),
.slice_start => return analyzeInstSliceStart(mod, scope, old_inst.castTag(.slice_start).?),
}
}
@ -147,17 +152,16 @@ pub fn analyzeBody(mod: *Module, scope: *Scope, body: zir.Module.Body) !void {
}
}
pub fn analyzeBodyValueAsType(mod: *Module, block_scope: *Scope.Block, body: zir.Module.Body) !Type {
pub fn analyzeBodyValueAsType(
mod: *Module,
block_scope: *Scope.Block,
zir_result_inst: *zir.Inst,
body: zir.Module.Body,
) !Type {
try analyzeBody(mod, &block_scope.base, body);
for (block_scope.instructions.items) |inst| {
if (inst.castTag(.ret)) |ret| {
const val = try mod.resolveConstValue(&block_scope.base, ret.operand);
return val.toType(block_scope.base.arena());
} else {
return mod.fail(&block_scope.base, inst.src, "unable to resolve comptime value", .{});
}
}
unreachable;
const result_inst = zir_result_inst.analyzed_inst.?;
const val = try mod.resolveConstValue(&block_scope.base, result_inst);
return val.toType(block_scope.base.arena());
}
pub fn analyzeZirDecl(mod: *Module, decl: *Decl, src_decl: *zir.Decl) InnerError!bool {
@ -362,7 +366,7 @@ fn analyzeInstRef(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!
}
fn analyzeInstRetType(mod: *Module, scope: *Scope, inst: *zir.Inst.NoOp) InnerError!*Inst {
const b = try mod.requireRuntimeBlock(scope, inst.base.src);
const b = try mod.requireFunctionBlock(scope, inst.base.src);
const fn_ty = b.func.?.owner_decl.typed_value.most_recent.typed_value.ty;
const ret_type = fn_ty.fnReturnType();
return mod.constType(scope, inst.base.src, ret_type);
@ -517,6 +521,7 @@ fn analyzeInstLoop(mod: *Module, scope: *Scope, inst: *zir.Inst.Loop) InnerError
.decl = parent_block.decl,
.instructions = .{},
.arena = parent_block.arena,
.is_comptime = parent_block.is_comptime,
};
defer child_block.instructions.deinit(mod.gpa);
@ -529,7 +534,29 @@ fn analyzeInstLoop(mod: *Module, scope: *Scope, inst: *zir.Inst.Loop) InnerError
return &loop_inst.base;
}
fn analyzeInstBlock(mod: *Module, scope: *Scope, inst: *zir.Inst.Block) InnerError!*Inst {
fn analyzeInstBlockFlat(mod: *Module, scope: *Scope, inst: *zir.Inst.Block, is_comptime: bool) InnerError!*Inst {
const parent_block = scope.cast(Scope.Block).?;
var child_block: Scope.Block = .{
.parent = parent_block,
.func = parent_block.func,
.decl = parent_block.decl,
.instructions = .{},
.arena = parent_block.arena,
.label = null,
.is_comptime = parent_block.is_comptime or is_comptime,
};
defer child_block.instructions.deinit(mod.gpa);
try analyzeBody(mod, &child_block.base, inst.positionals.body);
const copied_instructions = try parent_block.arena.dupe(*Inst, child_block.instructions.items);
try parent_block.instructions.appendSlice(mod.gpa, copied_instructions);
return copied_instructions[copied_instructions.len - 1];
}
fn analyzeInstBlock(mod: *Module, scope: *Scope, inst: *zir.Inst.Block, is_comptime: bool) InnerError!*Inst {
const parent_block = scope.cast(Scope.Block).?;
// Reserve space for a Block instruction so that generated Break instructions can
@ -557,6 +584,7 @@ fn analyzeInstBlock(mod: *Module, scope: *Scope, inst: *zir.Inst.Block) InnerErr
.results = .{},
.block_inst = block_inst,
}),
.is_comptime = is_comptime or parent_block.is_comptime,
};
const label = &child_block.label.?;
@ -569,6 +597,28 @@ fn analyzeInstBlock(mod: *Module, scope: *Scope, inst: *zir.Inst.Block) InnerErr
assert(child_block.instructions.items.len != 0);
assert(child_block.instructions.items[child_block.instructions.items.len - 1].ty.isNoReturn());
if (label.results.items.len == 0) {
// No need for a block instruction. We can put the new instructions directly into the parent block.
const copied_instructions = try parent_block.arena.dupe(*Inst, child_block.instructions.items);
try parent_block.instructions.appendSlice(mod.gpa, copied_instructions);
return copied_instructions[copied_instructions.len - 1];
}
if (label.results.items.len == 1) {
const last_inst_index = child_block.instructions.items.len - 1;
const last_inst = child_block.instructions.items[last_inst_index];
if (last_inst.breakBlock()) |br_block| {
if (br_block == block_inst) {
// No need for a block instruction. We can put the new instructions directly into the parent block.
// Here we omit the break instruction.
const copied_instructions = try parent_block.arena.dupe(*Inst, child_block.instructions.items[0..last_inst_index]);
try parent_block.instructions.appendSlice(mod.gpa, copied_instructions);
return label.results.items[0];
}
}
}
// It should be impossible to have the number of results be > 1 in a comptime scope.
assert(!child_block.is_comptime); // We should have already got a compile error in the condbr condition.
// Need to set the type and emit the Block instruction. This allows machine code generation
// to emit a jump instruction to after the block when it encounters the break.
try parent_block.instructions.append(mod.gpa, &block_inst.base);
@ -595,8 +645,12 @@ fn analyzeInstBreakVoid(mod: *Module, scope: *Scope, inst: *zir.Inst.BreakVoid)
}
fn analyzeInstDbgStmt(mod: *Module, scope: *Scope, inst: *zir.Inst.NoOp) InnerError!*Inst {
const b = try mod.requireRuntimeBlock(scope, inst.base.src);
return mod.addNoOp(b, inst.base.src, Type.initTag(.void), .dbg_stmt);
if (scope.cast(Scope.Block)) |b| {
if (!b.is_comptime) {
return mod.addNoOp(b, inst.base.src, Type.initTag(.void), .dbg_stmt);
}
}
return mod.constVoid(scope, inst.base.src);
}
fn analyzeInstDeclRefStr(mod: *Module, scope: *Scope, inst: *zir.Inst.DeclRefStr) InnerError!*Inst {
@ -764,7 +818,7 @@ fn analyzeInstErrorSet(mod: *Module, scope: *Scope, inst: *zir.Inst.ErrorSet) In
.fields = .{},
.decl = undefined, // populated below
};
try payload.fields.ensureCapacity(&new_decl_arena.allocator, inst.positionals.fields.len);
try payload.fields.ensureCapacity(&new_decl_arena.allocator, @intCast(u32, inst.positionals.fields.len));
for (inst.positionals.fields) |field_name| {
const entry = try mod.getErrorValue(field_name);
@ -1083,7 +1137,7 @@ fn analyzeInstElemPtr(mod: *Module, scope: *Scope, inst: *zir.Inst.ElemPtr) Inne
const array_ptr = try resolveInst(mod, scope, inst.positionals.array_ptr);
const uncasted_index = try resolveInst(mod, scope, inst.positionals.index);
const elem_index = try mod.coerce(scope, Type.initTag(.usize), uncasted_index);
const elem_ty = switch (array_ptr.ty.zigTypeTag()) {
.Pointer => array_ptr.ty.elemType(),
else => return mod.fail(scope, inst.positionals.array_ptr.src, "expected pointer, found '{}'", .{array_ptr.ty}),
@ -1120,6 +1174,22 @@ fn analyzeInstElemPtr(mod: *Module, scope: *Scope, inst: *zir.Inst.ElemPtr) Inne
return mod.fail(scope, inst.base.src, "TODO implement more analyze elemptr", .{});
}
fn analyzeInstSlice(mod: *Module, scope: *Scope, inst: *zir.Inst.Slice) InnerError!*Inst {
const array_ptr = try resolveInst(mod, scope, inst.positionals.array_ptr);
const start = try resolveInst(mod, scope, inst.positionals.start);
const end = if (inst.kw_args.end) |end| try resolveInst(mod, scope, end) else null;
const sentinel = if (inst.kw_args.sentinel) |sentinel| try resolveInst(mod, scope, sentinel) else null;
return mod.analyzeSlice(scope, inst.base.src, array_ptr, start, end, sentinel);
}
fn analyzeInstSliceStart(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerError!*Inst {
const array_ptr = try resolveInst(mod, scope, inst.positionals.lhs);
const start = try resolveInst(mod, scope, inst.positionals.rhs);
return mod.analyzeSlice(scope, inst.base.src, array_ptr, start, null, null);
}
fn analyzeInstShl(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerError!*Inst {
return mod.fail(scope, inst.base.src, "TODO implement analyzeInstShl", .{});
}
@ -1187,6 +1257,12 @@ fn analyzeInstArithmetic(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) Inn
if (casted_lhs.value()) |lhs_val| {
if (casted_rhs.value()) |rhs_val| {
if (lhs_val.isUndef() or rhs_val.isUndef()) {
return mod.constInst(scope, inst.base.src, .{
.ty = resolved_type,
.val = Value.initTag(.undef),
});
}
return analyzeInstComptimeOp(mod, scope, scalar_type, inst, lhs_val, rhs_val);
}
}
@ -1376,6 +1452,7 @@ fn analyzeInstCondBr(mod: *Module, scope: *Scope, inst: *zir.Inst.CondBr) InnerE
.decl = parent_block.decl,
.instructions = .{},
.arena = parent_block.arena,
.is_comptime = parent_block.is_comptime,
};
defer true_block.instructions.deinit(mod.gpa);
try analyzeBody(mod, &true_block.base, inst.positionals.then_body);
@ -1386,6 +1463,7 @@ fn analyzeInstCondBr(mod: *Module, scope: *Scope, inst: *zir.Inst.CondBr) InnerE
.decl = parent_block.decl,
.instructions = .{},
.arena = parent_block.arena,
.is_comptime = parent_block.is_comptime,
};
defer false_block.instructions.deinit(mod.gpa);
try analyzeBody(mod, &false_block.base, inst.positionals.else_body);

View File

@ -15342,9 +15342,14 @@ static IrInstGen *ir_analyze_cast(IrAnalyze *ira, IrInst *source_instr,
ZigType *array_type = actual_type->data.pointer.child_type;
bool const_ok = (slice_ptr_type->data.pointer.is_const || array_type->data.array.len == 0
|| !actual_type->data.pointer.is_const);
if (const_ok && types_match_const_cast_only(ira, slice_ptr_type->data.pointer.child_type,
array_type->data.array.child_type, source_node,
!slice_ptr_type->data.pointer.is_const).id == ConstCastResultIdOk)
!slice_ptr_type->data.pointer.is_const).id == ConstCastResultIdOk &&
(slice_ptr_type->data.pointer.sentinel == nullptr ||
(array_type->data.array.sentinel != nullptr &&
const_values_equal(ira->codegen, array_type->data.array.sentinel,
slice_ptr_type->data.pointer.sentinel))))
{
// If the pointers both have ABI align, it works.
// Or if the array length is 0, alignment doesn't matter.
@ -25684,6 +25689,10 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInst* source_instr, ZigTy
}
set_optional_payload(inner_fields[2], struct_field->init_val);
inner_fields[3]->special = ConstValSpecialStatic;
inner_fields[3]->type = ira->codegen->builtin_types.entry_bool;
inner_fields[3]->data.x_bool = struct_field->is_comptime;
ZigValue *name = create_const_str_lit(ira->codegen, struct_field->name)->data.x_ptr.data.ref.pointee;
init_const_slice(ira->codegen, inner_fields[0], name, 0, buf_len(struct_field->name), true);
@ -26292,6 +26301,8 @@ static ZigType *type_info_to_type(IrAnalyze *ira, IrInst *source_instr, ZigTypeI
buf_ptr(&field->type_entry->name), buf_ptr(&field->type_entry->name)));
return ira->codegen->invalid_inst_gen->value->type;
}
if ((err = get_const_field_bool(ira, source_instr->source_node, field_value, "is_comptime", 3, &field->is_comptime)))
return ira->codegen->invalid_inst_gen->value->type;
}
return entry;

View File

@ -2,6 +2,14 @@ const tests = @import("tests.zig");
const std = @import("std");
pub fn addCases(cases: *tests.CompileErrorContext) void {
cases.add("slice sentinel mismatch",
\\export fn entry() void {
\\ const y: [:1]const u8 = &[_:2]u8{ 1, 2 };
\\}
, &[_][]const u8{
"tmp.zig:2:37: error: expected type '[:1]const u8', found '*const [2:2]u8'",
});
cases.add("@Type with undefined",
\\comptime {
\\ _ = @Type(.{ .Array = .{ .len = 0, .child = u8, .sentinel = undefined } });

View File

@ -418,3 +418,9 @@ test "Struct.is_tuple" {
expect(@typeInfo(@TypeOf(.{0})).Struct.is_tuple);
expect(!@typeInfo(@TypeOf(.{ .a = 0 })).Struct.is_tuple);
}
test "StructField.is_comptime" {
const info = @typeInfo(struct { x: u8 = 3, comptime y: u32 = 5 }).Struct;
expect(!info.fields[0].is_comptime);
expect(info.fields[1].is_comptime);
}

View File

@ -274,7 +274,7 @@ pub fn addCases(ctx: *TestContext) !void {
}
{
var case = ctx.exe("substracting numbers at runtime", linux_x64);
var case = ctx.exe("subtracting numbers at runtime", linux_x64);
case.addCompareOutput(
\\export fn _start() noreturn {
\\ sub(7, 4);
@ -967,10 +967,19 @@ pub fn addCases(ctx: *TestContext) !void {
\\fn entry() void {}
, &[_][]const u8{":2:4: error: redefinition of 'entry'"});
ctx.compileError("extern variable has no type", linux_x64,
\\comptime {
\\ _ = foo;
\\}
\\extern var foo;
, &[_][]const u8{":4:1: error: unable to infer variable type"});
{
var case = ctx.obj("extern variable has no type", linux_x64);
case.addError(
\\comptime {
\\ _ = foo;
\\}
\\extern var foo: i32;
, &[_][]const u8{":2:9: error: unable to resolve comptime value"});
case.addError(
\\export fn entry() void {
\\ _ = foo;
\\}
\\extern var foo;
, &[_][]const u8{":4:1: error: unable to infer variable type"});
}
}