Merge remote-tracking branch 'origin/master' into llvm10

master
Andrew Kelley 2020-02-25 16:30:40 -05:00
commit f33bf48af7
No known key found for this signature in database
GPG Key ID: 7C5F548F728501A9
171 changed files with 7252 additions and 7618 deletions

View File

@ -240,8 +240,8 @@ find_package(Threads)
# CMake doesn't let us create an empty executable, so we hang on to this one separately.
set(ZIG_MAIN_SRC "${CMAKE_SOURCE_DIR}/src/main.cpp")
# This is our shim which will be replaced by libuserland written in Zig.
set(ZIG0_SHIM_SRC "${CMAKE_SOURCE_DIR}/src/userland.cpp")
# This is our shim which will be replaced by libstage2 written in Zig.
set(ZIG0_SHIM_SRC "${CMAKE_SOURCE_DIR}/src/stage2.cpp")
if(ZIG_ENABLE_MEM_PROFILE)
set(ZIG_SOURCES_MEM_PROFILE "${CMAKE_SOURCE_DIR}/src/mem_profile.cpp")
@ -263,7 +263,6 @@ set(ZIG_SOURCES
"${CMAKE_SOURCE_DIR}/src/heap.cpp"
"${CMAKE_SOURCE_DIR}/src/ir.cpp"
"${CMAKE_SOURCE_DIR}/src/ir_print.cpp"
"${CMAKE_SOURCE_DIR}/src/libc_installation.cpp"
"${CMAKE_SOURCE_DIR}/src/link.cpp"
"${CMAKE_SOURCE_DIR}/src/mem.cpp"
"${CMAKE_SOURCE_DIR}/src/os.cpp"
@ -377,27 +376,27 @@ set_target_properties(opt_c_util PROPERTIES
COMPILE_FLAGS "${OPTIMIZED_C_FLAGS}"
)
add_library(compiler STATIC ${ZIG_SOURCES})
set_target_properties(compiler PROPERTIES
add_library(zigcompiler STATIC ${ZIG_SOURCES})
set_target_properties(zigcompiler PROPERTIES
COMPILE_FLAGS ${EXE_CFLAGS}
LINK_FLAGS ${EXE_LDFLAGS}
)
target_link_libraries(compiler LINK_PUBLIC
target_link_libraries(zigcompiler LINK_PUBLIC
zig_cpp
opt_c_util
${SOFTFLOAT_LIBRARIES}
${CMAKE_THREAD_LIBS_INIT}
)
if(NOT MSVC)
target_link_libraries(compiler LINK_PUBLIC ${LIBXML2})
target_link_libraries(zigcompiler LINK_PUBLIC ${LIBXML2})
endif()
if(ZIG_DIA_GUIDS_LIB)
target_link_libraries(compiler LINK_PUBLIC ${ZIG_DIA_GUIDS_LIB})
target_link_libraries(zigcompiler LINK_PUBLIC ${ZIG_DIA_GUIDS_LIB})
endif()
if(MSVC OR MINGW)
target_link_libraries(compiler LINK_PUBLIC version)
target_link_libraries(zigcompiler LINK_PUBLIC version)
endif()
add_executable(zig0 "${ZIG_MAIN_SRC}" "${ZIG0_SHIM_SRC}")
@ -405,40 +404,43 @@ set_target_properties(zig0 PROPERTIES
COMPILE_FLAGS ${EXE_CFLAGS}
LINK_FLAGS ${EXE_LDFLAGS}
)
target_link_libraries(zig0 compiler)
target_link_libraries(zig0 zigcompiler)
if(MSVC)
set(LIBUSERLAND "${CMAKE_BINARY_DIR}/userland.lib")
set(LIBSTAGE2 "${CMAKE_BINARY_DIR}/zigstage2.lib")
else()
set(LIBUSERLAND "${CMAKE_BINARY_DIR}/libuserland.a")
set(LIBSTAGE2 "${CMAKE_BINARY_DIR}/libzigstage2.a")
endif()
if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
set(LIBUSERLAND_RELEASE_MODE "false")
set(LIBSTAGE2_RELEASE_ARG "")
else()
set(LIBUSERLAND_RELEASE_MODE "true")
set(LIBSTAGE2_RELEASE_ARG --release-fast --strip)
endif()
if(WIN32)
set(LIBSTAGE2_WINDOWS_ARGS "-lntdll")
else()
set(LIBSTAGE2_WINDOWS_ARGS "")
endif()
set(BUILD_LIBUSERLAND_ARGS "build"
set(BUILD_LIBSTAGE2_ARGS "build-lib"
"src-self-hosted/stage2.zig"
-mcpu=baseline
--name zigstage2
--override-lib-dir "${CMAKE_SOURCE_DIR}/lib"
"-Doutput-dir=${CMAKE_BINARY_DIR}"
"-Drelease=${LIBUSERLAND_RELEASE_MODE}"
"-Dlib-files-only"
--prefix "${CMAKE_INSTALL_PREFIX}"
libuserland
--cache on
--output-dir "${CMAKE_BINARY_DIR}"
${LIBSTAGE2_RELEASE_ARG}
--disable-gen-h
--bundle-compiler-rt
-fPIC
-lc
${LIBSTAGE2_WINDOWS_ARGS}
)
# When using Visual Studio build system generator we default to libuserland install.
if(MSVC)
set(ZIG_SKIP_INSTALL_LIB_FILES off CACHE BOOL "Disable copying lib/ files to install prefix")
if(NOT ZIG_SKIP_INSTALL_LIB_FILES)
set(BUILD_LIBUSERLAND_ARGS ${BUILD_LIBUSERLAND_ARGS} install)
endif()
endif()
add_custom_target(zig_build_libuserland ALL
COMMAND zig0 ${BUILD_LIBUSERLAND_ARGS}
add_custom_target(zig_build_libstage2 ALL
COMMAND zig0 ${BUILD_LIBSTAGE2_ARGS}
DEPENDS zig0
BYPRODUCTS "${LIBUSERLAND}"
BYPRODUCTS "${LIBSTAGE2}"
WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}"
)
add_executable(zig "${ZIG_MAIN_SRC}")
@ -447,22 +449,40 @@ set_target_properties(zig PROPERTIES
COMPILE_FLAGS ${EXE_CFLAGS}
LINK_FLAGS ${EXE_LDFLAGS}
)
target_link_libraries(zig compiler "${LIBUSERLAND}")
target_link_libraries(zig zigcompiler "${LIBSTAGE2}")
if(MSVC)
target_link_libraries(zig ntdll.lib)
elseif(MINGW)
target_link_libraries(zig ntdll)
endif()
add_dependencies(zig zig_build_libuserland)
add_dependencies(zig zig_build_libstage2)
install(TARGETS zig DESTINATION bin)
# CODE has no effect with Visual Studio build system generator.
if(NOT MSVC)
get_target_property(zig0_BINARY_DIR zig0 BINARY_DIR)
install(CODE "set(zig0_EXE \"${zig0_BINARY_DIR}/zig0\")")
install(CODE "set(INSTALL_LIBUSERLAND_ARGS \"${BUILD_LIBUSERLAND_ARGS}\" install)")
install(CODE "set(BUILD_LIBUSERLAND_ARGS \"${BUILD_LIBUSERLAND_ARGS}\")")
set(ZIG_INSTALL_ARGS "build"
--override-lib-dir "${CMAKE_SOURCE_DIR}/lib"
"-Dlib-files-only"
--prefix "${CMAKE_INSTALL_PREFIX}"
install
)
# CODE has no effect with Visual Studio build system generator, therefore
# when using Visual Studio build system generator we resort to running
# `zig build install` during the build phase.
if(MSVC)
set(ZIG_SKIP_INSTALL_LIB_FILES off CACHE BOOL
"Windows-only: Disable copying lib/ files to install prefix during the build phase")
if(NOT ZIG_SKIP_INSTALL_LIB_FILES)
add_custom_target(zig_install_lib_files ALL
COMMAND zig ${ZIG_INSTALL_ARGS}
DEPENDS zig
WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}"
)
endif()
else()
get_target_property(zig_BINARY_DIR zig BINARY_DIR)
install(CODE "set(zig_EXE \"${zig_BINARY_DIR}/zig\")")
install(CODE "set(ZIG_INSTALL_ARGS \"${ZIG_INSTALL_ARGS}\")")
install(CODE "set(CMAKE_SOURCE_DIR \"${CMAKE_SOURCE_DIR}\")")
install(SCRIPT ${CMAKE_CURRENT_SOURCE_DIR}/cmake/install.cmake)
endif()

View File

@ -65,8 +65,6 @@ pub fn build(b: *Builder) !void {
try configureStage2(b, test_stage2, ctx);
try configureStage2(b, exe, ctx);
addLibUserlandStep(b, mode);
const skip_release = b.option(bool, "skip-release", "Main test suite skips release builds") orelse false;
const skip_release_small = b.option(bool, "skip-release-small", "Main test suite skips release-small builds") orelse skip_release;
const skip_release_fast = b.option(bool, "skip-release-fast", "Main test suite skips release-fast builds") orelse skip_release;
@ -176,7 +174,7 @@ fn dependOnLib(b: *Builder, lib_exe_obj: var, dep: LibraryDep) void {
}
fn fileExists(filename: []const u8) !bool {
fs.File.access(filename) catch |err| switch (err) {
fs.cwd().access(filename, .{}) catch |err| switch (err) {
error.FileNotFound => return false,
else => return err,
};
@ -379,28 +377,3 @@ const Context = struct {
dia_guids_lib: []const u8,
llvm: LibraryDep,
};
fn addLibUserlandStep(b: *Builder, mode: builtin.Mode) void {
const artifact = b.addStaticLibrary("userland", "src-self-hosted/stage1.zig");
artifact.disable_gen_h = true;
artifact.bundle_compiler_rt = true;
artifact.setTarget(builtin.arch, builtin.os, builtin.abi);
artifact.setBuildMode(mode);
artifact.force_pic = true;
if (mode != .Debug) {
artifact.strip = true;
}
artifact.linkSystemLibrary("c");
if (builtin.os == .windows) {
artifact.linkSystemLibrary("ntdll");
}
const libuserland_step = b.step("libuserland", "Build the userland compiler library for use in stage1");
libuserland_step.dependOn(&artifact.step);
const output_dir = b.option(
[]const u8,
"output-dir",
"For libuserland step, where to put the output",
) orelse return;
artifact.setOutputDir(output_dir);
}

View File

@ -34,38 +34,21 @@ release/bin/zig build test-behavior
# release/bin/zig build test-std
release/bin/zig build test-compiler-rt
# This test is disabled because it triggers "out of memory" on the sr.ht CI service.
# See https://github.com/ziglang/zig/issues/3210
# release/bin/zig build test-compare-output
# This test is disabled because it triggers "out of memory" on the sr.ht CI service.
# See https://github.com/ziglang/zig/issues/3210
# release/bin/zig build test-standalone
release/bin/zig build test-compare-output
release/bin/zig build test-standalone
release/bin/zig build test-stack-traces
release/bin/zig build test-cli
release/bin/zig build test-asm-link
release/bin/zig build test-runtime-safety
# This test is disabled because it triggers "out of memory" on the sr.ht CI service.
# See https://github.com/ziglang/zig/issues/3210
# release/bin/zig build test-translate-c
release/bin/zig build test-translate-c
release/bin/zig build test-run-translated-c
release/bin/zig build test-gen-h
# This test is disabled because it triggers "out of memory" on the sr.ht CI service.
# See https://github.com/ziglang/zig/issues/3210
# release/bin/zig build test-compile-errors
# This test is disabled because it triggers "out of memory" on the sr.ht CI service.
# See https://github.com/ziglang/zig/issues/3210
# release/bin/zig build docs
release/bin/zig build test-compile-errors
release/bin/zig build docs
if [ -f ~/.s3cfg ]; then
mv ../LICENSE release/
# Enable when `release/bin/zig build docs` passes without "out of memory" or failures
#mv ../zig-cache/langref.html release/
mv ../zig-cache/langref.html release/
mv release/bin/zig release/
rmdir release/bin

View File

@ -1,16 +1,16 @@
message("-- Installing: ${CMAKE_INSTALL_PREFIX}/lib")
if(NOT EXISTS ${zig0_EXE})
if(NOT EXISTS ${zig_EXE})
message("::")
message(":: ERROR: Executable not found")
message(":: (execute_process)")
message("::")
message(":: executable: ${zig0_EXE}")
message(":: executable: ${zig_EXE}")
message("::")
message(FATAL_ERROR)
endif()
execute_process(COMMAND ${zig0_EXE} ${INSTALL_LIBUSERLAND_ARGS}
execute_process(COMMAND ${zig_EXE} ${ZIG_INSTALL_ARGS}
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
RESULT_VARIABLE _result
)
@ -19,11 +19,11 @@ if(_result)
message(":: ERROR: ${_result}")
message(":: (execute_process)")
string(REPLACE ";" " " s_INSTALL_LIBUSERLAND_ARGS "${INSTALL_LIBUSERLAND_ARGS}")
string(REPLACE ";" " " s_INSTALL_LIBSTAGE2_ARGS "${ZIG_INSTALL_ARGS}")
message("::")
message(":: argv: ${zig0_EXE} ${s_INSTALL_LIBUSERLAND_ARGS} install")
message(":: argv: ${zig_EXE} ${s_INSTALL_LIBSTAGE2_ARGS}")
set(_args ${zig0_EXE} ${INSTALL_LIBUSERLAND_ARGS})
set(_args ${zig_EXE} ${ZIG_INSTALL_ARGS})
list(LENGTH _args _len)
math(EXPR _len "${_len} - 1")
message("::")

View File

@ -550,7 +550,7 @@ pub fn main() void {
{#syntax#}i7{#endsyntax#} refers to a signed 7-bit integer. The maximum allowed bit-width of an
integer type is {#syntax#}65535{#endsyntax#}.
</p>
{#see_also|Integers|Floats|void|Errors|@IntType#}
{#see_also|Integers|Floats|void|Errors|@Type#}
{#header_close#}
{#header_open|Primitive Values#}
<div class="table-wrapper">
@ -2025,7 +2025,8 @@ test "volatile" {
conversions are not possible.
</p>
{#code_begin|test#}
const assert = @import("std").debug.assert;
const std = @import("std");
const assert = std.debug.assert;
test "pointer casting" {
const bytes align(@alignOf(u32)) = [_]u8{ 0x12, 0x12, 0x12, 0x12 };
@ -2034,7 +2035,7 @@ test "pointer casting" {
// Even this example is contrived - there are better ways to do the above than
// pointer casting. For example, using a slice narrowing cast:
const u32_value = @bytesToSlice(u32, bytes[0..])[0];
const u32_value = std.mem.bytesAsSlice(u32, bytes[0..])[0];
assert(u32_value == 0x12121212);
// And even another way, the most straightforward way to do it:
@ -2114,16 +2115,16 @@ test "function alignment" {
{#link|safety check|Incorrect Pointer Alignment#}:
</p>
{#code_begin|test_safety|incorrect alignment#}
const assert = @import("std").debug.assert;
const std = @import("std");
test "pointer alignment safety" {
var array align(4) = [_]u32{ 0x11111111, 0x11111111 };
const bytes = @sliceToBytes(array[0..]);
assert(foo(bytes) == 0x11111111);
const bytes = std.mem.sliceAsBytes(array[0..]);
std.debug.assert(foo(bytes) == 0x11111111);
}
fn foo(bytes: []u8) u32 {
const slice4 = bytes[1..5];
const int_slice = @bytesToSlice(u32, @alignCast(4, slice4));
const int_slice = std.mem.bytesAsSlice(u32, @alignCast(4, slice4));
return int_slice[0];
}
{#code_end#}
@ -2249,7 +2250,7 @@ test "slice widening" {
// Zig supports slice widening and slice narrowing. Cast a slice of u8
// to a slice of anything else, and Zig will perform the length conversion.
const array align(@alignOf(u32)) = [_]u8{ 0x12, 0x12, 0x12, 0x12, 0x13, 0x13, 0x13, 0x13 };
const slice = @bytesToSlice(u32, array[0..]);
const slice = mem.bytesAsSlice(u32, array[0..]);
assert(slice.len == 2);
assert(slice[0] == 0x12121212);
assert(slice[1] == 0x13131313);
@ -2809,14 +2810,10 @@ test "@TagType" {
assert(@TagType(Small) == u2);
}
// @memberCount tells how many fields an enum has:
test "@memberCount" {
assert(@memberCount(Small) == 4);
}
// @memberName tells the name of a field in an enum:
test "@memberName" {
assert(mem.eql(u8, @memberName(Small, 1), "Two"));
// @typeInfo tells us the field count and the fields names:
test "@typeInfo" {
assert(@typeInfo(Small).Enum.fields.len == 4);
assert(mem.eql(u8, @typeInfo(Small).Enum.fields[1].name, "Two"));
}
// @tagName gives a []const u8 representation of an enum value:
@ -2824,7 +2821,7 @@ test "@tagName" {
assert(mem.eql(u8, @tagName(Small.Three), "Three"));
}
{#code_end#}
{#see_also|@memberName|@memberCount|@tagName|@sizeOf#}
{#see_also|@typeInfo|@tagName|@sizeOf#}
{#header_open|extern enum#}
<p>
@ -5186,7 +5183,6 @@ test "coercion of zero bit types" {
<li>{#link|@bitCast#} - change type but maintain bit representation</li>
<li>{#link|@alignCast#} - make a pointer have more alignment</li>
<li>{#link|@boolToInt#} - convert true to 1 and false to 0</li>
<li>{#link|@bytesToSlice#} - convert a slice of bytes to a slice of another type</li>
<li>{#link|@enumToInt#} - obtain the integer tag value of an enum or tagged union</li>
<li>{#link|@errSetCast#} - convert to a smaller error set</li>
<li>{#link|@errorToInt#} - obtain the integer value of an error code</li>
@ -5199,7 +5195,6 @@ test "coercion of zero bit types" {
<li>{#link|@intToPtr#} - convert an address to a pointer</li>
<li>{#link|@ptrCast#} - convert between pointer types</li>
<li>{#link|@ptrToInt#} - obtain the address of a pointer</li>
<li>{#link|@sliceToBytes#} - convert a slice of anything to a slice of bytes</li>
<li>{#link|@truncate#} - convert between integer types, chopping off bits</li>
</ul>
{#header_close#}
@ -6672,18 +6667,6 @@ comptime {
</p>
{#see_also|Alignment#}
{#header_close#}
{#header_open|@ArgType#}
<pre>{#syntax#}@ArgType(comptime T: type, comptime n: usize) type{#endsyntax#}</pre>
<p>
This builtin function takes a function type and returns the type of the parameter at index {#syntax#}n{#endsyntax#}.
</p>
<p>
{#syntax#}T{#endsyntax#} must be a function type.
</p>
<p>
Note: This function is deprecated. Use {#link|@typeInfo#} instead.
</p>
{#header_close#}
{#header_open|@as#}
<pre>{#syntax#}@as(comptime T: type, expression) T{#endsyntax#}</pre>
@ -6817,7 +6800,7 @@ async fn func(y: *i32) void {
Asserts that {#syntax#}@sizeOf(@TypeOf(value)) == @sizeOf(DestType){#endsyntax#}.
</p>
<p>
Asserts that {#syntax#}@typeId(DestType) != @import("builtin").TypeId.Pointer{#endsyntax#}. Use {#syntax#}@ptrCast{#endsyntax#} or {#syntax#}@intToPtr{#endsyntax#} if you need this.
Asserts that {#syntax#}@typeInfo(DestType) != .Pointer{#endsyntax#}. Use {#syntax#}@ptrCast{#endsyntax#} or {#syntax#}@intToPtr{#endsyntax#} if you need this.
</p>
<p>
Can be used for these things for example:
@ -6929,18 +6912,6 @@ async fn func(y: *i32) void {
{#see_also|@bitOffsetOf#}
{#header_close#}
{#header_open|@bytesToSlice#}
<pre>{#syntax#}@bytesToSlice(comptime Element: type, bytes: []u8) []Element{#endsyntax#}</pre>
<p>
Converts a slice of bytes or array of bytes into a slice of {#syntax#}Element{#endsyntax#}.
The resulting slice has the same {#link|pointer|Pointers#} properties as the parameter.
</p>
<p>
Attempting to convert a number of bytes with a length that does not evenly divide into a slice of
elements results in safety-protected {#link|Undefined Behavior#}.
</p>
{#header_close#}
{#header_open|@call#}
<pre>{#syntax#}@call(options: std.builtin.CallOptions, function: var, args: var) var{#endsyntax#}</pre>
<p>
@ -7248,7 +7219,7 @@ test "main" {
<p>
Floored division. Rounds toward negative infinity. For unsigned integers it is
the same as {#syntax#}numerator / denominator{#endsyntax#}. Caller guarantees {#syntax#}denominator != 0{#endsyntax#} and
{#syntax#}!(@typeId(T) == builtin.TypeId.Int and T.is_signed and numerator == std.math.minInt(T) and denominator == -1){#endsyntax#}.
{#syntax#}!(@typeInfo(T) == .Int and T.is_signed and numerator == std.math.minInt(T) and denominator == -1){#endsyntax#}.
</p>
<ul>
<li>{#syntax#}@divFloor(-5, 3) == -2{#endsyntax#}</li>
@ -7262,7 +7233,7 @@ test "main" {
<p>
Truncated division. Rounds toward zero. For unsigned integers it is
the same as {#syntax#}numerator / denominator{#endsyntax#}. Caller guarantees {#syntax#}denominator != 0{#endsyntax#} and
{#syntax#}!(@typeId(T) == builtin.TypeId.Int and T.is_signed and numerator == std.math.minInt(T) and denominator == -1){#endsyntax#}.
{#syntax#}!(@typeInfo(T) == .Int and T.is_signed and numerator == std.math.minInt(T) and denominator == -1){#endsyntax#}.
</p>
<ul>
<li>{#syntax#}@divTrunc(-5, 3) == -1{#endsyntax#}</li>
@ -7320,7 +7291,7 @@ test "main" {
{#header_close#}
{#header_open|@errorToInt#}
<pre>{#syntax#}@errorToInt(err: var) @IntType(false, @sizeOf(anyerror) * 8){#endsyntax#}</pre>
<pre>{#syntax#}@errorToInt(err: var) std.meta.IntType(false, @sizeOf(anyerror) * 8){#endsyntax#}</pre>
<p>
Supports the following types:
</p>
@ -7365,7 +7336,7 @@ comptime {
@export(internalName, .{ .name = "foo", .linkage = .Strong });
}
extern fn internalName() void {}
fn internalName() callconv(.C) void {}
{#code_end#}
<p>This is equivalent to:</p>
{#code_begin|obj#}
@ -7614,7 +7585,7 @@ test "@hasDecl" {
{#header_close#}
{#header_open|@intToError#}
<pre>{#syntax#}@intToError(value: @IntType(false, @sizeOf(anyerror) * 8)) anyerror{#endsyntax#}</pre>
<pre>{#syntax#}@intToError(value: std.meta.IntType(false, @sizeOf(anyerror) * 8)) anyerror{#endsyntax#}</pre>
<p>
Converts from the integer representation of an error into {#link|The Global Error Set#} type.
</p>
@ -7647,44 +7618,6 @@ test "@hasDecl" {
</p>
{#header_close#}
{#header_open|@IntType#}
<pre>{#syntax#}@IntType(comptime is_signed: bool, comptime bit_count: u16) type{#endsyntax#}</pre>
<p>
This function returns an integer type with the given signness and bit count. The maximum
bit count for an integer type is {#syntax#}65535{#endsyntax#}.
</p>
<p>
Deprecated. Use {#link|@Type#}.
</p>
{#header_close#}
{#header_open|@memberCount#}
<pre>{#syntax#}@memberCount(comptime T: type) comptime_int{#endsyntax#}</pre>
<p>
This function returns the number of members in a struct, enum, or union type.
</p>
<p>
The result is a compile time constant.
</p>
<p>
It does not include functions, variables, or constants.
</p>
{#header_close#}
{#header_open|@memberName#}
<pre>{#syntax#}@memberName(comptime T: type, comptime index: usize) [N]u8{#endsyntax#}</pre>
<p>Returns the field name of a struct, union, or enum.</p>
<p>
The result is a compile time constant.
</p>
<p>
It does not include functions, variables, or constants.
</p>
{#header_close#}
{#header_open|@memberType#}
<pre>{#syntax#}@memberType(comptime T: type, comptime index: usize) type{#endsyntax#}</pre>
<p>Returns the field type of a struct or union.</p>
{#header_close#}
{#header_open|@memcpy#}
<pre>{#syntax#}@memcpy(noalias dest: [*]u8, noalias source: [*]const u8, byte_count: usize){#endsyntax#}</pre>
<p>
@ -8067,14 +8000,6 @@ test "@setRuntimeSafety" {
{#see_also|@bitSizeOf|@typeInfo#}
{#header_close#}
{#header_open|@sliceToBytes#}
<pre>{#syntax#}@sliceToBytes(value: var) []u8{#endsyntax#}</pre>
<p>
Converts a slice or array to a slice of {#syntax#}u8{#endsyntax#}. The resulting slice has the same
{#link|pointer|Pointers#} properties as the parameter.
</p>
{#header_close#}
{#header_open|@splat#}
<pre>{#syntax#}@splat(comptime len: u32, scalar: var) @Vector(len, @TypeOf(scalar)){#endsyntax#}</pre>
<p>
@ -8388,43 +8313,6 @@ test "integer truncation" {
<li>{#link|struct#}</li>
</ul>
{#header_close#}
{#header_open|@typeId#}
<pre>{#syntax#}@typeId(comptime T: type) @import("builtin").TypeId{#endsyntax#}</pre>
<p>
Returns which kind of type something is. Possible values:
</p>
{#code_begin|syntax#}
pub const TypeId = enum {
Type,
Void,
Bool,
NoReturn,
Int,
Float,
Pointer,
Array,
Struct,
ComptimeFloat,
ComptimeInt,
Undefined,
Null,
Optional,
ErrorUnion,
ErrorSet,
Enum,
Union,
Fn,
BoundFn,
Opaque,
Frame,
AnyFrame,
Vector,
EnumLiteral,
};
{#code_end#}
{#header_close#}
{#header_open|@typeInfo#}
<pre>{#syntax#}@typeInfo(comptime T: type) @import("std").builtin.TypeInfo{#endsyntax#}</pre>
<p>
@ -8885,25 +8773,6 @@ pub fn main() void {
var b: u32 = 3;
var c = @divExact(a, b);
std.debug.warn("value: {}\n", .{c});
}
{#code_end#}
{#header_close#}
{#header_open|Slice Widen Remainder#}
<p>At compile-time:</p>
{#code_begin|test_err|unable to convert#}
comptime {
var bytes = [5]u8{ 1, 2, 3, 4, 5 };
var slice = @bytesToSlice(u32, bytes[0..]);
}
{#code_end#}
<p>At runtime:</p>
{#code_begin|exe_err#}
const std = @import("std");
pub fn main() void {
var bytes = [5]u8{ 1, 2, 3, 4, 5 };
var slice = @bytesToSlice(u32, bytes[0..]);
std.debug.warn("value: {}\n", .{slice[0]});
}
{#code_end#}
{#header_close#}
@ -9085,14 +8954,15 @@ comptime {
{#code_end#}
<p>At runtime:</p>
{#code_begin|exe_err#}
const mem = @import("std").mem;
pub fn main() !void {
var array align(4) = [_]u32{ 0x11111111, 0x11111111 };
const bytes = @sliceToBytes(array[0..]);
const bytes = mem.sliceAsBytes(array[0..]);
if (foo(bytes) != 0x11111111) return error.Wrong;
}
fn foo(bytes: []u8) u32 {
const slice4 = bytes[1..5];
const int_slice = @bytesToSlice(u32, @alignCast(4, slice4));
const int_slice = mem.bytesAsSlice(u32, @alignCast(4, slice4));
return int_slice[0];
}
{#code_end#}

View File

@ -188,6 +188,14 @@ pub fn AlignedArrayList(comptime T: type, comptime alignment: ?u29) type {
self.len += items.len;
}
/// Append a value to the list `n` times. Allocates more memory
/// as necessary.
pub fn appendNTimes(self: *Self, value: T, n: usize) !void {
const old_len = self.len;
try self.resize(self.len + n);
mem.set(T, self.items[old_len..self.len], value);
}
/// Adjust the list's length to `new_len`. Doesn't initialize
/// added items if any.
pub fn resize(self: *Self, new_len: usize) !void {
@ -311,6 +319,23 @@ test "std.ArrayList.basic" {
testing.expect(list.pop() == 33);
}
test "std.ArrayList.appendNTimes" {
var list = ArrayList(i32).init(testing.allocator);
defer list.deinit();
try list.appendNTimes(2, 10);
testing.expectEqual(@as(usize, 10), list.len);
for (list.toSlice()) |element| {
testing.expectEqual(@as(i32, 2), element);
}
}
test "std.ArrayList.appendNTimes with failing allocator" {
var list = ArrayList(i32).init(testing.failing_allocator);
defer list.deinit();
testing.expectError(error.OutOfMemory, list.appendNTimes(2, 10));
}
test "std.ArrayList.orderedRemove" {
var list = ArrayList(i32).init(testing.allocator);
defer list.deinit();

View File

@ -147,6 +147,10 @@ pub const Buffer = struct {
try self.resize(m.len);
mem.copy(u8, self.list.toSlice(), m);
}
pub fn print(self: *Buffer, comptime fmt: []const u8, args: var) !void {
return std.fmt.format(self, error{OutOfMemory}, Buffer.append, fmt, args);
}
};
test "simple Buffer" {
@ -190,3 +194,11 @@ test "Buffer.initCapacity" {
testing.expect(buf.capacity() == old_cap);
testing.expect(mem.eql(u8, buf.toSliceConst(), "hello"));
}
test "Buffer.print" {
var buf = try Buffer.init(testing.allocator, "");
defer buf.deinit();
try buf.print("Hello {} the {}", .{ 2, "world" });
testing.expect(buf.eql("Hello 2 the world"));
}

View File

@ -27,9 +27,6 @@ pub const Builder = struct {
install_tls: TopLevelStep,
uninstall_tls: TopLevelStep,
allocator: *Allocator,
native_system_lib_paths: ArrayList([]const u8),
native_system_include_dirs: ArrayList([]const u8),
native_system_rpaths: ArrayList([]const u8),
user_input_options: UserInputOptionsMap,
available_options_map: AvailableOptionsMap,
available_options_list: ArrayList(AvailableOption),
@ -41,6 +38,7 @@ pub const Builder = struct {
verbose_ir: bool,
verbose_llvm_ir: bool,
verbose_cimport: bool,
verbose_llvm_cpu_features: bool,
invalid_user_input: bool,
zig_exe: []const u8,
default_step: *Step,
@ -137,11 +135,9 @@ pub const Builder = struct {
.verbose_ir = false,
.verbose_llvm_ir = false,
.verbose_cimport = false,
.verbose_llvm_cpu_features = false,
.invalid_user_input = false,
.allocator = allocator,
.native_system_lib_paths = ArrayList([]const u8).init(allocator),
.native_system_include_dirs = ArrayList([]const u8).init(allocator),
.native_system_rpaths = ArrayList([]const u8).init(allocator),
.user_input_options = UserInputOptionsMap.init(allocator),
.available_options_map = AvailableOptionsMap.init(allocator),
.available_options_list = ArrayList(AvailableOption).init(allocator),
@ -172,15 +168,11 @@ pub const Builder = struct {
};
try self.top_level_steps.append(&self.install_tls);
try self.top_level_steps.append(&self.uninstall_tls);
self.detectNativeSystemPaths();
self.default_step = &self.install_tls.step;
return self;
}
pub fn destroy(self: *Builder) void {
self.native_system_lib_paths.deinit();
self.native_system_include_dirs.deinit();
self.native_system_rpaths.deinit();
self.env_map.deinit();
self.top_level_steps.deinit();
self.allocator.destroy(self);
@ -347,18 +339,6 @@ pub const Builder = struct {
};
}
pub fn addNativeSystemIncludeDir(self: *Builder, path: []const u8) void {
self.native_system_include_dirs.append(path) catch unreachable;
}
pub fn addNativeSystemRPath(self: *Builder, path: []const u8) void {
self.native_system_rpaths.append(path) catch unreachable;
}
pub fn addNativeSystemLibPath(self: *Builder, path: []const u8) void {
self.native_system_lib_paths.append(path) catch unreachable;
}
pub fn make(self: *Builder, step_names: []const []const u8) !void {
try self.makePath(self.cache_root);
@ -433,87 +413,6 @@ pub const Builder = struct {
return error.InvalidStepName;
}
fn detectNativeSystemPaths(self: *Builder) void {
var is_nixos = false;
if (process.getEnvVarOwned(self.allocator, "NIX_CFLAGS_COMPILE")) |nix_cflags_compile| {
is_nixos = true;
var it = mem.tokenize(nix_cflags_compile, " ");
while (true) {
const word = it.next() orelse break;
if (mem.eql(u8, word, "-isystem")) {
const include_path = it.next() orelse {
warn("Expected argument after -isystem in NIX_CFLAGS_COMPILE\n", .{});
break;
};
self.addNativeSystemIncludeDir(include_path);
} else {
warn("Unrecognized C flag from NIX_CFLAGS_COMPILE: {}\n", .{word});
break;
}
}
} else |err| {
assert(err == error.EnvironmentVariableNotFound);
}
if (process.getEnvVarOwned(self.allocator, "NIX_LDFLAGS")) |nix_ldflags| {
is_nixos = true;
var it = mem.tokenize(nix_ldflags, " ");
while (true) {
const word = it.next() orelse break;
if (mem.eql(u8, word, "-rpath")) {
const rpath = it.next() orelse {
warn("Expected argument after -rpath in NIX_LDFLAGS\n", .{});
break;
};
self.addNativeSystemRPath(rpath);
} else if (word.len > 2 and word[0] == '-' and word[1] == 'L') {
const lib_path = word[2..];
self.addNativeSystemLibPath(lib_path);
} else {
warn("Unrecognized C flag from NIX_LDFLAGS: {}\n", .{word});
break;
}
}
} else |err| {
assert(err == error.EnvironmentVariableNotFound);
}
if (is_nixos) return;
switch (builtin.os) {
.windows => {},
else => {
const triple = (Target{
.Cross = CrossTarget{
.arch = builtin.arch,
.os = builtin.os,
.abi = builtin.abi,
.cpu_features = builtin.cpu_features,
},
}).linuxTriple(self.allocator);
// TODO: $ ld --verbose | grep SEARCH_DIR
// the output contains some paths that end with lib64, maybe include them too?
// also, what is the best possible order of things?
self.addNativeSystemIncludeDir("/usr/local/include");
self.addNativeSystemLibPath("/usr/local/lib");
self.addNativeSystemLibPath("/usr/local/lib64");
self.addNativeSystemIncludeDir(self.fmt("/usr/include/{}", .{triple}));
self.addNativeSystemLibPath(self.fmt("/usr/lib/{}", .{triple}));
self.addNativeSystemIncludeDir("/usr/include");
self.addNativeSystemLibPath("/lib");
self.addNativeSystemLibPath("/lib64");
self.addNativeSystemLibPath("/usr/lib");
self.addNativeSystemLibPath("/usr/lib64");
// example: on a 64-bit debian-based linux distro, with zlib installed from apt:
// zlib.h is in /usr/include (added above)
// libz.so.1 is in /lib/x86_64-linux-gnu (added here)
self.addNativeSystemLibPath(self.fmt("/lib/{}", .{triple}));
},
}
}
pub fn option(self: *Builder, comptime T: type, name: []const u8, description: []const u8) ?T {
const type_id = comptime typeToEnum(T);
const available_option = AvailableOption{
@ -638,7 +537,7 @@ pub const Builder = struct {
return Target.Native;
} else {
const target_str = self.option([]const u8, "target", "the target to build for") orelse return Target.Native;
return Target.parse(target_str) catch unreachable; // TODO better error message for bad target
return Target.parse(.{ .arch_os_abi = target_str }) catch unreachable; // TODO better error message for bad target
}
}
@ -710,13 +609,13 @@ pub const Builder = struct {
}
fn typeToEnum(comptime T: type) TypeId {
return switch (@typeId(T)) {
builtin.TypeId.Int => TypeId.Int,
builtin.TypeId.Float => TypeId.Float,
builtin.TypeId.Bool => TypeId.Bool,
return switch (@typeInfo(T)) {
.Int => .Int,
.Float => .Float,
.Bool => .Bool,
else => switch (T) {
[]const u8 => TypeId.String,
[]const []const u8 => TypeId.List,
[]const u8 => .String,
[]const []const u8 => .List,
else => @compileError("Unsupported type: " ++ @typeName(T)),
},
};
@ -728,11 +627,11 @@ pub const Builder = struct {
pub fn typeIdName(id: TypeId) []const u8 {
return switch (id) {
TypeId.Bool => "bool",
TypeId.Int => "int",
TypeId.Float => "float",
TypeId.String => "string",
TypeId.List => "list",
.Bool => "bool",
.Int => "int",
.Float => "float",
.String => "string",
.List => "list",
};
}
@ -1155,6 +1054,9 @@ pub const LibExeObjStep = struct {
frameworks: BufSet,
verbose_link: bool,
verbose_cc: bool,
emit_llvm_ir: bool = false,
emit_asm: bool = false,
emit_bin: bool = true,
disable_gen_h: bool,
bundle_compiler_rt: bool,
disable_stack_probing: bool,
@ -1182,7 +1084,6 @@ pub const LibExeObjStep = struct {
include_dirs: ArrayList(IncludeDir),
c_macros: ArrayList([]const u8),
output_dir: ?[]const u8,
need_system_paths: bool,
is_linking_libc: bool = false,
vcpkg_bin_path: ?[]const u8 = null,
@ -1320,7 +1221,6 @@ pub const LibExeObjStep = struct {
.disable_stack_probing = false,
.disable_sanitize_c = false,
.output_dir = null,
.need_system_paths = false,
.single_threaded = false,
.installed_path = null,
.install_step = null,
@ -1496,7 +1396,6 @@ pub const LibExeObjStep = struct {
/// Prefer to use `linkSystemLibrary` instead.
pub fn linkSystemLibraryName(self: *LibExeObjStep, name: []const u8) void {
self.link_objects.append(LinkObject{ .SystemLib = self.builder.dupe(name) }) catch unreachable;
self.need_system_paths = true;
}
/// This links against a system library, exclusively using pkg-config to find the library.
@ -1940,6 +1839,11 @@ pub const LibExeObjStep = struct {
if (builder.verbose_llvm_ir) zig_args.append("--verbose-llvm-ir") catch unreachable;
if (builder.verbose_link or self.verbose_link) zig_args.append("--verbose-link") catch unreachable;
if (builder.verbose_cc or self.verbose_cc) zig_args.append("--verbose-cc") catch unreachable;
if (builder.verbose_llvm_cpu_features) zig_args.append("--verbose-llvm-cpu-features") catch unreachable;
if (self.emit_llvm_ir) try zig_args.append("-femit-llvm-ir");
if (self.emit_asm) try zig_args.append("-femit-asm");
if (!self.emit_bin) try zig_args.append("-fno-emit-bin");
if (self.strip) {
try zig_args.append("--strip");
@ -2008,43 +1912,33 @@ pub const LibExeObjStep = struct {
try zig_args.append(self.target.zigTriple(builder.allocator) catch unreachable);
const all_features = self.target.getArch().allFeaturesList();
var populated_cpu_features = cross.cpu_features.cpu.features;
if (self.target.getArch().subArchFeature()) |sub_arch_index| {
populated_cpu_features.addFeature(sub_arch_index);
}
var populated_cpu_features = cross.cpu.model.features;
populated_cpu_features.populateDependencies(all_features);
if (populated_cpu_features.eql(cross.cpu_features.features)) {
if (populated_cpu_features.eql(cross.cpu.features)) {
// The CPU name alone is sufficient.
// If it is the baseline CPU, no command line args are required.
if (cross.cpu_features.cpu != self.target.getArch().getBaselineCpuFeatures().cpu) {
try zig_args.append("-target-cpu");
try zig_args.append(cross.cpu_features.cpu.name);
if (cross.cpu.model != Target.Cpu.baseline(self.target.getArch()).model) {
try zig_args.append("-mcpu");
try zig_args.append(cross.cpu.model.name);
}
} else {
try zig_args.append("-target-cpu");
try zig_args.append(cross.cpu_features.cpu.name);
var mcpu_buffer = try std.Buffer.init(builder.allocator, "-mcpu=");
try mcpu_buffer.append(cross.cpu.model.name);
try zig_args.append("-target-feature");
var feature_str_buffer = try std.Buffer.initSize(builder.allocator, 0);
for (all_features) |feature, i_usize| {
const i = @intCast(Target.Cpu.Feature.Set.Index, i_usize);
const in_cpu_set = populated_cpu_features.isEnabled(i);
const in_actual_set = cross.cpu_features.features.isEnabled(i);
const in_actual_set = cross.cpu.features.isEnabled(i);
if (in_cpu_set and !in_actual_set) {
try feature_str_buffer.appendByte('-');
try feature_str_buffer.append(feature.name);
try feature_str_buffer.appendByte(',');
try mcpu_buffer.appendByte('-');
try mcpu_buffer.append(feature.name);
} else if (!in_cpu_set and in_actual_set) {
try feature_str_buffer.appendByte('+');
try feature_str_buffer.append(feature.name);
try feature_str_buffer.appendByte(',');
try mcpu_buffer.appendByte('+');
try mcpu_buffer.append(feature.name);
}
}
if (mem.endsWith(u8, feature_str_buffer.toSliceConst(), ",")) {
feature_str_buffer.shrink(feature_str_buffer.len() - 1);
}
try zig_args.append(feature_str_buffer.toSliceConst());
try zig_args.append(mcpu_buffer.toSliceConst());
}
},
}
@ -2152,23 +2046,6 @@ pub const LibExeObjStep = struct {
try zig_args.append(lib_path);
}
if (self.need_system_paths and self.target == Target.Native) {
for (builder.native_system_include_dirs.toSliceConst()) |include_path| {
zig_args.append("-isystem") catch unreachable;
zig_args.append(builder.pathFromRoot(include_path)) catch unreachable;
}
for (builder.native_system_rpaths.toSliceConst()) |rpath| {
zig_args.append("-rpath") catch unreachable;
zig_args.append(rpath) catch unreachable;
}
for (builder.native_system_lib_paths.toSliceConst()) |lib_path| {
zig_args.append("--library-path") catch unreachable;
zig_args.append(lib_path) catch unreachable;
}
}
for (self.c_macros.toSliceConst()) |c_macro| {
try zig_args.append("-D");
try zig_args.append(c_macro);

View File

@ -6,8 +6,8 @@ pub const Target = std.Target;
/// Deprecated: use `std.Target.Os`.
pub const Os = std.Target.Os;
/// Deprecated: use `std.Target.Arch`.
pub const Arch = std.Target.Arch;
/// Deprecated: use `std.Target.Cpu.Arch`.
pub const Arch = std.Target.Cpu.Arch;
/// Deprecated: use `std.Target.Abi`.
pub const Abi = std.Target.Abi;
@ -18,9 +18,6 @@ pub const ObjectFormat = std.Target.ObjectFormat;
/// Deprecated: use `std.Target.SubSystem`.
pub const SubSystem = std.Target.SubSystem;
/// Deprecated: use `std.Target.CpuFeatures`.
pub const CpuFeatures = std.Target.CpuFeatures;
/// Deprecated: use `std.Target.Cpu`.
pub const Cpu = std.Target.Cpu;

View File

@ -62,6 +62,8 @@ pub fn versionCheck(glibc_version: builtin.Version) type {
};
}
pub extern "c" var environ: [*:null]?[*:0]u8;
pub extern "c" fn fopen(filename: [*:0]const u8, modes: [*:0]const u8) ?*FILE;
pub extern "c" fn fclose(stream: *FILE) c_int;
pub extern "c" fn fwrite(ptr: [*]const u8, size_of_type: usize, item_count: usize, stream: *FILE) usize;
@ -96,6 +98,7 @@ pub extern "c" fn getcwd(buf: [*]u8, size: usize) ?[*]u8;
pub extern "c" fn waitpid(pid: c_int, stat_loc: *c_uint, options: c_uint) c_int;
pub extern "c" fn fork() c_int;
pub extern "c" fn access(path: [*:0]const u8, mode: c_uint) c_int;
pub extern "c" fn faccessat(dirfd: fd_t, path: [*:0]const u8, mode: c_uint, flags: c_uint) c_int;
pub extern "c" fn pipe(fds: *[2]fd_t) c_int;
pub extern "c" fn pipe2(fds: *[2]fd_t, flags: u32) c_int;
pub extern "c" fn mkdir(path: [*:0]const u8, mode: c_uint) c_int;

View File

@ -616,6 +616,7 @@ pub const Tokenizer = struct {
},
.BackSlash => switch (c) {
'\n' => {
result.start = self.index + 1;
state = .Start;
},
'\r' => {
@ -631,6 +632,7 @@ pub const Tokenizer = struct {
},
.BackSlashCr => switch (c) {
'\n' => {
result.start = self.index + 1;
state = .Start;
},
else => {

View File

@ -48,7 +48,10 @@ pub const ChildProcess = struct {
cwd: ?[]const u8,
err_pipe: if (builtin.os == .windows) void else [2]os.fd_t,
llnode: if (builtin.os == .windows) void else TailQueue(*ChildProcess).Node,
expand_arg0: Arg0Expand,
pub const Arg0Expand = os.Arg0Expand;
pub const SpawnError = error{
OutOfMemory,
@ -90,7 +93,6 @@ pub const ChildProcess = struct {
.handle = undefined,
.thread_handle = undefined,
.err_pipe = undefined,
.llnode = undefined,
.term = null,
.env_map = null,
.cwd = null,
@ -102,6 +104,7 @@ pub const ChildProcess = struct {
.stdin_behavior = StdIo.Inherit,
.stdout_behavior = StdIo.Inherit,
.stderr_behavior = StdIo.Inherit,
.expand_arg0 = .no_expand,
};
errdefer allocator.destroy(child);
return child;
@ -174,34 +177,56 @@ pub const ChildProcess = struct {
/// Spawns a child process, waits for it, collecting stdout and stderr, and then returns.
/// If it succeeds, the caller owns result.stdout and result.stderr memory.
/// TODO deprecate in favor of exec2
pub fn exec(
allocator: *mem.Allocator,
argv: []const []const u8,
cwd: ?[]const u8,
env_map: ?*const BufMap,
max_output_size: usize,
max_output_bytes: usize,
) !ExecResult {
const child = try ChildProcess.init(argv, allocator);
return exec2(.{
.allocator = allocator,
.argv = argv,
.cwd = cwd,
.env_map = env_map,
.max_output_bytes = max_output_bytes,
});
}
/// Spawns a child process, waits for it, collecting stdout and stderr, and then returns.
/// If it succeeds, the caller owns result.stdout and result.stderr memory.
/// TODO rename to exec
pub fn exec2(args: struct {
allocator: *mem.Allocator,
argv: []const []const u8,
cwd: ?[]const u8 = null,
env_map: ?*const BufMap = null,
max_output_bytes: usize = 50 * 1024,
expand_arg0: Arg0Expand = .no_expand,
}) !ExecResult {
const child = try ChildProcess.init(args.argv, args.allocator);
defer child.deinit();
child.stdin_behavior = ChildProcess.StdIo.Ignore;
child.stdout_behavior = ChildProcess.StdIo.Pipe;
child.stderr_behavior = ChildProcess.StdIo.Pipe;
child.cwd = cwd;
child.env_map = env_map;
child.stdin_behavior = .Ignore;
child.stdout_behavior = .Pipe;
child.stderr_behavior = .Pipe;
child.cwd = args.cwd;
child.env_map = args.env_map;
child.expand_arg0 = args.expand_arg0;
try child.spawn();
var stdout = Buffer.initNull(allocator);
var stderr = Buffer.initNull(allocator);
var stdout = Buffer.initNull(args.allocator);
var stderr = Buffer.initNull(args.allocator);
defer Buffer.deinit(&stdout);
defer Buffer.deinit(&stderr);
var stdout_file_in_stream = child.stdout.?.inStream();
var stderr_file_in_stream = child.stderr.?.inStream();
try stdout_file_in_stream.stream.readAllBuffer(&stdout, max_output_size);
try stderr_file_in_stream.stream.readAllBuffer(&stderr, max_output_size);
try stdout_file_in_stream.stream.readAllBuffer(&stdout, args.max_output_bytes);
try stderr_file_in_stream.stream.readAllBuffer(&stderr, args.max_output_bytes);
return ExecResult{
.term = try child.wait(),
@ -420,7 +445,7 @@ pub const ChildProcess = struct {
os.setreuid(uid, uid) catch |err| forkChildErrReport(err_pipe[1], err);
}
const err = os.execvpe(self.allocator, self.argv, env_map);
const err = os.execvpe_expandArg0(self.allocator, self.expand_arg0, self.argv, env_map);
forkChildErrReport(err_pipe[1], err);
}
@ -453,7 +478,6 @@ pub const ChildProcess = struct {
self.pid = pid;
self.err_pipe = err_pipe;
self.llnode = TailQueue(*ChildProcess).Node.init(self);
self.term = null;
if (self.stdin_behavior == StdIo.Pipe) {
@ -827,7 +851,7 @@ fn forkChildErrReport(fd: i32, err: ChildProcess.SpawnError) noreturn {
os.exit(1);
}
const ErrInt = @IntType(false, @sizeOf(anyerror) * 8);
const ErrInt = std.meta.IntType(false, @sizeOf(anyerror) * 8);
fn writeIntFd(fd: i32, value: ErrInt) !void {
const file = File{

View File

@ -57,3 +57,34 @@ test "crypto" {
_ = @import("crypto/sha3.zig");
_ = @import("crypto/x25519.zig");
}
test "issue #4532: no index out of bounds" {
    // Regression test: feeding a hasher exactly one block, split across two
    // `update` calls, must produce the same digest as one whole-block update.
    const hash_types = [_]type{
        Md5,
        Sha1,
        Sha224,
        Sha256,
        Sha384,
        Sha512,
        Blake2s224,
        Blake2s256,
        Blake2b384,
        Blake2b512,
    };
    inline for (hash_types) |Hasher| {
        var input = [_]u8{'#'} ** Hasher.block_length;
        var whole_digest: [Hasher.digest_length]u8 = undefined;
        var split_digest: [Hasher.digest_length]u8 = undefined;
        var hasher = Hasher.init();
        // Hash the full block in a single call.
        hasher.update(input[0..]);
        hasher.final(whole_digest[0..]);
        hasher.reset();
        // Hash the same data, split as 1 byte + the remainder.
        hasher.update(input[0..1]);
        hasher.update(input[1..]);
        hasher.final(split_digest[0..]);
        std.testing.expectEqual(whole_digest, split_digest);
    }
}

View File

@ -94,7 +94,7 @@ fn Blake2s(comptime out_len: usize) type {
var off: usize = 0;
// Partial buffer exists from previous update. Copy into buffer then hash.
if (d.buf_len != 0 and d.buf_len + b.len > 64) {
if (d.buf_len != 0 and d.buf_len + b.len >= 64) {
off += 64 - d.buf_len;
mem.copy(u8, d.buf[d.buf_len..], b[0..off]);
d.t += 64;
@ -331,7 +331,7 @@ fn Blake2b(comptime out_len: usize) type {
var off: usize = 0;
// Partial buffer exists from previous update. Copy into buffer then hash.
if (d.buf_len != 0 and d.buf_len + b.len > 128) {
if (d.buf_len != 0 and d.buf_len + b.len >= 128) {
off += 128 - d.buf_len;
mem.copy(u8, d.buf[d.buf_len..], b[0..off]);
d.t += 128;

View File

@ -24,11 +24,11 @@ pub const State = struct {
const Self = @This();
pub fn toSlice(self: *Self) []u8 {
return @sliceToBytes(self.data[0..]);
return mem.sliceAsBytes(self.data[0..]);
}
pub fn toSliceConst(self: *Self) []const u8 {
return @sliceToBytes(self.data[0..]);
return mem.sliceAsBytes(self.data[0..]);
}
pub fn permute(self: *Self) void {

View File

@ -63,7 +63,7 @@ pub const Md5 = struct {
var off: usize = 0;
// Partial buffer exists from previous update. Copy into buffer then hash.
if (d.buf_len != 0 and d.buf_len + b.len > 64) {
if (d.buf_len != 0 and d.buf_len + b.len >= 64) {
off += 64 - d.buf_len;
mem.copy(u8, d.buf[d.buf_len..], b[0..off]);

View File

@ -61,7 +61,7 @@ pub const Sha1 = struct {
var off: usize = 0;
// Partial buffer exists from previous update. Copy into buffer then hash.
if (d.buf_len != 0 and d.buf_len + b.len > 64) {
if (d.buf_len != 0 and d.buf_len + b.len >= 64) {
off += 64 - d.buf_len;
mem.copy(u8, d.buf[d.buf_len..], b[0..off]);

View File

@ -116,7 +116,7 @@ fn Sha2_32(comptime params: Sha2Params32) type {
var off: usize = 0;
// Partial buffer exists from previous update. Copy into buffer then hash.
if (d.buf_len != 0 and d.buf_len + b.len > 64) {
if (d.buf_len != 0 and d.buf_len + b.len >= 64) {
off += 64 - d.buf_len;
mem.copy(u8, d.buf[d.buf_len..], b[0..off]);
@ -458,7 +458,7 @@ fn Sha2_64(comptime params: Sha2Params64) type {
var off: usize = 0;
// Partial buffer exists from previous update. Copy into buffer then hash.
if (d.buf_len != 0 and d.buf_len + b.len > 128) {
if (d.buf_len != 0 and d.buf_len + b.len >= 128) {
off += 128 - d.buf_len;
mem.copy(u8, d.buf[d.buf_len..], b[0..off]);

View File

@ -72,7 +72,7 @@ pub const NullTerminated2DArray = struct {
errdefer allocator.free(buf);
var write_index = index_size;
const index_buf = @bytesToSlice(?[*]u8, buf);
const index_buf = mem.bytesAsSlice(?[*]u8, buf);
var i: usize = 0;
for (slices) |slice| {

View File

@ -2,7 +2,7 @@ const std = @import("std");
const testing = std.testing;
pub fn readULEB128(comptime T: type, in_stream: var) !T {
const ShiftT = @IntType(false, std.math.log2(T.bit_count));
const ShiftT = std.meta.IntType(false, std.math.log2(T.bit_count));
var result: T = 0;
var shift: usize = 0;
@ -27,7 +27,7 @@ pub fn readULEB128(comptime T: type, in_stream: var) !T {
}
pub fn readULEB128Mem(comptime T: type, ptr: *[*]const u8) !T {
const ShiftT = @IntType(false, std.math.log2(T.bit_count));
const ShiftT = std.meta.IntType(false, std.math.log2(T.bit_count));
var result: T = 0;
var shift: usize = 0;
@ -55,8 +55,8 @@ pub fn readULEB128Mem(comptime T: type, ptr: *[*]const u8) !T {
}
pub fn readILEB128(comptime T: type, in_stream: var) !T {
const UT = @IntType(false, T.bit_count);
const ShiftT = @IntType(false, std.math.log2(T.bit_count));
const UT = std.meta.IntType(false, T.bit_count);
const ShiftT = std.meta.IntType(false, std.math.log2(T.bit_count));
var result: UT = 0;
var shift: usize = 0;
@ -87,8 +87,8 @@ pub fn readILEB128(comptime T: type, in_stream: var) !T {
}
pub fn readILEB128Mem(comptime T: type, ptr: *[*]const u8) !T {
const UT = @IntType(false, T.bit_count);
const ShiftT = @IntType(false, std.math.log2(T.bit_count));
const UT = std.meta.IntType(false, T.bit_count);
const ShiftT = std.meta.IntType(false, std.math.log2(T.bit_count));
var result: UT = 0;
var shift: usize = 0;

View File

@ -1,6 +1,7 @@
pub const Channel = @import("event/channel.zig").Channel;
pub const Future = @import("event/future.zig").Future;
pub const Group = @import("event/group.zig").Group;
pub const Batch = @import("event/batch.zig").Batch;
pub const Lock = @import("event/lock.zig").Lock;
pub const Locked = @import("event/locked.zig").Locked;
pub const RwLock = @import("event/rwlock.zig").RwLock;
@ -11,6 +12,7 @@ test "import event tests" {
_ = @import("event/channel.zig");
_ = @import("event/future.zig");
_ = @import("event/group.zig");
_ = @import("event/batch.zig");
_ = @import("event/lock.zig");
_ = @import("event/locked.zig");
_ = @import("event/rwlock.zig");

139
lib/std/event/batch.zig Normal file
View File

@ -0,0 +1,139 @@
const std = @import("../std.zig");
const testing = std.testing;
/// Performs multiple async functions in parallel, without heap allocation.
/// Async function frames are managed externally to this abstraction, and
/// passed in via the `add` function. Once all the jobs are added, call `wait`.
/// This API is *not* thread-safe. The object must be accessed from one thread at
/// a time, however, it need not be the same thread.
pub fn Batch(
    /// The return value for each job.
    /// If a job slot was re-used due to maxed out concurrency, then its result
    /// value will be overwritten. Per-job values are stored in the `jobs`
    /// field, one slot per job (`jobs[i].result`).
    comptime Result: type,
    /// How many jobs to run in parallel.
    comptime max_jobs: comptime_int,
    /// Controls whether the `add` and `wait` functions will be async functions.
    comptime async_behavior: enum {
        /// Observe the value of `std.io.is_async` to decide whether `add`
        /// and `wait` will be async functions. Asserts that the jobs do not suspend when
        /// `std.io.mode == .blocking`. This is a generally safe assumption, and the
        /// usual recommended option for this parameter.
        auto_async,
        /// Always uses the `noasync` keyword when using `await` on the jobs,
        /// making `add` and `wait` non-async functions. Asserts that the jobs do not suspend.
        never_async,
        /// `add` and `wait` use regular `await` keyword, making them async functions.
        always_async,
    },
) type {
    return struct {
        /// Fixed-size pool of job slots; this is why no heap allocation is needed.
        jobs: [max_jobs]Job,
        /// Index of the slot the next `add` call will use (round-robin).
        next_job_index: usize,
        /// When `Result` is an error union, the most recent error seen while
        /// awaiting jobs; otherwise `void`.
        collected_result: CollectedResult,
        const Job = struct {
            /// Pending frame for this slot, or null when the slot is free.
            frame: ?anyframe->Result,
            /// Result of the most recently completed job in this slot.
            result: Result,
        };
        const Self = @This();
        // Only error unions carry a meaningful aggregate result; anything else
        // collapses to void and `wait` returns nothing.
        const CollectedResult = switch (@typeInfo(Result)) {
            .ErrorUnion => Result,
            else => void,
        };
        // Whether a real (suspending) `await` may be used, per `async_behavior`.
        const async_ok = switch (async_behavior) {
            .auto_async => std.io.is_async,
            .never_async => false,
            .always_async => true,
        };
        /// Returns a Batch with every job slot empty and no collected error.
        pub fn init() Self {
            return Self{
                .jobs = [1]Job{
                    .{
                        .frame = null,
                        .result = undefined,
                    },
                } ** max_jobs,
                .next_job_index = 0,
                .collected_result = {},
            };
        }
        /// Add a frame to the Batch. If all jobs are in-flight, then this function
        /// waits until one completes.
        /// This function is *not* thread-safe. It must be called from one thread at
        /// a time, however, it need not be the same thread.
        /// TODO: "select" language feature to use the next available slot, rather than
        /// awaiting the next index.
        pub fn add(self: *Self, frame: anyframe->Result) void {
            const job = &self.jobs[self.next_job_index];
            self.next_job_index = (self.next_job_index + 1) % max_jobs;
            if (job.frame) |existing| {
                // Slot is occupied: finish its previous job before reusing it.
                job.result = if (async_ok) await existing else noasync await existing;
                if (CollectedResult != void) {
                    // Remember the error so `wait` can report it even after
                    // this slot's result is overwritten.
                    job.result catch |err| {
                        self.collected_result = err;
                    };
                }
            }
            job.frame = frame;
        }
        /// Wait for all the jobs to complete.
        /// Safe to call any number of times.
        /// If `Result` is an error union, this function returns the last error that occurred, if any.
        /// Unlike the `results` field, the return value of `wait` will report any error that occurred;
        /// hitting max parallelism will not compromise the result.
        /// This function is *not* thread-safe. It must be called from one thread at
        /// a time, however, it need not be the same thread.
        pub fn wait(self: *Self) CollectedResult {
            for (self.jobs) |*job| if (job.frame) |f| {
                job.result = if (async_ok) await f else noasync await f;
                if (CollectedResult != void) {
                    job.result catch |err| {
                        self.collected_result = err;
                    };
                }
                // Mark the slot free so repeated `wait` calls are harmless.
                job.frame = null;
            };
            return self.collected_result;
        }
    };
}
test "std.event.Batch" {
    // Void-result variant: two jobs mutate a shared counter (1 + 10 == 11).
    var count: usize = 0;
    var void_batch = Batch(void, 2, .auto_async).init();
    void_batch.add(&async sleepALittle(&count));
    void_batch.add(&async increaseByTen(&count));
    void_batch.wait();
    testing.expect(count == 11);

    // Error-union variant: `wait` must surface the error from the failing job.
    var err_batch = Batch(anyerror!void, 2, .auto_async).init();
    err_batch.add(&async somethingElse());
    err_batch.add(&async doSomethingThatFails());
    testing.expectError(error.ItBroke, err_batch.wait());
}
/// Sleep briefly, then atomically bump the shared counter by one.
fn sleepALittle(count: *usize) void {
    const nap_duration = 1 * std.time.millisecond;
    std.time.sleep(nap_duration);
    _ = @atomicRmw(usize, count, .Add, 1, .SeqCst);
}
/// Atomically add ten to the shared counter, one increment at a time.
fn increaseByTen(count: *usize) void {
    var remaining: usize = 10;
    while (remaining > 0) : (remaining -= 1) {
        _ = @atomicRmw(usize, count, .Add, 1, .SeqCst);
    }
}
// NOTE(review): despite its name, this job always succeeds (empty body, no
// error returned). The job that actually fails is `somethingElse` below —
// the two names appear swapped; confirm intent before renaming, since the
// "std.event.Batch" test references both by name.
fn doSomethingThatFails() anyerror!void {}
// NOTE(review): this is the job that actually fails (always returns
// error.ItBroke), even though `doSomethingThatFails` above is the one named
// for failure. The names appear swapped; confirm intent before renaming.
fn somethingElse() anyerror!void {
    return error.ItBroke;
}

View File

@ -5,6 +5,11 @@ const testing = std.testing;
const Allocator = std.mem.Allocator;
/// ReturnType must be `void` or `E!void`
/// TODO This API was created back with the old design of async/await, when calling any
/// async function required an allocator. There is an ongoing experiment to transition
/// all uses of this API to the simpler and more resource-aware `std.event.Batch` API.
/// If the transition goes well, all usages of `Group` will be gone, and this API
/// will be deleted.
pub fn Group(comptime ReturnType: type) type {
return struct {
frame_stack: Stack,

View File

@ -12,15 +12,18 @@ const maxInt = std.math.maxInt;
const Thread = std.Thread;
pub const Loop = struct {
allocator: *mem.Allocator,
next_tick_queue: std.atomic.Queue(anyframe),
os_data: OsData,
final_resume_node: ResumeNode,
pending_event_count: usize,
extra_threads: []*Thread,
// pre-allocated eventfds. all permanently active.
// this is how we send promises to be resumed on other threads.
/// For resources that have the same lifetime as the `Loop`.
/// This is only used by `Loop` for the thread pool and associated resources.
arena: std.heap.ArenaAllocator,
/// Pre-allocated eventfds. All permanently active.
/// This is how `Loop` sends promises to be resumed on other threads.
available_eventfd_resume_nodes: std.atomic.Stack(ResumeNode.EventFd),
eventfd_resume_nodes: []std.atomic.Stack(ResumeNode.EventFd).Node,
@ -127,11 +130,9 @@ pub const Loop = struct {
/// Thread count is the total thread count. The thread pool size will be
/// max(thread_count - 1, 0)
pub fn initThreadPool(self: *Loop, thread_count: usize) !void {
// TODO: https://github.com/ziglang/zig/issues/3539
const allocator = std.heap.page_allocator;
self.* = Loop{
.arena = std.heap.ArenaAllocator.init(std.heap.page_allocator),
.pending_event_count = 1,
.allocator = allocator,
.os_data = undefined,
.next_tick_queue = std.atomic.Queue(anyframe).init(),
.extra_threads = undefined,
@ -143,17 +144,17 @@ pub const Loop = struct {
.overlapped = ResumeNode.overlapped_init,
},
};
errdefer self.arena.deinit();
// We need at least one of these in case the fs thread wants to use onNextTick
const extra_thread_count = thread_count - 1;
const resume_node_count = std.math.max(extra_thread_count, 1);
self.eventfd_resume_nodes = try self.allocator.alloc(
self.eventfd_resume_nodes = try self.arena.allocator.alloc(
std.atomic.Stack(ResumeNode.EventFd).Node,
resume_node_count,
);
errdefer self.allocator.free(self.eventfd_resume_nodes);
self.extra_threads = try self.allocator.alloc(*Thread, extra_thread_count);
errdefer self.allocator.free(self.extra_threads);
self.extra_threads = try self.arena.allocator.alloc(*Thread, extra_thread_count);
try self.initOsData(extra_thread_count);
errdefer self.deinitOsData();
@ -161,7 +162,8 @@ pub const Loop = struct {
pub fn deinit(self: *Loop) void {
self.deinitOsData();
self.allocator.free(self.extra_threads);
self.arena.deinit();
self.* = undefined;
}
const InitOsDataError = os.EpollCreateError || mem.Allocator.Error || os.EventFdError ||
@ -407,7 +409,6 @@ pub const Loop = struct {
noasync os.close(self.os_data.final_eventfd);
while (self.available_eventfd_resume_nodes.pop()) |node| noasync os.close(node.data.eventfd);
noasync os.close(self.os_data.epollfd);
self.allocator.free(self.eventfd_resume_nodes);
},
.macosx, .freebsd, .netbsd, .dragonfly => {
noasync os.close(self.os_data.kqfd);

View File

@ -101,7 +101,7 @@ pub fn LinearFifo(
}
}
{ // set unused area to undefined
const unused = @sliceToBytes(self.buf[self.count..]);
const unused = mem.sliceAsBytes(self.buf[self.count..]);
@memset(unused.ptr, undefined, unused.len);
}
}
@ -166,12 +166,12 @@ pub fn LinearFifo(
{ // set old range to undefined. Note: may be wrapped around
const slice = self.readableSliceMut(0);
if (slice.len >= count) {
const unused = @sliceToBytes(slice[0..count]);
const unused = mem.sliceAsBytes(slice[0..count]);
@memset(unused.ptr, undefined, unused.len);
} else {
const unused = @sliceToBytes(slice[0..]);
const unused = mem.sliceAsBytes(slice[0..]);
@memset(unused.ptr, undefined, unused.len);
const unused2 = @sliceToBytes(self.readableSliceMut(slice.len)[0 .. count - slice.len]);
const unused2 = mem.sliceAsBytes(self.readableSliceMut(slice.len)[0 .. count - slice.len]);
@memset(unused2.ptr, undefined, unused2.len);
}
}

View File

@ -82,7 +82,7 @@ pub fn format(
comptime fmt: []const u8,
args: var,
) Errors!void {
const ArgSetType = @IntType(false, 32);
const ArgSetType = u32;
if (@typeInfo(@TypeOf(args)) != .Struct) {
@compileError("Expected tuple or struct argument, found " ++ @typeName(@TypeOf(args)));
}
@ -405,7 +405,7 @@ pub fn formatType(
try format(context, Errors, output, "@{x}", .{@ptrToInt(&value)});
}
},
.Struct => {
.Struct => |StructT| {
if (comptime std.meta.trait.hasFn("format")(T)) {
return value.format(fmt, options, context, Errors, output);
}
@ -416,27 +416,28 @@ pub fn formatType(
}
comptime var field_i = 0;
try output(context, "{");
inline while (field_i < @memberCount(T)) : (field_i += 1) {
inline for (StructT.fields) |f| {
if (field_i == 0) {
try output(context, " .");
} else {
try output(context, ", .");
}
try output(context, @memberName(T, field_i));
try output(context, f.name);
try output(context, " = ");
try formatType(@field(value, @memberName(T, field_i)), fmt, options, context, Errors, output, max_depth - 1);
try formatType(@field(value, f.name), fmt, options, context, Errors, output, max_depth - 1);
field_i += 1;
}
try output(context, " }");
},
.Pointer => |ptr_info| switch (ptr_info.size) {
.One => switch (@typeInfo(ptr_info.child)) {
builtin.TypeId.Array => |info| {
.Array => |info| {
if (info.child == u8) {
return formatText(value, fmt, options, context, Errors, output);
}
return format(context, Errors, output, "{}@{x}", .{ @typeName(T.Child), @ptrToInt(value) });
},
builtin.TypeId.Enum, builtin.TypeId.Union, builtin.TypeId.Struct => {
.Enum, .Union, .Struct => {
return formatType(value.*, fmt, options, context, Errors, output, max_depth);
},
else => return format(context, Errors, output, "{}@{x}", .{ @typeName(T.Child), @ptrToInt(value) }),
@ -509,7 +510,7 @@ fn formatValue(
}
const T = @TypeOf(value);
switch (@typeId(T)) {
switch (@typeInfo(T)) {
.Float => return formatFloatValue(value, fmt, options, context, Errors, output),
.Int, .ComptimeInt => return formatIntValue(value, fmt, options, context, Errors, output),
.Bool => return output(context, if (value) "true" else "false"),
@ -757,8 +758,6 @@ pub fn formatFloatDecimal(
} else {
try output(context, ".0");
}
} else {
try output(context, "0");
}
return;
@ -945,7 +944,7 @@ fn formatIntSigned(
.fill = options.fill,
};
const uint = @IntType(false, @TypeOf(value).bit_count);
const uint = std.meta.IntType(false, @TypeOf(value).bit_count);
if (value < 0) {
const minus_sign: u8 = '-';
try output(context, @as(*const [1]u8, &minus_sign)[0..]);
@ -973,7 +972,7 @@ fn formatIntUnsigned(
assert(base >= 2);
var buf: [math.max(@TypeOf(value).bit_count, 1)]u8 = undefined;
const min_int_bits = comptime math.max(@TypeOf(value).bit_count, @TypeOf(base).bit_count);
const MinInt = @IntType(@TypeOf(value).is_signed, min_int_bits);
const MinInt = std.meta.IntType(@TypeOf(value).is_signed, min_int_bits);
var a: MinInt = value;
var index: usize = buf.len;
@ -1399,6 +1398,7 @@ test "float.special" {
test "float.decimal" {
try testFmt("f64: 152314000000000000000000000000", "f64: {d}", .{@as(f64, 1.52314e+29)});
try testFmt("f32: 0", "f32: {d}", .{@as(f32, 0.0)});
try testFmt("f32: 1.1", "f32: {d:.1}", .{@as(f32, 1.1234)});
try testFmt("f32: 1234.57", "f32: {d:.2}", .{@as(f32, 1234.567)});
// -11.1234 is converted to f64 -11.12339... internally (errol3() function takes f64).

View File

@ -389,7 +389,7 @@ test "fmt.parseFloat" {
const epsilon = 1e-7;
inline for ([_]type{ f16, f32, f64, f128 }) |T| {
const Z = @IntType(false, T.bit_count);
const Z = std.meta.IntType(false, T.bit_count);
testing.expectError(error.InvalidCharacter, parseFloat(T, ""));
testing.expectError(error.InvalidCharacter, parseFloat(T, " 1"));

View File

@ -96,7 +96,6 @@ pub fn updateFile(source_path: []const u8, dest_path: []const u8) !PrevStatus {
/// atime, and mode of the source file so that the next call to `updateFile` will not need a copy.
/// Returns the previous status of the file before updating.
/// If any of the directories do not exist for dest_path, they are created.
/// TODO https://github.com/ziglang/zig/issues/2885
pub fn updateFileMode(source_path: []const u8, dest_path: []const u8, mode: ?File.Mode) !PrevStatus {
const my_cwd = cwd();
@ -818,6 +817,13 @@ pub const Dir = struct {
) File.OpenError!File {
const w = os.windows;
if (sub_path_w[0] == '.' and sub_path_w[1] == 0) {
return error.IsDir;
}
if (sub_path_w[0] == '.' and sub_path_w[1] == '.' and sub_path_w[2] == 0) {
return error.IsDir;
}
var result = File{
.handle = undefined,
.io_mode = .blocking,
@ -839,12 +845,6 @@ pub const Dir = struct {
.SecurityDescriptor = null,
.SecurityQualityOfService = null,
};
if (sub_path_w[0] == '.' and sub_path_w[1] == 0) {
return error.IsDir;
}
if (sub_path_w[0] == '.' and sub_path_w[1] == '.' and sub_path_w[2] == 0) {
return error.IsDir;
}
var io: w.IO_STATUS_BLOCK = undefined;
const rc = w.ntdll.NtCreateFile(
&result.handle,
@ -864,6 +864,7 @@ pub const Dir = struct {
.OBJECT_NAME_INVALID => unreachable,
.OBJECT_NAME_NOT_FOUND => return error.FileNotFound,
.OBJECT_PATH_NOT_FOUND => return error.FileNotFound,
.NO_MEDIA_IN_DEVICE => return error.NoDevice,
.INVALID_PARAMETER => unreachable,
.SHARING_VIOLATION => return error.SharingViolation,
.ACCESS_DENIED => return error.AccessDenied,
@ -1323,6 +1324,50 @@ pub const Dir = struct {
defer file.close();
try file.write(data);
}
pub const AccessError = os.AccessError;
/// Test accessing `path`.
/// `path` is UTF8-encoded.
/// Be careful of Time-Of-Check-Time-Of-Use race conditions when using this function.
/// For example, instead of testing if a file exists and then opening it, just
/// open it and handle the error for file not found.
pub fn access(self: Dir, sub_path: []const u8, flags: File.OpenFlags) AccessError!void {
if (builtin.os == .windows) {
const sub_path_w = try os.windows.sliceToPrefixedFileW(sub_path);
return self.accessW(&sub_path_w, flags);
}
const path_c = try os.toPosixPath(sub_path);
return self.accessZ(&path_c, flags);
}
/// Same as `access` except the path parameter is null-terminated.
pub fn accessZ(self: Dir, sub_path: [*:0]const u8, flags: File.OpenFlags) AccessError!void {
if (builtin.os == .windows) {
const sub_path_w = try os.windows.cStrToPrefixedFileW(sub_path);
return self.accessW(&sub_path_w, flags);
}
const os_mode = if (flags.write and flags.read)
@as(u32, os.R_OK | os.W_OK)
else if (flags.write)
@as(u32, os.W_OK)
else
@as(u32, os.F_OK);
const result = if (need_async_thread)
std.event.Loop.instance.?.faccessatZ(self.fd, sub_path, os_mode)
else
os.faccessatZ(self.fd, sub_path, os_mode, 0);
return result;
}
/// Same as `access` except asserts the target OS is Windows and the path parameter is
/// * WTF-16 encoded
/// * null-terminated
/// * NtDll prefixed
/// TODO currently this ignores `flags`.
pub fn accessW(self: Dir, sub_path_w: [*:0]const u16, flags: File.OpenFlags) AccessError!void {
return os.faccessatW(self.fd, sub_path_w, 0, 0);
}
};
/// Returns an handle to the current working directory that is open for traversal.

View File

@ -60,31 +60,6 @@ pub const File = struct {
mode: Mode = default_mode,
};
/// Test for the existence of `path`.
/// `path` is UTF8-encoded.
/// In general it is recommended to avoid this function. For example,
/// instead of testing if a file exists and then opening it, just
/// open it and handle the error for file not found.
/// TODO: deprecate this and move it to `std.fs.Dir`.
/// TODO: integrate with async I/O
pub fn access(path: []const u8) !void {
return os.access(path, os.F_OK);
}
/// Same as `access` except the parameter is null-terminated.
/// TODO: deprecate this and move it to `std.fs.Dir`.
/// TODO: integrate with async I/O
pub fn accessC(path: [*:0]const u8) !void {
return os.accessC(path, os.F_OK);
}
/// Same as `access` except the parameter is null-terminated UTF16LE-encoded.
/// TODO: deprecate this and move it to `std.fs.Dir`.
/// TODO: integrate with async I/O
pub fn accessW(path: [*:0]const u16) !void {
return os.accessW(path, os.F_OK);
}
/// Upon success, the stream is in an uninitialized state. To continue using it,
/// you must use the open() function.
pub fn close(self: File) void {

View File

@ -26,7 +26,7 @@ fn eqlString(a: []const u16, b: []const u16) bool {
}
fn hashString(s: []const u16) u32 {
return @truncate(u32, std.hash.Wyhash.hash(0, @sliceToBytes(s)));
return @truncate(u32, std.hash.Wyhash.hash(0, mem.sliceAsBytes(s)));
}
const WatchEventError = error{

View File

@ -93,7 +93,7 @@ pub fn hash(hasher: var, key: var, comptime strat: HashStrategy) void {
// TODO Check if the situation is better after #561 is resolved.
.Int => @call(.{ .modifier = .always_inline }, hasher.update, .{std.mem.asBytes(&key)}),
.Float => |info| hash(hasher, @bitCast(@IntType(false, info.bits), key), strat),
.Float => |info| hash(hasher, @bitCast(std.meta.IntType(false, info.bits), key), strat),
.Bool => hash(hasher, @boolToInt(key), strat),
.Enum => hash(hasher, @enumToInt(key), strat),

View File

@ -10,7 +10,7 @@ const primes = [_]u64{
};
fn read_bytes(comptime bytes: u8, data: []const u8) u64 {
const T = @IntType(false, 8 * bytes);
const T = std.meta.IntType(false, 8 * bytes);
return mem.readIntSliceLittle(T, data[0..bytes]);
}

View File

@ -283,14 +283,14 @@ const WasmPageAllocator = struct {
fn getBit(self: FreeBlock, idx: usize) PageStatus {
const bit_offset = 0;
return @intToEnum(PageStatus, Io.get(@sliceToBytes(self.data), idx, bit_offset));
return @intToEnum(PageStatus, Io.get(mem.sliceAsBytes(self.data), idx, bit_offset));
}
fn setBits(self: FreeBlock, start_idx: usize, len: usize, val: PageStatus) void {
const bit_offset = 0;
var i: usize = 0;
while (i < len) : (i += 1) {
Io.set(@sliceToBytes(self.data), start_idx + i, bit_offset, @enumToInt(val));
Io.set(mem.sliceAsBytes(self.data), start_idx + i, bit_offset, @enumToInt(val));
}
}
@ -552,7 +552,7 @@ pub const ArenaAllocator = struct {
if (len >= actual_min_size) break;
}
const buf = try self.child_allocator.alignedAlloc(u8, @alignOf(BufNode), len);
const buf_node_slice = @bytesToSlice(BufNode, buf[0..@sizeOf(BufNode)]);
const buf_node_slice = mem.bytesAsSlice(BufNode, buf[0..@sizeOf(BufNode)]);
const buf_node = &buf_node_slice[0];
buf_node.* = BufNode{
.data = buf,
@ -1015,7 +1015,7 @@ fn testAllocatorLargeAlignment(allocator: *mem.Allocator) mem.Allocator.Error!vo
// very near usize?
if (mem.page_size << 2 > maxInt(usize)) return;
const USizeShift = @IntType(false, std.math.log2(usize.bit_count));
const USizeShift = std.meta.IntType(false, std.math.log2(usize.bit_count));
const large_align = @as(u29, mem.page_size << 2);
var align_mask: usize = undefined;

View File

@ -121,76 +121,37 @@ pub fn BufferedInStreamCustom(comptime buffer_size: usize, comptime Error: type)
unbuffered_in_stream: *Stream,
buffer: [buffer_size]u8,
start_index: usize,
end_index: usize,
const FifoType = std.fifo.LinearFifo(u8, std.fifo.LinearFifoBufferType{ .Static = buffer_size });
fifo: FifoType,
pub fn init(unbuffered_in_stream: *Stream) Self {
return Self{
.unbuffered_in_stream = unbuffered_in_stream,
.buffer = undefined,
// Initialize these two fields to buffer_size so that
// in `readFn` we treat the state as being able to read
// more from the unbuffered stream. If we set them to 0
// and 0, the code would think we already hit EOF.
.start_index = buffer_size,
.end_index = buffer_size,
.fifo = FifoType.init(),
.stream = Stream{ .readFn = readFn },
};
}
fn readFn(in_stream: *Stream, dest: []u8) !usize {
const self = @fieldParentPtr(Self, "stream", in_stream);
// Hot path for one byte reads
if (dest.len == 1 and self.end_index > self.start_index) {
dest[0] = self.buffer[self.start_index];
self.start_index += 1;
return 1;
}
var dest_index: usize = 0;
while (true) {
const dest_space = dest.len - dest_index;
if (dest_space == 0) {
return dest_index;
}
const amt_buffered = self.end_index - self.start_index;
if (amt_buffered == 0) {
assert(self.end_index <= buffer_size);
// Make sure the last read actually gave us some data
if (self.end_index == 0) {
while (dest_index < dest.len) {
const written = self.fifo.read(dest[dest_index..]);
if (written == 0) {
// fifo empty, fill it
const writable = self.fifo.writableSlice(0);
assert(writable.len > 0);
const n = try self.unbuffered_in_stream.read(writable);
if (n == 0) {
// reading from the unbuffered stream returned nothing
// so we have nothing left to read.
return dest_index;
}
// we can read more data from the unbuffered stream
if (dest_space < buffer_size) {
self.start_index = 0;
self.end_index = try self.unbuffered_in_stream.read(self.buffer[0..]);
// Shortcut
if (self.end_index >= dest_space) {
mem.copy(u8, dest[dest_index..], self.buffer[0..dest_space]);
self.start_index = dest_space;
return dest.len;
}
} else {
// asking for so much data that buffering is actually less efficient.
// forward the request directly to the unbuffered stream
const amt_read = try self.unbuffered_in_stream.read(dest[dest_index..]);
return dest_index + amt_read;
}
self.fifo.update(n);
}
const copy_amount = math.min(dest_space, amt_buffered);
const copy_end_index = self.start_index + copy_amount;
mem.copy(u8, dest[dest_index..], self.buffer[self.start_index..copy_end_index]);
self.start_index = copy_end_index;
dest_index += copy_amount;
dest_index += written;
}
return dest.len;
}
};
}
@ -235,7 +196,7 @@ test "io.BufferedInStream" {
/// Creates a stream which supports 'un-reading' data, so that it can be read again.
/// This makes look-ahead style parsing much easier.
pub fn PeekStream(comptime buffer_size: usize, comptime InStreamError: type) type {
pub fn PeekStream(comptime buffer_type: std.fifo.LinearFifoBufferType, comptime InStreamError: type) type {
return struct {
const Self = @This();
pub const Error = InStreamError;
@ -244,57 +205,57 @@ pub fn PeekStream(comptime buffer_size: usize, comptime InStreamError: type) typ
stream: Stream,
base: *Stream,
// Right now the look-ahead space is statically allocated, but a version with dynamic allocation
// is not too difficult to derive from this.
buffer: [buffer_size]u8,
index: usize,
at_end: bool,
const FifoType = std.fifo.LinearFifo(u8, buffer_type);
fifo: FifoType,
pub fn init(base: *Stream) Self {
return Self{
.base = base,
.buffer = undefined,
.index = 0,
.at_end = false,
.stream = Stream{ .readFn = readFn },
};
pub usingnamespace switch (buffer_type) {
.Static => struct {
pub fn init(base: *Stream) Self {
return .{
.base = base,
.fifo = FifoType.init(),
.stream = Stream{ .readFn = readFn },
};
}
},
.Slice => struct {
pub fn init(base: *Stream, buf: []u8) Self {
return .{
.base = base,
.fifo = FifoType.init(buf),
.stream = Stream{ .readFn = readFn },
};
}
},
.Dynamic => struct {
pub fn init(base: *Stream, allocator: *mem.Allocator) Self {
return .{
.base = base,
.fifo = FifoType.init(allocator),
.stream = Stream{ .readFn = readFn },
};
}
},
};
pub fn putBackByte(self: *Self, byte: u8) !void {
try self.putBack(&[_]u8{byte});
}
pub fn putBackByte(self: *Self, byte: u8) void {
self.buffer[self.index] = byte;
self.index += 1;
}
pub fn putBack(self: *Self, bytes: []const u8) void {
var pos = bytes.len;
while (pos != 0) {
pos -= 1;
self.putBackByte(bytes[pos]);
}
pub fn putBack(self: *Self, bytes: []const u8) !void {
try self.fifo.unget(bytes);
}
fn readFn(in_stream: *Stream, dest: []u8) Error!usize {
const self = @fieldParentPtr(Self, "stream", in_stream);
// copy over anything putBack()'d
var pos: usize = 0;
while (pos < dest.len and self.index != 0) {
dest[pos] = self.buffer[self.index - 1];
self.index -= 1;
pos += 1;
}
if (pos == dest.len or self.at_end) {
return pos;
}
var dest_index = self.fifo.read(dest);
if (dest_index == dest.len) return dest_index;
// ask the backing stream for more
const left = dest.len - pos;
const read = try self.base.read(dest[pos..]);
assert(read <= left);
self.at_end = (read < left);
return pos + read;
dest_index += try self.base.read(dest[dest_index..]);
return dest_index;
}
};
}
@ -376,7 +337,7 @@ pub fn BitInStream(endian: builtin.Endian, comptime Error: type) type {
assert(u_bit_count >= bits);
break :bc if (u_bit_count <= u8_bit_count) u8_bit_count else u_bit_count;
};
const Buf = @IntType(false, buf_bit_count);
const Buf = std.meta.IntType(false, buf_bit_count);
const BufShift = math.Log2Int(Buf);
out_bits.* = @as(usize, 0);
@ -607,52 +568,33 @@ pub fn BufferedOutStreamCustom(comptime buffer_size: usize, comptime OutStreamEr
unbuffered_out_stream: *Stream,
buffer: [buffer_size]u8,
index: usize,
const FifoType = std.fifo.LinearFifo(u8, std.fifo.LinearFifoBufferType{ .Static = buffer_size });
fifo: FifoType,
pub fn init(unbuffered_out_stream: *Stream) Self {
return Self{
.unbuffered_out_stream = unbuffered_out_stream,
.buffer = undefined,
.index = 0,
.fifo = FifoType.init(),
.stream = Stream{ .writeFn = writeFn },
};
}
pub fn flush(self: *Self) !void {
try self.unbuffered_out_stream.write(self.buffer[0..self.index]);
self.index = 0;
while (true) {
const slice = self.fifo.readableSlice(0);
if (slice.len == 0) break;
try self.unbuffered_out_stream.write(slice);
self.fifo.discard(slice.len);
}
}
fn writeFn(out_stream: *Stream, bytes: []const u8) Error!void {
const self = @fieldParentPtr(Self, "stream", out_stream);
if (bytes.len == 1) {
// This is not required logic but a shorter path
// for single byte writes
self.buffer[self.index] = bytes[0];
self.index += 1;
if (self.index == buffer_size) {
try self.flush();
}
return;
} else if (bytes.len >= self.buffer.len) {
if (bytes.len >= self.fifo.writableLength()) {
try self.flush();
return self.unbuffered_out_stream.write(bytes);
}
var src_index: usize = 0;
while (src_index < bytes.len) {
const dest_space_left = self.buffer.len - self.index;
const copy_amt = math.min(dest_space_left, bytes.len - src_index);
mem.copy(u8, self.buffer[self.index..], bytes[src_index .. src_index + copy_amt]);
self.index += copy_amt;
assert(self.index <= self.buffer.len);
if (self.index == self.buffer.len) {
try self.flush();
}
src_index += copy_amt;
}
self.fifo.writeAssumeCapacity(bytes);
}
};
}
@ -717,7 +659,7 @@ pub fn BitOutStream(endian: builtin.Endian, comptime Error: type) type {
assert(u_bit_count >= bits);
break :bc if (u_bit_count <= u8_bit_count) u8_bit_count else u_bit_count;
};
const Buf = @IntType(false, buf_bit_count);
const Buf = std.meta.IntType(false, buf_bit_count);
const BufShift = math.Log2Int(Buf);
const buf_value = @intCast(Buf, value);
@ -848,73 +790,6 @@ pub const BufferedAtomicFile = struct {
}
};
pub fn readLine(buf: *std.Buffer) ![]u8 {
var stdin_stream = getStdIn().inStream();
return readLineFrom(&stdin_stream.stream, buf);
}
/// Reads all characters until the next newline into buf, and returns
/// a slice of the characters read (excluding the newline character(s)).
pub fn readLineFrom(stream: var, buf: *std.Buffer) ![]u8 {
const start = buf.len();
while (true) {
const byte = try stream.readByte();
switch (byte) {
'\r' => {
// trash the following \n
_ = try stream.readByte();
return buf.toSlice()[start..];
},
'\n' => return buf.toSlice()[start..],
else => try buf.appendByte(byte),
}
}
}
test "io.readLineFrom" {
var buf = try std.Buffer.initSize(testing.allocator, 0);
defer buf.deinit();
var mem_stream = SliceInStream.init(
\\Line 1
\\Line 22
\\Line 333
);
const stream = &mem_stream.stream;
testing.expectEqualSlices(u8, "Line 1", try readLineFrom(stream, &buf));
testing.expectEqualSlices(u8, "Line 22", try readLineFrom(stream, &buf));
testing.expectError(error.EndOfStream, readLineFrom(stream, &buf));
testing.expectEqualSlices(u8, "Line 1Line 22Line 333", buf.toSlice());
}
pub fn readLineSlice(slice: []u8) ![]u8 {
var stdin_stream = getStdIn().inStream();
return readLineSliceFrom(&stdin_stream.stream, slice);
}
/// Reads all characters until the next newline into slice, and returns
/// a slice of the characters read (excluding the newline character(s)).
pub fn readLineSliceFrom(stream: var, slice: []u8) ![]u8 {
// We cannot use Buffer.fromOwnedSlice, as it wants to append a null byte
// after taking ownership, which would always require an allocation.
var buf = std.Buffer{ .list = std.ArrayList(u8).fromOwnedSlice(testing.failing_allocator, slice) };
try buf.resize(0);
return try readLineFrom(stream, &buf);
}
test "io.readLineSliceFrom" {
var buf: [7]u8 = undefined;
var mem_stream = SliceInStream.init(
\\Line 1
\\Line 22
\\Line 333
);
const stream = &mem_stream.stream;
testing.expectEqualSlices(u8, "Line 1", try readLineSliceFrom(stream, buf[0..]));
testing.expectError(error.OutOfMemory, readLineSliceFrom(stream, buf[0..]));
}
pub const Packing = enum {
/// Pack data to byte alignment
Byte,
@ -956,12 +831,12 @@ pub fn Deserializer(comptime endian: builtin.Endian, comptime packing: Packing,
//@BUG: inferred error issue. See: #1386
fn deserializeInt(self: *Self, comptime T: type) (Error || error{EndOfStream})!T {
comptime assert(trait.is(builtin.TypeId.Int)(T) or trait.is(builtin.TypeId.Float)(T));
comptime assert(trait.is(.Int)(T) or trait.is(.Float)(T));
const u8_bit_count = 8;
const t_bit_count = comptime meta.bitCount(T);
const U = @IntType(false, t_bit_count);
const U = std.meta.IntType(false, t_bit_count);
const Log2U = math.Log2Int(U);
const int_size = (U.bit_count + 7) / 8;
@ -976,7 +851,7 @@ pub fn Deserializer(comptime endian: builtin.Endian, comptime packing: Packing,
if (int_size == 1) {
if (t_bit_count == 8) return @bitCast(T, buffer[0]);
const PossiblySignedByte = @IntType(T.is_signed, 8);
const PossiblySignedByte = std.meta.IntType(T.is_signed, 8);
return @truncate(T, @bitCast(PossiblySignedByte, buffer[0]));
}
@ -1005,9 +880,9 @@ pub fn Deserializer(comptime endian: builtin.Endian, comptime packing: Packing,
/// Deserializes data into the type pointed to by `ptr`
pub fn deserializeInto(self: *Self, ptr: var) !void {
const T = @TypeOf(ptr);
comptime assert(trait.is(builtin.TypeId.Pointer)(T));
comptime assert(trait.is(.Pointer)(T));
if (comptime trait.isSlice(T) or comptime trait.isPtrTo(builtin.TypeId.Array)(T)) {
if (comptime trait.isSlice(T) or comptime trait.isPtrTo(.Array)(T)) {
for (ptr) |*v|
try self.deserializeInto(v);
return;
@ -1016,7 +891,7 @@ pub fn Deserializer(comptime endian: builtin.Endian, comptime packing: Packing,
comptime assert(trait.isSingleItemPtr(T));
const C = comptime meta.Child(T);
const child_type_id = @typeId(C);
const child_type_id = @typeInfo(C);
//custom deserializer: fn(self: *Self, deserializer: var) !void
if (comptime trait.hasFn("deserialize")(C)) return C.deserialize(ptr, self);
@ -1027,10 +902,10 @@ pub fn Deserializer(comptime endian: builtin.Endian, comptime packing: Packing,
}
switch (child_type_id) {
builtin.TypeId.Void => return,
builtin.TypeId.Bool => ptr.* = (try self.deserializeInt(u1)) > 0,
builtin.TypeId.Float, builtin.TypeId.Int => ptr.* = try self.deserializeInt(C),
builtin.TypeId.Struct => {
.Void => return,
.Bool => ptr.* = (try self.deserializeInt(u1)) > 0,
.Float, .Int => ptr.* = try self.deserializeInt(C),
.Struct => {
const info = @typeInfo(C).Struct;
inline for (info.fields) |*field_info| {
@ -1040,7 +915,7 @@ pub fn Deserializer(comptime endian: builtin.Endian, comptime packing: Packing,
if (FieldType == void or FieldType == u0) continue;
//it doesn't make any sense to read pointers
if (comptime trait.is(builtin.TypeId.Pointer)(FieldType)) {
if (comptime trait.is(.Pointer)(FieldType)) {
@compileError("Will not " ++ "read field " ++ name ++ " of struct " ++
@typeName(C) ++ " because it " ++ "is of pointer-type " ++
@typeName(FieldType) ++ ".");
@ -1049,7 +924,7 @@ pub fn Deserializer(comptime endian: builtin.Endian, comptime packing: Packing,
try self.deserializeInto(&@field(ptr, name));
}
},
builtin.TypeId.Union => {
.Union => {
const info = @typeInfo(C).Union;
if (info.tag_type) |TagType| {
//we avoid duplicate iteration over the enum tags
@ -1073,7 +948,7 @@ pub fn Deserializer(comptime endian: builtin.Endian, comptime packing: Packing,
@compileError("Cannot meaningfully deserialize " ++ @typeName(C) ++
" because it is an untagged union. Use a custom deserialize().");
},
builtin.TypeId.Optional => {
.Optional => {
const OC = comptime meta.Child(C);
const exists = (try self.deserializeInt(u1)) > 0;
if (!exists) {
@ -1085,7 +960,7 @@ pub fn Deserializer(comptime endian: builtin.Endian, comptime packing: Packing,
const val_ptr = &ptr.*.?;
try self.deserializeInto(val_ptr);
},
builtin.TypeId.Enum => {
.Enum => {
var value = try self.deserializeInt(@TagType(C));
ptr.* = try meta.intToEnum(C, value);
},
@ -1134,12 +1009,12 @@ pub fn Serializer(comptime endian: builtin.Endian, comptime packing: Packing, co
fn serializeInt(self: *Self, value: var) Error!void {
const T = @TypeOf(value);
comptime assert(trait.is(builtin.TypeId.Int)(T) or trait.is(builtin.TypeId.Float)(T));
comptime assert(trait.is(.Int)(T) or trait.is(.Float)(T));
const t_bit_count = comptime meta.bitCount(T);
const u8_bit_count = comptime meta.bitCount(u8);
const U = @IntType(false, t_bit_count);
const U = std.meta.IntType(false, t_bit_count);
const Log2U = math.Log2Int(U);
const int_size = (U.bit_count + 7) / 8;
@ -1183,11 +1058,11 @@ pub fn Serializer(comptime endian: builtin.Endian, comptime packing: Packing, co
return;
}
switch (@typeId(T)) {
builtin.TypeId.Void => return,
builtin.TypeId.Bool => try self.serializeInt(@as(u1, @boolToInt(value))),
builtin.TypeId.Float, builtin.TypeId.Int => try self.serializeInt(value),
builtin.TypeId.Struct => {
switch (@typeInfo(T)) {
.Void => return,
.Bool => try self.serializeInt(@as(u1, @boolToInt(value))),
.Float, .Int => try self.serializeInt(value),
.Struct => {
const info = @typeInfo(T);
inline for (info.Struct.fields) |*field_info| {
@ -1197,7 +1072,7 @@ pub fn Serializer(comptime endian: builtin.Endian, comptime packing: Packing, co
if (FieldType == void or FieldType == u0) continue;
//It doesn't make sense to write pointers
if (comptime trait.is(builtin.TypeId.Pointer)(FieldType)) {
if (comptime trait.is(.Pointer)(FieldType)) {
@compileError("Will not " ++ "serialize field " ++ name ++
" of struct " ++ @typeName(T) ++ " because it " ++
"is of pointer-type " ++ @typeName(FieldType) ++ ".");
@ -1205,7 +1080,7 @@ pub fn Serializer(comptime endian: builtin.Endian, comptime packing: Packing, co
try self.serialize(@field(value, name));
}
},
builtin.TypeId.Union => {
.Union => {
const info = @typeInfo(T).Union;
if (info.tag_type) |TagType| {
const active_tag = meta.activeTag(value);
@ -1226,7 +1101,7 @@ pub fn Serializer(comptime endian: builtin.Endian, comptime packing: Packing, co
@compileError("Cannot meaningfully serialize " ++ @typeName(T) ++
" because it is an untagged union. Use a custom serialize().");
},
builtin.TypeId.Optional => {
.Optional => {
if (value == null) {
try self.serializeInt(@as(u1, @boolToInt(false)));
return;
@ -1237,10 +1112,10 @@ pub fn Serializer(comptime endian: builtin.Endian, comptime packing: Packing, co
const val_ptr = &value.?;
try self.serialize(val_ptr.*);
},
builtin.TypeId.Enum => {
.Enum => {
try self.serializeInt(@enumToInt(value));
},
else => @compileError("Cannot serialize " ++ @tagName(@typeId(T)) ++ " types (unimplemented)."),
else => @compileError("Cannot serialize " ++ @tagName(@typeInfo(T)) ++ " types (unimplemented)."),
}
}
};

View File

@ -235,7 +235,7 @@ pub fn InStream(comptime ReadError: type) type {
// Only extern and packed structs have defined in-memory layout.
comptime assert(@typeInfo(T).Struct.layout != builtin.TypeInfo.ContainerLayout.Auto);
var res: [1]T = undefined;
try self.readNoEof(@sliceToBytes(res[0..]));
try self.readNoEof(mem.sliceAsBytes(res[0..]));
return res[0];
}

View File

@ -5,6 +5,7 @@ const meta = std.meta;
const trait = std.trait;
const DefaultPrng = std.rand.DefaultPrng;
const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
const expectError = std.testing.expectError;
const mem = std.mem;
const fs = std.fs;
@ -44,8 +45,8 @@ test "write a file, read it, then delete it" {
defer file.close();
const file_size = try file.getEndPos();
const expected_file_size = "begin".len + data.len + "end".len;
expect(file_size == expected_file_size);
const expected_file_size: u64 = "begin".len + data.len + "end".len;
expectEqual(expected_file_size, file_size);
var file_in_stream = file.inStream();
var buf_stream = io.BufferedInStream(File.ReadError).init(&file_in_stream.stream);
@ -93,12 +94,12 @@ test "SliceInStream" {
test "PeekStream" {
const bytes = [_]u8{ 1, 2, 3, 4, 5, 6, 7, 8 };
var ss = io.SliceInStream.init(&bytes);
var ps = io.PeekStream(2, io.SliceInStream.Error).init(&ss.stream);
var ps = io.PeekStream(.{ .Static = 2 }, io.SliceInStream.Error).init(&ss.stream);
var dest: [4]u8 = undefined;
ps.putBackByte(9);
ps.putBackByte(10);
try ps.putBackByte(9);
try ps.putBackByte(10);
var read = try ps.stream.read(dest[0..4]);
expect(read == 4);
@ -114,8 +115,8 @@ test "PeekStream" {
expect(read == 2);
expect(mem.eql(u8, dest[0..2], bytes[6..8]));
ps.putBackByte(11);
ps.putBackByte(12);
try ps.putBackByte(11);
try ps.putBackByte(12);
read = try ps.stream.read(dest[0..4]);
expect(read == 2);
@ -317,6 +318,7 @@ test "BitStreams with File Stream" {
}
fn testIntSerializerDeserializer(comptime endian: builtin.Endian, comptime packing: io.Packing) !void {
@setEvalBranchQuota(1500);
//@NOTE: if this test is taking too long, reduce the maximum tested bitsize
const max_test_bitsize = 128;
@ -340,8 +342,8 @@ fn testIntSerializerDeserializer(comptime endian: builtin.Endian, comptime packi
comptime var i = 0;
inline while (i <= max_test_bitsize) : (i += 1) {
const U = @IntType(false, i);
const S = @IntType(true, i);
const U = std.meta.IntType(false, i);
const S = std.meta.IntType(true, i);
try serializer.serializeInt(@as(U, i));
if (i != 0) try serializer.serializeInt(@as(S, -1)) else try serializer.serialize(@as(S, 0));
}
@ -349,8 +351,8 @@ fn testIntSerializerDeserializer(comptime endian: builtin.Endian, comptime packi
i = 0;
inline while (i <= max_test_bitsize) : (i += 1) {
const U = @IntType(false, i);
const S = @IntType(true, i);
const U = std.meta.IntType(false, i);
const S = std.meta.IntType(true, i);
const x = try deserializer.deserializeInt(U);
const y = try deserializer.deserializeInt(S);
expect(x == @as(U, i));

View File

@ -19,6 +19,74 @@ const StringEscapes = union(enum) {
},
};
/// Checks to see if a string matches what it would be as a json-encoded string.
/// Assumes that `encoded` is a well-formed json string.
/// NOTE(review): the trailing asserts require both inputs to be fully consumed
/// together — callers here pre-check `decoded.len == stringToken.decodedLength()`.
fn encodesTo(decoded: []const u8, encoded: []const u8) bool {
    var i: usize = 0; // index into `decoded` (raw bytes)
    var j: usize = 0; // index into `encoded` (json-escaped bytes)
    while (i < decoded.len) {
        if (j >= encoded.len) return false;
        if (encoded[j] != '\\') {
            // Ordinary character: must match byte-for-byte.
            if (decoded[i] != encoded[j]) return false;
            j += 1;
            i += 1;
        } else {
            const escape_type = encoded[j + 1];
            if (escape_type != 'u') {
                // Two-character escape: map it to the raw byte it denotes.
                const t: u8 = switch (escape_type) {
                    '\\' => '\\',
                    '/' => '/',
                    'n' => '\n',
                    'r' => '\r',
                    't' => '\t',
                    'f' => 12, // form feed has no zig escape literal
                    'b' => 8, // backspace has no zig escape literal
                    '"' => '"',
                    else => unreachable, // well-formed input guaranteed by caller
                };
                if (decoded[i] != t) return false;
                j += 2;
                i += 1;
            } else {
                // \uXXXX escape; may be the first half of a UTF-16 surrogate pair.
                var codepoint = std.fmt.parseInt(u21, encoded[j + 2 .. j + 6], 16) catch unreachable;
                j += 6;
                if (codepoint >= 0xD800 and codepoint < 0xDC00) {
                    // High surrogate: a low-surrogate \uXXXX must follow; combine them.
                    assert(encoded[j] == '\\');
                    assert(encoded[j + 1] == 'u');
                    const low_surrogate = std.fmt.parseInt(u21, encoded[j + 2 .. j + 6], 16) catch unreachable;
                    codepoint = 0x10000 + (((codepoint & 0x03ff) << 10) | (low_surrogate & 0x03ff));
                    j += 6;
                }
                // Compare the UTF-8 encoding of the codepoint against `decoded`.
                var buf: [4]u8 = undefined;
                const len = std.unicode.utf8Encode(codepoint, &buf) catch unreachable;
                if (i + len > decoded.len) return false;
                if (!mem.eql(u8, decoded[i .. i + len], buf[0..len])) return false;
                i += len;
            }
        }
    }
    assert(i == decoded.len);
    assert(j == encoded.len); // both strings fully consumed
    return true;
}
test "encodesTo" {
    // same
    testing.expectEqual(true, encodesTo("false", "false"));
    // totally different
    testing.expectEqual(false, encodesTo("false", "true"));
    // different lengths
    testing.expectEqual(false, encodesTo("false", "other"));
    // with escape
    testing.expectEqual(true, encodesTo("\\", "\\\\"));
    testing.expectEqual(true, encodesTo("with\nescape", "with\\nescape"));
    // with unicode, including a \uXXXX surrogate pair for the emoji
    testing.expectEqual(true, encodesTo("ą", "\\u0105"));
    testing.expectEqual(true, encodesTo("😂", "\\ud83d\\ude02"));
    testing.expectEqual(true, encodesTo("withąunicode😂", "with\\u0105unicode\\ud83d\\ude02"));
}
/// A single token slice into the parent string.
///
/// Use `token.slice()` on the input at the current position to get the current slice.
@ -1026,10 +1094,8 @@ pub const TokenStream = struct {
pub fn next(self: *TokenStream) Error!?Token {
if (self.token) |token| {
// TODO: Audit this pattern once #2915 is closed
const copy = token;
self.token = null;
return copy;
return token;
}
var t1: ?Token = undefined;
@ -1203,6 +1269,493 @@ pub const Value = union(enum) {
}
};
pub const ParseOptions = struct {
    // Allocator used when the target type contains pointers/slices;
    // `parse` returns error.AllocatorRequired if one is needed but null.
    allocator: ?*Allocator = null,

    /// Behaviour when a duplicate field is encountered.
    duplicate_field_behavior: enum {
        UseFirst,
        Error,
        UseLast,
    } = .Error,
};
/// Parses `token` (already pulled from `tokens`) into a value of type `T`,
/// consuming further tokens from `tokens` as needed for composite types.
/// Pointer/slice results are allocated with `options.allocator`
/// (error.AllocatorRequired if it is null).
fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options: ParseOptions) !T {
    switch (@typeInfo(T)) {
        .Bool => {
            return switch (token) {
                .True => true,
                .False => false,
                else => error.UnexpectedToken,
            };
        },
        .Float, .ComptimeFloat => {
            const numberToken = switch (token) {
                .Number => |n| n,
                else => return error.UnexpectedToken,
            };
            return try std.fmt.parseFloat(T, numberToken.slice(tokens.slice, tokens.i - 1));
        },
        .Int, .ComptimeInt => {
            const numberToken = switch (token) {
                .Number => |n| n,
                else => return error.UnexpectedToken,
            };
            // Reject floats such as "1.5" when an integer was requested.
            if (!numberToken.is_integer) return error.UnexpectedToken;
            return try std.fmt.parseInt(T, numberToken.slice(tokens.slice, tokens.i - 1), 10);
        },
        .Optional => |optionalInfo| {
            if (token == .Null) {
                return null;
            } else {
                return try parseInternal(optionalInfo.child, token, tokens, options);
            }
        },
        .Enum => |enumInfo| {
            switch (token) {
                .Number => |numberToken| {
                    // Numeric form: parse as the enum's tag integer.
                    if (!numberToken.is_integer) return error.UnexpectedToken;
                    const n = try std.fmt.parseInt(enumInfo.tag_type, numberToken.slice(tokens.slice, tokens.i - 1), 10);
                    return try std.meta.intToEnum(T, n);
                },
                .String => |stringToken| {
                    // String form: match against field names, decoding escapes if present.
                    const source_slice = stringToken.slice(tokens.slice, tokens.i - 1);
                    switch (stringToken.escapes) {
                        .None => return std.meta.stringToEnum(T, source_slice) orelse return error.InvalidEnumTag,
                        .Some => {
                            inline for (enumInfo.fields) |field| {
                                if (field.name.len == stringToken.decodedLength() and encodesTo(field.name, source_slice)) {
                                    return @field(T, field.name);
                                }
                            }
                            return error.InvalidEnumTag;
                        },
                    }
                },
                else => return error.UnexpectedToken,
            }
        },
        .Union => |unionInfo| {
            if (unionInfo.tag_type) |_| {
                // try each of the union fields until we find one that matches
                inline for (unionInfo.fields) |u_field| {
                    if (parseInternal(u_field.field_type, token, tokens, options)) |value| {
                        return @unionInit(T, u_field.name, value);
                    } else |err| {
                        // Bubble up error.OutOfMemory
                        // Parsing some types won't have OutOfMemory in their
                        // error-sets, for the condition to be valid, merge it in.
                        if (@as(@TypeOf(err) || error{OutOfMemory}, err) == error.OutOfMemory) return err;
                        // otherwise continue through the `inline for`
                    }
                }
                return error.NoUnionMembersMatched;
            } else {
                @compileError("Unable to parse into untagged union '" ++ @typeName(T) ++ "'");
            }
        },
        .Struct => |structInfo| {
            switch (token) {
                .ObjectBegin => {},
                else => return error.UnexpectedToken,
            }
            var r: T = undefined;
            var fields_seen = [_]bool{false} ** structInfo.fields.len;
            errdefer {
                // On failure, free whatever fields were already populated.
                inline for (structInfo.fields) |field, i| {
                    if (fields_seen[i]) {
                        parseFree(field.field_type, @field(r, field.name), options);
                    }
                }
            }

            while (true) {
                switch ((try tokens.next()) orelse return error.UnexpectedEndOfJson) {
                    .ObjectEnd => break,
                    .String => |stringToken| {
                        const key_source_slice = stringToken.slice(tokens.slice, tokens.i - 1);
                        var found = false;
                        inline for (structInfo.fields) |field, i| {
                            // TODO: using switches here segfault the compiler (#2727?)
                            if ((stringToken.escapes == .None and mem.eql(u8, field.name, key_source_slice)) or (stringToken.escapes == .Some and (field.name.len == stringToken.decodedLength() and encodesTo(field.name, key_source_slice)))) {
                                // if (switch (stringToken.escapes) {
                                //     .None => mem.eql(u8, field.name, key_source_slice),
                                //     .Some => (field.name.len == stringToken.decodedLength() and encodesTo(field.name, key_source_slice)),
                                // }) {
                                if (fields_seen[i]) {
                                    // switch (options.duplicate_field_behavior) {
                                    //     .UseFirst => {},
                                    //     .Error => {},
                                    //     .UseLast => {},
                                    // }
                                    if (options.duplicate_field_behavior == .UseFirst) {
                                        break;
                                    } else if (options.duplicate_field_behavior == .Error) {
                                        return error.DuplicateJSONField;
                                    } else if (options.duplicate_field_behavior == .UseLast) {
                                        // Free the earlier value before overwriting it.
                                        parseFree(field.field_type, @field(r, field.name), options);
                                    }
                                }
                                @field(r, field.name) = try parse(field.field_type, tokens, options);
                                fields_seen[i] = true;
                                found = true;
                                break;
                            }
                        }
                        if (!found) return error.UnknownField;
                    },
                    else => return error.UnexpectedToken,
                }
            }
            // Any field not present in the json must have a default value.
            inline for (structInfo.fields) |field, i| {
                if (!fields_seen[i]) {
                    if (field.default_value) |default| {
                        @field(r, field.name) = default;
                    } else {
                        return error.MissingField;
                    }
                }
            }
            return r;
        },
        .Array => |arrayInfo| {
            switch (token) {
                .ArrayBegin => {
                    var r: T = undefined;
                    var i: usize = 0;
                    errdefer {
                        // Free elements [0..i] that were parsed before the failure.
                        while (true) : (i -= 1) {
                            parseFree(arrayInfo.child, r[i], options);
                            if (i == 0) break;
                        }
                    }
                    while (i < r.len) : (i += 1) {
                        r[i] = try parse(arrayInfo.child, tokens, options);
                    }
                    const tok = (try tokens.next()) orelse return error.UnexpectedEndOfJson;
                    switch (tok) {
                        .ArrayEnd => {},
                        else => return error.UnexpectedToken,
                    }
                    return r;
                },
                .String => |stringToken| {
                    // A json string may fill a fixed-size [N]u8 array.
                    if (arrayInfo.child != u8) return error.UnexpectedToken;
                    var r: T = undefined;
                    const source_slice = stringToken.slice(tokens.slice, tokens.i - 1);
                    switch (stringToken.escapes) {
                        .None => mem.copy(u8, &r, source_slice),
                        .Some => try unescapeString(&r, source_slice),
                    }
                    return r;
                },
                else => return error.UnexpectedToken,
            }
        },
        .Pointer => |ptrInfo| {
            const allocator = options.allocator orelse return error.AllocatorRequired;
            switch (ptrInfo.size) {
                .One => {
                    // BUGFIX: `allocator.create` returns an error union; it must be
                    // unwrapped with `try`. Also free the allocation if the
                    // recursive parse fails, so it cannot leak.
                    const r: T = try allocator.create(ptrInfo.child);
                    errdefer allocator.destroy(r);
                    r.* = try parseInternal(ptrInfo.child, token, tokens, options);
                    return r;
                },
                .Slice => {
                    switch (token) {
                        .ArrayBegin => {
                            var arraylist = std.ArrayList(ptrInfo.child).init(allocator);
                            errdefer {
                                while (arraylist.popOrNull()) |v| {
                                    parseFree(ptrInfo.child, v, options);
                                }
                                arraylist.deinit();
                            }

                            while (true) {
                                const tok = (try tokens.next()) orelse return error.UnexpectedEndOfJson;
                                switch (tok) {
                                    .ArrayEnd => break,
                                    else => {},
                                }

                                try arraylist.ensureCapacity(arraylist.len + 1);
                                const v = try parseInternal(ptrInfo.child, tok, tokens, options);
                                arraylist.appendAssumeCapacity(v);
                            }
                            return arraylist.toOwnedSlice();
                        },
                        .String => |stringToken| {
                            // A json string may fill a []u8 slice.
                            if (ptrInfo.child != u8) return error.UnexpectedToken;
                            const source_slice = stringToken.slice(tokens.slice, tokens.i - 1);
                            switch (stringToken.escapes) {
                                .None => return mem.dupe(allocator, u8, source_slice),
                                .Some => |some_escapes| {
                                    const output = try allocator.alloc(u8, stringToken.decodedLength());
                                    errdefer allocator.free(output);
                                    try unescapeString(output, source_slice);
                                    return output;
                                },
                            }
                        },
                        else => return error.UnexpectedToken,
                    }
                },
                else => @compileError("Unable to parse into type '" ++ @typeName(T) ++ "'"),
            }
        },
        else => @compileError("Unable to parse into type '" ++ @typeName(T) ++ "'"),
    }
    unreachable;
}
/// Parses the json document in `tokens` into a value of type `T`.
pub fn parse(comptime T: type, tokens: *TokenStream, options: ParseOptions) !T {
    // Pull the first token ourselves; parseInternal consumes the rest.
    const first_token = (try tokens.next()) orelse return error.UnexpectedEndOfJson;
    return parseInternal(T, first_token, tokens, options);
}
/// Releases resources created by `parse`.
/// Should be called with the same type and `ParseOptions` that were passed to `parse`
pub fn parseFree(comptime T: type, value: T, options: ParseOptions) void {
    switch (@typeInfo(T)) {
        // Scalar types own no heap memory; nothing to free.
        .Bool, .Float, .ComptimeFloat, .Int, .ComptimeInt, .Enum => {},
        .Optional => {
            if (value) |v| {
                return parseFree(@TypeOf(v), v, options);
            }
        },
        .Union => |unionInfo| {
            if (unionInfo.tag_type) |UnionTagType| {
                // Free only the active field of the tagged union.
                inline for (unionInfo.fields) |u_field| {
                    if (@enumToInt(@as(UnionTagType, value)) == u_field.enum_field.?.value) {
                        parseFree(u_field.field_type, @field(value, u_field.name), options);
                        break;
                    }
                }
            } else {
                // `parse` never produces untagged unions.
                unreachable;
            }
        },
        .Struct => |structInfo| {
            inline for (structInfo.fields) |field| {
                parseFree(field.field_type, @field(value, field.name), options);
            }
        },
        .Array => |arrayInfo| {
            for (value) |v| {
                parseFree(arrayInfo.child, v, options);
            }
        },
        .Pointer => |ptrInfo| {
            // Pointers are only produced when an allocator was supplied to `parse`.
            const allocator = options.allocator orelse unreachable;
            switch (ptrInfo.size) {
                .One => {
                    parseFree(ptrInfo.child, value.*, options);
                    // BUGFIX: was `allocator.destroy(v)` — `v` is not in scope
                    // in this branch; the single-item pointer itself is `value`.
                    allocator.destroy(value);
                },
                .Slice => {
                    for (value) |v| {
                        parseFree(ptrInfo.child, v, options);
                    }
                    allocator.free(value);
                },
                else => unreachable,
            }
        },
        else => unreachable,
    }
}
test "parse" {
    // booleans
    testing.expectEqual(false, try parse(bool, &TokenStream.init("false"), ParseOptions{}));
    testing.expectEqual(true, try parse(bool, &TokenStream.init("true"), ParseOptions{}));
    // integers, including overflow detection
    testing.expectEqual(@as(u1, 1), try parse(u1, &TokenStream.init("1"), ParseOptions{}));
    testing.expectError(error.Overflow, parse(u1, &TokenStream.init("50"), ParseOptions{}));
    testing.expectEqual(@as(u64, 42), try parse(u64, &TokenStream.init("42"), ParseOptions{}));
    // floats
    testing.expectEqual(@as(f64, 42), try parse(f64, &TokenStream.init("42.0"), ParseOptions{}));
    // optionals accept null or the child type's json form
    testing.expectEqual(@as(?bool, null), try parse(?bool, &TokenStream.init("null"), ParseOptions{}));
    testing.expectEqual(@as(?bool, true), try parse(?bool, &TokenStream.init("true"), ParseOptions{}));
    // fixed-size [N]u8 arrays parse from either a string or an array of bytes
    testing.expectEqual(@as([3]u8, "foo".*), try parse([3]u8, &TokenStream.init("\"foo\""), ParseOptions{}));
    testing.expectEqual(@as([3]u8, "foo".*), try parse([3]u8, &TokenStream.init("[102, 111, 111]"), ParseOptions{}));
}
test "parse into enum" {
    const T = extern enum {
        Foo = 42,
        Bar,
        @"with\\escape",
    };
    // match by name or by integer tag value
    testing.expectEqual(@as(T, .Foo), try parse(T, &TokenStream.init("\"Foo\""), ParseOptions{}));
    testing.expectEqual(@as(T, .Foo), try parse(T, &TokenStream.init("42"), ParseOptions{}));
    // names containing escapes are compared against their decoded form
    testing.expectEqual(@as(T, .@"with\\escape"), try parse(T, &TokenStream.init("\"with\\\\escape\""), ParseOptions{}));
    // unknown tag values and unknown names are rejected
    testing.expectError(error.InvalidEnumTag, parse(T, &TokenStream.init("5"), ParseOptions{}));
    testing.expectError(error.InvalidEnumTag, parse(T, &TokenStream.init("\"Qux\""), ParseOptions{}));
}
test "parse into that allocates a slice" {
    // Without an allocator, slice targets must be refused.
    testing.expectError(error.AllocatorRequired, parse([]u8, &TokenStream.init("\"foo\""), ParseOptions{}));

    const alloc_options = ParseOptions{ .allocator = testing.allocator };
    {
        // from a JSON string
        const result = try parse([]u8, &TokenStream.init("\"foo\""), alloc_options);
        defer parseFree([]u8, result, alloc_options);
        testing.expectEqualSlices(u8, "foo", result);
    }
    {
        // from a JSON array of byte values
        const result = try parse([]u8, &TokenStream.init("[102, 111, 111]"), alloc_options);
        defer parseFree([]u8, result, alloc_options);
        testing.expectEqualSlices(u8, "foo", result);
    }
    {
        // escapes are decoded into the allocated copy
        const result = try parse([]u8, &TokenStream.init("\"with\\\\escape\""), alloc_options);
        defer parseFree([]u8, result, alloc_options);
        testing.expectEqualSlices(u8, "with\\escape", result);
    }
}
test "parse into tagged union" {
    {
        // "1.5" can only be a float, so the float member is selected.
        const T = union(enum) {
            int: i32,
            float: f64,
            string: []const u8,
        };
        testing.expectEqual(T{ .float = 1.5 }, try parse(T, &TokenStream.init("1.5"), ParseOptions{}));
    }

    { // if union matches string member, fails with NoUnionMembersMatched rather than AllocatorRequired
        // Note that this behaviour wasn't necessarily by design, but was
        // what fell out of the implementation and may result in interesting
        // API breakage if changed
        const T = union(enum) {
            int: i32,
            float: f64,
            string: []const u8,
        };
        testing.expectError(error.NoUnionMembersMatched, parse(T, &TokenStream.init("\"foo\""), ParseOptions{}));
    }

    { // failing allocations should be bubbled up instantly without trying next member
        // FailingAllocator with a budget of 0 makes the very first allocation fail.
        var fail_alloc = testing.FailingAllocator.init(testing.allocator, 0);
        const options = ParseOptions{ .allocator = &fail_alloc.allocator };
        const T = union(enum) {
            // both fields here match the input
            string: []const u8,
            array: [3]u8,
        };
        testing.expectError(error.OutOfMemory, parse(T, &TokenStream.init("[1,2,3]"), options));
    }

    {
        // if multiple matches possible, takes first option
        const T = union(enum) {
            x: u8,
            y: u8,
        };
        testing.expectEqual(T{ .x = 42 }, try parse(T, &TokenStream.init("42"), ParseOptions{}));
    }
}
test "parseFree descends into tagged union" {
    // tagged unions are broken on arm64: https://github.com/ziglang/zig/issues/4492
    if (std.builtin.arch == .aarch64) return error.SkipZigTest;
    // Budget of exactly 1 allocation: parse may allocate the string once and
    // no more; the deallocation counter below verifies parseFree released it.
    var fail_alloc = testing.FailingAllocator.init(testing.allocator, 1);
    const options = ParseOptions{ .allocator = &fail_alloc.allocator };
    const T = union(enum) {
        int: i32,
        float: f64,
        string: []const u8,
    };
    // use a string with unicode escape so we know result can't be a reference to global constant
    const r = try parse(T, &TokenStream.init("\"with\\u0105unicode\""), options);
    testing.expectEqual(@TagType(T).string, @as(@TagType(T), r));
    testing.expectEqualSlices(u8, "withąunicode", r.string);
    // Nothing freed yet...
    testing.expectEqual(@as(usize, 0), fail_alloc.deallocations);
    parseFree(T, r, options);
    // ...and exactly one free after parseFree walked into the union payload.
    testing.expectEqual(@as(usize, 1), fail_alloc.deallocations);
}
test "parse into struct with no fields" {
    const Empty = struct {};
    // An empty JSON object maps onto a fieldless struct.
    testing.expectEqual(Empty{}, try parse(Empty, &TokenStream.init("{}"), ParseOptions{}));
}
test "parse into struct with misc fields" {
    // parse() recurses heavily at comptime for this type; raise the quota.
    @setEvalBranchQuota(10000);
    // Slice-typed fields require an allocator. testing.allocator also
    // verifies that the deferred parseFree below releases everything.
    const options = ParseOptions{ .allocator = testing.allocator };
    const T = struct {
        int: i64,
        float: f64,
        @"with\\escape": bool,
        @"withąunicode😂": bool,
        language: []const u8,
        optional: ?bool,
        // absent from the JSON below, so the default must be kept
        default_field: i32 = 42,
        static_array: [3]f64,
        dynamic_array: []f64,

        const Bar = struct {
            nested: []const u8,
        };
        complex: Bar,

        const Baz = struct {
            foo: []const u8,
        };
        veryComplex: []Baz,

        const Union = union(enum) {
            x: u8,
            float: f64,
            string: []const u8,
        };
        a_union: Union,
    };
    const r = try parse(T, &TokenStream.init(
        \\{
        \\  "int": 420,
        \\  "float": 3.14,
        \\  "with\\escape": true,
        \\  "with\u0105unicode\ud83d\ude02": false,
        \\  "language": "zig",
        \\  "optional": null,
        \\  "static_array": [66.6, 420.420, 69.69],
        \\  "dynamic_array": [66.6, 420.420, 69.69],
        \\  "complex": {
        \\    "nested": "zig"
        \\  },
        \\  "veryComplex": [
        \\    {
        \\      "foo": "zig"
        \\    }, {
        \\      "foo": "rocks"
        \\    }
        \\  ],
        \\  "a_union": 100000
        \\}
    ), options);
    defer parseFree(T, r, options);
    testing.expectEqual(@as(i64, 420), r.int);
    testing.expectEqual(@as(f64, 3.14), r.float);
    // Field names are unescaped/decoded before matching against Zig fields.
    testing.expectEqual(true, r.@"with\\escape");
    testing.expectEqual(false, r.@"withąunicode😂");
    testing.expectEqualSlices(u8, "zig", r.language);
    testing.expectEqual(@as(?bool, null), r.optional);
    // not in the JSON input: default value applies
    testing.expectEqual(@as(i32, 42), r.default_field);
    testing.expectEqual(@as(f64, 66.6), r.static_array[0]);
    testing.expectEqual(@as(f64, 420.420), r.static_array[1]);
    testing.expectEqual(@as(f64, 69.69), r.static_array[2]);
    testing.expectEqual(@as(usize, 3), r.dynamic_array.len);
    testing.expectEqual(@as(f64, 66.6), r.dynamic_array[0]);
    testing.expectEqual(@as(f64, 420.420), r.dynamic_array[1]);
    testing.expectEqual(@as(f64, 69.69), r.dynamic_array[2]);
    testing.expectEqualSlices(u8, r.complex.nested, "zig");
    testing.expectEqualSlices(u8, "zig", r.veryComplex[0].foo);
    testing.expectEqualSlices(u8, "rocks", r.veryComplex[1].foo);
    // 100000 does not fit in u8, so the union resolves to its float member.
    testing.expectEqual(T.Union{ .float = 100000 }, r.a_union);
}
/// A non-stream JSON parser which constructs a tree of Value's.
pub const Parser = struct {
allocator: *Allocator,
@ -1688,3 +2241,269 @@ test "string copy option" {
}
testing.expect(found_nocopy);
}
/// Options controlling `stringify` output. Currently empty: output is always
/// minified (no whitespace) and '/' is always escaped in strings.
pub const StringifyOptions = struct {
    // TODO: indentation options?
    // TODO: make escaping '/' in strings optional?
    // TODO: allow picking if []u8 is string or array?
};
/// Writes `value` as JSON text by calling `output(context, bytes)` for each
/// chunk of the serialization (same sink style as `std.fmt`).
///
/// Supported: bools, integers, floats, optionals, structs, tagged unions,
/// pointers, slices, and arrays. Enums and untagged unions are only
/// serializable if the type declares its own `jsonStringify` method, which
/// (for enums, unions, and structs) takes over serialization entirely.
///
/// `u8` slices holding valid UTF-8 are written as JSON strings with '"',
/// '\\', '/', control characters, and all non-ASCII code points escaped;
/// any other slice or array is written as a JSON array.
pub fn stringify(
    value: var,
    options: StringifyOptions,
    context: var,
    comptime Errors: type,
    comptime output: fn (@TypeOf(context), []const u8) Errors!void,
) Errors!void {
    const T = @TypeOf(value);
    switch (@typeInfo(T)) {
        .Float, .ComptimeFloat => {
            // Floats always use scientific notation, e.g. "4.2e+01".
            return std.fmt.formatFloatScientific(value, std.fmt.FormatOptions{}, context, Errors, output);
        },
        .Int, .ComptimeInt => {
            // Plain decimal rendering.
            return std.fmt.formatIntValue(value, "", std.fmt.FormatOptions{}, context, Errors, output);
        },
        .Bool => {
            return output(context, if (value) "true" else "false");
        },
        .Optional => {
            if (value) |payload| {
                return try stringify(payload, options, context, Errors, output);
            } else {
                return output(context, "null");
            }
        },
        .Enum => {
            // Enums have no default JSON representation here; a custom
            // jsonStringify method is required.
            if (comptime std.meta.trait.hasFn("jsonStringify")(T)) {
                return value.jsonStringify(options, context, Errors, output);
            }
            @compileError("Unable to stringify enum '" ++ @typeName(T) ++ "'");
        },
        .Union => {
            // A custom jsonStringify method takes precedence.
            if (comptime std.meta.trait.hasFn("jsonStringify")(T)) {
                return value.jsonStringify(options, context, Errors, output);
            }
            const info = @typeInfo(T).Union;
            if (info.tag_type) |UnionTagType| {
                // Emit only the active member's payload; the tag name itself
                // does not appear in the output.
                inline for (info.fields) |u_field| {
                    if (@enumToInt(@as(UnionTagType, value)) == u_field.enum_field.?.value) {
                        return try stringify(@field(value, u_field.name), options, context, Errors, output);
                    }
                }
            } else {
                @compileError("Unable to stringify untagged union '" ++ @typeName(T) ++ "'");
            }
        },
        .Struct => |S| {
            // A custom jsonStringify method takes precedence.
            if (comptime std.meta.trait.hasFn("jsonStringify")(T)) {
                return value.jsonStringify(options, context, Errors, output);
            }
            try output(context, "{");
            // Comptime flag tracking whether any field has been emitted yet,
            // so commas go only *between* fields (void fields are skipped).
            comptime var field_output = false;
            inline for (S.fields) |Field, field_i| {
                // don't include void fields
                if (Field.field_type == void) continue;
                if (!field_output) {
                    field_output = true;
                } else {
                    try output(context, ",");
                }
                // Field name is serialized as a JSON string, then the value.
                try stringify(Field.name, options, context, Errors, output);
                try output(context, ":");
                try stringify(@field(value, Field.name), options, context, Errors, output);
            }
            try output(context, "}");
            return;
        },
        .Pointer => |ptr_info| switch (ptr_info.size) {
            .One => {
                // TODO: avoid loops?
                return try stringify(value.*, options, context, Errors, output);
            },
            // TODO: .Many when there is a sentinel (waiting for https://github.com/ziglang/zig/pull/3972)
            .Slice => {
                // Valid-UTF-8 byte slices become JSON strings; anything else
                // falls through to the generic array path below.
                if (ptr_info.child == u8 and std.unicode.utf8ValidateSlice(value)) {
                    try output(context, "\"");
                    var i: usize = 0;
                    while (i < value.len) : (i += 1) {
                        switch (value[i]) {
                            // normal ascii characters
                            0x20...0x21, 0x23...0x2E, 0x30...0x5B, 0x5D...0x7F => try output(context, value[i .. i + 1]),
                            // control characters with short escapes
                            '\\' => try output(context, "\\\\"),
                            '\"' => try output(context, "\\\""),
                            '/' => try output(context, "\\/"),
                            0x8 => try output(context, "\\b"),
                            0xC => try output(context, "\\f"),
                            '\n' => try output(context, "\\n"),
                            '\r' => try output(context, "\\r"),
                            '\t' => try output(context, "\\t"),
                            else => {
                                // Everything else (remaining control chars and
                                // all non-ASCII) is \u-escaped. The slice was
                                // validated above, so decoding cannot fail.
                                const ulen = std.unicode.utf8ByteSequenceLength(value[i]) catch unreachable;
                                const codepoint = std.unicode.utf8Decode(value[i .. i + ulen]) catch unreachable;
                                if (codepoint <= 0xFFFF) {
                                    // If the character is in the Basic Multilingual Plane (U+0000 through U+FFFF),
                                    // then it may be represented as a six-character sequence: a reverse solidus, followed
                                    // by the lowercase letter u, followed by four hexadecimal digits that encode the character's code point.
                                    try output(context, "\\u");
                                    try std.fmt.formatIntValue(codepoint, "x", std.fmt.FormatOptions{ .width = 4, .fill = '0' }, context, Errors, output);
                                } else {
                                    // To escape an extended character that is not in the Basic Multilingual Plane,
                                    // the character is represented as a 12-character sequence, encoding the UTF-16 surrogate pair.
                                    const high = @intCast(u16, (codepoint - 0x10000) >> 10) + 0xD800;
                                    const low = @intCast(u16, codepoint & 0x3FF) + 0xDC00;
                                    try output(context, "\\u");
                                    try std.fmt.formatIntValue(high, "x", std.fmt.FormatOptions{ .width = 4, .fill = '0' }, context, Errors, output);
                                    try output(context, "\\u");
                                    try std.fmt.formatIntValue(low, "x", std.fmt.FormatOptions{ .width = 4, .fill = '0' }, context, Errors, output);
                                }
                                // Skip the continuation bytes of the decoded
                                // sequence (the loop increment adds the +1).
                                i += ulen - 1;
                            },
                        }
                    }
                    try output(context, "\"");
                    return;
                }

                // Generic slice: a JSON array of the elements.
                try output(context, "[");
                for (value) |x, i| {
                    if (i != 0) {
                        try output(context, ",");
                    }
                    try stringify(x, options, context, Errors, output);
                }
                try output(context, "]");
                return;
            },
            else => @compileError("Unable to stringify type '" ++ @typeName(T) ++ "'"),
        },
        .Array => |info| {
            // Delegate to the slice path.
            return try stringify(value[0..], options, context, Errors, output);
        },
        else => @compileError("Unable to stringify type '" ++ @typeName(T) ++ "'"),
    }
    // Every prong above either returns or is a compile error.
    unreachable;
}
/// Serializes `value` with `stringify` and asserts the produced text matches
/// `expected` exactly, chunk by chunk.
///
/// Errors:
///  - error.TooMuchData   — stringify wrote more bytes than expected
///  - error.DifferentData — a chunk differed from the expected text
///  - error.NotEnoughData — stringify finished before producing all of it
fn teststringify(expected: []const u8, value: var) !void {
    const TestStringifyContext = struct {
        // The suffix of `expected` that has not been matched yet.
        expected_remaining: []const u8,

        // Output sink handed to stringify; consumes one chunk per call.
        fn testStringifyWrite(context: *@This(), bytes: []const u8) !void {
            if (context.expected_remaining.len < bytes.len) {
                std.debug.warn(
                    \\====== expected this output: =========
                    \\{}
                    \\======== instead found this: =========
                    \\{}
                    \\======================================
                , .{
                    context.expected_remaining,
                    bytes,
                });
                return error.TooMuchData;
            }
            if (!mem.eql(u8, context.expected_remaining[0..bytes.len], bytes)) {
                std.debug.warn(
                    \\====== expected this output: =========
                    \\{}
                    \\======== instead found this: =========
                    \\{}
                    \\======================================
                , .{
                    context.expected_remaining[0..bytes.len],
                    bytes,
                });
                return error.DifferentData;
            }
            context.expected_remaining = context.expected_remaining[bytes.len..];
        }
    };
    // (removed an unused `var buf: [100]u8 = undefined;` local)
    var context = TestStringifyContext{ .expected_remaining = expected };
    try stringify(value, StringifyOptions{}, &context, error{
        TooMuchData,
        DifferentData,
    }, TestStringifyContext.testStringifyWrite);
    if (context.expected_remaining.len > 0) return error.NotEnoughData;
}
test "stringify basic types" {
    // booleans
    try teststringify("false", false);
    try teststringify("true", true);
    // null optionals, regardless of the payload type
    try teststringify("null", @as(?u8, null));
    try teststringify("null", @as(?*u32, null));
    // integers render in plain decimal
    try teststringify("42", 42);
    try teststringify("42", @as(u8, 42));
    try teststringify("42", @as(u128, 42));
    // floats always use scientific notation
    try teststringify("4.2e+01", 42.0);
    try teststringify("4.2e+01", @as(f32, 42));
    try teststringify("4.2e+01", @as(f64, 42));
}
test "stringify string" {
    try teststringify("\"hello\"", "hello");
    // control characters with dedicated short escapes
    try teststringify("\"with\\nescapes\\r\"", "with\nescapes\r");
    // remaining control characters and all non-ASCII become \uXXXX escapes
    try teststringify("\"with unicode\\u0001\"", "with unicode\u{1}");
    try teststringify("\"with unicode\\u0080\"", "with unicode\u{80}");
    try teststringify("\"with unicode\\u00ff\"", "with unicode\u{FF}");
    try teststringify("\"with unicode\\u0100\"", "with unicode\u{100}");
    try teststringify("\"with unicode\\u0800\"", "with unicode\u{800}");
    try teststringify("\"with unicode\\u8000\"", "with unicode\u{8000}");
    try teststringify("\"with unicode\\ud799\"", "with unicode\u{D799}");
    // code points beyond the BMP are encoded as UTF-16 surrogate pairs
    try teststringify("\"with unicode\\ud800\\udc00\"", "with unicode\u{10000}");
    try teststringify("\"with unicode\\udbff\\udfff\"", "with unicode\u{10FFFF}");
}
test "stringify tagged unions" {
    const U = union(enum) {
        Foo: u32,
        Bar: bool,
    };
    // Only the active payload is emitted; the tag name never appears.
    try teststringify("42", U{ .Foo = 42 });
}
test "stringify struct" {
    const T = struct {
        foo: u32,
    };
    // Field names become JSON object keys.
    try teststringify("{\"foo\":42}", T{ .foo = 42 });
}
test "stringify struct with void field" {
    const T = struct {
        foo: u32,
        bar: void = {},
    };
    // void fields are omitted from the output entirely
    try teststringify("{\"foo\":42}", T{ .foo = 42 });
}
test "stringify array of structs" {
    const Elem = struct {
        foo: u32,
    };
    // Arrays serialize as JSON arrays of their elements.
    const items = [_]Elem{
        Elem{ .foo = 42 },
        Elem{ .foo = 100 },
        Elem{ .foo = 1000 },
    };
    try teststringify("[{\"foo\":42},{\"foo\":100},{\"foo\":1000}]", items);
}
test "stringify struct with custom stringifier" {
    // A declared jsonStringify method takes full control of serialization;
    // note the `foo` field never appears in the expected output.
    try teststringify("[\"something special\",42]", struct {
        foo: u32,
        const Self = @This();
        pub fn jsonStringify(
            value: Self,
            options: StringifyOptions,
            context: var,
            comptime Errors: type,
            comptime output: fn (@TypeOf(context), []const u8) Errors!void,
        ) !void {
            try output(context, "[\"something special\",");
            // Custom stringifiers may recurse into the generic stringify.
            try stringify(42, options, context, Errors, output);
            try output(context, "]");
        }
    }{ .foo = 42 });
}

View File

@ -1,6 +1,4 @@
const builtin = @import("builtin");
const std = @import("std.zig");
const TypeId = builtin.TypeId;
const assert = std.debug.assert;
const testing = std.testing;
@ -89,7 +87,7 @@ pub const snan = @import("math/nan.zig").snan;
pub const inf = @import("math/inf.zig").inf;
pub fn approxEq(comptime T: type, x: T, y: T, epsilon: T) bool {
assert(@typeId(T) == TypeId.Float);
assert(@typeInfo(T) == .Float);
return fabs(x - y) < epsilon;
}
@ -198,7 +196,7 @@ test "" {
}
pub fn floatMantissaBits(comptime T: type) comptime_int {
assert(@typeId(T) == builtin.TypeId.Float);
assert(@typeInfo(T) == .Float);
return switch (T.bit_count) {
16 => 10,
@ -211,7 +209,7 @@ pub fn floatMantissaBits(comptime T: type) comptime_int {
}
pub fn floatExponentBits(comptime T: type) comptime_int {
assert(@typeId(T) == builtin.TypeId.Float);
assert(@typeInfo(T) == .Float);
return switch (T.bit_count) {
16 => 5,
@ -446,7 +444,7 @@ pub fn Log2Int(comptime T: type) type {
count += 1;
}
return @IntType(false, count);
return std.meta.IntType(false, count);
}
pub fn IntFittingRange(comptime from: comptime_int, comptime to: comptime_int) type {
@ -462,7 +460,7 @@ pub fn IntFittingRange(comptime from: comptime_int, comptime to: comptime_int) t
if (is_signed) {
magnitude_bits += 1;
}
return @IntType(is_signed, magnitude_bits);
return std.meta.IntType(is_signed, magnitude_bits);
}
test "math.IntFittingRange" {
@ -526,7 +524,7 @@ fn testOverflow() void {
pub fn absInt(x: var) !@TypeOf(x) {
const T = @TypeOf(x);
comptime assert(@typeId(T) == builtin.TypeId.Int); // must pass an integer to absInt
comptime assert(@typeInfo(T) == .Int); // must pass an integer to absInt
comptime assert(T.is_signed); // must pass a signed integer to absInt
if (x == minInt(@TypeOf(x))) {
@ -560,7 +558,7 @@ fn testAbsFloat() void {
pub fn divTrunc(comptime T: type, numerator: T, denominator: T) !T {
@setRuntimeSafety(false);
if (denominator == 0) return error.DivisionByZero;
if (@typeId(T) == builtin.TypeId.Int and T.is_signed and numerator == minInt(T) and denominator == -1) return error.Overflow;
if (@typeInfo(T) == .Int and T.is_signed and numerator == minInt(T) and denominator == -1) return error.Overflow;
return @divTrunc(numerator, denominator);
}
@ -581,7 +579,7 @@ fn testDivTrunc() void {
pub fn divFloor(comptime T: type, numerator: T, denominator: T) !T {
@setRuntimeSafety(false);
if (denominator == 0) return error.DivisionByZero;
if (@typeId(T) == builtin.TypeId.Int and T.is_signed and numerator == minInt(T) and denominator == -1) return error.Overflow;
if (@typeInfo(T) == .Int and T.is_signed and numerator == minInt(T) and denominator == -1) return error.Overflow;
return @divFloor(numerator, denominator);
}
@ -602,7 +600,7 @@ fn testDivFloor() void {
pub fn divExact(comptime T: type, numerator: T, denominator: T) !T {
@setRuntimeSafety(false);
if (denominator == 0) return error.DivisionByZero;
if (@typeId(T) == builtin.TypeId.Int and T.is_signed and numerator == minInt(T) and denominator == -1) return error.Overflow;
if (@typeInfo(T) == .Int and T.is_signed and numerator == minInt(T) and denominator == -1) return error.Overflow;
const result = @divTrunc(numerator, denominator);
if (result * denominator != numerator) return error.UnexpectedRemainder;
return result;
@ -676,13 +674,13 @@ pub fn absCast(x: var) t: {
if (@TypeOf(x) == comptime_int) {
break :t comptime_int;
} else {
break :t @IntType(false, @TypeOf(x).bit_count);
break :t std.meta.IntType(false, @TypeOf(x).bit_count);
}
} {
if (@TypeOf(x) == comptime_int) {
return if (x < 0) -x else x;
}
const uint = @IntType(false, @TypeOf(x).bit_count);
const uint = std.meta.IntType(false, @TypeOf(x).bit_count);
if (x >= 0) return @intCast(uint, x);
return @intCast(uint, -(x + 1)) + 1;
@ -703,10 +701,10 @@ test "math.absCast" {
/// Returns the negation of the integer parameter.
/// Result is a signed integer.
pub fn negateCast(x: var) !@IntType(true, @TypeOf(x).bit_count) {
pub fn negateCast(x: var) !std.meta.IntType(true, @TypeOf(x).bit_count) {
if (@TypeOf(x).is_signed) return negate(x);
const int = @IntType(true, @TypeOf(x).bit_count);
const int = std.meta.IntType(true, @TypeOf(x).bit_count);
if (x > -minInt(int)) return error.Overflow;
if (x == -minInt(int)) return minInt(int);
@ -727,8 +725,8 @@ test "math.negateCast" {
/// Cast an integer to a different integer type. If the value doesn't fit,
/// return an error.
pub fn cast(comptime T: type, x: var) (error{Overflow}!T) {
comptime assert(@typeId(T) == builtin.TypeId.Int); // must pass an integer
comptime assert(@typeId(@TypeOf(x)) == builtin.TypeId.Int); // must pass an integer
comptime assert(@typeInfo(T) == .Int); // must pass an integer
comptime assert(@typeInfo(@TypeOf(x)) == .Int); // must pass an integer
if (maxInt(@TypeOf(x)) > maxInt(T) and x > maxInt(T)) {
return error.Overflow;
} else if (minInt(@TypeOf(x)) < minInt(T) and x < minInt(T)) {
@ -792,11 +790,11 @@ fn testFloorPowerOfTwo() void {
/// Returns the next power of two (if the value is not already a power of two).
/// Only unsigned integers can be used. Zero is not an allowed input.
/// Result is a type with 1 more bit than the input type.
pub fn ceilPowerOfTwoPromote(comptime T: type, value: T) @IntType(T.is_signed, T.bit_count + 1) {
comptime assert(@typeId(T) == builtin.TypeId.Int);
pub fn ceilPowerOfTwoPromote(comptime T: type, value: T) std.meta.IntType(T.is_signed, T.bit_count + 1) {
comptime assert(@typeInfo(T) == .Int);
comptime assert(!T.is_signed);
assert(value != 0);
comptime const PromotedType = @IntType(T.is_signed, T.bit_count + 1);
comptime const PromotedType = std.meta.IntType(T.is_signed, T.bit_count + 1);
comptime const shiftType = std.math.Log2Int(PromotedType);
return @as(PromotedType, 1) << @intCast(shiftType, T.bit_count - @clz(T, value - 1));
}
@ -805,9 +803,9 @@ pub fn ceilPowerOfTwoPromote(comptime T: type, value: T) @IntType(T.is_signed, T
/// Only unsigned integers can be used. Zero is not an allowed input.
/// If the value doesn't fit, returns an error.
pub fn ceilPowerOfTwo(comptime T: type, value: T) (error{Overflow}!T) {
comptime assert(@typeId(T) == builtin.TypeId.Int);
comptime assert(@typeInfo(T) == .Int);
comptime assert(!T.is_signed);
comptime const PromotedType = @IntType(T.is_signed, T.bit_count + 1);
comptime const PromotedType = std.meta.IntType(T.is_signed, T.bit_count + 1);
comptime const overflowBit = @as(PromotedType, 1) << T.bit_count;
var x = ceilPowerOfTwoPromote(T, value);
if (overflowBit & x != 0) {
@ -878,10 +876,10 @@ test "std.math.log2_int_ceil" {
pub fn lossyCast(comptime T: type, value: var) T {
switch (@typeInfo(@TypeOf(value))) {
builtin.TypeId.Int => return @intToFloat(T, value),
builtin.TypeId.Float => return @floatCast(T, value),
builtin.TypeId.ComptimeInt => return @as(T, value),
builtin.TypeId.ComptimeFloat => return @as(T, value),
.Int => return @intToFloat(T, value),
.Float => return @floatCast(T, value),
.ComptimeInt => return @as(T, value),
.ComptimeFloat => return @as(T, value),
else => @compileError("bad type"),
}
}
@ -949,8 +947,8 @@ test "max value type" {
testing.expect(x == 2147483647);
}
pub fn mulWide(comptime T: type, a: T, b: T) @IntType(T.is_signed, T.bit_count * 2) {
const ResultInt = @IntType(T.is_signed, T.bit_count * 2);
pub fn mulWide(comptime T: type, a: T, b: T) std.meta.IntType(T.is_signed, T.bit_count * 2) {
const ResultInt = std.meta.IntType(T.is_signed, T.bit_count * 2);
return @as(ResultInt, a) * @as(ResultInt, b);
}

View File

@ -1,5 +1,4 @@
const std = @import("../../std.zig");
const builtin = @import("builtin");
const debug = std.debug;
const testing = std.testing;
const math = std.math;
@ -9,10 +8,8 @@ const ArrayList = std.ArrayList;
const maxInt = std.math.maxInt;
const minInt = std.math.minInt;
const TypeId = builtin.TypeId;
pub const Limb = usize;
pub const DoubleLimb = @IntType(false, 2 * Limb.bit_count);
pub const DoubleLimb = std.meta.IntType(false, 2 * Limb.bit_count);
pub const Log2Limb = math.Log2Int(Limb);
comptime {
@ -270,8 +267,8 @@ pub const Int = struct {
const T = @TypeOf(value);
switch (@typeInfo(T)) {
TypeId.Int => |info| {
const UT = if (T.is_signed) @IntType(false, T.bit_count - 1) else T;
.Int => |info| {
const UT = if (T.is_signed) std.meta.IntType(false, T.bit_count - 1) else T;
try self.ensureCapacity(@sizeOf(UT) / @sizeOf(Limb));
self.metadata = 0;
@ -294,7 +291,7 @@ pub const Int = struct {
}
}
},
TypeId.ComptimeInt => {
.ComptimeInt => {
comptime var w_value = if (value < 0) -value else value;
const req_limbs = @divFloor(math.log2(w_value), Limb.bit_count) + 1;
@ -332,9 +329,9 @@ pub const Int = struct {
///
/// Returns an error if self cannot be narrowed into the requested type without truncation.
pub fn to(self: Int, comptime T: type) ConvertError!T {
switch (@typeId(T)) {
TypeId.Int => {
const UT = @IntType(false, T.bit_count);
switch (@typeInfo(T)) {
.Int => {
const UT = std.meta.IntType(false, T.bit_count);
if (self.bitCountTwosComp() > T.bit_count) {
return error.TargetTooSmall;

View File

@ -1,5 +1,4 @@
const std = @import("../../std.zig");
const builtin = @import("builtin");
const debug = std.debug;
const math = std.math;
const mem = std.mem;
@ -7,8 +6,6 @@ const testing = std.testing;
const Allocator = mem.Allocator;
const ArrayList = std.ArrayList;
const TypeId = builtin.TypeId;
const bn = @import("int.zig");
const Limb = bn.Limb;
const DoubleLimb = bn.DoubleLimb;
@ -129,9 +126,9 @@ pub const Rational = struct {
/// completely represent the provided float.
pub fn setFloat(self: *Rational, comptime T: type, f: T) !void {
// Translated from golang.go/src/math/big/rat.go.
debug.assert(@typeId(T) == builtin.TypeId.Float);
debug.assert(@typeInfo(T) == .Float);
const UnsignedIntType = @IntType(false, T.bit_count);
const UnsignedIntType = std.meta.IntType(false, T.bit_count);
const f_bits = @bitCast(UnsignedIntType, f);
const exponent_bits = math.floatExponentBits(T);
@ -187,10 +184,10 @@ pub const Rational = struct {
pub fn toFloat(self: Rational, comptime T: type) !T {
// Translated from golang.go/src/math/big/rat.go.
// TODO: Indicate whether the result is not exact.
debug.assert(@typeId(T) == builtin.TypeId.Float);
debug.assert(@typeInfo(T) == .Float);
const fsize = T.bit_count;
const BitReprType = @IntType(false, T.bit_count);
const BitReprType = std.meta.IntType(false, T.bit_count);
const msize = math.floatMantissaBits(T);
const msize1 = msize + 1;
@ -465,7 +462,7 @@ pub const Rational = struct {
}
};
const SignedDoubleLimb = @IntType(true, DoubleLimb.bit_count);
const SignedDoubleLimb = std.meta.IntType(true, DoubleLimb.bit_count);
fn gcd(rma: *Int, x: Int, y: Int) !void {
rma.assertWritable();
@ -653,7 +650,7 @@ test "big.rational gcd one large" {
}
fn extractLowBits(a: Int, comptime T: type) T {
testing.expect(@typeId(T) == builtin.TypeId.Int);
testing.expect(@typeInfo(T) == .Int);
if (T.bit_count <= Limb.bit_count) {
return @truncate(T, a.limbs[0]);

View File

@ -44,7 +44,7 @@ const pi4c = 2.69515142907905952645E-15;
const m4pi = 1.273239544735162542821171882678754627704620361328125;
fn cos_(comptime T: type, x_: T) T {
const I = @IntType(true, T.bit_count);
const I = std.meta.IntType(true, T.bit_count);
var x = x_;
if (math.isNan(x) or math.isInf(x)) {

View File

@ -7,8 +7,6 @@
const std = @import("../std.zig");
const math = std.math;
const expect = std.testing.expect;
const builtin = @import("builtin");
const TypeId = builtin.TypeId;
/// Returns the natural logarithm of x.
///
@ -19,21 +17,21 @@ const TypeId = builtin.TypeId;
/// - ln(nan) = nan
pub fn ln(x: var) @TypeOf(x) {
const T = @TypeOf(x);
switch (@typeId(T)) {
TypeId.ComptimeFloat => {
switch (@typeInfo(T)) {
.ComptimeFloat => {
return @as(comptime_float, ln_64(x));
},
TypeId.Float => {
.Float => {
return switch (T) {
f32 => ln_32(x),
f64 => ln_64(x),
else => @compileError("ln not implemented for " ++ @typeName(T)),
};
},
TypeId.ComptimeInt => {
.ComptimeInt => {
return @as(comptime_int, math.floor(ln_64(@as(f64, x))));
},
TypeId.Int => {
.Int => {
return @as(T, math.floor(ln_64(@as(f64, x))));
},
else => @compileError("ln not implemented for " ++ @typeName(T)),

View File

@ -6,8 +6,6 @@
const std = @import("../std.zig");
const math = std.math;
const builtin = @import("builtin");
const TypeId = builtin.TypeId;
const expect = std.testing.expect;
/// Returns the logarithm of x for the provided base.
@ -16,24 +14,24 @@ pub fn log(comptime T: type, base: T, x: T) T {
return math.log2(x);
} else if (base == 10) {
return math.log10(x);
} else if ((@typeId(T) == TypeId.Float or @typeId(T) == TypeId.ComptimeFloat) and base == math.e) {
} else if ((@typeInfo(T) == .Float or @typeInfo(T) == .ComptimeFloat) and base == math.e) {
return math.ln(x);
}
const float_base = math.lossyCast(f64, base);
switch (@typeId(T)) {
TypeId.ComptimeFloat => {
switch (@typeInfo(T)) {
.ComptimeFloat => {
return @as(comptime_float, math.ln(@as(f64, x)) / math.ln(float_base));
},
TypeId.ComptimeInt => {
.ComptimeInt => {
return @as(comptime_int, math.floor(math.ln(@as(f64, x)) / math.ln(float_base)));
},
builtin.TypeId.Int => {
.Int => {
// TODO implement integer log without using float math
return @floatToInt(T, math.floor(math.ln(@intToFloat(f64, x)) / math.ln(float_base)));
},
builtin.TypeId.Float => {
.Float => {
switch (T) {
f32 => return @floatCast(f32, math.ln(@as(f64, x)) / math.ln(float_base)),
f64 => return math.ln(x) / math.ln(float_base),

View File

@ -7,8 +7,6 @@
const std = @import("../std.zig");
const math = std.math;
const testing = std.testing;
const builtin = @import("builtin");
const TypeId = builtin.TypeId;
const maxInt = std.math.maxInt;
/// Returns the base-10 logarithm of x.
@ -20,21 +18,21 @@ const maxInt = std.math.maxInt;
/// - log10(nan) = nan
pub fn log10(x: var) @TypeOf(x) {
const T = @TypeOf(x);
switch (@typeId(T)) {
TypeId.ComptimeFloat => {
switch (@typeInfo(T)) {
.ComptimeFloat => {
return @as(comptime_float, log10_64(x));
},
TypeId.Float => {
.Float => {
return switch (T) {
f32 => log10_32(x),
f64 => log10_64(x),
else => @compileError("log10 not implemented for " ++ @typeName(T)),
};
},
TypeId.ComptimeInt => {
.ComptimeInt => {
return @as(comptime_int, math.floor(log10_64(@as(f64, x))));
},
TypeId.Int => {
.Int => {
return @floatToInt(T, math.floor(log10_64(@intToFloat(f64, x))));
},
else => @compileError("log10 not implemented for " ++ @typeName(T)),

View File

@ -7,8 +7,6 @@
const std = @import("../std.zig");
const math = std.math;
const expect = std.testing.expect;
const builtin = @import("builtin");
const TypeId = builtin.TypeId;
const maxInt = std.math.maxInt;
/// Returns the base-2 logarithm of x.
@ -20,18 +18,18 @@ const maxInt = std.math.maxInt;
/// - log2(nan) = nan
pub fn log2(x: var) @TypeOf(x) {
const T = @TypeOf(x);
switch (@typeId(T)) {
TypeId.ComptimeFloat => {
switch (@typeInfo(T)) {
.ComptimeFloat => {
return @as(comptime_float, log2_64(x));
},
TypeId.Float => {
.Float => {
return switch (T) {
f32 => log2_32(x),
f64 => log2_64(x),
else => @compileError("log2 not implemented for " ++ @typeName(T)),
};
},
TypeId.ComptimeInt => comptime {
.ComptimeInt => comptime {
var result = 0;
var x_shifted = x;
while (b: {
@ -40,7 +38,7 @@ pub fn log2(x: var) @TypeOf(x) {
}) : (result += 1) {}
return result;
},
TypeId.Int => {
.Int => {
return math.log2_int(T, x);
},
else => @compileError("log2 not implemented for " ++ @typeName(T)),

View File

@ -145,7 +145,7 @@ pub fn pow(comptime T: type, x: T, y: T) T {
var xe = r2.exponent;
var x1 = r2.significand;
var i = @floatToInt(@IntType(true, T.bit_count), yi);
var i = @floatToInt(std.meta.IntType(true, T.bit_count), yi);
while (i != 0) : (i >>= 1) {
const overflow_shift = math.floatExponentBits(T) + 1;
if (xe < -(1 << overflow_shift) or (1 << overflow_shift) < xe) {

View File

@ -45,7 +45,7 @@ const pi4c = 2.69515142907905952645E-15;
const m4pi = 1.273239544735162542821171882678754627704620361328125;
fn sin_(comptime T: type, x_: T) T {
const I = @IntType(true, T.bit_count);
const I = std.meta.IntType(true, T.bit_count);
var x = x_;
if (x == 0 or math.isNan(x)) {

View File

@ -31,7 +31,7 @@ pub fn sqrt(x: var) Sqrt(@TypeOf(x)) {
}
}
fn sqrt_int(comptime T: type, value: T) @IntType(false, T.bit_count / 2) {
fn sqrt_int(comptime T: type, value: T) std.meta.IntType(false, T.bit_count / 2) {
var op = value;
var res: T = 0;
var one: T = 1 << (T.bit_count - 2);
@ -50,7 +50,7 @@ fn sqrt_int(comptime T: type, value: T) @IntType(false, T.bit_count / 2) {
one >>= 2;
}
const ResultType = @IntType(false, T.bit_count / 2);
const ResultType = std.meta.IntType(false, T.bit_count / 2);
return @intCast(ResultType, res);
}
@ -66,7 +66,7 @@ test "math.sqrt_int" {
/// Returns the return type `sqrt` will return given an operand of type `T`.
pub fn Sqrt(comptime T: type) type {
return switch (@typeInfo(T)) {
.Int => |int| @IntType(false, int.bits / 2),
.Int => |int| std.meta.IntType(false, int.bits / 2),
else => T,
};
}

View File

@ -38,7 +38,7 @@ const pi4c = 2.69515142907905952645E-15;
const m4pi = 1.273239544735162542821171882678754627704620361328125;
fn tan_(comptime T: type, x_: T) T {
const I = @IntType(true, T.bit_count);
const I = std.meta.IntType(true, T.bit_count);
var x = x_;
if (x == 0 or math.isNan(x)) {

View File

@ -132,7 +132,7 @@ pub const Allocator = struct {
// their own frame with @Frame(func).
return @intToPtr([*]T, @ptrToInt(byte_slice.ptr))[0..n];
} else {
return @bytesToSlice(T, @alignCast(a, byte_slice));
return mem.bytesAsSlice(T, @alignCast(a, byte_slice));
}
}
@ -173,7 +173,7 @@ pub const Allocator = struct {
return @as([*]align(new_alignment) T, undefined)[0..0];
}
const old_byte_slice = @sliceToBytes(old_mem);
const old_byte_slice = mem.sliceAsBytes(old_mem);
const byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory;
// Note: can't set shrunk memory to undefined as memory shouldn't be modified on realloc failure
const byte_slice = try self.reallocFn(self, old_byte_slice, Slice.alignment, byte_count, new_alignment);
@ -181,7 +181,7 @@ pub const Allocator = struct {
if (new_n > old_mem.len) {
@memset(byte_slice.ptr + old_byte_slice.len, undefined, byte_slice.len - old_byte_slice.len);
}
return @bytesToSlice(T, @alignCast(new_alignment, byte_slice));
return mem.bytesAsSlice(T, @alignCast(new_alignment, byte_slice));
}
/// Prefer calling realloc to shrink if you can tolerate failure, such as
@ -221,18 +221,18 @@ pub const Allocator = struct {
// new_n <= old_mem.len and the multiplication didn't overflow for that operation.
const byte_count = @sizeOf(T) * new_n;
const old_byte_slice = @sliceToBytes(old_mem);
const old_byte_slice = mem.sliceAsBytes(old_mem);
@memset(old_byte_slice.ptr + byte_count, undefined, old_byte_slice.len - byte_count);
const byte_slice = self.shrinkFn(self, old_byte_slice, Slice.alignment, byte_count, new_alignment);
assert(byte_slice.len == byte_count);
return @bytesToSlice(T, @alignCast(new_alignment, byte_slice));
return mem.bytesAsSlice(T, @alignCast(new_alignment, byte_slice));
}
/// Free an array allocated with `alloc`. To free a single item,
/// see `destroy`.
pub fn free(self: *Allocator, memory: var) void {
const Slice = @typeInfo(@TypeOf(memory)).Pointer;
const bytes = @sliceToBytes(memory);
const bytes = mem.sliceAsBytes(memory);
const bytes_len = bytes.len + if (Slice.sentinel != null) @sizeOf(Slice.child) else 0;
if (bytes_len == 0) return;
const non_const_ptr = @intToPtr([*]u8, @ptrToInt(bytes.ptr));
@ -276,18 +276,67 @@ pub fn set(comptime T: type, dest: []T, value: T) void {
d.* = value;
}
/// Generally, Zig users are encouraged to explicitly initialize all fields of a struct explicitly rather than using this function.
/// However, it is recognized that there are sometimes use cases for initializing all fields to a "zero" value. For example, when
/// interfacing with a C API where this practice is more common and relied upon. If you are performing code review and see this
/// function used, examine closely - it may be a code smell.
/// This can be used to zero initialize a any type for which it makes sense. Structs will be initialized recursively.
pub fn zeroes(comptime T: type) T {
    switch (@typeInfo(T)) {
        .ComptimeInt, .Int, .ComptimeFloat, .Float => {
            return @as(T, 0);
        },
        .Enum, .EnumLiteral => {
            return @intToEnum(T, 0);
        },
        .Void => {
            return {};
        },
        .Bool => {
            return false;
        },
        .Optional, .Null => {
            return null;
        },
        .Struct => |struct_info| {
            if (@sizeOf(T) == 0) return T{};
            if (comptime meta.containerLayout(T) == .Extern) {
                // Extern layout may contain padding bytes; zero the raw memory.
                var item: T = undefined;
                @memset(@ptrCast([*]u8, &item), 0, @sizeOf(T));
                return item;
            } else {
                // Non-extern layout: zero each field recursively.
                var structure: T = undefined;
                inline for (struct_info.fields) |field| {
                    @field(structure, field.name) = zeroes(@TypeOf(@field(structure, field.name)));
                }
                return structure;
            }
        },
        .Pointer => |ptr_info| {
            switch (ptr_info.size) {
                .Slice => {
                    // "Zero" slice is the empty slice.
                    return &[_]ptr_info.child{};
                },
                .C => {
                    // C pointers are nullable; zero means null.
                    return null;
                },
                .One, .Many => {
                    @compileError("Can't set a non nullable pointer to zero.");
                },
            }
        },
        .Array => |info| {
            var array: T = undefined;
            for (array) |*element| {
                element.* = zeroes(info.child);
            }
            return array;
        },
        .Vector, .ErrorUnion, .ErrorSet, .Union, .Fn, .BoundFn, .Type, .NoReturn, .Undefined, .Opaque, .Frame, .AnyFrame => {
            @compileError("Can't set a " ++ @typeName(T) ++ " to zero.");
        },
    }
}
test "mem.zeroes" {
@ -301,6 +350,62 @@ test "mem.zeroes" {
testing.expect(a.x == 0);
testing.expect(a.y == 10);
const ZigStruct = struct {
const IntegralTypes = struct {
integer_0: i0,
integer_8: i8,
integer_16: i16,
integer_32: i32,
integer_64: i64,
integer_128: i128,
unsigned_0: u0,
unsigned_8: u8,
unsigned_16: u16,
unsigned_32: u32,
unsigned_64: u64,
unsigned_128: u128,
float_32: f32,
float_64: f64,
};
integral_types: IntegralTypes,
const Pointers = struct {
optional: ?*u8,
c_pointer: [*c]u8,
slice: []u8,
};
pointers: Pointers,
array: [2]u32,
optional_int: ?u8,
empty: void,
};
const b = zeroes(ZigStruct);
testing.expectEqual(@as(i8, 0), b.integral_types.integer_0);
testing.expectEqual(@as(i8, 0), b.integral_types.integer_8);
testing.expectEqual(@as(i16, 0), b.integral_types.integer_16);
testing.expectEqual(@as(i32, 0), b.integral_types.integer_32);
testing.expectEqual(@as(i64, 0), b.integral_types.integer_64);
testing.expectEqual(@as(i128, 0), b.integral_types.integer_128);
testing.expectEqual(@as(u8, 0), b.integral_types.unsigned_0);
testing.expectEqual(@as(u8, 0), b.integral_types.unsigned_8);
testing.expectEqual(@as(u16, 0), b.integral_types.unsigned_16);
testing.expectEqual(@as(u32, 0), b.integral_types.unsigned_32);
testing.expectEqual(@as(u64, 0), b.integral_types.unsigned_64);
testing.expectEqual(@as(u128, 0), b.integral_types.unsigned_128);
testing.expectEqual(@as(f32, 0), b.integral_types.float_32);
testing.expectEqual(@as(f64, 0), b.integral_types.float_64);
testing.expectEqual(@as(?*u8, null), b.pointers.optional);
testing.expectEqual(@as([*c]u8, null), b.pointers.c_pointer);
testing.expectEqual(@as([]u8, &[_]u8{}), b.pointers.slice);
for (b.array) |e| {
testing.expectEqual(@as(u32, 0), e);
}
testing.expectEqual(@as(?u8, null), b.optional_int);
}
pub fn secureZero(comptime T: type, s: []T) void {
@ -387,13 +492,21 @@ pub fn allEqual(comptime T: type, slice: []const T, scalar: T) bool {
return true;
}
/// Copies `m` to newly allocated memory. Caller owns the memory.
pub fn dupe(allocator: *Allocator, comptime T: type, m: []const T) ![]T {
    const new_buf = try allocator.alloc(T, m.len);
    copy(T, new_buf, m);
    return new_buf;
}
/// Allocates a copy of `m` with an extra zero sentinel element appended.
/// Caller owns the returned memory.
pub fn dupeZ(allocator: *Allocator, comptime T: type, m: []const T) ![:0]T {
    var buf = try allocator.alloc(T, m.len + 1);
    for (m) |item, i| {
        buf[i] = item;
    }
    buf[m.len] = 0;
    return buf[0..m.len :0];
}
/// Remove values from the beginning of a slice.
pub fn trimLeft(comptime T: type, slice: []const T, values_to_strip: []const T) []const T {
var begin: usize = 0;
@ -700,7 +813,7 @@ pub fn writeIntSliceLittle(comptime T: type, buffer: []u8, value: T) void {
assert(buffer.len >= @divExact(T.bit_count, 8));
// TODO I want to call writeIntLittle here but comptime eval facilities aren't good enough
const uint = @IntType(false, T.bit_count);
const uint = std.meta.IntType(false, T.bit_count);
var bits = @truncate(uint, value);
for (buffer) |*b| {
b.* = @truncate(u8, bits);
@ -717,7 +830,7 @@ pub fn writeIntSliceBig(comptime T: type, buffer: []u8, value: T) void {
assert(buffer.len >= @divExact(T.bit_count, 8));
// TODO I want to call writeIntBig here but comptime eval facilities aren't good enough
const uint = @IntType(false, T.bit_count);
const uint = std.meta.IntType(false, T.bit_count);
var bits = @truncate(uint, value);
var index: usize = buffer.len;
while (index != 0) {
@ -1478,6 +1591,162 @@ test "bytesToValue" {
testing.expect(deadbeef == @as(u32, 0xDEADBEEF));
}
//TODO copy also is_volatile, etc. I tried to use @typeInfo, modify child type, use @Type, but ran into issues.
/// Computes the return type of `bytesAsSlice`: a slice of `T` carrying the
/// alignment and constness of the incoming byte pointer type.
fn BytesAsSliceReturnType(comptime T: type, comptime bytesType: type) type {
    // Accept only a byte slice ([]u8, possibly const) or a pointer to a u8 array.
    if (!(trait.isSlice(bytesType) and meta.Child(bytesType) == u8) and !(trait.isPtrTo(.Array)(bytesType) and meta.Child(meta.Child(bytesType)) == u8)) {
        @compileError("expected []u8 or *[_]u8, passed " ++ @typeName(bytesType));
    }
    // For pointer-to-array inputs the length is comptime-known, so reject byte
    // counts that do not divide evenly into elements of T.
    if (trait.isPtrTo(.Array)(bytesType) and @typeInfo(meta.Child(bytesType)).Array.len % @sizeOf(T) != 0) {
        @compileError("number of bytes in " ++ @typeName(bytesType) ++ " is not divisible by size of " ++ @typeName(T));
    }
    // Propagate the source pointer's alignment and constness to the result type.
    const alignment = meta.alignment(bytesType);
    return if (trait.isConstPtr(bytesType)) []align(alignment) const T else []align(alignment) T;
}
/// Reinterprets a slice of bytes (`[]u8` or `*[N]u8`) as a slice of `T`,
/// preserving alignment and constness. The byte length must divide evenly
/// by `@sizeOf(T)` (checked by `@divExact`).
pub fn bytesAsSlice(comptime T: type, bytes: var) BytesAsSliceReturnType(T, @TypeOf(bytes)) {
    // Coerce a pointer-to-array argument into a slice first.
    const bytesSlice = if (comptime trait.isPtrTo(.Array)(@TypeOf(bytes))) bytes[0..] else bytes;

    // let's not give an undefined pointer to @ptrCast
    // it may be equal to zero and fail a null check
    if (bytesSlice.len == 0) {
        return &[0]T{};
    }

    const bytesType = @TypeOf(bytesSlice);
    const alignment = comptime meta.alignment(bytesType);

    // Cast through a many-pointer that keeps the source's alignment/constness.
    const castTarget = if (comptime trait.isConstPtr(bytesType)) [*]align(alignment) const T else [*]align(alignment) T;

    return @ptrCast(castTarget, bytesSlice.ptr)[0..@divExact(bytes.len, @sizeOf(T))];
}
// Four bytes reinterpret as two u16 values; bigToNative normalizes the
// big-endian byte order used in the literal.
test "bytesAsSlice" {
    const bytes = [_]u8{ 0xDE, 0xAD, 0xBE, 0xEF };
    const slice = bytesAsSlice(u16, bytes[0..]);
    testing.expect(slice.len == 2);
    testing.expect(bigToNative(u16, slice[0]) == 0xDEAD);
    testing.expect(bigToNative(u16, slice[1]) == 0xBEEF);
}
// The result slice type must carry the source array's natural alignment.
test "bytesAsSlice keeps pointer alignment" {
    var bytes = [_]u8{ 0x01, 0x02, 0x03, 0x04 };
    const numbers = bytesAsSlice(u32, bytes[0..]);
    comptime testing.expect(@TypeOf(numbers) == []align(@alignOf(@TypeOf(bytes))) u32);
}
// A one-byte packed struct can be viewed through a one-byte buffer.
test "bytesAsSlice on a packed struct" {
    const F = packed struct {
        a: u8,
    };

    var b = [1]u8{9};
    var f = bytesAsSlice(F, &b);
    testing.expect(f[0].a == 9);
}
// An explicitly 4-aligned byte buffer may coerce directly to a plain []u32.
test "bytesAsSlice with specified alignment" {
    var bytes align(4) = [_]u8{
        0x33,
        0x33,
        0x33,
        0x33,
    };
    const slice: []u32 = std.mem.bytesAsSlice(u32, bytes[0..]);
    testing.expect(slice[0] == 0x33333333);
}
//TODO copy also is_volatile, etc. I tried to use @typeInfo, modify child type, use @Type, but ran into issues.
/// Computes the return type of `sliceAsBytes`: a byte slice carrying the
/// alignment and constness of the incoming slice (or pointer-to-array) type.
fn SliceAsBytesReturnType(comptime sliceType: type) type {
    if (!trait.isSlice(sliceType) and !trait.isPtrTo(.Array)(sliceType)) {
        @compileError("expected []T or *[_]T, passed " ++ @typeName(sliceType));
    }
    // Propagate the source pointer's alignment and constness to the result type.
    const alignment = meta.alignment(sliceType);
    return if (trait.isConstPtr(sliceType)) []align(alignment) const u8 else []align(alignment) u8;
}
/// Reinterprets a slice (or pointer-to-array) of any element type as a slice
/// of its raw bytes, preserving alignment and constness.
pub fn sliceAsBytes(slice: var) SliceAsBytesReturnType(@TypeOf(slice)) {
    // Coerce a pointer-to-array argument into a slice first.
    const actualSlice = if (comptime trait.isPtrTo(.Array)(@TypeOf(slice))) slice[0..] else slice;

    // let's not give an undefined pointer to @ptrCast
    // it may be equal to zero and fail a null check
    if (actualSlice.len == 0) {
        return &[0]u8{};
    }

    const sliceType = @TypeOf(actualSlice);
    const alignment = comptime meta.alignment(sliceType);

    // Cast through a many-pointer that keeps the source's alignment/constness.
    const castTarget = if (comptime trait.isConstPtr(sliceType)) [*]align(alignment) const u8 else [*]align(alignment) u8;

    return @ptrCast(castTarget, actualSlice.ptr)[0 .. actualSlice.len * @sizeOf(comptime meta.Child(sliceType))];
}
// Two u16 values view as four bytes whose order depends on host endianness.
test "sliceAsBytes" {
    const bytes = [_]u16{ 0xDEAD, 0xBEEF };
    const slice = sliceAsBytes(bytes[0..]);
    testing.expect(slice.len == 4);
    testing.expect(eql(u8, slice, switch (builtin.endian) {
        .Big => "\xDE\xAD\xBE\xEF",
        .Little => "\xAD\xDE\xEF\xBE",
    }));
}
// Writing through the byte view of a packed struct updates its bit fields;
// which nibble lands in which field depends on host endianness. Exercised
// both at runtime and at comptime.
test "sliceAsBytes packed struct at runtime and comptime" {
    const Foo = packed struct {
        a: u4,
        b: u4,
    };
    const S = struct {
        fn doTheTest() void {
            var foo: Foo = undefined;
            var slice = sliceAsBytes(@as(*[1]Foo, &foo)[0..1]);
            slice[0] = 0x13;
            switch (builtin.endian) {
                .Big => {
                    testing.expect(foo.a == 0x1);
                    testing.expect(foo.b == 0x3);
                },
                .Little => {
                    testing.expect(foo.a == 0x3);
                    testing.expect(foo.b == 0x1);
                },
            }
        }
    };
    S.doTheTest();
    comptime S.doTheTest();
}
// Round-trip: mutations through the byte view are visible in the original
// i32 slice and vice versa. Writing -1 makes all four bytes 0xFF
// (two's complement), regardless of endianness.
test "sliceAsBytes and bytesAsSlice back" {
    testing.expect(@sizeOf(i32) == 4);

    var big_thing_array = [_]i32{ 1, 2, 3, 4 };
    const big_thing_slice: []i32 = big_thing_array[0..];

    const bytes = sliceAsBytes(big_thing_slice);
    testing.expect(bytes.len == 4 * 4);

    bytes[4] = 0;
    bytes[5] = 0;
    bytes[6] = 0;
    bytes[7] = 0;
    testing.expect(big_thing_slice[1] == 0);

    const big_thing_again = bytesAsSlice(i32, bytes);
    testing.expect(big_thing_again[2] == 3);

    big_thing_again[2] = -1;
    testing.expect(bytes[8] == math.maxInt(u8));
    testing.expect(bytes[9] == math.maxInt(u8));
    testing.expect(bytes[10] == math.maxInt(u8));
    testing.expect(bytes[11] == math.maxInt(u8));
}
fn SubArrayPtrReturnType(comptime T: type, comptime length: usize) type {
if (trait.isConstPtr(T))
return *const [length]meta.Child(meta.Child(T));

View File

@ -437,7 +437,7 @@ pub fn eql(a: var, b: @TypeOf(a)) bool {
},
.Pointer => |info| {
return switch (info.size) {
.One, .Many, .C, => a == b,
.One, .Many, .C => a == b,
.Slice => a.ptr == b.ptr and a.len == b.len,
};
},
@ -536,9 +536,8 @@ test "intToEnum with error return" {
pub const IntToEnumError = error{InvalidEnumTag};
pub fn intToEnum(comptime Tag: type, tag_int: var) IntToEnumError!Tag {
comptime var i = 0;
inline while (i != @memberCount(Tag)) : (i += 1) {
const this_tag_value = @field(Tag, @memberName(Tag, i));
inline for (@typeInfo(Tag).Enum.fields) |f| {
const this_tag_value = @field(Tag, f.name);
if (tag_int == @enumToInt(this_tag_value)) {
return this_tag_value;
}
@ -559,7 +558,9 @@ pub fn fieldIndex(comptime T: type, comptime name: []const u8) ?comptime_int {
/// Given a type, reference all the declarations inside, so that the semantic analyzer sees them.
pub fn refAllDecls(comptime T: type) void {
if (!builtin.is_test) return;
_ = declarations(T);
inline for (declarations(T)) |decl| {
_ = decl;
}
}
/// Returns a slice of pointers to public declarations of a namespace.
@ -579,3 +580,12 @@ pub fn declList(comptime Namespace: type, comptime Decl: type) []const *const De
return &array;
}
}
/// Returns the integer type with the given signedness and bit width,
/// e.g. `IntType(false, 8)` is `u8`. Built on `@Type` type reification;
/// replacement for the removed `@IntType` builtin.
pub fn IntType(comptime is_signed: bool, comptime bit_count: u16) type {
    return @Type(TypeInfo{
        .Int = .{
            .is_signed = is_signed,
            .bits = bit_count,
        },
    });
}

View File

@ -1,5 +1,5 @@
const std = @import("../std.zig");
const builtin = @import("builtin");
const builtin = std.builtin;
const mem = std.mem;
const debug = std.debug;
const testing = std.testing;
@ -54,7 +54,7 @@ pub fn hasFn(comptime name: []const u8) TraitFn {
if (!comptime isContainer(T)) return false;
if (!comptime @hasDecl(T, name)) return false;
const DeclType = @TypeOf(@field(T, name));
return @typeId(DeclType) == .Fn;
return @typeInfo(DeclType) == .Fn;
}
};
return Closure.trait;
@ -105,7 +105,7 @@ test "std.meta.trait.hasField" {
pub fn is(comptime id: builtin.TypeId) TraitFn {
const Closure = struct {
pub fn trait(comptime T: type) bool {
return id == @typeId(T);
return id == @typeInfo(T);
}
};
return Closure.trait;
@ -123,7 +123,7 @@ pub fn isPtrTo(comptime id: builtin.TypeId) TraitFn {
const Closure = struct {
pub fn trait(comptime T: type) bool {
if (!comptime isSingleItemPtr(T)) return false;
return id == @typeId(meta.Child(T));
return id == @typeInfo(meta.Child(T));
}
};
return Closure.trait;
@ -135,6 +135,22 @@ test "std.meta.trait.isPtrTo" {
testing.expect(!isPtrTo(.Struct)(**struct {}));
}
/// Returns a trait function that is true for slice types whose child type's
/// `@typeInfo` tag matches `id`; e.g. `isSliceOf(.Struct)` matches `[]S`
/// but not `S` or `[][]S`.
pub fn isSliceOf(comptime id: builtin.TypeId) TraitFn {
    const Closure = struct {
        pub fn trait(comptime T: type) bool {
            if (!comptime isSlice(T)) return false;
            return id == @typeInfo(meta.Child(T));
        }
    };
    return Closure.trait;
}
// Only a single level of slice is matched: the element itself and a
// doubly-nested slice must both be rejected.
test "std.meta.trait.isSliceOf" {
    testing.expect(!isSliceOf(.Struct)(struct {}));
    testing.expect(isSliceOf(.Struct)([]struct {}));
    testing.expect(!isSliceOf(.Struct)([][]struct {}));
}
///////////Strait trait Fns
//@TODO:
@ -269,7 +285,7 @@ test "std.meta.trait.isIndexable" {
}
pub fn isNumber(comptime T: type) bool {
return switch (@typeId(T)) {
return switch (@typeInfo(T)) {
.Int, .Float, .ComptimeInt, .ComptimeFloat => true,
else => false,
};
@ -304,7 +320,7 @@ test "std.meta.trait.isConstPtr" {
}
pub fn isContainer(comptime T: type) bool {
return switch (@typeId(T)) {
return switch (@typeInfo(T)) {
.Struct, .Union, .Enum => true,
else => false,
};

View File

@ -18,7 +18,7 @@ pub const Address = extern union {
in6: os.sockaddr_in6,
un: if (has_unix_sockets) os.sockaddr_un else void,
// TODO this crashed the compiler
// TODO this crashed the compiler. https://github.com/ziglang/zig/issues/3512
//pub const localhost = initIp4(parseIp4("127.0.0.1") catch unreachable, 0);
pub fn parseIp(name: []const u8, port: u16) !Address {
@ -120,7 +120,7 @@ pub const Address = extern union {
ip_slice[10] = 0xff;
ip_slice[11] = 0xff;
const ptr = @sliceToBytes(@as(*const [1]u32, &addr)[0..]);
const ptr = mem.sliceAsBytes(@as(*const [1]u32, &addr)[0..]);
ip_slice[12] = ptr[0];
ip_slice[13] = ptr[1];
@ -164,7 +164,7 @@ pub const Address = extern union {
.addr = undefined,
},
};
const out_ptr = @sliceToBytes(@as(*[1]u32, &result.in.addr)[0..]);
const out_ptr = mem.sliceAsBytes(@as(*[1]u32, &result.in.addr)[0..]);
var x: u8 = 0;
var index: u8 = 0;

View File

@ -70,6 +70,8 @@ else switch (builtin.os) {
pub usingnamespace @import("os/bits.zig");
/// See also `getenv`. Populated by startup code before main().
/// TODO this is a footgun because the value will be undefined when using `zig build-lib`.
/// https://github.com/ziglang/zig/issues/4524
pub var environ: [][*:0]u8 = undefined;
/// Populated by startup code before main().
@ -916,10 +918,17 @@ pub const ExecveError = error{
NameTooLong,
} || UnexpectedError;
/// Deprecated in favor of `execveZ`.
pub const execveC = execveZ;
/// Like `execve` except the parameters are null-terminated,
/// matching the syscall API on all targets. This removes the need for an allocator.
/// This function ignores PATH environment variable. See `execvpeC` for that.
pub fn execveC(path: [*:0]const u8, child_argv: [*:null]const ?[*:0]const u8, envp: [*:null]const ?[*:0]const u8) ExecveError {
/// This function ignores PATH environment variable. See `execvpeZ` for that.
pub fn execveZ(
path: [*:0]const u8,
child_argv: [*:null]const ?[*:0]const u8,
envp: [*:null]const ?[*:0]const u8,
) ExecveError {
switch (errno(system.execve(path, child_argv, envp))) {
0 => unreachable,
EFAULT => unreachable,
@ -942,19 +951,42 @@ pub fn execveC(path: [*:0]const u8, child_argv: [*:null]const ?[*:0]const u8, en
}
}
/// Like `execvpe` except the parameters are null-terminated,
/// matching the syscall API on all targets. This removes the need for an allocator.
/// This function also uses the PATH environment variable to get the full path to the executable.
/// If `file` is an absolute path, this is the same as `execveC`.
pub fn execvpeC(file: [*:0]const u8, child_argv: [*:null]const ?[*:0]const u8, envp: [*:null]const ?[*:0]const u8) ExecveError {
const file_slice = mem.toSliceConst(u8, file);
if (mem.indexOfScalar(u8, file_slice, '/') != null) return execveC(file, child_argv, envp);
/// Deprecated in favor of `execvpeZ`.
pub const execvpeC = execvpeZ;
const PATH = getenv("PATH") orelse "/usr/local/bin:/bin/:/usr/bin";
pub const Arg0Expand = enum {
expand,
no_expand,
};
/// Like `execvpeZ` except if `arg0_expand` is `.expand`, then `argv` is mutable,
/// and `argv[0]` is expanded to be the same absolute path that is passed to the execve syscall.
/// If this function returns with an error, `argv[0]` will be restored to the value it was when it was passed in.
pub fn execvpeZ_expandArg0(
comptime arg0_expand: Arg0Expand,
file: [*:0]const u8,
child_argv: switch (arg0_expand) {
.expand => [*:null]?[*:0]const u8,
.no_expand => [*:null]const ?[*:0]const u8,
},
envp: [*:null]const ?[*:0]const u8,
) ExecveError {
const file_slice = mem.toSliceConst(u8, file);
if (mem.indexOfScalar(u8, file_slice, '/') != null) return execveZ(file, child_argv, envp);
const PATH = getenvZ("PATH") orelse "/usr/local/bin:/bin/:/usr/bin";
var path_buf: [MAX_PATH_BYTES]u8 = undefined;
var it = mem.tokenize(PATH, ":");
var seen_eacces = false;
var err: ExecveError = undefined;
// In case of expanding arg0 we must put it back if we return with an error.
const prev_arg0 = child_argv[0];
defer switch (arg0_expand) {
.expand => child_argv[0] = prev_arg0,
.no_expand => {},
};
while (it.next()) |search_path| {
if (path_buf.len < search_path.len + file_slice.len + 1) return error.NameTooLong;
mem.copy(u8, &path_buf, search_path);
@ -962,7 +994,12 @@ pub fn execvpeC(file: [*:0]const u8, child_argv: [*:null]const ?[*:0]const u8, e
mem.copy(u8, path_buf[search_path.len + 1 ..], file_slice);
const path_len = search_path.len + file_slice.len + 1;
path_buf[path_len] = 0;
err = execveC(path_buf[0..path_len :0].ptr, child_argv, envp);
const full_path = path_buf[0..path_len :0].ptr;
switch (arg0_expand) {
.expand => child_argv[0] = full_path,
.no_expand => {},
}
err = execveZ(full_path, child_argv, envp);
switch (err) {
error.AccessDenied => seen_eacces = true,
error.FileNotFound, error.NotDir => {},
@ -973,13 +1010,24 @@ pub fn execvpeC(file: [*:0]const u8, child_argv: [*:null]const ?[*:0]const u8, e
return err;
}
/// This function must allocate memory to add a null terminating bytes on path and each arg.
/// It must also convert to KEY=VALUE\0 format for environment variables, and include null
/// pointers after the args and after the environment variables.
/// `argv_slice[0]` is the executable path.
/// Like `execvpe` except the parameters are null-terminated,
/// matching the syscall API on all targets. This removes the need for an allocator.
/// This function also uses the PATH environment variable to get the full path to the executable.
pub fn execvpe(
/// If `file` is an absolute path, this is the same as `execveZ`.
pub fn execvpeZ(
file: [*:0]const u8,
argv: [*:null]const ?[*:0]const u8,
envp: [*:null]const ?[*:0]const u8,
) ExecveError {
return execvpeZ_expandArg0(.no_expand, file, argv, envp);
}
/// This is the same as `execvpe` except if the `arg0_expand` parameter is set to `.expand`,
/// then argv[0] will be replaced with the expanded version of it, after resolving in accordance
/// with the PATH environment variable.
pub fn execvpe_expandArg0(
allocator: *mem.Allocator,
arg0_expand: Arg0Expand,
argv_slice: []const []const u8,
env_map: *const std.BufMap,
) (ExecveError || error{OutOfMemory}) {
@ -1004,7 +1052,23 @@ pub fn execvpe(
const envp_buf = try createNullDelimitedEnvMap(allocator, env_map);
defer freeNullDelimitedEnvMap(allocator, envp_buf);
return execvpeC(argv_buf.ptr[0].?, argv_ptr, envp_buf.ptr);
switch (arg0_expand) {
.expand => return execvpeZ_expandArg0(.expand, argv_buf.ptr[0].?, argv_ptr, envp_buf.ptr),
.no_expand => return execvpeZ_expandArg0(.no_expand, argv_buf.ptr[0].?, argv_ptr, envp_buf.ptr),
}
}
/// This function must allocate memory to add a null terminating bytes on path and each arg.
/// It must also convert to KEY=VALUE\0 format for environment variables, and include null
/// pointers after the args and after the environment variables.
/// `argv_slice[0]` is the executable path.
/// This function also uses the PATH environment variable to get the full path to the executable.
pub fn execvpe(
allocator: *mem.Allocator,
argv_slice: []const []const u8,
env_map: *const std.BufMap,
) (ExecveError || error{OutOfMemory}) {
return execvpe_expandArg0(allocator, .no_expand, argv_slice, env_map);
}
pub fn createNullDelimitedEnvMap(allocator: *mem.Allocator, env_map: *const std.BufMap) ![:null]?[*:0]u8 {
@ -1038,9 +1102,37 @@ pub fn freeNullDelimitedEnvMap(allocator: *mem.Allocator, envp_buf: []?[*:0]u8)
}
/// Get an environment variable.
/// See also `getenvC`.
/// TODO make this go through libc when we have it
/// See also `getenvZ`.
pub fn getenv(key: []const u8) ?[]const u8 {
if (builtin.link_libc) {
var small_key_buf: [64]u8 = undefined;
if (key.len < small_key_buf.len) {
mem.copy(u8, &small_key_buf, key);
small_key_buf[key.len] = 0;
const key0 = small_key_buf[0..key.len :0];
return getenvZ(key0);
}
// Search the entire `environ` because we don't have a null terminated pointer.
var ptr = std.c.environ;
while (ptr.*) |line| : (ptr += 1) {
var line_i: usize = 0;
while (line[line_i] != 0 and line[line_i] != '=') : (line_i += 1) {}
const this_key = line[0..line_i];
if (!mem.eql(u8, this_key, key)) continue;
var end_i: usize = line_i;
while (line[end_i] != 0) : (end_i += 1) {}
const value = line[line_i + 1 .. end_i];
return value;
}
return null;
}
if (builtin.os == .windows) {
@compileError("std.os.getenv is unavailable for Windows because environment string is in WTF-16 format. See std.process.getEnvVarOwned for cross-platform API or std.os.getenvW for Windows-specific API.");
}
// TODO see https://github.com/ziglang/zig/issues/4524
for (environ) |ptr| {
var line_i: usize = 0;
while (ptr[line_i] != 0 and ptr[line_i] != '=') : (line_i += 1) {}
@ -1056,16 +1148,50 @@ pub fn getenv(key: []const u8) ?[]const u8 {
return null;
}
/// Deprecated in favor of `getenvZ`.
pub const getenvC = getenvZ;
/// Get an environment variable with a null-terminated name.
/// See also `getenv`.
pub fn getenvC(key: [*:0]const u8) ?[]const u8 {
pub fn getenvZ(key: [*:0]const u8) ?[]const u8 {
if (builtin.link_libc) {
const value = system.getenv(key) orelse return null;
return mem.toSliceConst(u8, value);
}
if (builtin.os == .windows) {
@compileError("std.os.getenvZ is unavailable for Windows because environment string is in WTF-16 format. See std.process.getEnvVarOwned for cross-platform API or std.os.getenvW for Windows-specific API.");
}
return getenv(mem.toSliceConst(u8, key));
}
/// Windows-only. Get an environment variable with a null-terminated, WTF-16 encoded name.
/// Returns a WTF-16 encoded, sentinel-terminated slice of the value, or null if absent.
/// See also `getenv`.
pub fn getenvW(key: [*:0]const u16) ?[:0]const u16 {
    if (builtin.os != .windows) {
        @compileError("std.os.getenvW is a Windows-only API");
    }
    const key_slice = mem.toSliceConst(u16, key);
    // The PEB exposes the environment as consecutive NUL-terminated
    // KEY=VALUE entries, ended by an empty entry.
    const ptr = windows.peb().ProcessParameters.Environment;
    var i: usize = 0;
    while (ptr[i] != 0) {
        const key_start = i;

        // Scan up to '=' (or the end of the entry) to isolate the key.
        while (ptr[i] != 0 and ptr[i] != '=') : (i += 1) {}
        const this_key = ptr[key_start..i];

        if (ptr[i] == '=') i += 1;

        const value_start = i;
        while (ptr[i] != 0) : (i += 1) {}
        const this_value = ptr[value_start..i :0];

        if (mem.eql(u16, key_slice, this_key)) return this_value;

        i += 1; // skip over null byte
    }
    return null;
}
pub const GetCwdError = error{
NameTooLong,
CurrentWorkingDirectoryUnlinked,
@ -1726,7 +1852,7 @@ pub fn isCygwinPty(handle: fd_t) bool {
const name_info = @ptrCast(*const windows.FILE_NAME_INFO, &name_info_bytes[0]);
const name_bytes = name_info_bytes[size .. size + @as(usize, name_info.FileNameLength)];
const name_wide = @bytesToSlice(u16, name_bytes);
const name_wide = mem.bytesAsSlice(u16, name_bytes);
return mem.indexOf(u16, name_wide, &[_]u16{ 'm', 's', 'y', 's', '-' }) != null or
mem.indexOf(u16, name_wide, &[_]u16{ '-', 'p', 't', 'y' }) != null;
}
@ -2452,6 +2578,9 @@ pub const AccessError = error{
InputOutput,
SystemResources,
BadPathName,
FileBusy,
SymLinkLoop,
ReadOnlyFileSystem,
/// On Windows, file paths must be valid Unicode.
InvalidUtf8,
@ -2469,8 +2598,11 @@ pub fn access(path: []const u8, mode: u32) AccessError!void {
return accessC(&path_c, mode);
}
/// Deprecated in favor of `accessZ`.
pub const accessC = accessZ;
/// Same as `access` except `path` is null-terminated.
pub fn accessC(path: [*:0]const u8, mode: u32) AccessError!void {
pub fn accessZ(path: [*:0]const u8, mode: u32) AccessError!void {
if (builtin.os == .windows) {
const path_w = try windows.cStrToPrefixedFileW(path);
_ = try windows.GetFileAttributesW(&path_w);
@ -2479,12 +2611,11 @@ pub fn accessC(path: [*:0]const u8, mode: u32) AccessError!void {
switch (errno(system.access(path, mode))) {
0 => return,
EACCES => return error.PermissionDenied,
EROFS => return error.PermissionDenied,
ELOOP => return error.PermissionDenied,
ETXTBSY => return error.PermissionDenied,
EROFS => return error.ReadOnlyFileSystem,
ELOOP => return error.SymLinkLoop,
ETXTBSY => return error.FileBusy,
ENOTDIR => return error.FileNotFound,
ENOENT => return error.FileNotFound,
ENAMETOOLONG => return error.NameTooLong,
EINVAL => unreachable,
EFAULT => unreachable,
@ -2510,6 +2641,79 @@ pub fn accessW(path: [*:0]const u16, mode: u32) windows.GetFileAttributesError!v
}
}
/// Check user's permissions for a file, based on an open directory handle.
/// TODO currently this ignores `mode` and `flags` on Windows.
pub fn faccessat(dirfd: fd_t, path: []const u8, mode: u32, flags: u32) AccessError!void {
    if (builtin.os == .windows) {
        // Windows needs the NT path prefix and WTF-16 encoding.
        const path_w = try windows.sliceToPrefixedFileW(path);
        return faccessatW(dirfd, &path_w, mode, flags);
    }
    // POSIX path: null-terminate and delegate.
    const path_c = try toPosixPath(path);
    return faccessatZ(dirfd, &path_c, mode, flags);
}
/// Same as `faccessat` except the path parameter is null-terminated.
pub fn faccessatZ(dirfd: fd_t, path: [*:0]const u8, mode: u32, flags: u32) AccessError!void {
    if (builtin.os == .windows) {
        const path_w = try windows.cStrToPrefixedFileW(path);
        return faccessatW(dirfd, &path_w, mode, flags);
    }
    // Map the faccessat errno values onto AccessError.
    switch (errno(system.faccessat(dirfd, path, mode, flags))) {
        0 => return,
        EACCES => return error.PermissionDenied,
        EROFS => return error.ReadOnlyFileSystem,
        ELOOP => return error.SymLinkLoop,
        ETXTBSY => return error.FileBusy,
        ENOTDIR => return error.FileNotFound,
        ENOENT => return error.FileNotFound,
        ENAMETOOLONG => return error.NameTooLong,
        // EINVAL/EFAULT indicate a programmer error (bad flags or pointer),
        // so they are asserted unreachable rather than returned.
        EINVAL => unreachable,
        EFAULT => unreachable,
        EIO => return error.InputOutput,
        ENOMEM => return error.SystemResources,
        else => |err| return unexpectedErrno(err),
    }
}
/// Same as `faccessat` except asserts the target is Windows and the path parameter
/// is NtDll-prefixed, null-terminated, WTF-16 encoded.
/// TODO currently this ignores `mode` and `flags`
pub fn faccessatW(dirfd: fd_t, sub_path_w: [*:0]const u16, mode: u32, flags: u32) AccessError!void {
    // "." and ".." refer to the already-open directory handle, which
    // necessarily exists, so succeed without a syscall.
    if (sub_path_w[0] == '.' and sub_path_w[1] == 0) {
        return;
    }
    if (sub_path_w[0] == '.' and sub_path_w[1] == '.' and sub_path_w[2] == 0) {
        return;
    }

    // UNICODE_STRING lengths are measured in bytes, hence the * 2.
    const path_len_bytes = math.cast(u16, mem.toSliceConst(u16, sub_path_w).len * 2) catch |err| switch (err) {
        error.Overflow => return error.NameTooLong,
    };
    var nt_name = windows.UNICODE_STRING{
        .Length = path_len_bytes,
        .MaximumLength = path_len_bytes,
        .Buffer = @intToPtr([*]u16, @ptrToInt(sub_path_w)),
    };
    var attr = windows.OBJECT_ATTRIBUTES{
        .Length = @sizeOf(windows.OBJECT_ATTRIBUTES),
        // Absolute paths ignore the directory handle; relative paths resolve against it.
        .RootDirectory = if (std.fs.path.isAbsoluteWindowsW(sub_path_w)) null else dirfd,
        .Attributes = 0, // Note we do not use OBJ_CASE_INSENSITIVE here.
        .ObjectName = &nt_name,
        .SecurityDescriptor = null,
        .SecurityQualityOfService = null,
    };
    var basic_info: windows.FILE_BASIC_INFORMATION = undefined;
    // Accessibility is probed by querying the file's attributes and mapping
    // the resulting NTSTATUS.
    switch (windows.ntdll.NtQueryAttributesFile(&attr, &basic_info)) {
        .SUCCESS => return,
        .OBJECT_NAME_NOT_FOUND => return error.FileNotFound,
        .OBJECT_PATH_NOT_FOUND => return error.FileNotFound,
        .INVALID_PARAMETER => unreachable,
        .ACCESS_DENIED => return error.PermissionDenied,
        .OBJECT_PATH_SYNTAX_BAD => unreachable,
        else => |rc| return windows.unexpectedStatus(rc),
    }
}
pub const PipeError = error{
SystemFdQuotaExceeded,
ProcessFdQuotaExceeded,
@ -2844,18 +3048,26 @@ pub fn nanosleep(seconds: u64, nanoseconds: u64) void {
}
pub fn dl_iterate_phdr(
comptime T: type,
callback: extern fn (info: *dl_phdr_info, size: usize, data: ?*T) i32,
data: ?*T,
) isize {
context: var,
comptime Error: type,
comptime callback: fn (info: *dl_phdr_info, size: usize, context: @TypeOf(context)) Error!void,
) Error!void {
const Context = @TypeOf(context);
if (builtin.object_format != .elf)
@compileError("dl_iterate_phdr is not available for this target");
if (builtin.link_libc) {
return system.dl_iterate_phdr(
@ptrCast(std.c.dl_iterate_phdr_callback, callback),
@ptrCast(?*c_void, data),
);
switch (system.dl_iterate_phdr(struct {
fn callbackC(info: *dl_phdr_info, size: usize, data: ?*c_void) callconv(.C) c_int {
const context_ptr = @ptrCast(*const Context, @alignCast(@alignOf(*const Context), data));
callback(info, size, context_ptr.*) catch |err| return @errorToInt(err);
return 0;
}
}.callbackC, @intToPtr(?*c_void, @ptrToInt(&context)))) {
0 => return,
else => |err| return @errSetCast(Error, @intToError(@intCast(u16, err))), // TODO don't hardcode u16
}
}
const elf_base = std.process.getBaseAddress();
@ -2877,11 +3089,10 @@ pub fn dl_iterate_phdr(
.dlpi_phnum = ehdr.e_phnum,
};
return callback(&info, @sizeOf(dl_phdr_info), data);
return callback(&info, @sizeOf(dl_phdr_info), context);
}
// Last return value from the callback function
var last_r: isize = 0;
while (it.next()) |entry| {
var dlpi_phdr: [*]elf.Phdr = undefined;
var dlpi_phnum: u16 = undefined;
@ -2903,11 +3114,8 @@ pub fn dl_iterate_phdr(
.dlpi_phnum = dlpi_phnum,
};
last_r = callback(&info, @sizeOf(dl_phdr_info), data);
if (last_r != 0) break;
try callback(&info, @sizeOf(dl_phdr_info), context);
}
return last_r;
}
pub const ClockGetTimeError = error{UnsupportedClock} || UnexpectedError;
@ -3141,7 +3349,7 @@ pub fn res_mkquery(
// Make a reasonably unpredictable id
var ts: timespec = undefined;
clock_gettime(CLOCK_REALTIME, &ts) catch {};
const UInt = @IntType(false, @TypeOf(ts.tv_nsec).bit_count);
const UInt = std.meta.IntType(false, @TypeOf(ts.tv_nsec).bit_count);
const unsec = @bitCast(UInt, ts.tv_nsec);
const id = @truncate(u32, unsec + unsec / 65536);
q[0] = @truncate(u8, id / 256);

View File

@ -1004,7 +1004,7 @@ pub const dl_phdr_info = extern struct {
pub const CPU_SETSIZE = 128;
pub const cpu_set_t = [CPU_SETSIZE / @sizeOf(usize)]usize;
pub const cpu_count_t = @IntType(false, std.math.log2(CPU_SETSIZE * 8));
pub const cpu_count_t = std.meta.IntType(false, std.math.log2(CPU_SETSIZE * 8));
pub fn CPU_COUNT(set: cpu_set_t) cpu_count_t {
var sum: cpu_count_t = 0;

View File

@ -152,7 +152,7 @@ pub fn setThreadPointer(addr: usize) void {
: [addr] "r" (addr)
);
},
.arm => |arm| {
.arm => {
const rc = std.os.linux.syscall1(std.os.linux.SYS_set_tls, addr);
assert(rc == 0);
},

View File

@ -29,7 +29,7 @@ test "makePath, put some files in it, deleteTree" {
test "access file" {
try fs.makePath(a, "os_test_tmp");
if (File.access("os_test_tmp" ++ fs.path.sep_str ++ "file.txt")) |ok| {
if (fs.cwd().access("os_test_tmp" ++ fs.path.sep_str ++ "file.txt", .{})) |ok| {
@panic("expected error");
} else |err| {
expect(err == error.FileNotFound);
@ -165,16 +165,19 @@ test "sigaltstack" {
// analyzed
const dl_phdr_info = if (@hasDecl(os, "dl_phdr_info")) os.dl_phdr_info else c_void;
fn iter_fn(info: *dl_phdr_info, size: usize, data: ?*usize) callconv(.C) i32 {
if (builtin.os == .windows or builtin.os == .wasi or builtin.os == .macosx)
return 0;
const IterFnError = error{
MissingPtLoadSegment,
MissingLoad,
BadElfMagic,
FailedConsistencyCheck,
};
var counter = data.?;
fn iter_fn(info: *dl_phdr_info, size: usize, counter: *usize) IterFnError!void {
// Count how many libraries are loaded
counter.* += @as(usize, 1);
// The image should contain at least a PT_LOAD segment
if (info.dlpi_phnum < 1) return -1;
if (info.dlpi_phnum < 1) return error.MissingPtLoadSegment;
// Quick & dirty validation of the phdr pointers, make sure we're not
// pointing to some random gibberish
@ -189,17 +192,15 @@ fn iter_fn(info: *dl_phdr_info, size: usize, data: ?*usize) callconv(.C) i32 {
// Find the ELF header
const elf_header = @intToPtr(*elf.Ehdr, reloc_addr - phdr.p_offset);
// Validate the magic
if (!mem.eql(u8, elf_header.e_ident[0..4], "\x7fELF")) return -1;
if (!mem.eql(u8, elf_header.e_ident[0..4], "\x7fELF")) return error.BadElfMagic;
// Consistency check
if (elf_header.e_phnum != info.dlpi_phnum) return -1;
if (elf_header.e_phnum != info.dlpi_phnum) return error.FailedConsistencyCheck;
found_load = true;
break;
}
if (!found_load) return -1;
return 42;
if (!found_load) return error.MissingLoad;
}
test "dl_iterate_phdr" {
@ -207,7 +208,7 @@ test "dl_iterate_phdr" {
return error.SkipZigTest;
var counter: usize = 0;
expect(os.dl_iterate_phdr(usize, iter_fn, &counter) != 0);
try os.dl_iterate_phdr(&counter, IterFnError, iter_fn);
expect(counter != 0);
}
@ -350,3 +351,11 @@ test "mmap" {
try fs.cwd().deleteFile(test_out_file);
}
test "getenv" {
if (builtin.os == .windows) {
expect(os.getenvW(&[_:0]u16{ 'B', 'O', 'G', 'U', 'S', 0x11, 0x22, 0x33, 0x44, 0x55 }) == null);
} else {
expect(os.getenvZ("BOGUSDOESNOTEXISTENVVAR") == null);
}
}

View File

@ -1187,7 +1187,7 @@ pub const RTL_USER_PROCESS_PARAMETERS = extern struct {
DllPath: UNICODE_STRING,
ImagePathName: UNICODE_STRING,
CommandLine: UNICODE_STRING,
Environment: [*]WCHAR,
Environment: [*:0]WCHAR,
dwX: ULONG,
dwY: ULONG,
dwXSize: ULONG,

View File

@ -8,6 +8,12 @@ pub extern "NtDll" fn NtQueryInformationFile(
Length: ULONG,
FileInformationClass: FILE_INFORMATION_CLASS,
) callconv(.Stdcall) NTSTATUS;
pub extern "NtDll" fn NtQueryAttributesFile(
ObjectAttributes: *OBJECT_ATTRIBUTES,
FileAttributes: *FILE_BASIC_INFORMATION,
) callconv(.Stdcall) NTSTATUS;
pub extern "NtDll" fn NtCreateFile(
FileHandle: *HANDLE,
DesiredAccess: ACCESS_MASK,

View File

@ -34,13 +34,13 @@ pub fn PackedIntIo(comptime Int: type, comptime endian: builtin.Endian) type {
//we bitcast the desired Int type to an unsigned version of itself
// to avoid issues with shifting signed ints.
const UnInt = @IntType(false, int_bits);
const UnInt = std.meta.IntType(false, int_bits);
//The maximum container int type
const MinIo = @IntType(false, min_io_bits);
const MinIo = std.meta.IntType(false, min_io_bits);
//The minimum container int type
const MaxIo = @IntType(false, max_io_bits);
const MaxIo = std.meta.IntType(false, max_io_bits);
return struct {
pub fn get(bytes: []const u8, index: usize, bit_offset: u7) Int {
@ -322,7 +322,7 @@ test "PackedIntArray" {
inline while (bits <= 256) : (bits += 1) {
//alternate unsigned and signed
const even = bits % 2 == 0;
const I = @IntType(even, bits);
const I = std.meta.IntType(even, bits);
const PackedArray = PackedIntArray(I, int_count);
const expected_bytes = ((bits * int_count) + 7) / 8;
@ -369,7 +369,7 @@ test "PackedIntSlice" {
inline while (bits <= 256) : (bits += 1) {
//alternate unsigned and signed
const even = bits % 2 == 0;
const I = @IntType(even, bits);
const I = std.meta.IntType(even, bits);
const P = PackedIntSlice(I);
var data = P.init(&buffer, int_count);
@ -399,7 +399,7 @@ test "PackedIntSlice of PackedInt(Array/Slice)" {
comptime var bits = 0;
inline while (bits <= max_bits) : (bits += 1) {
const Int = @IntType(false, bits);
const Int = std.meta.IntType(false, bits);
const PackedArray = PackedIntArray(Int, int_count);
var packed_array = @as(PackedArray, undefined);

View File

@ -1,5 +1,5 @@
const builtin = @import("builtin");
const std = @import("std.zig");
const builtin = std.builtin;
const os = std.os;
const fs = std.fs;
const BufMap = std.BufMap;
@ -31,20 +31,16 @@ test "getCwdAlloc" {
testing.allocator.free(cwd);
}
/// Caller must free result when done.
/// TODO make this go through libc when we have it
/// Caller owns resulting `BufMap`.
pub fn getEnvMap(allocator: *Allocator) !BufMap {
var result = BufMap.init(allocator);
errdefer result.deinit();
if (builtin.os == .windows) {
const ptr = try os.windows.GetEnvironmentStringsW();
defer os.windows.FreeEnvironmentStringsW(ptr);
const ptr = os.windows.peb().ProcessParameters.Environment;
var i: usize = 0;
while (true) {
if (ptr[i] == 0) return result;
while (ptr[i] != 0) {
const key_start = i;
while (ptr[i] != 0 and ptr[i] != '=') : (i += 1) {}
@ -64,6 +60,7 @@ pub fn getEnvMap(allocator: *Allocator) !BufMap {
try result.setMove(key, value);
}
return result;
} else if (builtin.os == .wasi) {
var environ_count: usize = undefined;
var environ_buf_size: usize = undefined;
@ -95,15 +92,29 @@ pub fn getEnvMap(allocator: *Allocator) !BufMap {
}
}
return result;
} else {
for (os.environ) |ptr| {
} else if (builtin.link_libc) {
var ptr = std.c.environ;
while (ptr.*) |line| : (ptr += 1) {
var line_i: usize = 0;
while (ptr[line_i] != 0 and ptr[line_i] != '=') : (line_i += 1) {}
const key = ptr[0..line_i];
while (line[line_i] != 0 and line[line_i] != '=') : (line_i += 1) {}
const key = line[0..line_i];
var end_i: usize = line_i;
while (ptr[end_i] != 0) : (end_i += 1) {}
const value = ptr[line_i + 1 .. end_i];
while (line[end_i] != 0) : (end_i += 1) {}
const value = line[line_i + 1 .. end_i];
try result.set(key, value);
}
return result;
} else {
for (os.environ) |line| {
var line_i: usize = 0;
while (line[line_i] != 0 and line[line_i] != '=') : (line_i += 1) {}
const key = line[0..line_i];
var end_i: usize = line_i;
while (line[end_i] != 0) : (end_i += 1) {}
const value = line[line_i + 1 .. end_i];
try result.set(key, value);
}
@ -125,37 +136,20 @@ pub const GetEnvVarOwnedError = error{
};
/// Caller must free returned memory.
/// TODO make this go through libc when we have it
pub fn getEnvVarOwned(allocator: *mem.Allocator, key: []const u8) GetEnvVarOwnedError![]u8 {
if (builtin.os == .windows) {
const key_with_null = try std.unicode.utf8ToUtf16LeWithNull(allocator, key);
defer allocator.free(key_with_null);
const result_w = blk: {
const key_w = try std.unicode.utf8ToUtf16LeWithNull(allocator, key);
defer allocator.free(key_w);
var buf = try allocator.alloc(u16, 256);
defer allocator.free(buf);
while (true) {
const windows_buf_len = math.cast(os.windows.DWORD, buf.len) catch return error.OutOfMemory;
const result = os.windows.GetEnvironmentVariableW(
key_with_null.ptr,
buf.ptr,
windows_buf_len,
) catch |err| switch (err) {
error.Unexpected => return error.EnvironmentVariableNotFound,
else => |e| return e,
};
if (result > buf.len) {
buf = try allocator.realloc(buf, result);
continue;
}
return std.unicode.utf16leToUtf8Alloc(allocator, buf[0..result]) catch |err| switch (err) {
error.DanglingSurrogateHalf => return error.InvalidUtf8,
error.ExpectedSecondSurrogateHalf => return error.InvalidUtf8,
error.UnexpectedSecondSurrogateHalf => return error.InvalidUtf8,
else => |e| return e,
};
}
break :blk std.os.getenvW(key_w) orelse return error.EnvironmentVariableNotFound;
};
return std.unicode.utf16leToUtf8Alloc(allocator, result_w) catch |err| switch (err) {
error.DanglingSurrogateHalf => return error.InvalidUtf8,
error.ExpectedSecondSurrogateHalf => return error.InvalidUtf8,
error.UnexpectedSecondSurrogateHalf => return error.InvalidUtf8,
else => |e| return e,
};
} else {
const result = os.getenv(key) orelse return error.EnvironmentVariableNotFound;
return mem.dupe(allocator, u8, result);
@ -436,7 +430,7 @@ pub fn argsAlloc(allocator: *mem.Allocator) ![][]u8 {
const buf = try allocator.alignedAlloc(u8, @alignOf([]u8), total_bytes);
errdefer allocator.free(buf);
const result_slice_list = @bytesToSlice([]u8, buf[0..slice_list_bytes]);
const result_slice_list = mem.bytesAsSlice([]u8, buf[0..slice_list_bytes]);
const result_contents = buf[slice_list_bytes..];
mem.copy(u8, result_contents, contents_slice);
@ -613,3 +607,59 @@ pub fn getBaseAddress() usize {
else => @compileError("Unsupported OS"),
}
}
/// Caller owns the result value and each inner slice.
pub fn getSelfExeSharedLibPaths(allocator: *Allocator) error{OutOfMemory}![][:0]u8 {
switch (builtin.link_mode) {
.Static => return &[_][:0]u8{},
.Dynamic => {},
}
const List = std.ArrayList([:0]u8);
switch (builtin.os) {
.linux,
.freebsd,
.netbsd,
.dragonfly,
=> {
var paths = List.init(allocator);
errdefer {
const slice = paths.toOwnedSlice();
for (slice) |item| {
allocator.free(item);
}
allocator.free(slice);
}
try os.dl_iterate_phdr(&paths, error{OutOfMemory}, struct {
fn callback(info: *os.dl_phdr_info, size: usize, list: *List) !void {
const name = info.dlpi_name orelse return;
if (name[0] == '/') {
const item = try mem.dupeZ(list.allocator, u8, mem.toSliceConst(u8, name));
errdefer list.allocator.free(item);
try list.append(item);
}
}
}.callback);
return paths.toOwnedSlice();
},
.macosx, .ios, .watchos, .tvos => {
var paths = List.init(allocator);
errdefer {
const slice = paths.toOwnedSlice();
for (slice) |item| {
allocator.free(item);
}
allocator.free(slice);
}
const img_count = std.c._dyld_image_count();
var i: u32 = 0;
while (i < img_count) : (i += 1) {
const name = std.c._dyld_get_image_name(i);
const item = try mem.dupeZ(allocator, u8, mem.toSliceConst(u8, name));
errdefer allocator.free(item);
try paths.append(item);
}
return paths.toOwnedSlice();
},
else => @compileError("getSelfExeSharedLibPaths unimplemented for this target"),
}
}

View File

@ -45,8 +45,8 @@ pub const Random = struct {
/// Returns a random int `i` such that `0 <= i <= maxInt(T)`.
/// `i` is evenly distributed.
pub fn int(r: *Random, comptime T: type) T {
const UnsignedT = @IntType(false, T.bit_count);
const ByteAlignedT = @IntType(false, @divTrunc(T.bit_count + 7, 8) * 8);
const UnsignedT = std.meta.IntType(false, T.bit_count);
const ByteAlignedT = std.meta.IntType(false, @divTrunc(T.bit_count + 7, 8) * 8);
var rand_bytes: [@sizeOf(ByteAlignedT)]u8 = undefined;
r.bytes(rand_bytes[0..]);
@ -85,9 +85,9 @@ pub const Random = struct {
comptime assert(T.bit_count <= 64); // TODO: workaround: LLVM ERROR: Unsupported library call operation!
assert(0 < less_than);
// Small is typically u32
const Small = @IntType(false, @divTrunc(T.bit_count + 31, 32) * 32);
const Small = std.meta.IntType(false, @divTrunc(T.bit_count + 31, 32) * 32);
// Large is typically u64
const Large = @IntType(false, Small.bit_count * 2);
const Large = std.meta.IntType(false, Small.bit_count * 2);
// adapted from:
// http://www.pcg-random.org/posts/bounded-rands.html
@ -99,7 +99,7 @@ pub const Random = struct {
// TODO: workaround for https://github.com/ziglang/zig/issues/1770
// should be:
// var t: Small = -%less_than;
var t: Small = @bitCast(Small, -%@bitCast(@IntType(true, Small.bit_count), @as(Small, less_than)));
var t: Small = @bitCast(Small, -%@bitCast(std.meta.IntType(true, Small.bit_count), @as(Small, less_than)));
if (t >= less_than) {
t -= less_than;
@ -145,7 +145,7 @@ pub const Random = struct {
assert(at_least < less_than);
if (T.is_signed) {
// Two's complement makes this math pretty easy.
const UnsignedT = @IntType(false, T.bit_count);
const UnsignedT = std.meta.IntType(false, T.bit_count);
const lo = @bitCast(UnsignedT, at_least);
const hi = @bitCast(UnsignedT, less_than);
const result = lo +% r.uintLessThanBiased(UnsignedT, hi -% lo);
@ -163,7 +163,7 @@ pub const Random = struct {
assert(at_least < less_than);
if (T.is_signed) {
// Two's complement makes this math pretty easy.
const UnsignedT = @IntType(false, T.bit_count);
const UnsignedT = std.meta.IntType(false, T.bit_count);
const lo = @bitCast(UnsignedT, at_least);
const hi = @bitCast(UnsignedT, less_than);
const result = lo +% r.uintLessThan(UnsignedT, hi -% lo);
@ -180,7 +180,7 @@ pub const Random = struct {
assert(at_least <= at_most);
if (T.is_signed) {
// Two's complement makes this math pretty easy.
const UnsignedT = @IntType(false, T.bit_count);
const UnsignedT = std.meta.IntType(false, T.bit_count);
const lo = @bitCast(UnsignedT, at_least);
const hi = @bitCast(UnsignedT, at_most);
const result = lo +% r.uintAtMostBiased(UnsignedT, hi -% lo);
@ -198,7 +198,7 @@ pub const Random = struct {
assert(at_least <= at_most);
if (T.is_signed) {
// Two's complement makes this math pretty easy.
const UnsignedT = @IntType(false, T.bit_count);
const UnsignedT = std.meta.IntType(false, T.bit_count);
const lo = @bitCast(UnsignedT, at_least);
const hi = @bitCast(UnsignedT, at_most);
const result = lo +% r.uintAtMost(UnsignedT, hi -% lo);
@ -281,7 +281,7 @@ pub const Random = struct {
/// This function introduces a minor bias.
pub fn limitRangeBiased(comptime T: type, random_int: T, less_than: T) T {
comptime assert(T.is_signed == false);
const T2 = @IntType(false, T.bit_count * 2);
const T2 = std.meta.IntType(false, T.bit_count * 2);
// adapted from:
// http://www.pcg-random.org/posts/bounded-rands.html

View File

@ -96,6 +96,8 @@ pub fn main() !void {
builder.verbose_cimport = true;
} else if (mem.eql(u8, arg, "--verbose-cc")) {
builder.verbose_cc = true;
} else if (mem.eql(u8, arg, "--verbose-llvm-cpu-features")) {
builder.verbose_llvm_cpu_features = true;
} else if (mem.eql(u8, arg, "--")) {
builder.args = argsRest(args, arg_idx);
break;
@ -126,7 +128,7 @@ pub fn main() !void {
}
fn runBuild(builder: *Builder) anyerror!void {
switch (@typeId(@TypeOf(root.build).ReturnType)) {
switch (@typeInfo(@TypeOf(root.build).ReturnType)) {
.Void => root.build(builder),
.ErrorUnion => try root.build(builder),
else => @compileError("expected return type of build to be 'void' or '!void'"),
@ -185,16 +187,17 @@ fn usage(builder: *Builder, already_ran_build: bool, out_stream: var) !void {
try out_stream.write(
\\
\\Advanced Options:
\\ --build-file [file] Override path to build.zig
\\ --cache-dir [path] Override path to zig cache directory
\\ --override-lib-dir [arg] Override path to Zig lib directory
\\ --verbose-tokenize Enable compiler debug output for tokenization
\\ --verbose-ast Enable compiler debug output for parsing into an AST
\\ --verbose-link Enable compiler debug output for linking
\\ --verbose-ir Enable compiler debug output for Zig IR
\\ --verbose-llvm-ir Enable compiler debug output for LLVM IR
\\ --verbose-cimport Enable compiler debug output for C imports
\\ --verbose-cc Enable compiler debug output for C compilation
\\ --build-file [file] Override path to build.zig
\\ --cache-dir [path] Override path to zig cache directory
\\ --override-lib-dir [arg] Override path to Zig lib directory
\\ --verbose-tokenize Enable compiler debug output for tokenization
\\ --verbose-ast Enable compiler debug output for parsing into an AST
\\ --verbose-link Enable compiler debug output for linking
\\ --verbose-ir Enable compiler debug output for Zig IR
\\ --verbose-llvm-ir Enable compiler debug output for LLVM IR
\\ --verbose-cimport Enable compiler debug output for C imports
\\ --verbose-cc Enable compiler debug output for C compilation
\\ --verbose-llvm-cpu-features Enable compiler debug output for LLVM CPU features
\\
);
}

View File

@ -511,7 +511,7 @@ export fn roundf(a: f32) f32 {
fn generic_fmod(comptime T: type, x: T, y: T) T {
@setRuntimeSafety(false);
const uint = @IntType(false, T.bit_count);
const uint = std.meta.IntType(false, T.bit_count);
const log2uint = math.Log2Int(uint);
const digits = if (T == f32) 23 else 52;
const exp_bits = if (T == f32) 9 else 12;

View File

@ -54,21 +54,21 @@ pub fn __aeabi_dsub(a: f64, b: f64) callconv(.AAPCS) f64 {
}
// TODO: restore inline keyword, see: https://github.com/ziglang/zig/issues/2154
fn normalize(comptime T: type, significand: *@IntType(false, T.bit_count)) i32 {
const Z = @IntType(false, T.bit_count);
const S = @IntType(false, T.bit_count - @clz(Z, @as(Z, T.bit_count) - 1));
fn normalize(comptime T: type, significand: *std.meta.IntType(false, T.bit_count)) i32 {
const Z = std.meta.IntType(false, T.bit_count);
const S = std.meta.IntType(false, T.bit_count - @clz(Z, @as(Z, T.bit_count) - 1));
const significandBits = std.math.floatMantissaBits(T);
const implicitBit = @as(Z, 1) << significandBits;
const shift = @clz(@IntType(false, T.bit_count), significand.*) - @clz(Z, implicitBit);
const shift = @clz(std.meta.IntType(false, T.bit_count), significand.*) - @clz(Z, implicitBit);
significand.* <<= @intCast(S, shift);
return 1 - shift;
}
// TODO: restore inline keyword, see: https://github.com/ziglang/zig/issues/2154
fn addXf3(comptime T: type, a: T, b: T) T {
const Z = @IntType(false, T.bit_count);
const S = @IntType(false, T.bit_count - @clz(Z, @as(Z, T.bit_count) - 1));
const Z = std.meta.IntType(false, T.bit_count);
const S = std.meta.IntType(false, T.bit_count - @clz(Z, @as(Z, T.bit_count) - 1));
const typeWidth = T.bit_count;
const significandBits = std.math.floatMantissaBits(T);
@ -182,7 +182,7 @@ fn addXf3(comptime T: type, a: T, b: T) T {
// If partial cancellation occured, we need to left-shift the result
// and adjust the exponent:
if (aSignificand < implicitBit << 3) {
const shift = @intCast(i32, @clz(Z, aSignificand)) - @intCast(i32, @clz(@IntType(false, T.bit_count), implicitBit << 3));
const shift = @intCast(i32, @clz(Z, aSignificand)) - @intCast(i32, @clz(std.meta.IntType(false, T.bit_count), implicitBit << 3));
aSignificand <<= @intCast(S, shift);
aExponent -= shift;
}

View File

@ -22,8 +22,8 @@ const GE = extern enum(i32) {
pub fn cmp(comptime T: type, comptime RT: type, a: T, b: T) RT {
@setRuntimeSafety(builtin.is_test);
const srep_t = @IntType(true, T.bit_count);
const rep_t = @IntType(false, T.bit_count);
const srep_t = std.meta.IntType(true, T.bit_count);
const rep_t = std.meta.IntType(false, T.bit_count);
const significandBits = std.math.floatMantissaBits(T);
const exponentBits = std.math.floatExponentBits(T);
@ -68,7 +68,7 @@ pub fn cmp(comptime T: type, comptime RT: type, a: T, b: T) RT {
pub fn unordcmp(comptime T: type, a: T, b: T) i32 {
@setRuntimeSafety(builtin.is_test);
const rep_t = @IntType(false, T.bit_count);
const rep_t = std.meta.IntType(false, T.bit_count);
const significandBits = std.math.floatMantissaBits(T);
const exponentBits = std.math.floatExponentBits(T);

View File

@ -7,8 +7,8 @@ const builtin = @import("builtin");
pub fn __divdf3(a: f64, b: f64) callconv(.C) f64 {
@setRuntimeSafety(builtin.is_test);
const Z = @IntType(false, f64.bit_count);
const SignedZ = @IntType(true, f64.bit_count);
const Z = std.meta.IntType(false, f64.bit_count);
const SignedZ = std.meta.IntType(true, f64.bit_count);
const typeWidth = f64.bit_count;
const significandBits = std.math.floatMantissaBits(f64);
@ -312,9 +312,9 @@ fn wideMultiply(comptime Z: type, a: Z, b: Z, hi: *Z, lo: *Z) void {
}
}
fn normalize(comptime T: type, significand: *@IntType(false, T.bit_count)) i32 {
fn normalize(comptime T: type, significand: *std.meta.IntType(false, T.bit_count)) i32 {
@setRuntimeSafety(builtin.is_test);
const Z = @IntType(false, T.bit_count);
const Z = std.meta.IntType(false, T.bit_count);
const significandBits = std.math.floatMantissaBits(T);
const implicitBit = @as(Z, 1) << significandBits;

View File

@ -7,7 +7,7 @@ const builtin = @import("builtin");
pub fn __divsf3(a: f32, b: f32) callconv(.C) f32 {
@setRuntimeSafety(builtin.is_test);
const Z = @IntType(false, f32.bit_count);
const Z = std.meta.IntType(false, f32.bit_count);
const typeWidth = f32.bit_count;
const significandBits = std.math.floatMantissaBits(f32);
@ -185,9 +185,9 @@ pub fn __divsf3(a: f32, b: f32) callconv(.C) f32 {
}
}
fn normalize(comptime T: type, significand: *@IntType(false, T.bit_count)) i32 {
fn normalize(comptime T: type, significand: *std.meta.IntType(false, T.bit_count)) i32 {
@setRuntimeSafety(builtin.is_test);
const Z = @IntType(false, T.bit_count);
const Z = std.meta.IntType(false, T.bit_count);
const significandBits = std.math.floatMantissaBits(T);
const implicitBit = @as(Z, 1) << significandBits;

View File

@ -30,11 +30,11 @@ pub fn __aeabi_f2d(arg: f32) callconv(.AAPCS) f64 {
const CHAR_BIT = 8;
fn extendXfYf2(comptime dst_t: type, comptime src_t: type, a: @IntType(false, @typeInfo(src_t).Float.bits)) dst_t {
fn extendXfYf2(comptime dst_t: type, comptime src_t: type, a: std.meta.IntType(false, @typeInfo(src_t).Float.bits)) dst_t {
@setRuntimeSafety(builtin.is_test);
const src_rep_t = @IntType(false, @typeInfo(src_t).Float.bits);
const dst_rep_t = @IntType(false, @typeInfo(dst_t).Float.bits);
const src_rep_t = std.meta.IntType(false, @typeInfo(src_t).Float.bits);
const dst_rep_t = std.meta.IntType(false, @typeInfo(dst_t).Float.bits);
const srcSigBits = std.math.floatMantissaBits(src_t);
const dstSigBits = std.math.floatMantissaBits(dst_t);
const SrcShift = std.math.Log2Int(src_rep_t);

View File

@ -45,7 +45,7 @@ pub fn fixint(comptime fp_t: type, comptime fixint_t: type, a: fp_t) fixint_t {
if (exponent < 0) return 0;
// The unsigned result needs to be large enough to handle an fixint_t or rep_t
const fixuint_t = @IntType(false, fixint_t.bit_count);
const fixuint_t = std.meta.IntType(false, fixint_t.bit_count);
const UintResultType = if (fixint_t.bit_count > rep_t.bit_count) fixuint_t else rep_t;
var uint_result: UintResultType = undefined;

View File

@ -10,7 +10,7 @@ pub fn fixuint(comptime fp_t: type, comptime fixuint_t: type, a: fp_t) fixuint_t
f128 => u128,
else => unreachable,
};
const srep_t = @IntType(true, rep_t.bit_count);
const srep_t = @import("std").meta.IntType(true, rep_t.bit_count);
const significandBits = switch (fp_t) {
f32 => 23,
f64 => 52,

View File

@ -5,8 +5,8 @@ const maxInt = std.math.maxInt;
fn floatsiXf(comptime T: type, a: i32) T {
@setRuntimeSafety(builtin.is_test);
const Z = @IntType(false, T.bit_count);
const S = @IntType(false, T.bit_count - @clz(Z, @as(Z, T.bit_count) - 1));
const Z = std.meta.IntType(false, T.bit_count);
const S = std.meta.IntType(false, T.bit_count - @clz(Z, @as(Z, T.bit_count) - 1));
if (a == 0) {
return @as(T, 0.0);

View File

@ -28,7 +28,7 @@ pub fn __aeabi_dmul(a: f64, b: f64) callconv(.C) f64 {
fn mulXf3(comptime T: type, a: T, b: T) T {
@setRuntimeSafety(builtin.is_test);
const Z = @IntType(false, T.bit_count);
const Z = std.meta.IntType(false, T.bit_count);
const typeWidth = T.bit_count;
const significandBits = std.math.floatMantissaBits(T);
@ -264,9 +264,9 @@ fn wideMultiply(comptime Z: type, a: Z, b: Z, hi: *Z, lo: *Z) void {
}
}
fn normalize(comptime T: type, significand: *@IntType(false, T.bit_count)) i32 {
fn normalize(comptime T: type, significand: *std.meta.IntType(false, T.bit_count)) i32 {
@setRuntimeSafety(builtin.is_test);
const Z = @IntType(false, T.bit_count);
const Z = std.meta.IntType(false, T.bit_count);
const significandBits = std.math.floatMantissaBits(T);
const implicitBit = @as(Z, 1) << significandBits;

View File

@ -19,7 +19,7 @@ pub fn __aeabi_dneg(arg: f64) callconv(.AAPCS) f64 {
}
fn negXf2(comptime T: type, a: T) T {
const Z = @IntType(false, T.bit_count);
const Z = std.meta.IntType(false, T.bit_count);
const typeWidth = T.bit_count;
const significandBits = std.math.floatMantissaBits(T);

View File

@ -36,8 +36,8 @@ pub fn __aeabi_f2h(a: f32) callconv(.AAPCS) u16 {
}
inline fn truncXfYf2(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t {
const src_rep_t = @IntType(false, @typeInfo(src_t).Float.bits);
const dst_rep_t = @IntType(false, @typeInfo(dst_t).Float.bits);
const src_rep_t = std.meta.IntType(false, @typeInfo(src_t).Float.bits);
const dst_rep_t = std.meta.IntType(false, @typeInfo(dst_t).Float.bits);
const srcSigBits = std.math.floatMantissaBits(src_t);
const dstSigBits = std.math.floatMantissaBits(dst_t);
const SrcShift = std.math.Log2Int(src_rep_t);

View File

@ -10,8 +10,8 @@ const high = 1 - low;
pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem: ?*DoubleInt) DoubleInt {
@setRuntimeSafety(is_test);
const SingleInt = @IntType(false, @divExact(DoubleInt.bit_count, 2));
const SignedDoubleInt = @IntType(true, DoubleInt.bit_count);
const SingleInt = @import("std").meta.IntType(false, @divExact(DoubleInt.bit_count, 2));
const SignedDoubleInt = @import("std").meta.IntType(true, DoubleInt.bit_count);
const Log2SingleInt = @import("std").math.Log2Int(SingleInt);
const n = @ptrCast(*const [2]SingleInt, &a).*; // TODO issue #421

View File

@ -21,7 +21,9 @@ comptime {
@export(main, .{ .name = "main", .linkage = .Weak });
}
} else if (builtin.os == .windows) {
if (!@hasDecl(root, "WinMain") and !@hasDecl(root, "WinMainCRTStartup") and !@hasDecl(root, "wWinMain") and !@hasDecl(root, "wWinMainCRTStartup")) {
if (!@hasDecl(root, "WinMain") and !@hasDecl(root, "WinMainCRTStartup") and
!@hasDecl(root, "wWinMain") and !@hasDecl(root, "wWinMainCRTStartup"))
{
@export(WinMainCRTStartup, .{ .name = "WinMainCRTStartup" });
}
} else if (builtin.os == .uefi) {
@ -34,7 +36,11 @@ comptime {
}
}
fn _DllMainCRTStartup(hinstDLL: std.os.windows.HINSTANCE, fdwReason: std.os.windows.DWORD, lpReserved: std.os.windows.LPVOID) callconv(.Stdcall) std.os.windows.BOOL {
fn _DllMainCRTStartup(
hinstDLL: std.os.windows.HINSTANCE,
fdwReason: std.os.windows.DWORD,
lpReserved: std.os.windows.LPVOID,
) callconv(.Stdcall) std.os.windows.BOOL {
if (@hasDecl(root, "DllMain")) {
return root.DllMain(hinstDLL, fdwReason, lpReserved);
}

File diff suppressed because it is too large Load Diff

View File

@ -1,15 +1,9 @@
const std = @import("../std.zig");
const Cpu = std.Target.Cpu;
const CpuFeature = std.Target.Cpu.Feature;
const CpuModel = std.Target.Cpu.Model;
pub const Feature = enum {
a35,
a53,
a55,
a57,
a65,
a72,
a73,
a75,
a76,
aes,
aggressive_fma,
@ -46,11 +40,7 @@ pub const Feature = enum {
dotprod,
ete,
exynos_cheap_as_move,
exynosm1,
exynosm2,
exynosm3,
exynosm4,
falkor,
fmi,
force_32bit_jump_tables,
fp_armv8,
@ -64,7 +54,6 @@ pub const Feature = enum {
fuse_csel,
fuse_literals,
jsconv,
kryo,
lor,
lse,
lsl_fast,
@ -112,7 +101,6 @@ pub const Feature = enum {
reserve_x6,
reserve_x7,
reserve_x9,
saphira,
sb,
sel2,
sha2,
@ -132,11 +120,6 @@ pub const Feature = enum {
sve2_sha3,
sve2_sm4,
tagged_globals,
thunderx,
thunderx2t99,
thunderxt81,
thunderxt83,
thunderxt88,
tlb_rmi,
tme,
tpidr_el1,
@ -144,7 +127,6 @@ pub const Feature = enum {
tpidr_el3,
tracev8_4,
trbe,
tsv110,
uaops,
use_aa,
use_postra_scheduler,
@ -163,72 +145,13 @@ pub const Feature = enum {
zcz_gp,
};
pub usingnamespace Cpu.Feature.feature_set_fns(Feature);
pub usingnamespace CpuFeature.feature_set_fns(Feature);
pub const all_features = blk: {
@setEvalBranchQuota(2000);
const len = @typeInfo(Feature).Enum.fields.len;
std.debug.assert(len <= Cpu.Feature.Set.needed_bit_count);
var result: [len]Cpu.Feature = undefined;
result[@enumToInt(Feature.a35)] = .{
.llvm_name = "a35",
.description = "Cortex-A35 ARM processors",
.dependencies = featureSet(&[_]Feature{
.crc,
.crypto,
.fp_armv8,
.neon,
.perfmon,
}),
};
result[@enumToInt(Feature.a53)] = .{
.llvm_name = "a53",
.description = "Cortex-A53 ARM processors",
.dependencies = featureSet(&[_]Feature{
.balance_fp_ops,
.crc,
.crypto,
.custom_cheap_as_move,
.fp_armv8,
.fuse_aes,
.neon,
.perfmon,
.use_aa,
.use_postra_scheduler,
}),
};
result[@enumToInt(Feature.a55)] = .{
.llvm_name = "a55",
.description = "Cortex-A55 ARM processors",
.dependencies = featureSet(&[_]Feature{
.crypto,
.dotprod,
.fp_armv8,
.fullfp16,
.fuse_aes,
.neon,
.perfmon,
.rcpc,
.v8_2a,
}),
};
result[@enumToInt(Feature.a57)] = .{
.llvm_name = "a57",
.description = "Cortex-A57 ARM processors",
.dependencies = featureSet(&[_]Feature{
.balance_fp_ops,
.crc,
.crypto,
.custom_cheap_as_move,
.fp_armv8,
.fuse_aes,
.fuse_literals,
.neon,
.perfmon,
.predictable_select_expensive,
.use_postra_scheduler,
}),
};
std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
var result: [len]CpuFeature = undefined;
result[@enumToInt(Feature.a65)] = .{
.llvm_name = "a65",
.description = "Cortex-A65 ARM processors",
@ -244,54 +167,13 @@ pub const all_features = blk: {
.v8_2a,
}),
};
result[@enumToInt(Feature.a72)] = .{
.llvm_name = "a72",
.description = "Cortex-A72 ARM processors",
.dependencies = featureSet(&[_]Feature{
.crc,
.crypto,
.fp_armv8,
.fuse_aes,
.neon,
.perfmon,
}),
};
result[@enumToInt(Feature.a73)] = .{
.llvm_name = "a73",
.description = "Cortex-A73 ARM processors",
.dependencies = featureSet(&[_]Feature{
.crc,
.crypto,
.fp_armv8,
.fuse_aes,
.neon,
.perfmon,
}),
};
result[@enumToInt(Feature.a75)] = .{
.llvm_name = "a75",
.description = "Cortex-A75 ARM processors",
.dependencies = featureSet(&[_]Feature{
.crypto,
.dotprod,
.fp_armv8,
.fullfp16,
.fuse_aes,
.neon,
.perfmon,
.rcpc,
.v8_2a,
}),
};
result[@enumToInt(Feature.a76)] = .{
.llvm_name = "a76",
.description = "Cortex-A76 ARM processors",
.dependencies = featureSet(&[_]Feature{
.crypto,
.dotprod,
.fp_armv8,
.fullfp16,
.neon,
.rcpc,
.ssbs,
.v8_2a,
@ -563,58 +445,6 @@ pub const all_features = blk: {
.custom_cheap_as_move,
}),
};
result[@enumToInt(Feature.exynosm1)] = .{
.llvm_name = null,
.description = "Samsung Exynos-M1 processors",
.dependencies = featureSet(&[_]Feature{
.crc,
.crypto,
.exynos_cheap_as_move,
.force_32bit_jump_tables,
.fuse_aes,
.perfmon,
.slow_misaligned_128store,
.slow_paired_128,
.use_postra_scheduler,
.use_reciprocal_square_root,
.zcz_fp,
}),
};
result[@enumToInt(Feature.exynosm2)] = .{
.llvm_name = null,
.description = "Samsung Exynos-M2 processors",
.dependencies = featureSet(&[_]Feature{
.crc,
.crypto,
.exynos_cheap_as_move,
.force_32bit_jump_tables,
.fuse_aes,
.perfmon,
.slow_misaligned_128store,
.slow_paired_128,
.use_postra_scheduler,
.zcz_fp,
}),
};
result[@enumToInt(Feature.exynosm3)] = .{
.llvm_name = "exynosm3",
.description = "Samsung Exynos-M3 processors",
.dependencies = featureSet(&[_]Feature{
.crc,
.crypto,
.exynos_cheap_as_move,
.force_32bit_jump_tables,
.fuse_address,
.fuse_aes,
.fuse_csel,
.fuse_literals,
.lsl_fast,
.perfmon,
.predictable_select_expensive,
.use_postra_scheduler,
.zcz_fp,
}),
};
result[@enumToInt(Feature.exynosm4)] = .{
.llvm_name = "exynosm4",
.description = "Samsung Exynos-M4 processors",
@ -638,24 +468,6 @@ pub const all_features = blk: {
.zcz,
}),
};
result[@enumToInt(Feature.falkor)] = .{
.llvm_name = "falkor",
.description = "Qualcomm Falkor processors",
.dependencies = featureSet(&[_]Feature{
.crc,
.crypto,
.custom_cheap_as_move,
.fp_armv8,
.lsl_fast,
.neon,
.perfmon,
.predictable_select_expensive,
.rdm,
.slow_strqro_store,
.use_postra_scheduler,
.zcz,
}),
};
result[@enumToInt(Feature.fmi)] = .{
.llvm_name = "fmi",
.description = "Enable v8.4-A Flag Manipulation Instructions",
@ -727,22 +539,6 @@ pub const all_features = blk: {
.fp_armv8,
}),
};
result[@enumToInt(Feature.kryo)] = .{
.llvm_name = "kryo",
.description = "Qualcomm Kryo processors",
.dependencies = featureSet(&[_]Feature{
.crc,
.crypto,
.custom_cheap_as_move,
.fp_armv8,
.lsl_fast,
.neon,
.perfmon,
.predictable_select_expensive,
.use_postra_scheduler,
.zcz,
}),
};
result[@enumToInt(Feature.lor)] = .{
.llvm_name = "lor",
.description = "Enables ARM v8.1 Limited Ordering Regions extension",
@ -1005,23 +801,6 @@ pub const all_features = blk: {
.description = "Reserve X9, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@enumToInt(Feature.saphira)] = .{
.llvm_name = "saphira",
.description = "Qualcomm Saphira processors",
.dependencies = featureSet(&[_]Feature{
.crypto,
.custom_cheap_as_move,
.fp_armv8,
.lsl_fast,
.neon,
.perfmon,
.predictable_select_expensive,
.spe,
.use_postra_scheduler,
.v8_4a,
.zcz,
}),
};
result[@enumToInt(Feature.sb)] = .{
.llvm_name = "sb",
.description = "Enable v8.5 Speculation Barrier",
@ -1137,74 +916,6 @@ pub const all_features = blk: {
.description = "Use an instruction sequence for taking the address of a global that allows a memory tag in the upper address bits",
.dependencies = featureSet(&[_]Feature{}),
};
result[@enumToInt(Feature.thunderx)] = .{
.llvm_name = "thunderx",
.description = "Cavium ThunderX processors",
.dependencies = featureSet(&[_]Feature{
.crc,
.crypto,
.fp_armv8,
.neon,
.perfmon,
.predictable_select_expensive,
.use_postra_scheduler,
}),
};
result[@enumToInt(Feature.thunderx2t99)] = .{
.llvm_name = "thunderx2t99",
.description = "Cavium ThunderX2 processors",
.dependencies = featureSet(&[_]Feature{
.aggressive_fma,
.arith_bcc_fusion,
.crc,
.crypto,
.fp_armv8,
.lse,
.neon,
.predictable_select_expensive,
.use_postra_scheduler,
.v8_1a,
}),
};
result[@enumToInt(Feature.thunderxt81)] = .{
.llvm_name = "thunderxt81",
.description = "Cavium ThunderX processors",
.dependencies = featureSet(&[_]Feature{
.crc,
.crypto,
.fp_armv8,
.neon,
.perfmon,
.predictable_select_expensive,
.use_postra_scheduler,
}),
};
result[@enumToInt(Feature.thunderxt83)] = .{
.llvm_name = "thunderxt83",
.description = "Cavium ThunderX processors",
.dependencies = featureSet(&[_]Feature{
.crc,
.crypto,
.fp_armv8,
.neon,
.perfmon,
.predictable_select_expensive,
.use_postra_scheduler,
}),
};
result[@enumToInt(Feature.thunderxt88)] = .{
.llvm_name = "thunderxt88",
.description = "Cavium ThunderX processors",
.dependencies = featureSet(&[_]Feature{
.crc,
.crypto,
.fp_armv8,
.neon,
.perfmon,
.predictable_select_expensive,
.use_postra_scheduler,
}),
};
result[@enumToInt(Feature.tlb_rmi)] = .{
.llvm_name = "tlb-rmi",
.description = "Enable v8.4-A TLB Range and Maintenance Instructions",
@ -1240,24 +951,6 @@ pub const all_features = blk: {
.description = "Enable Trace Buffer Extension",
.dependencies = featureSet(&[_]Feature{}),
};
result[@enumToInt(Feature.tsv110)] = .{
.llvm_name = "tsv110",
.description = "HiSilicon TS-V110 processors",
.dependencies = featureSet(&[_]Feature{
.crypto,
.custom_cheap_as_move,
.dotprod,
.fp_armv8,
.fp16fml,
.fullfp16,
.fuse_aes,
.neon,
.perfmon,
.spe,
.use_postra_scheduler,
.v8_2a,
}),
};
result[@enumToInt(Feature.uaops)] = .{
.llvm_name = "uaops",
.description = "Enable v8.2 UAO PState",
@ -1398,282 +1091,417 @@ pub const all_features = blk: {
};
pub const cpu = struct {
pub const apple_a10 = Cpu{
pub const apple_a10 = CpuModel{
.name = "apple_a10",
.llvm_name = "apple-a10",
.features = featureSet(&[_]Feature{
.apple_a10,
}),
};
pub const apple_a11 = Cpu{
pub const apple_a11 = CpuModel{
.name = "apple_a11",
.llvm_name = "apple-a11",
.features = featureSet(&[_]Feature{
.apple_a11,
}),
};
pub const apple_a12 = Cpu{
pub const apple_a12 = CpuModel{
.name = "apple_a12",
.llvm_name = "apple-a12",
.features = featureSet(&[_]Feature{
.apple_a12,
}),
};
pub const apple_a13 = Cpu{
pub const apple_a13 = CpuModel{
.name = "apple_a13",
.llvm_name = "apple-a13",
.features = featureSet(&[_]Feature{
.apple_a13,
}),
};
pub const apple_a7 = Cpu{
pub const apple_a7 = CpuModel{
.name = "apple_a7",
.llvm_name = "apple-a7",
.features = featureSet(&[_]Feature{
.apple_a7,
}),
};
pub const apple_a8 = Cpu{
pub const apple_a8 = CpuModel{
.name = "apple_a8",
.llvm_name = "apple-a8",
.features = featureSet(&[_]Feature{
.apple_a7,
}),
};
pub const apple_a9 = Cpu{
pub const apple_a9 = CpuModel{
.name = "apple_a9",
.llvm_name = "apple-a9",
.features = featureSet(&[_]Feature{
.apple_a7,
}),
};
pub const apple_latest = Cpu{
pub const apple_latest = CpuModel{
.name = "apple_latest",
.llvm_name = "apple-latest",
.features = featureSet(&[_]Feature{
.apple_a13,
}),
};
pub const apple_s4 = Cpu{
pub const apple_s4 = CpuModel{
.name = "apple_s4",
.llvm_name = "apple-s4",
.features = featureSet(&[_]Feature{
.apple_a12,
}),
};
pub const apple_s5 = Cpu{
pub const apple_s5 = CpuModel{
.name = "apple_s5",
.llvm_name = "apple-s5",
.features = featureSet(&[_]Feature{
.apple_a12,
}),
};
pub const cortex_a35 = Cpu{
pub const cortex_a35 = CpuModel{
.name = "cortex_a35",
.llvm_name = "cortex-a35",
.features = featureSet(&[_]Feature{
.a35,
.crc,
.crypto,
.perfmon,
.v8a,
}),
};
pub const cortex_a53 = Cpu{
pub const cortex_a53 = CpuModel{
.name = "cortex_a53",
.llvm_name = "cortex-a53",
.features = featureSet(&[_]Feature{
.a53,
.balance_fp_ops,
.crc,
.crypto,
.custom_cheap_as_move,
.fuse_aes,
.perfmon,
.use_aa,
.use_postra_scheduler,
.v8a,
}),
};
pub const cortex_a55 = Cpu{
pub const cortex_a55 = CpuModel{
.name = "cortex_a55",
.llvm_name = "cortex-a55",
.features = featureSet(&[_]Feature{
.a55,
.crypto,
.dotprod,
.fullfp16,
.fuse_aes,
.perfmon,
.rcpc,
.v8_2a,
}),
};
pub const cortex_a57 = Cpu{
pub const cortex_a57 = CpuModel{
.name = "cortex_a57",
.llvm_name = "cortex-a57",
.features = featureSet(&[_]Feature{
.a57,
.balance_fp_ops,
.crc,
.crypto,
.custom_cheap_as_move,
.fuse_aes,
.fuse_literals,
.perfmon,
.predictable_select_expensive,
.use_postra_scheduler,
.v8a,
}),
};
pub const cortex_a65 = Cpu{
pub const cortex_a65 = CpuModel{
.name = "cortex_a65",
.llvm_name = "cortex-a65",
.features = featureSet(&[_]Feature{
.a65,
}),
};
pub const cortex_a65ae = Cpu{
pub const cortex_a65ae = CpuModel{
.name = "cortex_a65ae",
.llvm_name = "cortex-a65ae",
.features = featureSet(&[_]Feature{
.a65,
}),
};
pub const cortex_a72 = Cpu{
pub const cortex_a72 = CpuModel{
.name = "cortex_a72",
.llvm_name = "cortex-a72",
.features = featureSet(&[_]Feature{
.a72,
.crc,
.crypto,
.fuse_aes,
.perfmon,
.v8a,
}),
};
pub const cortex_a73 = Cpu{
pub const cortex_a73 = CpuModel{
.name = "cortex_a73",
.llvm_name = "cortex-a73",
.features = featureSet(&[_]Feature{
.a73,
.crc,
.crypto,
.fuse_aes,
.perfmon,
.v8a,
}),
};
pub const cortex_a75 = Cpu{
pub const cortex_a75 = CpuModel{
.name = "cortex_a75",
.llvm_name = "cortex-a75",
.features = featureSet(&[_]Feature{
.a75,
.crypto,
.dotprod,
.fullfp16,
.fuse_aes,
.perfmon,
.rcpc,
.v8_2a,
}),
};
pub const cortex_a76 = Cpu{
pub const cortex_a76 = CpuModel{
.name = "cortex_a76",
.llvm_name = "cortex-a76",
.features = featureSet(&[_]Feature{
.a76,
}),
};
pub const cortex_a76ae = Cpu{
pub const cortex_a76ae = CpuModel{
.name = "cortex_a76ae",
.llvm_name = "cortex-a76ae",
.features = featureSet(&[_]Feature{
.a76,
}),
};
pub const cyclone = Cpu{
pub const cyclone = CpuModel{
.name = "cyclone",
.llvm_name = "cyclone",
.features = featureSet(&[_]Feature{
.apple_a7,
}),
};
pub const exynos_m1 = Cpu{
pub const exynos_m1 = CpuModel{
.name = "exynos_m1",
.llvm_name = null,
.features = featureSet(&[_]Feature{
.exynosm1,
.crc,
.crypto,
.exynos_cheap_as_move,
.force_32bit_jump_tables,
.fuse_aes,
.perfmon,
.slow_misaligned_128store,
.slow_paired_128,
.use_postra_scheduler,
.use_reciprocal_square_root,
.v8a,
.zcz_fp,
}),
};
pub const exynos_m2 = Cpu{
pub const exynos_m2 = CpuModel{
.name = "exynos_m2",
.llvm_name = null,
.features = featureSet(&[_]Feature{
.exynosm2,
.crc,
.crypto,
.exynos_cheap_as_move,
.force_32bit_jump_tables,
.fuse_aes,
.perfmon,
.slow_misaligned_128store,
.slow_paired_128,
.use_postra_scheduler,
.v8a,
.zcz_fp,
}),
};
pub const exynos_m3 = Cpu{
pub const exynos_m3 = CpuModel{
.name = "exynos_m3",
.llvm_name = "exynos-m3",
.features = featureSet(&[_]Feature{
.exynosm3,
.crc,
.crypto,
.exynos_cheap_as_move,
.force_32bit_jump_tables,
.fuse_address,
.fuse_aes,
.fuse_csel,
.fuse_literals,
.lsl_fast,
.perfmon,
.predictable_select_expensive,
.use_postra_scheduler,
.v8a,
.zcz_fp,
}),
};
pub const exynos_m4 = Cpu{
pub const exynos_m4 = CpuModel{
.name = "exynos_m4",
.llvm_name = "exynos-m4",
.features = featureSet(&[_]Feature{
.exynosm4,
}),
};
pub const exynos_m5 = Cpu{
pub const exynos_m5 = CpuModel{
.name = "exynos_m5",
.llvm_name = "exynos-m5",
.features = featureSet(&[_]Feature{
.exynosm4,
}),
};
pub const falkor = Cpu{
pub const falkor = CpuModel{
.name = "falkor",
.llvm_name = "falkor",
.features = featureSet(&[_]Feature{
.falkor,
.crc,
.crypto,
.custom_cheap_as_move,
.lsl_fast,
.perfmon,
.predictable_select_expensive,
.rdm,
.slow_strqro_store,
.use_postra_scheduler,
.v8a,
.zcz,
}),
};
pub const generic = Cpu{
pub const generic = CpuModel{
.name = "generic",
.llvm_name = "generic",
.features = featureSet(&[_]Feature{
.ete,
.fp_armv8,
.fuse_aes,
.neon,
.perfmon,
.use_postra_scheduler,
.v8a,
}),
};
pub const kryo = Cpu{
pub const kryo = CpuModel{
.name = "kryo",
.llvm_name = "kryo",
.features = featureSet(&[_]Feature{
.kryo,
.crc,
.crypto,
.custom_cheap_as_move,
.lsl_fast,
.perfmon,
.predictable_select_expensive,
.use_postra_scheduler,
.zcz,
.v8a,
}),
};
pub const neoverse_e1 = Cpu{
pub const neoverse_e1 = CpuModel{
.name = "neoverse_e1",
.llvm_name = "neoverse-e1",
.features = featureSet(&[_]Feature{
.neoversee1,
}),
};
pub const neoverse_n1 = Cpu{
pub const neoverse_n1 = CpuModel{
.name = "neoverse_n1",
.llvm_name = "neoverse-n1",
.features = featureSet(&[_]Feature{
.neoversen1,
}),
};
pub const saphira = Cpu{
pub const saphira = CpuModel{
.name = "saphira",
.llvm_name = "saphira",
.features = featureSet(&[_]Feature{
.saphira,
.crypto,
.custom_cheap_as_move,
.lsl_fast,
.perfmon,
.predictable_select_expensive,
.spe,
.use_postra_scheduler,
.v8_4a,
.zcz,
}),
};
pub const thunderx = Cpu{
pub const thunderx = CpuModel{
.name = "thunderx",
.llvm_name = "thunderx",
.features = featureSet(&[_]Feature{
.thunderx,
.crc,
.crypto,
.perfmon,
.predictable_select_expensive,
.use_postra_scheduler,
.v8a,
}),
};
pub const thunderx2t99 = Cpu{
pub const thunderx2t99 = CpuModel{
.name = "thunderx2t99",
.llvm_name = "thunderx2t99",
.features = featureSet(&[_]Feature{
.thunderx2t99,
.aggressive_fma,
.arith_bcc_fusion,
.crc,
.crypto,
.lse,
.predictable_select_expensive,
.use_postra_scheduler,
.v8_1a,
}),
};
pub const thunderxt81 = Cpu{
pub const thunderxt81 = CpuModel{
.name = "thunderxt81",
.llvm_name = "thunderxt81",
.features = featureSet(&[_]Feature{
.thunderxt81,
.crc,
.crypto,
.perfmon,
.predictable_select_expensive,
.use_postra_scheduler,
.v8a,
}),
};
pub const thunderxt83 = Cpu{
pub const thunderxt83 = CpuModel{
.name = "thunderxt83",
.llvm_name = "thunderxt83",
.features = featureSet(&[_]Feature{
.thunderxt83,
.crc,
.crypto,
.perfmon,
.predictable_select_expensive,
.use_postra_scheduler,
.v8a,
}),
};
pub const thunderxt88 = Cpu{
pub const thunderxt88 = CpuModel{
.name = "thunderxt88",
.llvm_name = "thunderxt88",
.features = featureSet(&[_]Feature{
.thunderxt88,
.crc,
.crypto,
.perfmon,
.predictable_select_expensive,
.use_postra_scheduler,
.v8a,
}),
};
pub const tsv110 = Cpu{
pub const tsv110 = CpuModel{
.name = "tsv110",
.llvm_name = "tsv110",
.features = featureSet(&[_]Feature{
.tsv110,
.crypto,
.custom_cheap_as_move,
.dotprod,
.fp16fml,
.fullfp16,
.fuse_aes,
.perfmon,
.spe,
.use_postra_scheduler,
.v8_2a,
}),
};
};
@ -1681,7 +1509,7 @@ pub const cpu = struct {
/// All aarch64 CPUs, sorted alphabetically by name.
/// TODO: Replace this with usage of `std.meta.declList`. It does work, but stage1
/// compiler has inefficient memory and CPU usage, affecting build times.
pub const all_cpus = &[_]*const Cpu{
pub const all_cpus = &[_]*const CpuModel{
&cpu.apple_a10,
&cpu.apple_a11,
&cpu.apple_a12,

View File

@ -1,5 +1,6 @@
const std = @import("../std.zig");
const Cpu = std.Target.Cpu;
const CpuFeature = std.Target.Cpu.Feature;
const CpuModel = std.Target.Cpu.Model;
pub const Feature = enum {
@"16_bit_insts",
@ -112,12 +113,12 @@ pub const Feature = enum {
xnack,
};
pub usingnamespace Cpu.Feature.feature_set_fns(Feature);
pub usingnamespace CpuFeature.feature_set_fns(Feature);
pub const all_features = blk: {
const len = @typeInfo(Feature).Enum.fields.len;
std.debug.assert(len <= Cpu.Feature.Set.needed_bit_count);
var result: [len]Cpu.Feature = undefined;
std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
var result: [len]CpuFeature = undefined;
result[@enumToInt(Feature.@"16_bit_insts")] = .{
.llvm_name = "16-bit-insts",
.description = "Has i16/f16 instructions",
@ -784,7 +785,7 @@ pub const all_features = blk: {
};
pub const cpu = struct {
pub const bonaire = Cpu{
pub const bonaire = CpuModel{
.name = "bonaire",
.llvm_name = "bonaire",
.features = featureSet(&[_]Feature{
@ -794,7 +795,7 @@ pub const cpu = struct {
.sea_islands,
}),
};
pub const carrizo = Cpu{
pub const carrizo = CpuModel{
.name = "carrizo",
.llvm_name = "carrizo",
.features = featureSet(&[_]Feature{
@ -807,7 +808,7 @@ pub const cpu = struct {
.xnack,
}),
};
pub const fiji = Cpu{
pub const fiji = CpuModel{
.name = "fiji",
.llvm_name = "fiji",
.features = featureSet(&[_]Feature{
@ -818,14 +819,14 @@ pub const cpu = struct {
.volcanic_islands,
}),
};
pub const generic = Cpu{
pub const generic = CpuModel{
.name = "generic",
.llvm_name = "generic",
.features = featureSet(&[_]Feature{
.wavefrontsize64,
}),
};
pub const generic_hsa = Cpu{
pub const generic_hsa = CpuModel{
.name = "generic_hsa",
.llvm_name = "generic-hsa",
.features = featureSet(&[_]Feature{
@ -833,7 +834,7 @@ pub const cpu = struct {
.wavefrontsize64,
}),
};
pub const gfx1010 = Cpu{
pub const gfx1010 = CpuModel{
.name = "gfx1010",
.llvm_name = "gfx1010",
.features = featureSet(&[_]Feature{
@ -859,7 +860,7 @@ pub const cpu = struct {
.wavefrontsize32,
}),
};
pub const gfx1011 = Cpu{
pub const gfx1011 = CpuModel{
.name = "gfx1011",
.llvm_name = "gfx1011",
.features = featureSet(&[_]Feature{
@ -888,7 +889,7 @@ pub const cpu = struct {
.wavefrontsize32,
}),
};
pub const gfx1012 = Cpu{
pub const gfx1012 = CpuModel{
.name = "gfx1012",
.llvm_name = "gfx1012",
.features = featureSet(&[_]Feature{
@ -918,7 +919,7 @@ pub const cpu = struct {
.wavefrontsize32,
}),
};
pub const gfx600 = Cpu{
pub const gfx600 = CpuModel{
.name = "gfx600",
.llvm_name = "gfx600",
.features = featureSet(&[_]Feature{
@ -930,7 +931,7 @@ pub const cpu = struct {
.southern_islands,
}),
};
pub const gfx601 = Cpu{
pub const gfx601 = CpuModel{
.name = "gfx601",
.llvm_name = "gfx601",
.features = featureSet(&[_]Feature{
@ -940,7 +941,7 @@ pub const cpu = struct {
.southern_islands,
}),
};
pub const gfx700 = Cpu{
pub const gfx700 = CpuModel{
.name = "gfx700",
.llvm_name = "gfx700",
.features = featureSet(&[_]Feature{
@ -950,7 +951,7 @@ pub const cpu = struct {
.sea_islands,
}),
};
pub const gfx701 = Cpu{
pub const gfx701 = CpuModel{
.name = "gfx701",
.llvm_name = "gfx701",
.features = featureSet(&[_]Feature{
@ -962,7 +963,7 @@ pub const cpu = struct {
.sea_islands,
}),
};
pub const gfx702 = Cpu{
pub const gfx702 = CpuModel{
.name = "gfx702",
.llvm_name = "gfx702",
.features = featureSet(&[_]Feature{
@ -973,7 +974,7 @@ pub const cpu = struct {
.sea_islands,
}),
};
pub const gfx703 = Cpu{
pub const gfx703 = CpuModel{
.name = "gfx703",
.llvm_name = "gfx703",
.features = featureSet(&[_]Feature{
@ -983,7 +984,7 @@ pub const cpu = struct {
.sea_islands,
}),
};
pub const gfx704 = Cpu{
pub const gfx704 = CpuModel{
.name = "gfx704",
.llvm_name = "gfx704",
.features = featureSet(&[_]Feature{
@ -993,7 +994,7 @@ pub const cpu = struct {
.sea_islands,
}),
};
pub const gfx801 = Cpu{
pub const gfx801 = CpuModel{
.name = "gfx801",
.llvm_name = "gfx801",
.features = featureSet(&[_]Feature{
@ -1006,7 +1007,7 @@ pub const cpu = struct {
.xnack,
}),
};
pub const gfx802 = Cpu{
pub const gfx802 = CpuModel{
.name = "gfx802",
.llvm_name = "gfx802",
.features = featureSet(&[_]Feature{
@ -1018,7 +1019,7 @@ pub const cpu = struct {
.volcanic_islands,
}),
};
pub const gfx803 = Cpu{
pub const gfx803 = CpuModel{
.name = "gfx803",
.llvm_name = "gfx803",
.features = featureSet(&[_]Feature{
@ -1029,7 +1030,7 @@ pub const cpu = struct {
.volcanic_islands,
}),
};
pub const gfx810 = Cpu{
pub const gfx810 = CpuModel{
.name = "gfx810",
.llvm_name = "gfx810",
.features = featureSet(&[_]Feature{
@ -1039,7 +1040,7 @@ pub const cpu = struct {
.xnack,
}),
};
pub const gfx900 = Cpu{
pub const gfx900 = CpuModel{
.name = "gfx900",
.llvm_name = "gfx900",
.features = featureSet(&[_]Feature{
@ -1051,7 +1052,7 @@ pub const cpu = struct {
.no_xnack_support,
}),
};
pub const gfx902 = Cpu{
pub const gfx902 = CpuModel{
.name = "gfx902",
.llvm_name = "gfx902",
.features = featureSet(&[_]Feature{
@ -1063,7 +1064,7 @@ pub const cpu = struct {
.xnack,
}),
};
pub const gfx904 = Cpu{
pub const gfx904 = CpuModel{
.name = "gfx904",
.llvm_name = "gfx904",
.features = featureSet(&[_]Feature{
@ -1075,7 +1076,7 @@ pub const cpu = struct {
.no_xnack_support,
}),
};
pub const gfx906 = Cpu{
pub const gfx906 = CpuModel{
.name = "gfx906",
.llvm_name = "gfx906",
.features = featureSet(&[_]Feature{
@ -1090,7 +1091,7 @@ pub const cpu = struct {
.no_xnack_support,
}),
};
pub const gfx908 = Cpu{
pub const gfx908 = CpuModel{
.name = "gfx908",
.llvm_name = "gfx908",
.features = featureSet(&[_]Feature{
@ -1113,7 +1114,7 @@ pub const cpu = struct {
.sram_ecc,
}),
};
pub const gfx909 = Cpu{
pub const gfx909 = CpuModel{
.name = "gfx909",
.llvm_name = "gfx909",
.features = featureSet(&[_]Feature{
@ -1124,7 +1125,7 @@ pub const cpu = struct {
.xnack,
}),
};
pub const hainan = Cpu{
pub const hainan = CpuModel{
.name = "hainan",
.llvm_name = "hainan",
.features = featureSet(&[_]Feature{
@ -1134,7 +1135,7 @@ pub const cpu = struct {
.southern_islands,
}),
};
pub const hawaii = Cpu{
pub const hawaii = CpuModel{
.name = "hawaii",
.llvm_name = "hawaii",
.features = featureSet(&[_]Feature{
@ -1146,7 +1147,7 @@ pub const cpu = struct {
.sea_islands,
}),
};
pub const iceland = Cpu{
pub const iceland = CpuModel{
.name = "iceland",
.llvm_name = "iceland",
.features = featureSet(&[_]Feature{
@ -1158,7 +1159,7 @@ pub const cpu = struct {
.volcanic_islands,
}),
};
pub const kabini = Cpu{
pub const kabini = CpuModel{
.name = "kabini",
.llvm_name = "kabini",
.features = featureSet(&[_]Feature{
@ -1168,7 +1169,7 @@ pub const cpu = struct {
.sea_islands,
}),
};
pub const kaveri = Cpu{
pub const kaveri = CpuModel{
.name = "kaveri",
.llvm_name = "kaveri",
.features = featureSet(&[_]Feature{
@ -1178,7 +1179,7 @@ pub const cpu = struct {
.sea_islands,
}),
};
pub const mullins = Cpu{
pub const mullins = CpuModel{
.name = "mullins",
.llvm_name = "mullins",
.features = featureSet(&[_]Feature{
@ -1188,7 +1189,7 @@ pub const cpu = struct {
.sea_islands,
}),
};
pub const oland = Cpu{
pub const oland = CpuModel{
.name = "oland",
.llvm_name = "oland",
.features = featureSet(&[_]Feature{
@ -1198,7 +1199,7 @@ pub const cpu = struct {
.southern_islands,
}),
};
pub const pitcairn = Cpu{
pub const pitcairn = CpuModel{
.name = "pitcairn",
.llvm_name = "pitcairn",
.features = featureSet(&[_]Feature{
@ -1208,7 +1209,7 @@ pub const cpu = struct {
.southern_islands,
}),
};
pub const polaris10 = Cpu{
pub const polaris10 = CpuModel{
.name = "polaris10",
.llvm_name = "polaris10",
.features = featureSet(&[_]Feature{
@ -1219,7 +1220,7 @@ pub const cpu = struct {
.volcanic_islands,
}),
};
pub const polaris11 = Cpu{
pub const polaris11 = CpuModel{
.name = "polaris11",
.llvm_name = "polaris11",
.features = featureSet(&[_]Feature{
@ -1230,7 +1231,7 @@ pub const cpu = struct {
.volcanic_islands,
}),
};
pub const stoney = Cpu{
pub const stoney = CpuModel{
.name = "stoney",
.llvm_name = "stoney",
.features = featureSet(&[_]Feature{
@ -1240,7 +1241,7 @@ pub const cpu = struct {
.xnack,
}),
};
pub const tahiti = Cpu{
pub const tahiti = CpuModel{
.name = "tahiti",
.llvm_name = "tahiti",
.features = featureSet(&[_]Feature{
@ -1252,7 +1253,7 @@ pub const cpu = struct {
.southern_islands,
}),
};
pub const tonga = Cpu{
pub const tonga = CpuModel{
.name = "tonga",
.llvm_name = "tonga",
.features = featureSet(&[_]Feature{
@ -1264,7 +1265,7 @@ pub const cpu = struct {
.volcanic_islands,
}),
};
pub const verde = Cpu{
pub const verde = CpuModel{
.name = "verde",
.llvm_name = "verde",
.features = featureSet(&[_]Feature{
@ -1279,7 +1280,7 @@ pub const cpu = struct {
/// All amdgpu CPUs, sorted alphabetically by name.
/// TODO: Replace this with usage of `std.meta.declList`. It does work, but stage1
/// compiler has inefficient memory and CPU usage, affecting build times.
pub const all_cpus = &[_]*const Cpu{
pub const all_cpus = &[_]*const CpuModel{
&cpu.bonaire,
&cpu.carrizo,
&cpu.fiji,

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,5 +1,6 @@
const std = @import("../std.zig");
const Cpu = std.Target.Cpu;
const CpuFeature = std.Target.Cpu.Feature;
const CpuModel = std.Target.Cpu.Model;
pub const Feature = enum {
alu32,
@ -7,12 +8,12 @@ pub const Feature = enum {
dwarfris,
};
pub usingnamespace Cpu.Feature.feature_set_fns(Feature);
pub usingnamespace CpuFeature.feature_set_fns(Feature);
pub const all_features = blk: {
const len = @typeInfo(Feature).Enum.fields.len;
std.debug.assert(len <= Cpu.Feature.Set.needed_bit_count);
var result: [len]Cpu.Feature = undefined;
std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
var result: [len]CpuFeature = undefined;
result[@enumToInt(Feature.alu32)] = .{
.llvm_name = "alu32",
.description = "Enable ALU32 instructions",
@ -37,27 +38,27 @@ pub const all_features = blk: {
};
pub const cpu = struct {
pub const generic = Cpu{
pub const generic = CpuModel{
.name = "generic",
.llvm_name = "generic",
.features = featureSet(&[_]Feature{}),
};
pub const probe = Cpu{
pub const probe = CpuModel{
.name = "probe",
.llvm_name = "probe",
.features = featureSet(&[_]Feature{}),
};
pub const v1 = Cpu{
pub const v1 = CpuModel{
.name = "v1",
.llvm_name = "v1",
.features = featureSet(&[_]Feature{}),
};
pub const v2 = Cpu{
pub const v2 = CpuModel{
.name = "v2",
.llvm_name = "v2",
.features = featureSet(&[_]Feature{}),
};
pub const v3 = Cpu{
pub const v3 = CpuModel{
.name = "v3",
.llvm_name = "v3",
.features = featureSet(&[_]Feature{}),
@ -67,7 +68,7 @@ pub const cpu = struct {
/// All bpf CPUs, sorted alphabetically by name.
/// TODO: Replace this with usage of `std.meta.declList`. It does work, but stage1
/// compiler has inefficient memory and CPU usage, affecting build times.
pub const all_cpus = &[_]*const Cpu{
pub const all_cpus = &[_]*const CpuModel{
&cpu.generic,
&cpu.probe,
&cpu.v1,

View File

@ -1,5 +1,6 @@
const std = @import("../std.zig");
const Cpu = std.Target.Cpu;
const CpuFeature = std.Target.Cpu.Feature;
const CpuModel = std.Target.Cpu.Model;
pub const Feature = enum {
duplex,
@ -28,12 +29,12 @@ pub const Feature = enum {
zreg,
};
pub usingnamespace Cpu.Feature.feature_set_fns(Feature);
pub usingnamespace CpuFeature.feature_set_fns(Feature);
pub const all_features = blk: {
const len = @typeInfo(Feature).Enum.fields.len;
std.debug.assert(len <= Cpu.Feature.Set.needed_bit_count);
var result: [len]Cpu.Feature = undefined;
std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
var result: [len]CpuFeature = undefined;
result[@enumToInt(Feature.duplex)] = .{
.llvm_name = "duplex",
.description = "Enable generation of duplex instruction",
@ -186,7 +187,7 @@ pub const all_features = blk: {
};
pub const cpu = struct {
pub const generic = Cpu{
pub const generic = CpuModel{
.name = "generic",
.llvm_name = "generic",
.features = featureSet(&[_]Feature{
@ -201,7 +202,7 @@ pub const cpu = struct {
.v60,
}),
};
pub const hexagonv5 = Cpu{
pub const hexagonv5 = CpuModel{
.name = "hexagonv5",
.llvm_name = "hexagonv5",
.features = featureSet(&[_]Feature{
@ -214,7 +215,7 @@ pub const cpu = struct {
.v5,
}),
};
pub const hexagonv55 = Cpu{
pub const hexagonv55 = CpuModel{
.name = "hexagonv55",
.llvm_name = "hexagonv55",
.features = featureSet(&[_]Feature{
@ -228,7 +229,7 @@ pub const cpu = struct {
.v55,
}),
};
pub const hexagonv60 = Cpu{
pub const hexagonv60 = CpuModel{
.name = "hexagonv60",
.llvm_name = "hexagonv60",
.features = featureSet(&[_]Feature{
@ -243,7 +244,7 @@ pub const cpu = struct {
.v60,
}),
};
pub const hexagonv62 = Cpu{
pub const hexagonv62 = CpuModel{
.name = "hexagonv62",
.llvm_name = "hexagonv62",
.features = featureSet(&[_]Feature{
@ -259,7 +260,7 @@ pub const cpu = struct {
.v62,
}),
};
pub const hexagonv65 = Cpu{
pub const hexagonv65 = CpuModel{
.name = "hexagonv65",
.llvm_name = "hexagonv65",
.features = featureSet(&[_]Feature{
@ -277,7 +278,7 @@ pub const cpu = struct {
.v65,
}),
};
pub const hexagonv66 = Cpu{
pub const hexagonv66 = CpuModel{
.name = "hexagonv66",
.llvm_name = "hexagonv66",
.features = featureSet(&[_]Feature{
@ -301,7 +302,7 @@ pub const cpu = struct {
/// All hexagon CPUs, sorted alphabetically by name.
/// TODO: Replace this with usage of `std.meta.declList`. It does work, but stage1
/// compiler has inefficient memory and CPU usage, affecting build times.
pub const all_cpus = &[_]*const Cpu{
pub const all_cpus = &[_]*const CpuModel{
&cpu.generic,
&cpu.hexagonv5,
&cpu.hexagonv55,

View File

@ -1,5 +1,6 @@
const std = @import("../std.zig");
const Cpu = std.Target.Cpu;
const CpuFeature = std.Target.Cpu.Feature;
const CpuModel = std.Target.Cpu.Model;
pub const Feature = enum {
abs2008,
@ -55,12 +56,12 @@ pub const Feature = enum {
xgot,
};
pub usingnamespace Cpu.Feature.feature_set_fns(Feature);
pub usingnamespace CpuFeature.feature_set_fns(Feature);
pub const all_features = blk: {
const len = @typeInfo(Feature).Enum.fields.len;
std.debug.assert(len <= Cpu.Feature.Set.needed_bit_count);
var result: [len]Cpu.Feature = undefined;
std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
var result: [len]CpuFeature = undefined;
result[@enumToInt(Feature.abs2008)] = .{
.llvm_name = "abs2008",
.description = "Disable IEEE 754-2008 abs.fmt mode",
@ -386,119 +387,119 @@ pub const all_features = blk: {
};
pub const cpu = struct {
pub const generic = Cpu{
pub const generic = CpuModel{
.name = "generic",
.llvm_name = "generic",
.features = featureSet(&[_]Feature{
.mips32,
}),
};
pub const mips1 = Cpu{
pub const mips1 = CpuModel{
.name = "mips1",
.llvm_name = "mips1",
.features = featureSet(&[_]Feature{
.mips1,
}),
};
pub const mips2 = Cpu{
pub const mips2 = CpuModel{
.name = "mips2",
.llvm_name = "mips2",
.features = featureSet(&[_]Feature{
.mips2,
}),
};
pub const mips3 = Cpu{
pub const mips3 = CpuModel{
.name = "mips3",
.llvm_name = "mips3",
.features = featureSet(&[_]Feature{
.mips3,
}),
};
pub const mips32 = Cpu{
pub const mips32 = CpuModel{
.name = "mips32",
.llvm_name = "mips32",
.features = featureSet(&[_]Feature{
.mips32,
}),
};
pub const mips32r2 = Cpu{
pub const mips32r2 = CpuModel{
.name = "mips32r2",
.llvm_name = "mips32r2",
.features = featureSet(&[_]Feature{
.mips32r2,
}),
};
pub const mips32r3 = Cpu{
pub const mips32r3 = CpuModel{
.name = "mips32r3",
.llvm_name = "mips32r3",
.features = featureSet(&[_]Feature{
.mips32r3,
}),
};
pub const mips32r5 = Cpu{
pub const mips32r5 = CpuModel{
.name = "mips32r5",
.llvm_name = "mips32r5",
.features = featureSet(&[_]Feature{
.mips32r5,
}),
};
pub const mips32r6 = Cpu{
pub const mips32r6 = CpuModel{
.name = "mips32r6",
.llvm_name = "mips32r6",
.features = featureSet(&[_]Feature{
.mips32r6,
}),
};
pub const mips4 = Cpu{
pub const mips4 = CpuModel{
.name = "mips4",
.llvm_name = "mips4",
.features = featureSet(&[_]Feature{
.mips4,
}),
};
pub const mips5 = Cpu{
pub const mips5 = CpuModel{
.name = "mips5",
.llvm_name = "mips5",
.features = featureSet(&[_]Feature{
.mips5,
}),
};
pub const mips64 = Cpu{
pub const mips64 = CpuModel{
.name = "mips64",
.llvm_name = "mips64",
.features = featureSet(&[_]Feature{
.mips64,
}),
};
pub const mips64r2 = Cpu{
pub const mips64r2 = CpuModel{
.name = "mips64r2",
.llvm_name = "mips64r2",
.features = featureSet(&[_]Feature{
.mips64r2,
}),
};
pub const mips64r3 = Cpu{
pub const mips64r3 = CpuModel{
.name = "mips64r3",
.llvm_name = "mips64r3",
.features = featureSet(&[_]Feature{
.mips64r3,
}),
};
pub const mips64r5 = Cpu{
pub const mips64r5 = CpuModel{
.name = "mips64r5",
.llvm_name = "mips64r5",
.features = featureSet(&[_]Feature{
.mips64r5,
}),
};
pub const mips64r6 = Cpu{
pub const mips64r6 = CpuModel{
.name = "mips64r6",
.llvm_name = "mips64r6",
.features = featureSet(&[_]Feature{
.mips64r6,
}),
};
pub const octeon = Cpu{
pub const octeon = CpuModel{
.name = "octeon",
.llvm_name = "octeon",
.features = featureSet(&[_]Feature{
@ -506,7 +507,7 @@ pub const cpu = struct {
.mips64r2,
}),
};
pub const @"octeon+" = Cpu{
pub const @"octeon+" = CpuModel{
.name = "octeon+",
.llvm_name = "octeon+",
.features = featureSet(&[_]Feature{
@ -515,7 +516,7 @@ pub const cpu = struct {
.mips64r2,
}),
};
pub const p5600 = Cpu{
pub const p5600 = CpuModel{
.name = "p5600",
.llvm_name = "p5600",
.features = featureSet(&[_]Feature{
@ -527,7 +528,7 @@ pub const cpu = struct {
/// All mips CPUs, sorted alphabetically by name.
/// TODO: Replace this with usage of `std.meta.declList`. It does work, but stage1
/// compiler has inefficient memory and CPU usage, affecting build times.
pub const all_cpus = &[_]*const Cpu{
pub const all_cpus = &[_]*const CpuModel{
&cpu.generic,
&cpu.mips1,
&cpu.mips2,

View File

@ -1,5 +1,6 @@
const std = @import("../std.zig");
const Cpu = std.Target.Cpu;
const CpuFeature = std.Target.Cpu.Feature;
const CpuModel = std.Target.Cpu.Model;
pub const Feature = enum {
ext,
@ -8,12 +9,12 @@ pub const Feature = enum {
hwmultf5,
};
pub usingnamespace Cpu.Feature.feature_set_fns(Feature);
pub usingnamespace CpuFeature.feature_set_fns(Feature);
pub const all_features = blk: {
const len = @typeInfo(Feature).Enum.fields.len;
std.debug.assert(len <= Cpu.Feature.Set.needed_bit_count);
var result: [len]Cpu.Feature = undefined;
std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
var result: [len]CpuFeature = undefined;
result[@enumToInt(Feature.ext)] = .{
.llvm_name = "ext",
.description = "Enable MSP430-X extensions",
@ -43,17 +44,17 @@ pub const all_features = blk: {
};
pub const cpu = struct {
pub const generic = Cpu{
pub const generic = CpuModel{
.name = "generic",
.llvm_name = "generic",
.features = featureSet(&[_]Feature{}),
};
pub const msp430 = Cpu{
pub const msp430 = CpuModel{
.name = "msp430",
.llvm_name = "msp430",
.features = featureSet(&[_]Feature{}),
};
pub const msp430x = Cpu{
pub const msp430x = CpuModel{
.name = "msp430x",
.llvm_name = "msp430x",
.features = featureSet(&[_]Feature{
@ -65,7 +66,7 @@ pub const cpu = struct {
/// All msp430 CPUs, sorted alphabetically by name.
/// TODO: Replace this with usage of `std.meta.declList`. It does work, but stage1
/// compiler has inefficient memory and CPU usage, affecting build times.
pub const all_cpus = &[_]*const Cpu{
pub const all_cpus = &[_]*const CpuModel{
&cpu.generic,
&cpu.msp430,
&cpu.msp430x,

View File

@ -1,5 +1,6 @@
const std = @import("../std.zig");
const Cpu = std.Target.Cpu;
const CpuFeature = std.Target.Cpu.Feature;
const CpuModel = std.Target.Cpu.Model;
pub const Feature = enum {
ptx32,
@ -29,12 +30,12 @@ pub const Feature = enum {
sm_75,
};
pub usingnamespace Cpu.Feature.feature_set_fns(Feature);
pub usingnamespace CpuFeature.feature_set_fns(Feature);
pub const all_features = blk: {
const len = @typeInfo(Feature).Enum.fields.len;
std.debug.assert(len <= Cpu.Feature.Set.needed_bit_count);
var result: [len]Cpu.Feature = undefined;
std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
var result: [len]CpuFeature = undefined;
result[@enumToInt(Feature.ptx32)] = .{
.llvm_name = "ptx32",
.description = "Use PTX version 3.2",
@ -169,28 +170,28 @@ pub const all_features = blk: {
};
pub const cpu = struct {
pub const sm_20 = Cpu{
pub const sm_20 = CpuModel{
.name = "sm_20",
.llvm_name = "sm_20",
.features = featureSet(&[_]Feature{
.sm_20,
}),
};
pub const sm_21 = Cpu{
pub const sm_21 = CpuModel{
.name = "sm_21",
.llvm_name = "sm_21",
.features = featureSet(&[_]Feature{
.sm_21,
}),
};
pub const sm_30 = Cpu{
pub const sm_30 = CpuModel{
.name = "sm_30",
.llvm_name = "sm_30",
.features = featureSet(&[_]Feature{
.sm_30,
}),
};
pub const sm_32 = Cpu{
pub const sm_32 = CpuModel{
.name = "sm_32",
.llvm_name = "sm_32",
.features = featureSet(&[_]Feature{
@ -198,14 +199,14 @@ pub const cpu = struct {
.sm_32,
}),
};
pub const sm_35 = Cpu{
pub const sm_35 = CpuModel{
.name = "sm_35",
.llvm_name = "sm_35",
.features = featureSet(&[_]Feature{
.sm_35,
}),
};
pub const sm_37 = Cpu{
pub const sm_37 = CpuModel{
.name = "sm_37",
.llvm_name = "sm_37",
.features = featureSet(&[_]Feature{
@ -213,7 +214,7 @@ pub const cpu = struct {
.sm_37,
}),
};
pub const sm_50 = Cpu{
pub const sm_50 = CpuModel{
.name = "sm_50",
.llvm_name = "sm_50",
.features = featureSet(&[_]Feature{
@ -221,7 +222,7 @@ pub const cpu = struct {
.sm_50,
}),
};
pub const sm_52 = Cpu{
pub const sm_52 = CpuModel{
.name = "sm_52",
.llvm_name = "sm_52",
.features = featureSet(&[_]Feature{
@ -229,7 +230,7 @@ pub const cpu = struct {
.sm_52,
}),
};
pub const sm_53 = Cpu{
pub const sm_53 = CpuModel{
.name = "sm_53",
.llvm_name = "sm_53",
.features = featureSet(&[_]Feature{
@ -237,7 +238,7 @@ pub const cpu = struct {
.sm_53,
}),
};
pub const sm_60 = Cpu{
pub const sm_60 = CpuModel{
.name = "sm_60",
.llvm_name = "sm_60",
.features = featureSet(&[_]Feature{
@ -245,7 +246,7 @@ pub const cpu = struct {
.sm_60,
}),
};
pub const sm_61 = Cpu{
pub const sm_61 = CpuModel{
.name = "sm_61",
.llvm_name = "sm_61",
.features = featureSet(&[_]Feature{
@ -253,7 +254,7 @@ pub const cpu = struct {
.sm_61,
}),
};
pub const sm_62 = Cpu{
pub const sm_62 = CpuModel{
.name = "sm_62",
.llvm_name = "sm_62",
.features = featureSet(&[_]Feature{
@ -261,7 +262,7 @@ pub const cpu = struct {
.sm_62,
}),
};
pub const sm_70 = Cpu{
pub const sm_70 = CpuModel{
.name = "sm_70",
.llvm_name = "sm_70",
.features = featureSet(&[_]Feature{
@ -269,7 +270,7 @@ pub const cpu = struct {
.sm_70,
}),
};
pub const sm_72 = Cpu{
pub const sm_72 = CpuModel{
.name = "sm_72",
.llvm_name = "sm_72",
.features = featureSet(&[_]Feature{
@ -277,7 +278,7 @@ pub const cpu = struct {
.sm_72,
}),
};
pub const sm_75 = Cpu{
pub const sm_75 = CpuModel{
.name = "sm_75",
.llvm_name = "sm_75",
.features = featureSet(&[_]Feature{
@ -290,7 +291,7 @@ pub const cpu = struct {
/// All nvptx CPUs, sorted alphabetically by name.
/// TODO: Replace this with usage of `std.meta.declList`. It does work, but stage1
/// compiler has inefficient memory and CPU usage, affecting build times.
pub const all_cpus = &[_]*const Cpu{
pub const all_cpus = &[_]*const CpuModel{
&cpu.sm_20,
&cpu.sm_21,
&cpu.sm_30,

View File

@ -1,5 +1,6 @@
const std = @import("../std.zig");
const Cpu = std.Target.Cpu;
const CpuFeature = std.Target.Cpu.Feature;
const CpuModel = std.Target.Cpu.Model;
pub const Feature = enum {
@"64bit",
@ -56,12 +57,12 @@ pub const Feature = enum {
vsx,
};
pub usingnamespace Cpu.Feature.feature_set_fns(Feature);
pub usingnamespace CpuFeature.feature_set_fns(Feature);
pub const all_features = blk: {
const len = @typeInfo(Feature).Enum.fields.len;
std.debug.assert(len <= Cpu.Feature.Set.needed_bit_count);
var result: [len]Cpu.Feature = undefined;
std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
var result: [len]CpuFeature = undefined;
result[@enumToInt(Feature.@"64bit")] = .{
.llvm_name = "64bit",
.description = "Enable 64-bit instructions",
@ -383,7 +384,7 @@ pub const all_features = blk: {
};
pub const cpu = struct {
pub const @"440" = Cpu{
pub const @"440" = CpuModel{
.name = "440",
.llvm_name = "440",
.features = featureSet(&[_]Feature{
@ -395,7 +396,7 @@ pub const cpu = struct {
.msync,
}),
};
pub const @"450" = Cpu{
pub const @"450" = CpuModel{
.name = "450",
.llvm_name = "450",
.features = featureSet(&[_]Feature{
@ -407,21 +408,21 @@ pub const cpu = struct {
.msync,
}),
};
pub const @"601" = Cpu{
pub const @"601" = CpuModel{
.name = "601",
.llvm_name = "601",
.features = featureSet(&[_]Feature{
.fpu,
}),
};
pub const @"602" = Cpu{
pub const @"602" = CpuModel{
.name = "602",
.llvm_name = "602",
.features = featureSet(&[_]Feature{
.fpu,
}),
};
pub const @"603" = Cpu{
pub const @"603" = CpuModel{
.name = "603",
.llvm_name = "603",
.features = featureSet(&[_]Feature{
@ -429,7 +430,7 @@ pub const cpu = struct {
.frsqrte,
}),
};
pub const @"603e" = Cpu{
pub const @"603e" = CpuModel{
.name = "603e",
.llvm_name = "603e",
.features = featureSet(&[_]Feature{
@ -437,7 +438,7 @@ pub const cpu = struct {
.frsqrte,
}),
};
pub const @"603ev" = Cpu{
pub const @"603ev" = CpuModel{
.name = "603ev",
.llvm_name = "603ev",
.features = featureSet(&[_]Feature{
@ -445,7 +446,7 @@ pub const cpu = struct {
.frsqrte,
}),
};
pub const @"604" = Cpu{
pub const @"604" = CpuModel{
.name = "604",
.llvm_name = "604",
.features = featureSet(&[_]Feature{
@ -453,7 +454,7 @@ pub const cpu = struct {
.frsqrte,
}),
};
pub const @"604e" = Cpu{
pub const @"604e" = CpuModel{
.name = "604e",
.llvm_name = "604e",
.features = featureSet(&[_]Feature{
@ -461,7 +462,7 @@ pub const cpu = struct {
.frsqrte,
}),
};
pub const @"620" = Cpu{
pub const @"620" = CpuModel{
.name = "620",
.llvm_name = "620",
.features = featureSet(&[_]Feature{
@ -469,7 +470,7 @@ pub const cpu = struct {
.frsqrte,
}),
};
pub const @"7400" = Cpu{
pub const @"7400" = CpuModel{
.name = "7400",
.llvm_name = "7400",
.features = featureSet(&[_]Feature{
@ -478,7 +479,7 @@ pub const cpu = struct {
.frsqrte,
}),
};
pub const @"7450" = Cpu{
pub const @"7450" = CpuModel{
.name = "7450",
.llvm_name = "7450",
.features = featureSet(&[_]Feature{
@ -487,7 +488,7 @@ pub const cpu = struct {
.frsqrte,
}),
};
pub const @"750" = Cpu{
pub const @"750" = CpuModel{
.name = "750",
.llvm_name = "750",
.features = featureSet(&[_]Feature{
@ -495,7 +496,7 @@ pub const cpu = struct {
.frsqrte,
}),
};
pub const @"970" = Cpu{
pub const @"970" = CpuModel{
.name = "970",
.llvm_name = "970",
.features = featureSet(&[_]Feature{
@ -508,7 +509,7 @@ pub const cpu = struct {
.stfiwx,
}),
};
pub const a2 = Cpu{
pub const a2 = CpuModel{
.name = "a2",
.llvm_name = "a2",
.features = featureSet(&[_]Feature{
@ -533,7 +534,7 @@ pub const cpu = struct {
.stfiwx,
}),
};
pub const a2q = Cpu{
pub const a2q = CpuModel{
.name = "a2q",
.llvm_name = "a2q",
.features = featureSet(&[_]Feature{
@ -559,7 +560,7 @@ pub const cpu = struct {
.stfiwx,
}),
};
pub const e500 = Cpu{
pub const e500 = CpuModel{
.name = "e500",
.llvm_name = "e500",
.features = featureSet(&[_]Feature{
@ -569,7 +570,7 @@ pub const cpu = struct {
.spe,
}),
};
pub const e500mc = Cpu{
pub const e500mc = CpuModel{
.name = "e500mc",
.llvm_name = "e500mc",
.features = featureSet(&[_]Feature{
@ -579,7 +580,7 @@ pub const cpu = struct {
.stfiwx,
}),
};
pub const e5500 = Cpu{
pub const e5500 = CpuModel{
.name = "e5500",
.llvm_name = "e5500",
.features = featureSet(&[_]Feature{
@ -591,7 +592,7 @@ pub const cpu = struct {
.stfiwx,
}),
};
pub const future = Cpu{
pub const future = CpuModel{
.name = "future",
.llvm_name = "future",
.features = featureSet(&[_]Feature{
@ -630,7 +631,7 @@ pub const cpu = struct {
.vsx,
}),
};
pub const g3 = Cpu{
pub const g3 = CpuModel{
.name = "g3",
.llvm_name = "g3",
.features = featureSet(&[_]Feature{
@ -638,7 +639,7 @@ pub const cpu = struct {
.frsqrte,
}),
};
pub const g4 = Cpu{
pub const g4 = CpuModel{
.name = "g4",
.llvm_name = "g4",
.features = featureSet(&[_]Feature{
@ -647,7 +648,7 @@ pub const cpu = struct {
.frsqrte,
}),
};
pub const @"g4+" = Cpu{
pub const @"g4+" = CpuModel{
.name = "g4+",
.llvm_name = "g4+",
.features = featureSet(&[_]Feature{
@ -656,7 +657,7 @@ pub const cpu = struct {
.frsqrte,
}),
};
pub const g5 = Cpu{
pub const g5 = CpuModel{
.name = "g5",
.llvm_name = "g5",
.features = featureSet(&[_]Feature{
@ -669,28 +670,28 @@ pub const cpu = struct {
.stfiwx,
}),
};
pub const generic = Cpu{
pub const generic = CpuModel{
.name = "generic",
.llvm_name = "generic",
.features = featureSet(&[_]Feature{
.hard_float,
}),
};
pub const ppc = Cpu{
pub const ppc = CpuModel{
.name = "ppc",
.llvm_name = "ppc",
.features = featureSet(&[_]Feature{
.hard_float,
}),
};
pub const ppc32 = Cpu{
pub const ppc32 = CpuModel{
.name = "ppc32",
.llvm_name = "ppc32",
.features = featureSet(&[_]Feature{
.hard_float,
}),
};
pub const ppc64 = Cpu{
pub const ppc64 = CpuModel{
.name = "ppc64",
.llvm_name = "ppc64",
.features = featureSet(&[_]Feature{
@ -703,7 +704,7 @@ pub const cpu = struct {
.stfiwx,
}),
};
pub const ppc64le = Cpu{
pub const ppc64le = CpuModel{
.name = "ppc64le",
.llvm_name = "ppc64le",
.features = featureSet(&[_]Feature{
@ -739,7 +740,7 @@ pub const cpu = struct {
.vsx,
}),
};
pub const pwr3 = Cpu{
pub const pwr3 = CpuModel{
.name = "pwr3",
.llvm_name = "pwr3",
.features = featureSet(&[_]Feature{
@ -751,7 +752,7 @@ pub const cpu = struct {
.stfiwx,
}),
};
pub const pwr4 = Cpu{
pub const pwr4 = CpuModel{
.name = "pwr4",
.llvm_name = "pwr4",
.features = featureSet(&[_]Feature{
@ -764,7 +765,7 @@ pub const cpu = struct {
.stfiwx,
}),
};
pub const pwr5 = Cpu{
pub const pwr5 = CpuModel{
.name = "pwr5",
.llvm_name = "pwr5",
.features = featureSet(&[_]Feature{
@ -779,7 +780,7 @@ pub const cpu = struct {
.stfiwx,
}),
};
pub const pwr5x = Cpu{
pub const pwr5x = CpuModel{
.name = "pwr5x",
.llvm_name = "pwr5x",
.features = featureSet(&[_]Feature{
@ -795,7 +796,7 @@ pub const cpu = struct {
.stfiwx,
}),
};
pub const pwr6 = Cpu{
pub const pwr6 = CpuModel{
.name = "pwr6",
.llvm_name = "pwr6",
.features = featureSet(&[_]Feature{
@ -815,7 +816,7 @@ pub const cpu = struct {
.stfiwx,
}),
};
pub const pwr6x = Cpu{
pub const pwr6x = CpuModel{
.name = "pwr6x",
.llvm_name = "pwr6x",
.features = featureSet(&[_]Feature{
@ -835,7 +836,7 @@ pub const cpu = struct {
.stfiwx,
}),
};
pub const pwr7 = Cpu{
pub const pwr7 = CpuModel{
.name = "pwr7",
.llvm_name = "pwr7",
.features = featureSet(&[_]Feature{
@ -864,7 +865,7 @@ pub const cpu = struct {
.vsx,
}),
};
pub const pwr8 = Cpu{
pub const pwr8 = CpuModel{
.name = "pwr8",
.llvm_name = "pwr8",
.features = featureSet(&[_]Feature{
@ -900,7 +901,7 @@ pub const cpu = struct {
.vsx,
}),
};
pub const pwr9 = Cpu{
pub const pwr9 = CpuModel{
.name = "pwr9",
.llvm_name = "pwr9",
.features = featureSet(&[_]Feature{
@ -947,7 +948,7 @@ pub const cpu = struct {
/// All powerpc CPUs, sorted alphabetically by name.
/// TODO: Replace this with usage of `std.meta.declList`. It does work, but stage1
/// compiler has inefficient memory and CPU usage, affecting build times.
pub const all_cpus = &[_]*const Cpu{
pub const all_cpus = &[_]*const CpuModel{
&cpu.@"440",
&cpu.@"450",
&cpu.@"601",

View File

@ -1,5 +1,6 @@
const std = @import("../std.zig");
const Cpu = std.Target.Cpu;
const CpuFeature = std.Target.Cpu.Feature;
const CpuModel = std.Target.Cpu.Model;
pub const Feature = enum {
@"64bit",
@ -44,12 +45,12 @@ pub const Feature = enum {
rvc_hints,
};
pub usingnamespace Cpu.Feature.feature_set_fns(Feature);
pub usingnamespace CpuFeature.feature_set_fns(Feature);
pub const all_features = blk: {
const len = @typeInfo(Feature).Enum.fields.len;
std.debug.assert(len <= Cpu.Feature.Set.needed_bit_count);
var result: [len]Cpu.Feature = undefined;
std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
var result: [len]CpuFeature = undefined;
result[@enumToInt(Feature.@"64bit")] = .{
.llvm_name = "64bit",
.description = "Implements RV64",
@ -261,7 +262,7 @@ pub const all_features = blk: {
};
pub const cpu = struct {
pub const baseline_rv32 = Cpu{
pub const baseline_rv32 = CpuModel{
.name = "baseline_rv32",
.llvm_name = null,
.features = featureSet(&[_]Feature{
@ -273,7 +274,7 @@ pub const cpu = struct {
}),
};
pub const baseline_rv64 = Cpu{
pub const baseline_rv64 = CpuModel{
.name = "baseline_rv64",
.llvm_name = null,
.features = featureSet(&[_]Feature{
@ -286,14 +287,14 @@ pub const cpu = struct {
}),
};
pub const generic_rv32 = Cpu{
pub const generic_rv32 = CpuModel{
.name = "generic_rv32",
.llvm_name = null,
.features = featureSet(&[_]Feature{
.rvc_hints,
}),
};
pub const generic_rv64 = Cpu{
pub const generic_rv64 = CpuModel{
.name = "generic_rv64",
.llvm_name = null,
.features = featureSet(&[_]Feature{
@ -306,7 +307,7 @@ pub const cpu = struct {
/// All riscv CPUs, sorted alphabetically by name.
/// TODO: Replace this with usage of `std.meta.declList`. It does work, but stage1
/// compiler has inefficient memory and CPU usage, affecting build times.
pub const all_cpus = &[_]*const Cpu{
pub const all_cpus = &[_]*const CpuModel{
&cpu.baseline_rv32,
&cpu.baseline_rv64,
&cpu.generic_rv32,

View File

@ -1,5 +1,6 @@
const std = @import("../std.zig");
const Cpu = std.Target.Cpu;
const CpuFeature = std.Target.Cpu.Feature;
const CpuModel = std.Target.Cpu.Model;
pub const Feature = enum {
deprecated_v8,
@ -23,12 +24,12 @@ pub const Feature = enum {
vis3,
};
pub usingnamespace Cpu.Feature.feature_set_fns(Feature);
pub usingnamespace CpuFeature.feature_set_fns(Feature);
pub const all_features = blk: {
const len = @typeInfo(Feature).Enum.fields.len;
std.debug.assert(len <= Cpu.Feature.Set.needed_bit_count);
var result: [len]Cpu.Feature = undefined;
std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
var result: [len]CpuFeature = undefined;
result[@enumToInt(Feature.deprecated_v8)] = .{
.llvm_name = "deprecated-v8",
.description = "Enable deprecated V8 instructions in V9 mode",
@ -133,7 +134,7 @@ pub const all_features = blk: {
};
pub const cpu = struct {
pub const at697e = Cpu{
pub const at697e = CpuModel{
.name = "at697e",
.llvm_name = "at697e",
.features = featureSet(&[_]Feature{
@ -141,7 +142,7 @@ pub const cpu = struct {
.leon,
}),
};
pub const at697f = Cpu{
pub const at697f = CpuModel{
.name = "at697f",
.llvm_name = "at697f",
.features = featureSet(&[_]Feature{
@ -149,17 +150,17 @@ pub const cpu = struct {
.leon,
}),
};
pub const f934 = Cpu{
pub const f934 = CpuModel{
.name = "f934",
.llvm_name = "f934",
.features = featureSet(&[_]Feature{}),
};
pub const generic = Cpu{
pub const generic = CpuModel{
.name = "generic",
.llvm_name = "generic",
.features = featureSet(&[_]Feature{}),
};
pub const gr712rc = Cpu{
pub const gr712rc = CpuModel{
.name = "gr712rc",
.llvm_name = "gr712rc",
.features = featureSet(&[_]Feature{
@ -167,7 +168,7 @@ pub const cpu = struct {
.leon,
}),
};
pub const gr740 = Cpu{
pub const gr740 = CpuModel{
.name = "gr740",
.llvm_name = "gr740",
.features = featureSet(&[_]Feature{
@ -178,19 +179,19 @@ pub const cpu = struct {
.leonpwrpsr,
}),
};
pub const hypersparc = Cpu{
pub const hypersparc = CpuModel{
.name = "hypersparc",
.llvm_name = "hypersparc",
.features = featureSet(&[_]Feature{}),
};
pub const leon2 = Cpu{
pub const leon2 = CpuModel{
.name = "leon2",
.llvm_name = "leon2",
.features = featureSet(&[_]Feature{
.leon,
}),
};
pub const leon3 = Cpu{
pub const leon3 = CpuModel{
.name = "leon3",
.llvm_name = "leon3",
.features = featureSet(&[_]Feature{
@ -198,7 +199,7 @@ pub const cpu = struct {
.leon,
}),
};
pub const leon4 = Cpu{
pub const leon4 = CpuModel{
.name = "leon4",
.llvm_name = "leon4",
.features = featureSet(&[_]Feature{
@ -207,7 +208,7 @@ pub const cpu = struct {
.leon,
}),
};
pub const ma2080 = Cpu{
pub const ma2080 = CpuModel{
.name = "ma2080",
.llvm_name = "ma2080",
.features = featureSet(&[_]Feature{
@ -215,7 +216,7 @@ pub const cpu = struct {
.leon,
}),
};
pub const ma2085 = Cpu{
pub const ma2085 = CpuModel{
.name = "ma2085",
.llvm_name = "ma2085",
.features = featureSet(&[_]Feature{
@ -223,7 +224,7 @@ pub const cpu = struct {
.leon,
}),
};
pub const ma2100 = Cpu{
pub const ma2100 = CpuModel{
.name = "ma2100",
.llvm_name = "ma2100",
.features = featureSet(&[_]Feature{
@ -231,7 +232,7 @@ pub const cpu = struct {
.leon,
}),
};
pub const ma2150 = Cpu{
pub const ma2150 = CpuModel{
.name = "ma2150",
.llvm_name = "ma2150",
.features = featureSet(&[_]Feature{
@ -239,7 +240,7 @@ pub const cpu = struct {
.leon,
}),
};
pub const ma2155 = Cpu{
pub const ma2155 = CpuModel{
.name = "ma2155",
.llvm_name = "ma2155",
.features = featureSet(&[_]Feature{
@ -247,7 +248,7 @@ pub const cpu = struct {
.leon,
}),
};
pub const ma2450 = Cpu{
pub const ma2450 = CpuModel{
.name = "ma2450",
.llvm_name = "ma2450",
.features = featureSet(&[_]Feature{
@ -255,7 +256,7 @@ pub const cpu = struct {
.leon,
}),
};
pub const ma2455 = Cpu{
pub const ma2455 = CpuModel{
.name = "ma2455",
.llvm_name = "ma2455",
.features = featureSet(&[_]Feature{
@ -263,7 +264,7 @@ pub const cpu = struct {
.leon,
}),
};
pub const ma2480 = Cpu{
pub const ma2480 = CpuModel{
.name = "ma2480",
.llvm_name = "ma2480",
.features = featureSet(&[_]Feature{
@ -271,7 +272,7 @@ pub const cpu = struct {
.leon,
}),
};
pub const ma2485 = Cpu{
pub const ma2485 = CpuModel{
.name = "ma2485",
.llvm_name = "ma2485",
.features = featureSet(&[_]Feature{
@ -279,7 +280,7 @@ pub const cpu = struct {
.leon,
}),
};
pub const ma2x5x = Cpu{
pub const ma2x5x = CpuModel{
.name = "ma2x5x",
.llvm_name = "ma2x5x",
.features = featureSet(&[_]Feature{
@ -287,7 +288,7 @@ pub const cpu = struct {
.leon,
}),
};
pub const ma2x8x = Cpu{
pub const ma2x8x = CpuModel{
.name = "ma2x8x",
.llvm_name = "ma2x8x",
.features = featureSet(&[_]Feature{
@ -295,7 +296,7 @@ pub const cpu = struct {
.leon,
}),
};
pub const myriad2 = Cpu{
pub const myriad2 = CpuModel{
.name = "myriad2",
.llvm_name = "myriad2",
.features = featureSet(&[_]Feature{
@ -303,7 +304,7 @@ pub const cpu = struct {
.leon,
}),
};
pub const myriad2_1 = Cpu{
pub const myriad2_1 = CpuModel{
.name = "myriad2_1",
.llvm_name = "myriad2.1",
.features = featureSet(&[_]Feature{
@ -311,7 +312,7 @@ pub const cpu = struct {
.leon,
}),
};
pub const myriad2_2 = Cpu{
pub const myriad2_2 = CpuModel{
.name = "myriad2_2",
.llvm_name = "myriad2.2",
.features = featureSet(&[_]Feature{
@ -319,7 +320,7 @@ pub const cpu = struct {
.leon,
}),
};
pub const myriad2_3 = Cpu{
pub const myriad2_3 = CpuModel{
.name = "myriad2_3",
.llvm_name = "myriad2.3",
.features = featureSet(&[_]Feature{
@ -327,7 +328,7 @@ pub const cpu = struct {
.leon,
}),
};
pub const niagara = Cpu{
pub const niagara = CpuModel{
.name = "niagara",
.llvm_name = "niagara",
.features = featureSet(&[_]Feature{
@ -337,7 +338,7 @@ pub const cpu = struct {
.vis2,
}),
};
pub const niagara2 = Cpu{
pub const niagara2 = CpuModel{
.name = "niagara2",
.llvm_name = "niagara2",
.features = featureSet(&[_]Feature{
@ -348,7 +349,7 @@ pub const cpu = struct {
.vis2,
}),
};
pub const niagara3 = Cpu{
pub const niagara3 = CpuModel{
.name = "niagara3",
.llvm_name = "niagara3",
.features = featureSet(&[_]Feature{
@ -359,7 +360,7 @@ pub const cpu = struct {
.vis2,
}),
};
pub const niagara4 = Cpu{
pub const niagara4 = CpuModel{
.name = "niagara4",
.llvm_name = "niagara4",
.features = featureSet(&[_]Feature{
@ -371,32 +372,32 @@ pub const cpu = struct {
.vis3,
}),
};
pub const sparclet = Cpu{
pub const sparclet = CpuModel{
.name = "sparclet",
.llvm_name = "sparclet",
.features = featureSet(&[_]Feature{}),
};
pub const sparclite = Cpu{
pub const sparclite = CpuModel{
.name = "sparclite",
.llvm_name = "sparclite",
.features = featureSet(&[_]Feature{}),
};
pub const sparclite86x = Cpu{
pub const sparclite86x = CpuModel{
.name = "sparclite86x",
.llvm_name = "sparclite86x",
.features = featureSet(&[_]Feature{}),
};
pub const supersparc = Cpu{
pub const supersparc = CpuModel{
.name = "supersparc",
.llvm_name = "supersparc",
.features = featureSet(&[_]Feature{}),
};
pub const tsc701 = Cpu{
pub const tsc701 = CpuModel{
.name = "tsc701",
.llvm_name = "tsc701",
.features = featureSet(&[_]Feature{}),
};
pub const ultrasparc = Cpu{
pub const ultrasparc = CpuModel{
.name = "ultrasparc",
.llvm_name = "ultrasparc",
.features = featureSet(&[_]Feature{
@ -405,7 +406,7 @@ pub const cpu = struct {
.vis,
}),
};
pub const ultrasparc3 = Cpu{
pub const ultrasparc3 = CpuModel{
.name = "ultrasparc3",
.llvm_name = "ultrasparc3",
.features = featureSet(&[_]Feature{
@ -415,7 +416,7 @@ pub const cpu = struct {
.vis2,
}),
};
pub const ut699 = Cpu{
pub const ut699 = CpuModel{
.name = "ut699",
.llvm_name = "ut699",
.features = featureSet(&[_]Feature{
@ -426,7 +427,7 @@ pub const cpu = struct {
.no_fsmuld,
}),
};
pub const v7 = Cpu{
pub const v7 = CpuModel{
.name = "v7",
.llvm_name = "v7",
.features = featureSet(&[_]Feature{
@ -434,12 +435,12 @@ pub const cpu = struct {
.soft_mul_div,
}),
};
pub const v8 = Cpu{
pub const v8 = CpuModel{
.name = "v8",
.llvm_name = "v8",
.features = featureSet(&[_]Feature{}),
};
pub const v9 = Cpu{
pub const v9 = CpuModel{
.name = "v9",
.llvm_name = "v9",
.features = featureSet(&[_]Feature{
@ -451,7 +452,7 @@ pub const cpu = struct {
/// All sparc CPUs, sorted alphabetically by name.
/// TODO: Replace this with usage of `std.meta.declList`. It does work, but stage1
/// compiler has inefficient memory and CPU usage, affecting build times.
pub const all_cpus = &[_]*const Cpu{
pub const all_cpus = &[_]*const CpuModel{
&cpu.at697e,
&cpu.at697f,
&cpu.f934,

View File

@ -1,5 +1,6 @@
const std = @import("../std.zig");
const Cpu = std.Target.Cpu;
const CpuFeature = std.Target.Cpu.Feature;
const CpuModel = std.Target.Cpu.Model;
pub const Feature = enum {
deflate_conversion,
@ -39,12 +40,12 @@ pub const Feature = enum {
vector_packed_decimal_enhancement,
};
pub usingnamespace Cpu.Feature.feature_set_fns(Feature);
pub usingnamespace CpuFeature.feature_set_fns(Feature);
pub const all_features = blk: {
const len = @typeInfo(Feature).Enum.fields.len;
std.debug.assert(len <= Cpu.Feature.Set.needed_bit_count);
var result: [len]Cpu.Feature = undefined;
std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
var result: [len]CpuFeature = undefined;
result[@enumToInt(Feature.deflate_conversion)] = .{
.llvm_name = "deflate-conversion",
.description = "Assume that the deflate-conversion facility is installed",
@ -229,7 +230,7 @@ pub const all_features = blk: {
};
pub const cpu = struct {
pub const arch10 = Cpu{
pub const arch10 = CpuModel{
.name = "arch10",
.llvm_name = "arch10",
.features = featureSet(&[_]Feature{
@ -252,7 +253,7 @@ pub const cpu = struct {
.transactional_execution,
}),
};
pub const arch11 = Cpu{
pub const arch11 = CpuModel{
.name = "arch11",
.llvm_name = "arch11",
.features = featureSet(&[_]Feature{
@ -280,7 +281,7 @@ pub const cpu = struct {
.vector,
}),
};
pub const arch12 = Cpu{
pub const arch12 = CpuModel{
.name = "arch12",
.llvm_name = "arch12",
.features = featureSet(&[_]Feature{
@ -315,7 +316,7 @@ pub const cpu = struct {
.vector_packed_decimal,
}),
};
pub const arch13 = Cpu{
pub const arch13 = CpuModel{
.name = "arch13",
.llvm_name = "arch13",
.features = featureSet(&[_]Feature{
@ -356,12 +357,12 @@ pub const cpu = struct {
.vector_packed_decimal_enhancement,
}),
};
pub const arch8 = Cpu{
pub const arch8 = CpuModel{
.name = "arch8",
.llvm_name = "arch8",
.features = featureSet(&[_]Feature{}),
};
pub const arch9 = Cpu{
pub const arch9 = CpuModel{
.name = "arch9",
.llvm_name = "arch9",
.features = featureSet(&[_]Feature{
@ -377,17 +378,17 @@ pub const cpu = struct {
.reset_reference_bits_multiple,
}),
};
pub const generic = Cpu{
pub const generic = CpuModel{
.name = "generic",
.llvm_name = "generic",
.features = featureSet(&[_]Feature{}),
};
pub const z10 = Cpu{
pub const z10 = CpuModel{
.name = "z10",
.llvm_name = "z10",
.features = featureSet(&[_]Feature{}),
};
pub const z13 = Cpu{
pub const z13 = CpuModel{
.name = "z13",
.llvm_name = "z13",
.features = featureSet(&[_]Feature{
@ -415,7 +416,7 @@ pub const cpu = struct {
.vector,
}),
};
pub const z14 = Cpu{
pub const z14 = CpuModel{
.name = "z14",
.llvm_name = "z14",
.features = featureSet(&[_]Feature{
@ -450,7 +451,7 @@ pub const cpu = struct {
.vector_packed_decimal,
}),
};
pub const z15 = Cpu{
pub const z15 = CpuModel{
.name = "z15",
.llvm_name = "z15",
.features = featureSet(&[_]Feature{
@ -491,7 +492,7 @@ pub const cpu = struct {
.vector_packed_decimal_enhancement,
}),
};
pub const z196 = Cpu{
pub const z196 = CpuModel{
.name = "z196",
.llvm_name = "z196",
.features = featureSet(&[_]Feature{
@ -507,7 +508,7 @@ pub const cpu = struct {
.reset_reference_bits_multiple,
}),
};
pub const zEC12 = Cpu{
pub const zEC12 = CpuModel{
.name = "zEC12",
.llvm_name = "zEC12",
.features = featureSet(&[_]Feature{
@ -535,7 +536,7 @@ pub const cpu = struct {
/// All systemz CPUs, sorted alphabetically by name.
/// TODO: Replace this with usage of `std.meta.declList`. It does work, but stage1
/// compiler has inefficient memory and CPU usage, affecting build times.
pub const all_cpus = &[_]*const Cpu{
pub const all_cpus = &[_]*const CpuModel{
&cpu.arch10,
&cpu.arch11,
&cpu.arch12,

View File

@ -1,5 +1,6 @@
const std = @import("../std.zig");
const Cpu = std.Target.Cpu;
const CpuFeature = std.Target.Cpu.Feature;
const CpuModel = std.Target.Cpu.Model;
pub const Feature = enum {
atomics,
@ -14,12 +15,12 @@ pub const Feature = enum {
unimplemented_simd128,
};
pub usingnamespace Cpu.Feature.feature_set_fns(Feature);
pub usingnamespace CpuFeature.feature_set_fns(Feature);
pub const all_features = blk: {
const len = @typeInfo(Feature).Enum.fields.len;
std.debug.assert(len <= Cpu.Feature.Set.needed_bit_count);
var result: [len]Cpu.Feature = undefined;
std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
var result: [len]CpuFeature = undefined;
result[@enumToInt(Feature.atomics)] = .{
.llvm_name = "atomics",
.description = "Enable Atomics",
@ -81,7 +82,7 @@ pub const all_features = blk: {
};
pub const cpu = struct {
pub const bleeding_edge = Cpu{
pub const bleeding_edge = CpuModel{
.name = "bleeding_edge",
.llvm_name = "bleeding-edge",
.features = featureSet(&[_]Feature{
@ -92,12 +93,12 @@ pub const cpu = struct {
.simd128,
}),
};
pub const generic = Cpu{
pub const generic = CpuModel{
.name = "generic",
.llvm_name = "generic",
.features = featureSet(&[_]Feature{}),
};
pub const mvp = Cpu{
pub const mvp = CpuModel{
.name = "mvp",
.llvm_name = "mvp",
.features = featureSet(&[_]Feature{}),
@ -107,7 +108,7 @@ pub const cpu = struct {
/// All wasm CPUs, sorted alphabetically by name.
/// TODO: Replace this with usage of `std.meta.declList`. It does work, but stage1
/// compiler has inefficient memory and CPU usage, affecting build times.
pub const all_cpus = &[_]*const Cpu{
pub const all_cpus = &[_]*const CpuModel{
&cpu.bleeding_edge,
&cpu.generic,
&cpu.mvp,

View File

@ -1,5 +1,6 @@
const std = @import("../std.zig");
const Cpu = std.Target.Cpu;
const CpuFeature = std.Target.Cpu.Feature;
const CpuModel = std.Target.Cpu.Model;
pub const Feature = enum {
@"3dnow",
@ -129,12 +130,12 @@ pub const Feature = enum {
xsaves,
};
pub usingnamespace Cpu.Feature.feature_set_fns(Feature);
pub usingnamespace CpuFeature.feature_set_fns(Feature);
pub const all_features = blk: {
const len = @typeInfo(Feature).Enum.fields.len;
std.debug.assert(len <= Cpu.Feature.Set.needed_bit_count);
var result: [len]Cpu.Feature = undefined;
std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
var result: [len]CpuFeature = undefined;
result[@enumToInt(Feature.@"3dnow")] = .{
.llvm_name = "3dnow",
.description = "Enable 3DNow! instructions",
@ -851,7 +852,7 @@ pub const all_features = blk: {
};
pub const cpu = struct {
pub const amdfam10 = Cpu{
pub const amdfam10 = CpuModel{
.name = "amdfam10",
.llvm_name = "amdfam10",
.features = featureSet(&[_]Feature{
@ -872,7 +873,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const athlon = Cpu{
pub const athlon = CpuModel{
.name = "athlon",
.llvm_name = "athlon",
.features = featureSet(&[_]Feature{
@ -886,7 +887,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const athlon_4 = Cpu{
pub const athlon_4 = CpuModel{
.name = "athlon_4",
.llvm_name = "athlon-4",
.features = featureSet(&[_]Feature{
@ -902,7 +903,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const athlon_fx = Cpu{
pub const athlon_fx = CpuModel{
.name = "athlon_fx",
.llvm_name = "athlon-fx",
.features = featureSet(&[_]Feature{
@ -920,7 +921,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const athlon_mp = Cpu{
pub const athlon_mp = CpuModel{
.name = "athlon_mp",
.llvm_name = "athlon-mp",
.features = featureSet(&[_]Feature{
@ -936,7 +937,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const athlon_tbird = Cpu{
pub const athlon_tbird = CpuModel{
.name = "athlon_tbird",
.llvm_name = "athlon-tbird",
.features = featureSet(&[_]Feature{
@ -950,7 +951,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const athlon_xp = Cpu{
pub const athlon_xp = CpuModel{
.name = "athlon_xp",
.llvm_name = "athlon-xp",
.features = featureSet(&[_]Feature{
@ -966,7 +967,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const athlon64 = Cpu{
pub const athlon64 = CpuModel{
.name = "athlon64",
.llvm_name = "athlon64",
.features = featureSet(&[_]Feature{
@ -984,7 +985,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const athlon64_sse3 = Cpu{
pub const athlon64_sse3 = CpuModel{
.name = "athlon64_sse3",
.llvm_name = "athlon64-sse3",
.features = featureSet(&[_]Feature{
@ -1003,7 +1004,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const atom = Cpu{
pub const atom = CpuModel{
.name = "atom",
.llvm_name = "atom",
.features = featureSet(&[_]Feature{
@ -1028,7 +1029,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const barcelona = Cpu{
pub const barcelona = CpuModel{
.name = "barcelona",
.llvm_name = "barcelona",
.features = featureSet(&[_]Feature{
@ -1049,7 +1050,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const bdver1 = Cpu{
pub const bdver1 = CpuModel{
.name = "bdver1",
.llvm_name = "bdver1",
.features = featureSet(&[_]Feature{
@ -1077,7 +1078,7 @@ pub const cpu = struct {
.xsave,
}),
};
pub const bdver2 = Cpu{
pub const bdver2 = CpuModel{
.name = "bdver2",
.llvm_name = "bdver2",
.features = featureSet(&[_]Feature{
@ -1110,7 +1111,7 @@ pub const cpu = struct {
.xsave,
}),
};
pub const bdver3 = Cpu{
pub const bdver3 = CpuModel{
.name = "bdver3",
.llvm_name = "bdver3",
.features = featureSet(&[_]Feature{
@ -1145,7 +1146,7 @@ pub const cpu = struct {
.xsaveopt,
}),
};
pub const bdver4 = Cpu{
pub const bdver4 = CpuModel{
.name = "bdver4",
.llvm_name = "bdver4",
.features = featureSet(&[_]Feature{
@ -1183,7 +1184,7 @@ pub const cpu = struct {
.xsaveopt,
}),
};
pub const bonnell = Cpu{
pub const bonnell = CpuModel{
.name = "bonnell",
.llvm_name = "bonnell",
.features = featureSet(&[_]Feature{
@ -1208,7 +1209,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const broadwell = Cpu{
pub const broadwell = CpuModel{
.name = "broadwell",
.llvm_name = "broadwell",
.features = featureSet(&[_]Feature{
@ -1253,7 +1254,7 @@ pub const cpu = struct {
.xsaveopt,
}),
};
pub const btver1 = Cpu{
pub const btver1 = CpuModel{
.name = "btver1",
.llvm_name = "btver1",
.features = featureSet(&[_]Feature{
@ -1278,7 +1279,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const btver2 = Cpu{
pub const btver2 = CpuModel{
.name = "btver2",
.llvm_name = "btver2",
.features = featureSet(&[_]Feature{
@ -1313,7 +1314,7 @@ pub const cpu = struct {
.xsaveopt,
}),
};
pub const c3 = Cpu{
pub const c3 = CpuModel{
.name = "c3",
.llvm_name = "c3",
.features = featureSet(&[_]Feature{
@ -1323,7 +1324,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const c3_2 = Cpu{
pub const c3_2 = CpuModel{
.name = "c3_2",
.llvm_name = "c3-2",
.features = featureSet(&[_]Feature{
@ -1337,7 +1338,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const cannonlake = Cpu{
pub const cannonlake = CpuModel{
.name = "cannonlake",
.llvm_name = "cannonlake",
.features = featureSet(&[_]Feature{
@ -1397,7 +1398,7 @@ pub const cpu = struct {
.xsaves,
}),
};
pub const cascadelake = Cpu{
pub const cascadelake = CpuModel{
.name = "cascadelake",
.llvm_name = "cascadelake",
.features = featureSet(&[_]Feature{
@ -1456,7 +1457,7 @@ pub const cpu = struct {
.xsaves,
}),
};
pub const cooperlake = Cpu{
pub const cooperlake = CpuModel{
.name = "cooperlake",
.llvm_name = "cooperlake",
.features = featureSet(&[_]Feature{
@ -1516,7 +1517,7 @@ pub const cpu = struct {
.xsaves,
}),
};
pub const core_avx_i = Cpu{
pub const core_avx_i = CpuModel{
.name = "core_avx_i",
.llvm_name = "core-avx-i",
.features = featureSet(&[_]Feature{
@ -1549,7 +1550,7 @@ pub const cpu = struct {
.xsaveopt,
}),
};
pub const core_avx2 = Cpu{
pub const core_avx2 = CpuModel{
.name = "core_avx2",
.llvm_name = "core-avx2",
.features = featureSet(&[_]Feature{
@ -1591,7 +1592,7 @@ pub const cpu = struct {
.xsaveopt,
}),
};
pub const core2 = Cpu{
pub const core2 = CpuModel{
.name = "core2",
.llvm_name = "core2",
.features = featureSet(&[_]Feature{
@ -1610,7 +1611,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const corei7 = Cpu{
pub const corei7 = CpuModel{
.name = "corei7",
.llvm_name = "corei7",
.features = featureSet(&[_]Feature{
@ -1629,7 +1630,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const corei7_avx = Cpu{
pub const corei7_avx = CpuModel{
.name = "corei7_avx",
.llvm_name = "corei7-avx",
.features = featureSet(&[_]Feature{
@ -1659,7 +1660,7 @@ pub const cpu = struct {
.xsaveopt,
}),
};
pub const generic = Cpu{
pub const generic = CpuModel{
.name = "generic",
.llvm_name = "generic",
.features = featureSet(&[_]Feature{
@ -1669,7 +1670,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const geode = Cpu{
pub const geode = CpuModel{
.name = "geode",
.llvm_name = "geode",
.features = featureSet(&[_]Feature{
@ -1680,7 +1681,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const goldmont = Cpu{
pub const goldmont = CpuModel{
.name = "goldmont",
.llvm_name = "goldmont",
.features = featureSet(&[_]Feature{
@ -1717,7 +1718,7 @@ pub const cpu = struct {
.xsaves,
}),
};
pub const goldmont_plus = Cpu{
pub const goldmont_plus = CpuModel{
.name = "goldmont_plus",
.llvm_name = "goldmont-plus",
.features = featureSet(&[_]Feature{
@ -1756,7 +1757,7 @@ pub const cpu = struct {
.xsaves,
}),
};
pub const haswell = Cpu{
pub const haswell = CpuModel{
.name = "haswell",
.llvm_name = "haswell",
.features = featureSet(&[_]Feature{
@ -1798,7 +1799,7 @@ pub const cpu = struct {
.xsaveopt,
}),
};
pub const _i386 = Cpu{
pub const _i386 = CpuModel{
.name = "_i386",
.llvm_name = "i386",
.features = featureSet(&[_]Feature{
@ -1807,7 +1808,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const _i486 = Cpu{
pub const _i486 = CpuModel{
.name = "_i486",
.llvm_name = "i486",
.features = featureSet(&[_]Feature{
@ -1816,7 +1817,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const _i586 = Cpu{
pub const _i586 = CpuModel{
.name = "_i586",
.llvm_name = "i586",
.features = featureSet(&[_]Feature{
@ -1826,7 +1827,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const _i686 = Cpu{
pub const _i686 = CpuModel{
.name = "_i686",
.llvm_name = "i686",
.features = featureSet(&[_]Feature{
@ -1837,7 +1838,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const icelake_client = Cpu{
pub const icelake_client = CpuModel{
.name = "icelake_client",
.llvm_name = "icelake-client",
.features = featureSet(&[_]Feature{
@ -1906,7 +1907,7 @@ pub const cpu = struct {
.xsaves,
}),
};
pub const icelake_server = Cpu{
pub const icelake_server = CpuModel{
.name = "icelake_server",
.llvm_name = "icelake-server",
.features = featureSet(&[_]Feature{
@ -1977,7 +1978,7 @@ pub const cpu = struct {
.xsaves,
}),
};
pub const ivybridge = Cpu{
pub const ivybridge = CpuModel{
.name = "ivybridge",
.llvm_name = "ivybridge",
.features = featureSet(&[_]Feature{
@ -2010,7 +2011,7 @@ pub const cpu = struct {
.xsaveopt,
}),
};
pub const k6 = Cpu{
pub const k6 = CpuModel{
.name = "k6",
.llvm_name = "k6",
.features = featureSet(&[_]Feature{
@ -2021,7 +2022,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const k6_2 = Cpu{
pub const k6_2 = CpuModel{
.name = "k6_2",
.llvm_name = "k6-2",
.features = featureSet(&[_]Feature{
@ -2032,7 +2033,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const k6_3 = Cpu{
pub const k6_3 = CpuModel{
.name = "k6_3",
.llvm_name = "k6-3",
.features = featureSet(&[_]Feature{
@ -2043,7 +2044,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const k8 = Cpu{
pub const k8 = CpuModel{
.name = "k8",
.llvm_name = "k8",
.features = featureSet(&[_]Feature{
@ -2061,7 +2062,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const k8_sse3 = Cpu{
pub const k8_sse3 = CpuModel{
.name = "k8_sse3",
.llvm_name = "k8-sse3",
.features = featureSet(&[_]Feature{
@ -2080,7 +2081,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const knl = Cpu{
pub const knl = CpuModel{
.name = "knl",
.llvm_name = "knl",
.features = featureSet(&[_]Feature{
@ -2123,7 +2124,7 @@ pub const cpu = struct {
.xsaveopt,
}),
};
pub const knm = Cpu{
pub const knm = CpuModel{
.name = "knm",
.llvm_name = "knm",
.features = featureSet(&[_]Feature{
@ -2167,14 +2168,14 @@ pub const cpu = struct {
.xsaveopt,
}),
};
pub const lakemont = Cpu{
pub const lakemont = CpuModel{
.name = "lakemont",
.llvm_name = "lakemont",
.features = featureSet(&[_]Feature{
.vzeroupper,
}),
};
pub const nehalem = Cpu{
pub const nehalem = CpuModel{
.name = "nehalem",
.llvm_name = "nehalem",
.features = featureSet(&[_]Feature{
@ -2193,7 +2194,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const nocona = Cpu{
pub const nocona = CpuModel{
.name = "nocona",
.llvm_name = "nocona",
.features = featureSet(&[_]Feature{
@ -2210,7 +2211,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const opteron = Cpu{
pub const opteron = CpuModel{
.name = "opteron",
.llvm_name = "opteron",
.features = featureSet(&[_]Feature{
@ -2228,7 +2229,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const opteron_sse3 = Cpu{
pub const opteron_sse3 = CpuModel{
.name = "opteron_sse3",
.llvm_name = "opteron-sse3",
.features = featureSet(&[_]Feature{
@ -2247,7 +2248,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const penryn = Cpu{
pub const penryn = CpuModel{
.name = "penryn",
.llvm_name = "penryn",
.features = featureSet(&[_]Feature{
@ -2266,7 +2267,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const pentium = Cpu{
pub const pentium = CpuModel{
.name = "pentium",
.llvm_name = "pentium",
.features = featureSet(&[_]Feature{
@ -2276,7 +2277,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const pentium_m = Cpu{
pub const pentium_m = CpuModel{
.name = "pentium_m",
.llvm_name = "pentium-m",
.features = featureSet(&[_]Feature{
@ -2291,7 +2292,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const pentium_mmx = Cpu{
pub const pentium_mmx = CpuModel{
.name = "pentium_mmx",
.llvm_name = "pentium-mmx",
.features = featureSet(&[_]Feature{
@ -2302,7 +2303,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const pentium2 = Cpu{
pub const pentium2 = CpuModel{
.name = "pentium2",
.llvm_name = "pentium2",
.features = featureSet(&[_]Feature{
@ -2316,7 +2317,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const pentium3 = Cpu{
pub const pentium3 = CpuModel{
.name = "pentium3",
.llvm_name = "pentium3",
.features = featureSet(&[_]Feature{
@ -2331,7 +2332,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const pentium3m = Cpu{
pub const pentium3m = CpuModel{
.name = "pentium3m",
.llvm_name = "pentium3m",
.features = featureSet(&[_]Feature{
@ -2346,7 +2347,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const pentium4 = Cpu{
pub const pentium4 = CpuModel{
.name = "pentium4",
.llvm_name = "pentium4",
.features = featureSet(&[_]Feature{
@ -2361,7 +2362,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const pentium4m = Cpu{
pub const pentium4m = CpuModel{
.name = "pentium4m",
.llvm_name = "pentium4m",
.features = featureSet(&[_]Feature{
@ -2376,7 +2377,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const pentiumpro = Cpu{
pub const pentiumpro = CpuModel{
.name = "pentiumpro",
.llvm_name = "pentiumpro",
.features = featureSet(&[_]Feature{
@ -2388,7 +2389,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const prescott = Cpu{
pub const prescott = CpuModel{
.name = "prescott",
.llvm_name = "prescott",
.features = featureSet(&[_]Feature{
@ -2403,7 +2404,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const sandybridge = Cpu{
pub const sandybridge = CpuModel{
.name = "sandybridge",
.llvm_name = "sandybridge",
.features = featureSet(&[_]Feature{
@ -2433,7 +2434,7 @@ pub const cpu = struct {
.xsaveopt,
}),
};
pub const silvermont = Cpu{
pub const silvermont = CpuModel{
.name = "silvermont",
.llvm_name = "silvermont",
.features = featureSet(&[_]Feature{
@ -2462,7 +2463,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const skx = Cpu{
pub const skx = CpuModel{
.name = "skx",
.llvm_name = "skx",
.features = featureSet(&[_]Feature{
@ -2520,7 +2521,7 @@ pub const cpu = struct {
.xsaves,
}),
};
pub const skylake = Cpu{
pub const skylake = CpuModel{
.name = "skylake",
.llvm_name = "skylake",
.features = featureSet(&[_]Feature{
@ -2571,7 +2572,7 @@ pub const cpu = struct {
.xsaves,
}),
};
pub const skylake_avx512 = Cpu{
pub const skylake_avx512 = CpuModel{
.name = "skylake_avx512",
.llvm_name = "skylake-avx512",
.features = featureSet(&[_]Feature{
@ -2629,7 +2630,7 @@ pub const cpu = struct {
.xsaves,
}),
};
pub const slm = Cpu{
pub const slm = CpuModel{
.name = "slm",
.llvm_name = "slm",
.features = featureSet(&[_]Feature{
@ -2658,7 +2659,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const tigerlake = Cpu{
pub const tigerlake = CpuModel{
.name = "tigerlake",
.llvm_name = "tigerlake",
.features = featureSet(&[_]Feature{
@ -2731,7 +2732,7 @@ pub const cpu = struct {
.xsaves,
}),
};
pub const tremont = Cpu{
pub const tremont = CpuModel{
.name = "tremont",
.llvm_name = "tremont",
.features = featureSet(&[_]Feature{
@ -2775,7 +2776,7 @@ pub const cpu = struct {
.xsaves,
}),
};
pub const westmere = Cpu{
pub const westmere = CpuModel{
.name = "westmere",
.llvm_name = "westmere",
.features = featureSet(&[_]Feature{
@ -2795,7 +2796,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const winchip_c6 = Cpu{
pub const winchip_c6 = CpuModel{
.name = "winchip_c6",
.llvm_name = "winchip-c6",
.features = featureSet(&[_]Feature{
@ -2805,7 +2806,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const winchip2 = Cpu{
pub const winchip2 = CpuModel{
.name = "winchip2",
.llvm_name = "winchip2",
.features = featureSet(&[_]Feature{
@ -2815,7 +2816,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const x86_64 = Cpu{
pub const x86_64 = CpuModel{
.name = "x86_64",
.llvm_name = "x86-64",
.features = featureSet(&[_]Feature{
@ -2833,7 +2834,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const yonah = Cpu{
pub const yonah = CpuModel{
.name = "yonah",
.llvm_name = "yonah",
.features = featureSet(&[_]Feature{
@ -2848,7 +2849,7 @@ pub const cpu = struct {
.x87,
}),
};
pub const znver1 = Cpu{
pub const znver1 = CpuModel{
.name = "znver1",
.llvm_name = "znver1",
.features = featureSet(&[_]Feature{
@ -2893,7 +2894,7 @@ pub const cpu = struct {
.xsaves,
}),
};
pub const znver2 = Cpu{
pub const znver2 = CpuModel{
.name = "znver2",
.llvm_name = "znver2",
.features = featureSet(&[_]Feature{
@ -2946,7 +2947,7 @@ pub const cpu = struct {
/// All x86 CPUs, sorted alphabetically by name.
/// TODO: Replace this with usage of `std.meta.declList`. It does work, but stage1
/// compiler has inefficient memory and CPU usage, affecting build times.
pub const all_cpus = &[_]*const Cpu{
pub const all_cpus = &[_]*const CpuModel{
&cpu.amdfam10,
&cpu.athlon,
&cpu.athlon_4,

View File

@ -148,7 +148,7 @@ pub const Thread = struct {
const default_stack_size = 16 * 1024 * 1024;
const Context = @TypeOf(context);
comptime assert(@ArgType(@TypeOf(startFn), 0) == Context);
comptime assert(@typeInfo(@TypeOf(startFn)).Fn.args[0].arg_type.? == Context);
if (builtin.os == builtin.Os.windows) {
const WinThread = struct {
@ -158,7 +158,7 @@ pub const Thread = struct {
};
fn threadMain(raw_arg: windows.LPVOID) callconv(.C) windows.DWORD {
const arg = if (@sizeOf(Context) == 0) {} else @ptrCast(*Context, @alignCast(@alignOf(Context), raw_arg)).*;
switch (@typeId(@TypeOf(startFn).ReturnType)) {
switch (@typeInfo(@TypeOf(startFn).ReturnType)) {
.Int => {
return startFn(arg);
},
@ -201,7 +201,7 @@ pub const Thread = struct {
fn linuxThreadMain(ctx_addr: usize) callconv(.C) u8 {
const arg = if (@sizeOf(Context) == 0) {} else @intToPtr(*const Context, ctx_addr).*;
switch (@typeId(@TypeOf(startFn).ReturnType)) {
switch (@typeInfo(@TypeOf(startFn).ReturnType)) {
.Int => {
return startFn(arg);
},

View File

@ -8,6 +8,7 @@ const math = std.math;
pub const epoch = @import("time/epoch.zig");
/// Spurious wakeups are possible and no precision of timing is guaranteed.
/// TODO integrate with evented I/O
pub fn sleep(nanoseconds: u64) void {
if (builtin.os == .windows) {
const ns_per_ms = ns_per_s / ms_per_s;
@ -152,15 +153,9 @@ pub const Timer = struct {
}
/// Reads the timer value since start or the last reset in nanoseconds
pub fn read(self: *Timer) u64 {
pub fn read(self: Timer) u64 {
var clock = clockNative() - self.start_time;
if (builtin.os == .windows) {
return @divFloor(clock * ns_per_s, self.frequency);
}
if (comptime std.Target.current.isDarwin()) {
return @divFloor(clock * self.frequency.numer, self.frequency.denom);
}
return clock;
return self.nativeDurationToNanos(clock);
}
/// Resets the timer value to 0/now.
@ -171,7 +166,7 @@ pub const Timer = struct {
/// Returns the current value of the timer in nanoseconds, then resets it
pub fn lap(self: *Timer) u64 {
var now = clockNative();
var lap_time = self.read();
var lap_time = self.nativeDurationToNanos(now - self.start_time);
self.start_time = now;
return lap_time;
}
@ -187,6 +182,16 @@ pub const Timer = struct {
os.clock_gettime(monotonic_clock_id, &ts) catch unreachable;
return @intCast(u64, ts.tv_sec) * @as(u64, ns_per_s) + @intCast(u64, ts.tv_nsec);
}
fn nativeDurationToNanos(self: Timer, duration: u64) u64 {
if (builtin.os == .windows) {
return @divFloor(duration * ns_per_s, self.frequency);
}
if (comptime std.Target.current.isDarwin()) {
return @divFloor(duration * self.frequency.numer, self.frequency.denom);
}
return duration;
}
};
test "sleep" {

View File

@ -243,7 +243,7 @@ pub const Utf16LeIterator = struct {
pub fn init(s: []const u16) Utf16LeIterator {
return Utf16LeIterator{
.bytes = @sliceToBytes(s),
.bytes = mem.sliceAsBytes(s),
.i = 0,
};
}
@ -496,7 +496,7 @@ pub fn utf16leToUtf8(utf8: []u8, utf16le: []const u16) !usize {
test "utf16leToUtf8" {
var utf16le: [2]u16 = undefined;
const utf16le_as_bytes = @sliceToBytes(utf16le[0..]);
const utf16le_as_bytes = mem.sliceAsBytes(utf16le[0..]);
{
mem.writeIntSliceLittle(u16, utf16le_as_bytes[0..], 'A');
@ -606,12 +606,12 @@ test "utf8ToUtf16Le" {
{
const length = try utf8ToUtf16Le(utf16le[0..], "𐐷");
testing.expectEqual(@as(usize, 2), length);
testing.expectEqualSlices(u8, "\x01\xd8\x37\xdc", @sliceToBytes(utf16le[0..]));
testing.expectEqualSlices(u8, "\x01\xd8\x37\xdc", mem.sliceAsBytes(utf16le[0..]));
}
{
const length = try utf8ToUtf16Le(utf16le[0..], "\u{10FFFF}");
testing.expectEqual(@as(usize, 2), length);
testing.expectEqualSlices(u8, "\xff\xdb\xff\xdf", @sliceToBytes(utf16le[0..]));
testing.expectEqualSlices(u8, "\xff\xdb\xff\xdf", mem.sliceAsBytes(utf16le[0..]));
}
}
@ -619,13 +619,13 @@ test "utf8ToUtf16LeWithNull" {
{
const utf16 = try utf8ToUtf16LeWithNull(testing.allocator, "𐐷");
defer testing.allocator.free(utf16);
testing.expectEqualSlices(u8, "\x01\xd8\x37\xdc", @sliceToBytes(utf16[0..]));
testing.expectEqualSlices(u8, "\x01\xd8\x37\xdc", mem.sliceAsBytes(utf16[0..]));
testing.expect(utf16[2] == 0);
}
{
const utf16 = try utf8ToUtf16LeWithNull(testing.allocator, "\u{10FFFF}");
defer testing.allocator.free(utf16);
testing.expectEqualSlices(u8, "\xff\xdb\xff\xdf", @sliceToBytes(utf16[0..]));
testing.expectEqualSlices(u8, "\xff\xdb\xff\xdf", mem.sliceAsBytes(utf16[0..]));
testing.expect(utf16[2] == 0);
}
}

View File

@ -5,6 +5,7 @@ pub const parse = @import("zig/parse.zig").parse;
pub const parseStringLiteral = @import("zig/parse_string_literal.zig").parseStringLiteral;
pub const render = @import("zig/render.zig").render;
pub const ast = @import("zig/ast.zig");
pub const system = @import("zig/system.zig");
test "std.zig tests" {
_ = @import("zig/ast.zig");

View File

@ -460,10 +460,9 @@ pub const Node = struct {
}
pub fn iterate(base: *Node, index: usize) ?*Node {
comptime var i = 0;
inline while (i < @memberCount(Id)) : (i += 1) {
if (base.id == @field(Id, @memberName(Id, i))) {
const T = @field(Node, @memberName(Id, i));
inline for (@typeInfo(Id).Enum.fields) |f| {
if (base.id == @field(Id, f.name)) {
const T = @field(Node, f.name);
return @fieldParentPtr(T, "base", base).iterate(index);
}
}
@ -471,10 +470,9 @@ pub const Node = struct {
}
pub fn firstToken(base: *const Node) TokenIndex {
comptime var i = 0;
inline while (i < @memberCount(Id)) : (i += 1) {
if (base.id == @field(Id, @memberName(Id, i))) {
const T = @field(Node, @memberName(Id, i));
inline for (@typeInfo(Id).Enum.fields) |f| {
if (base.id == @field(Id, f.name)) {
const T = @field(Node, f.name);
return @fieldParentPtr(T, "base", base).firstToken();
}
}
@ -482,10 +480,9 @@ pub const Node = struct {
}
pub fn lastToken(base: *const Node) TokenIndex {
comptime var i = 0;
inline while (i < @memberCount(Id)) : (i += 1) {
if (base.id == @field(Id, @memberName(Id, i))) {
const T = @field(Node, @memberName(Id, i));
inline for (@typeInfo(Id).Enum.fields) |f| {
if (base.id == @field(Id, f.name)) {
const T = @field(Node, f.name);
return @fieldParentPtr(T, "base", base).lastToken();
}
}
@ -493,10 +490,9 @@ pub const Node = struct {
}
pub fn typeToId(comptime T: type) Id {
comptime var i = 0;
inline while (i < @memberCount(Id)) : (i += 1) {
if (T == @field(Node, @memberName(Id, i))) {
return @field(Id, @memberName(Id, i));
inline for (@typeInfo(Id).Enum.fields) |f| {
if (T == @field(Node, f.name)) {
return @field(Id, f.name);
}
}
unreachable;
@ -1567,7 +1563,9 @@ pub const Node = struct {
pub const Op = union(enum) {
AddressOf,
ArrayType: ArrayInfo,
Await,
Await: struct {
noasync_token: ?TokenIndex = null,
},
BitNot,
BoolNot,
Cancel,
@ -2184,10 +2182,10 @@ pub const Node = struct {
pub fn iterate(self: *Asm, index: usize) ?*Node {
var i = index;
if (i < self.outputs.len) return &self.outputs.at(index).*.base;
if (i < self.outputs.len) return &self.outputs.at(i).*.base;
i -= self.outputs.len;
if (i < self.inputs.len) return &self.inputs.at(index).*.base;
if (i < self.inputs.len) return &self.inputs.at(i).*.base;
i -= self.inputs.len;
return null;

View File

@ -1129,7 +1129,7 @@ fn parseErrorUnionExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*No
/// / KEYWORD_noasync PrimaryTypeExpr SuffixOp* FnCallArguments
/// / PrimaryTypeExpr (SuffixOp / FnCallArguments)*
fn parseSuffixExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
var maybe_async = eatAnnotatedToken(it, .Keyword_async) orelse eatAnnotatedToken(it, .Keyword_noasync);
const maybe_async = eatAnnotatedToken(it, .Keyword_async) orelse eatAnnotatedToken(it, .Keyword_noasync);
if (maybe_async) |async_token| {
const token_fn = eatToken(it, .Keyword_fn);
if (async_token.ptr.id == .Keyword_async and token_fn != null) {
@ -2179,7 +2179,19 @@ fn parsePrefixOp(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
.MinusPercent => ops{ .NegationWrap = {} },
.Ampersand => ops{ .AddressOf = {} },
.Keyword_try => ops{ .Try = {} },
.Keyword_await => ops{ .Await = {} },
.Keyword_await => ops{ .Await = .{} },
.Keyword_noasync => if (eatToken(it, .Keyword_await)) |await_tok| {
const node = try arena.create(Node.PrefixOp);
node.* = Node.PrefixOp{
.op_token = await_tok,
.op = .{ .Await = .{ .noasync_token = token.index } },
.rhs = undefined, // set by caller
};
return &node.base;
} else {
putBackToken(it, token.index);
return null;
},
else => {
putBackToken(it, token.index);
return null;

View File

@ -1,3 +1,12 @@
test "zig fmt: noasync await" {
try testCanonical(
\\fn foo() void {
\\ x = noasync await y;
\\}
\\
);
}
test "zig fmt: trailing comma in container declaration" {
try testCanonical(
\\const X = struct { foo: i32 };
@ -83,10 +92,12 @@ test "zig fmt: convert extern/nakedcc/stdcallcc into callconv(...)" {
\\nakedcc fn foo1() void {}
\\stdcallcc fn foo2() void {}
\\extern fn foo3() void {}
\\extern "mylib" fn foo4() void {}
,
\\fn foo1() callconv(.Naked) void {}
\\fn foo2() callconv(.Stdcall) void {}
\\fn foo3() callconv(.C) void {}
\\fn foo4() callconv(.C) void {}
\\
);
}
@ -1399,7 +1410,7 @@ test "zig fmt: same-line comment after non-block if expression" {
test "zig fmt: same-line comment on comptime expression" {
try testCanonical(
\\test "" {
\\ comptime assert(@typeId(T) == builtin.TypeId.Int); // must pass an integer to absInt
\\ comptime assert(@typeInfo(T) == .Int); // must pass an integer to absInt
\\}
\\
);

Some files were not shown because too many files have changed in this diff Show More