Merge pull request #3856 from ziglang/builtin-call
introduce `@call` and remove other builtin calls
commit 525b1e8fb4
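In short, the removed builtins map onto `@call` via the `modifier` and `stack` options (a sketch distilled from the diff below; `f`, `s`, `a`, `b` are placeholder names):

    @inlineCall(f, a, b)       =>  @call(.{ .modifier = .always_inline }, f, .{ a, b })
    @noInlineCall(f, a, b)     =>  @call(.{ .modifier = .never_inline }, f, .{ a, b })
    @newStackCall(s, f, a, b)  =>  @call(.{ .stack = s }, f, .{ a, b })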
@@ -6839,6 +6839,99 @@ async fn func(y: *i32) void {
</p>
{#header_close#}

{#header_open|@call#}
<pre>{#syntax#}@call(options: std.builtin.CallOptions, function: var, args: var) var{#endsyntax#}</pre>
<p>
Calls a function, in the same way that invoking an expression with parentheses does:
</p>
{#code_begin|test|call#}
const assert = @import("std").debug.assert;

test "basic function call" {
    assert(@call(.{}, add, .{3, 9}) == 12);
}

fn add(a: i32, b: i32) i32 {
    return a + b;
}
{#code_end#}
<p>
{#syntax#}@call{#endsyntax#} allows more flexibility than normal function call syntax does. The
{#syntax#}CallOptions{#endsyntax#} struct is reproduced here:
</p>
{#code_begin|syntax#}
pub const CallOptions = struct {
    modifier: Modifier = .auto,
    stack: ?[]align(std.Target.stack_align) u8 = null,

    pub const Modifier = enum {
        /// Equivalent to function call syntax.
        auto,

        /// Prevents tail call optimization. This guarantees that the return
        /// address will point to the callsite, as opposed to the callsite's
        /// callsite. If the call is otherwise required to be tail-called
        /// or inlined, a compile error is emitted instead.
        never_tail,

        /// Guarantees that the call will not be inlined. If the call is
        /// otherwise required to be inlined, a compile error is emitted instead.
        never_inline,

        /// Asserts that the function call will not suspend. This allows a
        /// non-async function to call an async function.
        no_async,

        /// Guarantees that the call will be generated with tail call optimization.
        /// If this is not possible, a compile error is emitted instead.
        always_tail,

        /// Guarantees that the call will be inlined at the callsite.
        /// If this is not possible, a compile error is emitted instead.
        always_inline,

        /// Evaluates the call at compile-time. If the call cannot be completed at
        /// compile-time, a compile error is emitted instead.
        compile_time,
    };
};
{#code_end#}
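<p>
For example, the {#syntax#}modifier{#endsyntax#} field can force a call to be inlined
(a minimal sketch in the same spirit as the test above; the test name and the
{#syntax#}square{#endsyntax#} helper are illustrative, not part of this change):
</p>
{#code_begin|test#}
const assert = @import("std").debug.assert;

test "forced inline call" {
    // always_inline guarantees the call is inlined at the callsite;
    // if that is impossible, a compile error is emitted instead.
    assert(@call(.{ .modifier = .always_inline }, square, .{5}) == 25);
}

fn square(x: i32) i32 {
    return x * x;
}
{#code_end#}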

{#header_open|Calling with a New Stack#}
<p>
When the {#syntax#}stack{#endsyntax#} option is provided, instead of using the same stack as the caller, the function uses the provided stack.
</p>
{#code_begin|test|new_stack_call#}
const std = @import("std");
const assert = std.debug.assert;

var new_stack_bytes: [1024]u8 align(16) = undefined;

test "calling a function with a new stack" {
    const arg = 1234;

    const a = @call(.{.stack = new_stack_bytes[0..512]}, targetFunction, .{arg});
    const b = @call(.{.stack = new_stack_bytes[512..]}, targetFunction, .{arg});
    _ = targetFunction(arg);

    assert(arg == 1234);
    assert(a < b);
}

fn targetFunction(x: i32) usize {
    assert(x == 1234);

    var local_variable: i32 = 42;
    const ptr = &local_variable;
    ptr.* += 1;

    assert(local_variable == 43);
    return @ptrToInt(ptr);
}
{#code_end#}
{#header_close#}
{#header_close#}

{#header_open|@cDefine#}
<pre>{#syntax#}@cDefine(comptime name: []u8, value){#endsyntax#}</pre>
<p>
@@ -7424,27 +7517,6 @@ test "@hasDecl" {
{#see_also|Compile Variables|@embedFile#}
{#header_close#}

{#header_open|@inlineCall#}
<pre>{#syntax#}@inlineCall(function: X, args: ...) Y{#endsyntax#}</pre>
<p>
This calls a function, in the same way that invoking an expression with parentheses does:
</p>
{#code_begin|test#}
const assert = @import("std").debug.assert;

test "inline function call" {
    assert(@inlineCall(add, 3, 9) == 12);
}

fn add(a: i32, b: i32) i32 { return a + b; }
{#code_end#}
<p>
Unlike a normal function call, however, {#syntax#}@inlineCall{#endsyntax#} guarantees that the call
will be inlined. If the call cannot be inlined, a compile error is emitted.
</p>
{#see_also|@noInlineCall#}
{#header_close#}

{#header_open|@intCast#}
<pre>{#syntax#}@intCast(comptime DestType: type, int: var) DestType{#endsyntax#}</pre>
<p>
@@ -7602,71 +7674,6 @@ mem.set(u8, dest, c);{#endsyntax#}</pre>
</p>
{#header_close#}

{#header_open|@newStackCall#}
<pre>{#syntax#}@newStackCall(new_stack: []align(target_stack_align) u8, function: var, args: ...) var{#endsyntax#}</pre>
<p>
This calls a function, in the same way that invoking an expression with parentheses does. However,
instead of using the same stack as the caller, the function uses the stack provided in the {#syntax#}new_stack{#endsyntax#}
parameter.
</p>
<p>
The new stack must be aligned to {#syntax#}target_stack_align{#endsyntax#} bytes. This is a target-specific
number. A safe value that will work on all targets is {#syntax#}16{#endsyntax#}. This value can
also be obtained by using {#link|@sizeOf#} on the {#link|@Frame#} type of {#link|Async Functions#}.
</p>
{#code_begin|test#}
const std = @import("std");
const assert = std.debug.assert;

var new_stack_bytes: [1024]u8 align(16) = undefined;

test "calling a function with a new stack" {
    const arg = 1234;

    const a = @newStackCall(new_stack_bytes[0..512], targetFunction, arg);
    const b = @newStackCall(new_stack_bytes[512..], targetFunction, arg);
    _ = targetFunction(arg);

    assert(arg == 1234);
    assert(a < b);
}

fn targetFunction(x: i32) usize {
    assert(x == 1234);

    var local_variable: i32 = 42;
    const ptr = &local_variable;
    ptr.* += 1;

    assert(local_variable == 43);
    return @ptrToInt(ptr);
}
{#code_end#}
{#header_close#}

{#header_open|@noInlineCall#}
<pre>{#syntax#}@noInlineCall(function: var, args: ...) var{#endsyntax#}</pre>
<p>
This calls a function, in the same way that invoking an expression with parentheses does:
</p>
{#code_begin|test#}
const assert = @import("std").debug.assert;

test "noinline function call" {
    assert(@noInlineCall(add, 3, 9) == 12);
}

fn add(a: i32, b: i32) i32 {
    return a + b;
}
{#code_end#}
<p>
Unlike a normal function call, however, {#syntax#}@noInlineCall{#endsyntax#} guarantees that the call
will not be inlined. If the call must be inlined, a compile error is emitted.
</p>
{#see_also|@inlineCall#}
{#header_close#}

{#header_open|@OpaqueType#}
<pre>{#syntax#}@OpaqueType() type{#endsyntax#}</pre>
<p>
@@ -372,6 +372,44 @@ pub const Version = struct {
    patch: u32,
};

/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const CallOptions = struct {
    modifier: Modifier = .auto,
    stack: ?[]align(std.Target.stack_align) u8 = null,

    pub const Modifier = enum {
        /// Equivalent to function call syntax.
        auto,

        /// Prevents tail call optimization. This guarantees that the return
        /// address will point to the callsite, as opposed to the callsite's
        /// callsite. If the call is otherwise required to be tail-called
        /// or inlined, a compile error is emitted instead.
        never_tail,

        /// Guarantees that the call will not be inlined. If the call is
        /// otherwise required to be inlined, a compile error is emitted instead.
        never_inline,

        /// Asserts that the function call will not suspend. This allows a
        /// non-async function to call an async function.
        no_async,

        /// Guarantees that the call will be generated with tail call optimization.
        /// If this is not possible, a compile error is emitted instead.
        always_tail,

        /// Guarantees that the call will be inlined at the callsite.
        /// If this is not possible, a compile error is emitted instead.
        always_inline,

        /// Evaluates the call at compile-time. If the call cannot be completed at
        /// compile-time, a compile error is emitted instead.
        compile_time,
    };
};
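The anonymous literal `.{}` passed to `@call` coerces to this struct; the options can also be bound to a named constant first, as the SipHash workaround further down does. A minimal sketch (the `opts` name and `add` helper are illustrative):

    const std = @import("std");

    test "CallOptions bound to a constant" {
        // Equivalent to @call(.{ .modifier = .never_inline }, add, .{ 1, 2 }).
        const opts = std.builtin.CallOptions{ .modifier = .never_inline };
        std.testing.expect(@call(opts, add, .{ 1, 2 }) == 3);
    }

    fn add(a: i32, b: i32) i32 {
        return a + b;
    }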

/// This function type is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const PanicFn = fn ([]const u8, ?*StackTrace) noreturn;
@@ -92,7 +92,7 @@ pub fn hash(hasher: var, key: var, comptime strat: HashStrategy) void {

        // Help the optimizer see that hashing an int is easy by inlining!
        // TODO Check if the situation is better after #561 is resolved.
-        .Int => @inlineCall(hasher.update, std.mem.asBytes(&key)),
+        .Int => @call(.{ .modifier = .always_inline }, hasher.update, .{std.mem.asBytes(&key)}),

        .Float => |info| hash(hasher, @bitCast(@IntType(false, info.bits), key), strat),

@@ -101,7 +101,7 @@ pub fn hash(hasher: var, key: var, comptime strat: HashStrategy) void {
        .ErrorSet => hash(hasher, @errorToInt(key), strat),
        .AnyFrame, .Fn => hash(hasher, @ptrToInt(key), strat),

-        .Pointer => @inlineCall(hashPointer, hasher, key, strat),
+        .Pointer => @call(.{ .modifier = .always_inline }, hashPointer, .{ hasher, key, strat }),

        .Optional => if (key) |k| hash(hasher, k, strat),

@@ -197,7 +197,7 @@ pub const CityHash64 = struct {
    }

    fn hashLen16(u: u64, v: u64) u64 {
-        return @inlineCall(hash128To64, u, v);
+        return @call(.{ .modifier = .always_inline }, hash128To64, .{ u, v });
    }

    fn hashLen16Mul(low: u64, high: u64, mul: u64) u64 {
@@ -210,7 +210,7 @@ pub const CityHash64 = struct {
    }

    fn hash128To64(low: u64, high: u64) u64 {
-        return @inlineCall(hashLen16Mul, low, high, 0x9ddfea08eb382d69);
+        return @call(.{ .modifier = .always_inline }, hashLen16Mul, .{ low, high, 0x9ddfea08eb382d69 });
    }

    fn hashLen0To16(str: []const u8) u64 {
@@ -291,7 +291,14 @@ pub const CityHash64 = struct {
    }

    fn weakHashLen32WithSeeds(ptr: [*]const u8, a: u64, b: u64) WeakPair {
-        return @inlineCall(weakHashLen32WithSeedsHelper, fetch64(ptr), fetch64(ptr + 8), fetch64(ptr + 16), fetch64(ptr + 24), a, b);
+        return @call(.{ .modifier = .always_inline }, weakHashLen32WithSeedsHelper, .{
+            fetch64(ptr),
+            fetch64(ptr + 8),
+            fetch64(ptr + 16),
+            fetch64(ptr + 24),
+            a,
+            b,
+        });
    }

    pub fn hash(str: []const u8) u64 {
@@ -339,7 +346,7 @@ pub const CityHash64 = struct {
    }

    pub fn hashWithSeed(str: []const u8, seed: u64) u64 {
-        return @inlineCall(Self.hashWithSeeds, str, k2, seed);
+        return @call(.{ .modifier = .always_inline }, Self.hashWithSeeds, .{ str, k2, seed });
    }

    pub fn hashWithSeeds(str: []const u8, seed0: u64, seed1: u64) u64 {

@@ -8,7 +8,7 @@ pub const Murmur2_32 = struct {
    const Self = @This();

    pub fn hash(str: []const u8) u32 {
-        return @inlineCall(Self.hashWithSeed, str, default_seed);
+        return @call(.{ .modifier = .always_inline }, Self.hashWithSeed, .{ str, default_seed });
    }

    pub fn hashWithSeed(str: []const u8, seed: u32) u32 {
@@ -44,7 +44,7 @@ pub const Murmur2_32 = struct {
    }

    pub fn hashUint32(v: u32) u32 {
-        return @inlineCall(Self.hashUint32WithSeed, v, default_seed);
+        return @call(.{ .modifier = .always_inline }, Self.hashUint32WithSeed, .{ v, default_seed });
    }

    pub fn hashUint32WithSeed(v: u32, seed: u32) u32 {
@@ -64,7 +64,7 @@ pub const Murmur2_32 = struct {
    }

    pub fn hashUint64(v: u64) u32 {
-        return @inlineCall(Self.hashUint64WithSeed, v, default_seed);
+        return @call(.{ .modifier = .always_inline }, Self.hashUint64WithSeed, .{ v, default_seed });
    }

    pub fn hashUint64WithSeed(v: u64, seed: u32) u32 {
@@ -93,7 +93,7 @@ pub const Murmur2_64 = struct {
    const Self = @This();

    pub fn hash(str: []const u8) u64 {
-        return @inlineCall(Self.hashWithSeed, str, default_seed);
+        return @call(.{ .modifier = .always_inline }, Self.hashWithSeed, .{ str, default_seed });
    }

    pub fn hashWithSeed(str: []const u8, seed: u64) u64 {
@@ -127,7 +127,7 @@ pub const Murmur2_64 = struct {
    }

    pub fn hashUint32(v: u32) u64 {
-        return @inlineCall(Self.hashUint32WithSeed, v, default_seed);
+        return @call(.{ .modifier = .always_inline }, Self.hashUint32WithSeed, .{ v, default_seed });
    }

    pub fn hashUint32WithSeed(v: u32, seed: u32) u64 {
@@ -144,7 +144,7 @@ pub const Murmur2_64 = struct {
    }

    pub fn hashUint64(v: u64) u64 {
-        return @inlineCall(Self.hashUint64WithSeed, v, default_seed);
+        return @call(.{ .modifier = .always_inline }, Self.hashUint64WithSeed, .{ v, default_seed });
    }

    pub fn hashUint64WithSeed(v: u64, seed: u32) u64 {
@@ -172,7 +172,7 @@ pub const Murmur3_32 = struct {
    }

    pub fn hash(str: []const u8) u32 {
-        return @inlineCall(Self.hashWithSeed, str, default_seed);
+        return @call(.{ .modifier = .always_inline }, Self.hashWithSeed, .{ str, default_seed });
    }

    pub fn hashWithSeed(str: []const u8, seed: u32) u32 {
@@ -220,7 +220,7 @@ pub const Murmur3_32 = struct {
    }

    pub fn hashUint32(v: u32) u32 {
-        return @inlineCall(Self.hashUint32WithSeed, v, default_seed);
+        return @call(.{ .modifier = .always_inline }, Self.hashUint32WithSeed, .{ v, default_seed });
    }

    pub fn hashUint32WithSeed(v: u32, seed: u32) u32 {
@@ -246,7 +246,7 @@ pub const Murmur3_32 = struct {
    }

    pub fn hashUint64(v: u64) u32 {
-        return @inlineCall(Self.hashUint64WithSeed, v, default_seed);
+        return @call(.{ .modifier = .always_inline }, Self.hashUint64WithSeed, .{ v, default_seed });
    }

    pub fn hashUint64WithSeed(v: u64, seed: u32) u32 {

@@ -11,7 +11,7 @@ const testing = std.testing;
const math = std.math;
const mem = std.mem;

-const Endian = @import("builtin").Endian;
+const Endian = std.builtin.Endian;

pub fn SipHash64(comptime c_rounds: usize, comptime d_rounds: usize) type {
    return SipHash(u64, c_rounds, d_rounds);
@@ -62,7 +62,7 @@ fn SipHashStateless(comptime T: type, comptime c_rounds: usize, comptime d_round

        var off: usize = 0;
        while (off < b.len) : (off += 8) {
-            @inlineCall(self.round, b[off .. off + 8]);
+            @call(.{ .modifier = .always_inline }, self.round, .{b[off .. off + 8]});
        }

        self.msg_len +%= @truncate(u8, b.len);
@@ -84,9 +84,12 @@ fn SipHashStateless(comptime T: type, comptime c_rounds: usize, comptime d_round
            self.v2 ^= 0xff;
        }

+        // TODO this is a workaround, should be able to supply the value without a separate variable
+        const inl = std.builtin.CallOptions{ .modifier = .always_inline };
+
        comptime var i: usize = 0;
        inline while (i < d_rounds) : (i += 1) {
-            @inlineCall(sipRound, self);
+            @call(inl, sipRound, .{self});
        }

        const b1 = self.v0 ^ self.v1 ^ self.v2 ^ self.v3;
@@ -98,7 +101,7 @@ fn SipHashStateless(comptime T: type, comptime c_rounds: usize, comptime d_round

        comptime var j: usize = 0;
        inline while (j < d_rounds) : (j += 1) {
-            @inlineCall(sipRound, self);
+            @call(inl, sipRound, .{self});
        }

        const b2 = self.v0 ^ self.v1 ^ self.v2 ^ self.v3;
@@ -111,9 +114,11 @@ fn SipHashStateless(comptime T: type, comptime c_rounds: usize, comptime d_round
        const m = mem.readIntSliceLittle(u64, b[0..]);
        self.v3 ^= m;

+        // TODO this is a workaround, should be able to supply the value without a separate variable
+        const inl = std.builtin.CallOptions{ .modifier = .always_inline };
        comptime var i: usize = 0;
        inline while (i < c_rounds) : (i += 1) {
-            @inlineCall(sipRound, self);
+            @call(inl, sipRound, .{self});
        }

        self.v0 ^= m;
@@ -140,8 +145,8 @@ fn SipHashStateless(comptime T: type, comptime c_rounds: usize, comptime d_round
        const aligned_len = input.len - (input.len % 8);

        var c = Self.init(key);
-        @inlineCall(c.update, input[0..aligned_len]);
-        return @inlineCall(c.final, input[aligned_len..]);
+        @call(.{ .modifier = .always_inline }, c.update, .{input[0..aligned_len]});
+        return @call(.{ .modifier = .always_inline }, c.final, .{input[aligned_len..]});
    }
};
}

@@ -65,7 +65,7 @@ const WyhashStateless = struct {

        var off: usize = 0;
        while (off < b.len) : (off += 32) {
-            @inlineCall(self.round, b[off .. off + 32]);
+            @call(.{ .modifier = .always_inline }, self.round, .{b[off .. off + 32]});
        }

        self.msg_len += b.len;
@@ -121,8 +121,8 @@ const WyhashStateless = struct {
        const aligned_len = input.len - (input.len % 32);

        var c = WyhashStateless.init(seed);
-        @inlineCall(c.update, input[0..aligned_len]);
-        return @inlineCall(c.final, input[aligned_len..]);
+        @call(.{ .modifier = .always_inline }, c.update, .{input[0..aligned_len]});
+        return @call(.{ .modifier = .always_inline }, c.final, .{input[aligned_len..]});
    }
};

@@ -811,7 +811,7 @@ pub const Int = struct {

        var j: usize = 0;
        while (j < a_lo.len) : (j += 1) {
-            a_lo[j] = @inlineCall(addMulLimbWithCarry, a_lo[j], y[j], xi, &carry);
+            a_lo[j] = @call(.{ .modifier = .always_inline }, addMulLimbWithCarry, .{ a_lo[j], y[j], xi, &carry });
        }

        j = 0;
@@ -1214,7 +1214,11 @@ pub const Int = struct {
            const dst_i = src_i + limb_shift;

            const src_digit = a[src_i];
-            r[dst_i] = carry | @inlineCall(math.shr, Limb, src_digit, Limb.bit_count - @intCast(Limb, interior_limb_shift));
+            r[dst_i] = carry | @call(.{ .modifier = .always_inline }, math.shr, .{
+                Limb,
+                src_digit,
+                Limb.bit_count - @intCast(Limb, interior_limb_shift),
+            });
            carry = (src_digit << interior_limb_shift);
        }

@@ -1254,7 +1258,11 @@ pub const Int = struct {

            const src_digit = a[src_i];
            r[dst_i] = carry | (src_digit >> interior_limb_shift);
-            carry = @inlineCall(math.shl, Limb, src_digit, Limb.bit_count - @intCast(Limb, interior_limb_shift));
+            carry = @call(.{ .modifier = .always_inline }, math.shl, .{
+                Limb,
+                src_digit,
+                Limb.bit_count - @intCast(Limb, interior_limb_shift),
+            });
        }
    }

@@ -94,7 +94,7 @@ pub fn fork() usize {
/// the compiler is not aware of how vfork affects control flow and you may
/// see different results in optimized builds.
pub inline fn vfork() usize {
-    return @inlineCall(syscall0, SYS_vfork);
+    return @call(.{ .modifier = .always_inline }, syscall0, .{SYS_vfork});
}

pub fn futimens(fd: i32, times: *const [2]timespec) usize {
@@ -14,31 +14,31 @@ const ConditionalOperator = enum {

pub nakedcc fn __aeabi_dcmpeq() noreturn {
    @setRuntimeSafety(false);
-    @inlineCall(aeabi_dcmp, .Eq);
+    @call(.{ .modifier = .always_inline }, aeabi_dcmp, .{.Eq});
    unreachable;
}

pub nakedcc fn __aeabi_dcmplt() noreturn {
    @setRuntimeSafety(false);
-    @inlineCall(aeabi_dcmp, .Lt);
+    @call(.{ .modifier = .always_inline }, aeabi_dcmp, .{.Lt});
    unreachable;
}

pub nakedcc fn __aeabi_dcmple() noreturn {
    @setRuntimeSafety(false);
-    @inlineCall(aeabi_dcmp, .Le);
+    @call(.{ .modifier = .always_inline }, aeabi_dcmp, .{.Le});
    unreachable;
}

pub nakedcc fn __aeabi_dcmpge() noreturn {
    @setRuntimeSafety(false);
-    @inlineCall(aeabi_dcmp, .Ge);
+    @call(.{ .modifier = .always_inline }, aeabi_dcmp, .{.Ge});
    unreachable;
}

pub nakedcc fn __aeabi_dcmpgt() noreturn {
    @setRuntimeSafety(false);
-    @inlineCall(aeabi_dcmp, .Gt);
+    @call(.{ .modifier = .always_inline }, aeabi_dcmp, .{.Gt});
    unreachable;
}

@@ -14,31 +14,31 @@ const ConditionalOperator = enum {

pub nakedcc fn __aeabi_fcmpeq() noreturn {
    @setRuntimeSafety(false);
-    @inlineCall(aeabi_fcmp, .Eq);
+    @call(.{ .modifier = .always_inline }, aeabi_fcmp, .{.Eq});
    unreachable;
}

pub nakedcc fn __aeabi_fcmplt() noreturn {
    @setRuntimeSafety(false);
-    @inlineCall(aeabi_fcmp, .Lt);
+    @call(.{ .modifier = .always_inline }, aeabi_fcmp, .{.Lt});
    unreachable;
}

pub nakedcc fn __aeabi_fcmple() noreturn {
    @setRuntimeSafety(false);
-    @inlineCall(aeabi_fcmp, .Le);
+    @call(.{ .modifier = .always_inline }, aeabi_fcmp, .{.Le});
    unreachable;
}

pub nakedcc fn __aeabi_fcmpge() noreturn {
    @setRuntimeSafety(false);
-    @inlineCall(aeabi_fcmp, .Ge);
+    @call(.{ .modifier = .always_inline }, aeabi_fcmp, .{.Ge});
    unreachable;
}

pub nakedcc fn __aeabi_fcmpgt() noreturn {
    @setRuntimeSafety(false);
-    @inlineCall(aeabi_fcmp, .Gt);
+    @call(.{ .modifier = .always_inline }, aeabi_fcmp, .{.Gt});
    unreachable;
}

@@ -17,7 +17,10 @@ pub extern fn __divti3(a: i128, b: i128) i128 {

const v128 = @Vector(2, u64);
pub extern fn __divti3_windows_x86_64(a: v128, b: v128) v128 {
-    return @bitCast(v128, @inlineCall(__divti3, @bitCast(i128, a), @bitCast(i128, b)));
+    return @bitCast(v128, @call(.{ .modifier = .always_inline }, __divti3, .{
+        @bitCast(i128, a),
+        @bitCast(i128, b),
+    }));
}

test "import divti3" {
@@ -3,19 +3,19 @@ const builtin = @import("builtin");
const is_test = builtin.is_test;

pub extern fn __extendsfdf2(a: f32) f64 {
-    return @inlineCall(extendXfYf2, f64, f32, @bitCast(u32, a));
+    return @call(.{ .modifier = .always_inline }, extendXfYf2, .{ f64, f32, @bitCast(u32, a) });
}

pub extern fn __extenddftf2(a: f64) f128 {
-    return @inlineCall(extendXfYf2, f128, f64, @bitCast(u64, a));
+    return @call(.{ .modifier = .always_inline }, extendXfYf2, .{ f128, f64, @bitCast(u64, a) });
}

pub extern fn __extendsftf2(a: f32) f128 {
-    return @inlineCall(extendXfYf2, f128, f32, @bitCast(u32, a));
+    return @call(.{ .modifier = .always_inline }, extendXfYf2, .{ f128, f32, @bitCast(u32, a) });
}

pub extern fn __extendhfsf2(a: u16) f32 {
-    return @inlineCall(extendXfYf2, f32, f16, a);
+    return @call(.{ .modifier = .always_inline }, extendXfYf2, .{ f32, f16, a });
}

const CHAR_BIT = 8;

@@ -55,17 +55,17 @@ fn floatsiXf(comptime T: type, a: i32) T {

pub extern fn __floatsisf(arg: i32) f32 {
    @setRuntimeSafety(builtin.is_test);
-    return @inlineCall(floatsiXf, f32, arg);
+    return @call(.{ .modifier = .always_inline }, floatsiXf, .{ f32, arg });
}

pub extern fn __floatsidf(arg: i32) f64 {
    @setRuntimeSafety(builtin.is_test);
-    return @inlineCall(floatsiXf, f64, arg);
+    return @call(.{ .modifier = .always_inline }, floatsiXf, .{ f64, arg });
}

pub extern fn __floatsitf(arg: i32) f128 {
    @setRuntimeSafety(builtin.is_test);
-    return @inlineCall(floatsiXf, f128, arg);
+    return @call(.{ .modifier = .always_inline }, floatsiXf, .{ f128, arg });
}

fn test_one_floatsitf(a: i32, expected: u128) void {
@@ -22,7 +22,10 @@ pub extern fn __modti3(a: i128, b: i128) i128 {

const v128 = @Vector(2, u64);
pub extern fn __modti3_windows_x86_64(a: v128, b: v128) v128 {
-    return @bitCast(v128, @inlineCall(__modti3, @bitCast(i128, a), @bitCast(i128, b)));
+    return @bitCast(v128, @call(.{ .modifier = .always_inline }, __modti3, .{
+        @bitCast(i128, a),
+        @bitCast(i128, b),
+    }));
}

test "import modti3" {
@@ -16,7 +16,10 @@ pub extern fn __multi3(a: i128, b: i128) i128 {

const v128 = @Vector(2, u64);
pub extern fn __multi3_windows_x86_64(a: v128, b: v128) v128 {
-    return @bitCast(v128, @inlineCall(__multi3, @bitCast(i128, a), @bitCast(i128, b)));
+    return @bitCast(v128, @call(.{ .modifier = .always_inline }, __multi3, .{
+        @bitCast(i128, a),
+        @bitCast(i128, b),
+    }));
}

fn __mulddi3(a: u64, b: u64) i128 {
@@ -182,25 +182,25 @@ fn win_probe_stack_adjust_sp() void {

pub nakedcc fn _chkstk() void {
    @setRuntimeSafety(false);
-    @inlineCall(win_probe_stack_adjust_sp);
+    @call(.{ .modifier = .always_inline }, win_probe_stack_adjust_sp, .{});
}
pub nakedcc fn __chkstk() void {
    @setRuntimeSafety(false);
    switch (builtin.arch) {
-        .i386 => @inlineCall(win_probe_stack_adjust_sp),
-        .x86_64 => @inlineCall(win_probe_stack_only),
+        .i386 => @call(.{ .modifier = .always_inline }, win_probe_stack_adjust_sp, .{}),
+        .x86_64 => @call(.{ .modifier = .always_inline }, win_probe_stack_only, .{}),
        else => unreachable,
    }
}
pub nakedcc fn ___chkstk() void {
    @setRuntimeSafety(false);
-    @inlineCall(win_probe_stack_adjust_sp);
+    @call(.{ .modifier = .always_inline }, win_probe_stack_adjust_sp, .{});
}
pub nakedcc fn __chkstk_ms() void {
    @setRuntimeSafety(false);
-    @inlineCall(win_probe_stack_only);
+    @call(.{ .modifier = .always_inline }, win_probe_stack_only, .{});
}
pub nakedcc fn ___chkstk_ms() void {
    @setRuntimeSafety(false);
-    @inlineCall(win_probe_stack_only);
+    @call(.{ .modifier = .always_inline }, win_probe_stack_only, .{});
}
@@ -11,5 +11,8 @@ pub extern fn __umodti3(a: u128, b: u128) u128 {

const v128 = @Vector(2, u64);
pub extern fn __umodti3_windows_x86_64(a: v128, b: v128) v128 {
-    return @bitCast(v128, @inlineCall(__umodti3, @bitCast(u128, a), @bitCast(u128, b)));
+    return @bitCast(v128, @call(.{ .modifier = .always_inline }, __umodti3, .{
+        @bitCast(u128, a),
+        @bitCast(u128, b),
+    }));
}
@@ -61,7 +61,7 @@ stdcallcc fn _DllMainCRTStartup(
extern fn wasm_freestanding_start() void {
    // This is marked inline because for some reason LLVM in release mode fails to inline it,
    // and we want fewer call frames in stack traces.
-    _ = @inlineCall(callMain);
+    _ = @call(.{ .modifier = .always_inline }, callMain, .{});
}

extern fn EfiMain(handle: uefi.Handle, system_table: *uefi.tables.SystemTable) usize {
@@ -91,7 +91,7 @@ nakedcc fn _start() noreturn {
    if (builtin.os == builtin.Os.wasi) {
        // This is marked inline because for some reason LLVM in release mode fails to inline it,
        // and we want fewer call frames in stack traces.
-        std.os.wasi.proc_exit(@inlineCall(callMain));
+        std.os.wasi.proc_exit(@call(.{ .modifier = .always_inline }, callMain, .{}));
    }

    switch (builtin.arch) {
@@ -127,7 +127,7 @@ nakedcc fn _start() noreturn {
    }
    // If LLVM inlines stack variables into _start, they will overwrite
    // the command line argument data.
-    @noInlineCall(posixCallMainAndExit);
+    @call(.{ .modifier = .never_inline }, posixCallMainAndExit, .{});
}

stdcallcc fn WinMainCRTStartup() noreturn {
@@ -186,10 +186,10 @@ fn posixCallMainAndExit() noreturn {
        //    0,
        //) catch @panic("out of memory");
        //std.os.mprotect(new_stack[0..std.mem.page_size], std.os.PROT_NONE) catch {};
-        //std.os.exit(@newStackCall(new_stack, callMainWithArgs, argc, argv, envp));
+        //std.os.exit(@call(.{.stack = new_stack}, callMainWithArgs, .{argc, argv, envp}));
    }

-    std.os.exit(@inlineCall(callMainWithArgs, argc, argv, envp));
+    std.os.exit(@call(.{ .modifier = .always_inline }, callMainWithArgs, .{ argc, argv, envp }));
}

fn callMainWithArgs(argc: usize, argv: [*][*:0]u8, envp: [][*:0]u8) u8 {
@@ -205,7 +205,7 @@ extern fn main(c_argc: i32, c_argv: [*][*:0]u8, c_envp: [*:null]?[*:0]u8) i32 {
    var env_count: usize = 0;
    while (c_envp[env_count] != null) : (env_count += 1) {}
    const envp = @ptrCast([*][*:0]u8, c_envp)[0..env_count];
-    return @inlineCall(callMainWithArgs, @intCast(usize, c_argc), c_argv, envp);
+    return @call(.{ .modifier = .always_inline }, callMainWithArgs, .{ @intCast(usize, c_argc), c_argv, envp });
}

// General error message for a malformed return type
@@ -235,7 +235,7 @@ inline fn initEventLoopAndCallMain() u8 {

    // This is marked inline because for some reason LLVM in release mode fails to inline it,
    // and we want fewer call frames in stack traces.
-    return @inlineCall(callMain);
+    return @call(.{ .modifier = .always_inline }, callMain, .{});
}

async fn callMainAsync(loop: *std.event.Loop) u8 {
@@ -321,7 +321,7 @@ pub const Inst = struct {
            }

            const llvm_cc = llvm.CCallConv;
-            const fn_inline = llvm.FnInline.Auto;
+            const call_attr = llvm.CallAttr.Auto;

            return llvm.BuildCall(
                ofile.builder,
@@ -329,7 +329,7 @@ pub const Inst = struct {
                args.ptr,
                @intCast(c_uint, args.len),
                llvm_cc,
-                fn_inline,
+                call_attr,
                "",
            ) orelse error.OutOfMemory;
        }
@@ -260,10 +260,12 @@ pub const X86StdcallCallConv = c.LLVMX86StdcallCallConv;
pub const X86FastcallCallConv = c.LLVMX86FastcallCallConv;
pub const CallConv = c.LLVMCallConv;

-pub const FnInline = extern enum {
+pub const CallAttr = extern enum {
    Auto,
-    Always,
-    Never,
+    NeverTail,
+    NeverInline,
+    AlwaysTail,
+    AlwaysInline,
};

fn removeNullability(comptime T: type) type {
@@ -286,6 +288,6 @@ extern fn ZigLLVMTargetMachineEmitToFile(
) bool;

pub const BuildCall = ZigLLVMBuildCall;
-extern fn ZigLLVMBuildCall(B: *Builder, Fn: *Value, Args: [*]*Value, NumArgs: c_uint, CC: c_uint, fn_inline: FnInline, Name: [*:0]const u8) ?*Value;
+extern fn ZigLLVMBuildCall(B: *Builder, Fn: *Value, Args: [*]*Value, NumArgs: c_uint, CC: c_uint, fn_inline: CallAttr, Name: [*:0]const u8) ?*Value;

pub const PrivateLinkage = c.LLVMLinkage.LLVMPrivateLinkage;
@@ -409,6 +409,9 @@ struct ZigValue {
    LLVMValueRef llvm_global;

    union {
+        // populated if special == ConstValSpecialLazy
+        LazyValue *x_lazy;
+
        // populated if special == ConstValSpecialStatic
        BigInt x_bigint;
        BigFloat x_bigfloat;
@@ -429,7 +432,6 @@ struct ZigValue {
        ConstPtrValue x_ptr;
        ConstArgTuple x_arg_tuple;
        Buf *x_enum_literal;
-        LazyValue *x_lazy;

        // populated if special == ConstValSpecialRuntime
        RuntimeHintErrorUnion rh_error_union;
@@ -767,11 +769,19 @@ struct AstNodeUnwrapOptional {
    AstNode *expr;
};

+// Must be synchronized with std.builtin.CallOptions.Modifier
enum CallModifier {
    CallModifierNone,
-    CallModifierAsync,
+    CallModifierNeverTail,
+    CallModifierNeverInline,
    CallModifierNoAsync,
+    CallModifierAlwaysTail,
+    CallModifierAlwaysInline,
+    CallModifierCompileTime,
+
+    // These are additional tags in the compiler, but not exposed in the std lib.
    CallModifierBuiltin,
+    CallModifierAsync,
};

struct AstNodeFnCallExpr {
@@ -1692,8 +1702,6 @@ enum BuiltinFnId {
    BuiltinFnIdFieldParentPtr,
    BuiltinFnIdByteOffsetOf,
    BuiltinFnIdBitOffsetOf,
-    BuiltinFnIdInlineCall,
-    BuiltinFnIdNoInlineCall,
-    BuiltinFnIdNewStackCall,
    BuiltinFnIdAsyncCall,
    BuiltinFnIdTypeId,
@@ -1717,6 +1725,7 @@ enum BuiltinFnId {
    BuiltinFnIdFrameHandle,
    BuiltinFnIdFrameSize,
    BuiltinFnIdAs,
+    BuiltinFnIdCall,
};

struct BuiltinFnEntry {
@@ -2479,6 +2488,8 @@ enum IrInstructionId {
    IrInstructionIdVarPtr,
    IrInstructionIdReturnPtr,
    IrInstructionIdCallSrc,
+    IrInstructionIdCallSrcArgs,
+    IrInstructionIdCallExtra,
    IrInstructionIdCallGen,
    IrInstructionIdConst,
    IrInstructionIdReturn,
@@ -2886,15 +2897,37 @@ struct IrInstructionCallSrc {
    ZigFn *fn_entry;
    size_t arg_count;
    IrInstruction **args;
    IrInstruction *ret_ptr;
    ResultLoc *result_loc;

    IrInstruction *new_stack;

-    FnInline fn_inline;
    CallModifier modifier;

    bool is_async_call_builtin;
    bool is_comptime;
};

+// This is a pass1 instruction, used by @call when the args node is
+// a tuple or struct literal.
+struct IrInstructionCallSrcArgs {
+    IrInstruction base;
+
+    IrInstruction *options;
+    IrInstruction *fn_ref;
+    IrInstruction **args_ptr;
+    size_t args_len;
+    ResultLoc *result_loc;
+};
+
+// This is a pass1 instruction, used by @call, when the args node
+// is not a literal.
+// `args` is expected to be either a struct or a tuple.
+struct IrInstructionCallExtra {
+    IrInstruction base;
+
+    IrInstruction *options;
+    IrInstruction *fn_ref;
+    IrInstruction *args;
+    ResultLoc *result_loc;
+};
+
struct IrInstructionCallGen {
@@ -2908,7 +2941,6 @@ struct IrInstructionCallGen {
    IrInstruction *frame_result_loc;
    IrInstruction *new_stack;

-    FnInline fn_inline;
    CallModifier modifier;

    bool is_async_call_builtin;
@@ -594,8 +594,11 @@ ZigType *get_pointer_to_type_extra2(CodeGen *g, ZigType *child_type, bool is_con
            break;
    }

-    if (type_is_resolved(child_type, ResolveStatusZeroBitsKnown)) {
+    if (inferred_struct_field != nullptr) {
+        entry->abi_size = g->builtin_types.entry_usize->abi_size;
+        entry->size_in_bits = g->builtin_types.entry_usize->size_in_bits;
+        entry->abi_align = g->builtin_types.entry_usize->abi_align;
+    } else if (type_is_resolved(child_type, ResolveStatusZeroBitsKnown)) {
        if (type_has_bits(child_type)) {
            entry->abi_size = g->builtin_types.entry_usize->abi_size;
            entry->size_in_bits = g->builtin_types.entry_usize->size_in_bits;
@@ -956,10 +959,7 @@ bool calling_convention_allows_zig_types(CallingConvention cc) {

ZigType *get_stack_trace_type(CodeGen *g) {
    if (g->stack_trace_type == nullptr) {
-        ZigValue *stack_trace_type_val = get_builtin_value(g, "StackTrace");
-        assert(stack_trace_type_val->type->id == ZigTypeIdMetaType);
-
-        g->stack_trace_type = stack_trace_type_val->data.x_type;
+        g->stack_trace_type = get_builtin_type(g, "StackTrace");
        assertNoError(type_resolve(g, g->stack_trace_type, ResolveStatusZeroBitsKnown));
    }
    return g->stack_trace_type;
@@ -2717,10 +2717,10 @@ static Error resolve_struct_zero_bits(CodeGen *g, ZigType *struct_type) {
        src_assert(struct_type->data.structure.fields == nullptr, decl_node);
        struct_type->data.structure.fields = alloc_type_struct_fields(field_count);
    } else if (decl_node->type == NodeTypeContainerInitExpr) {
-        src_assert(struct_type->data.structure.is_inferred, decl_node);
-        src_assert(struct_type->data.structure.fields != nullptr, decl_node);
-
        field_count = struct_type->data.structure.src_field_count;
+
+        src_assert(struct_type->data.structure.is_inferred, decl_node);
+        src_assert(field_count == 0 || struct_type->data.structure.fields != nullptr, decl_node);
    } else zig_unreachable();

    struct_type->data.structure.fields_by_name.init(field_count);
@@ -7531,6 +7531,12 @@ ZigValue *get_builtin_value(CodeGen *codegen, const char *name) {
    return var_value;
}

+ZigType *get_builtin_type(CodeGen *codegen, const char *name) {
+    ZigValue *type_val = get_builtin_value(codegen, name);
+    assert(type_val->type->id == ZigTypeIdMetaType);
+    return type_val->data.x_type;
+}
+
bool type_is_global_error_set(ZigType *err_set_type) {
    assert(err_set_type->id == ZigTypeIdErrorSet);
    assert(!err_set_type->data.error_set.incomplete);
@@ -207,6 +207,7 @@ void add_var_export(CodeGen *g, ZigVar *fn_table_entry, const char *symbol_name,

ZigValue *get_builtin_value(CodeGen *codegen, const char *name);
+ZigType *get_builtin_type(CodeGen *codegen, const char *name);
ZigType *get_stack_trace_type(CodeGen *g);
bool resolve_inferred_error_set(CodeGen *g, ZigType *err_set_type, AstNode *source_node);

@@ -702,14 +702,29 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) {
    switch (node->data.fn_call_expr.modifier) {
        case CallModifierNone:
            break;
-        case CallModifierBuiltin:
-            fprintf(ar->f, "@");
+        case CallModifierNoAsync:
+            fprintf(ar->f, "noasync ");
            break;
        case CallModifierAsync:
            fprintf(ar->f, "async ");
            break;
-        case CallModifierNoAsync:
-            fprintf(ar->f, "noasync ");
+        case CallModifierNeverTail:
+            fprintf(ar->f, "notail ");
            break;
+        case CallModifierNeverInline:
+            fprintf(ar->f, "noinline ");
+            break;
+        case CallModifierAlwaysTail:
+            fprintf(ar->f, "tail ");
+            break;
+        case CallModifierAlwaysInline:
+            fprintf(ar->f, "inline ");
+            break;
+        case CallModifierCompileTime:
+            fprintf(ar->f, "comptime ");
+            break;
+        case CallModifierBuiltin:
+            fprintf(ar->f, "@");
+            break;
    }
    AstNode *fn_ref_node = node->data.fn_call_expr.fn_ref_expr;
@@ -981,7 +981,7 @@ static void gen_panic(CodeGen *g, LLVMValueRef msg_arg, LLVMValueRef stack_trace
        msg_arg,
        stack_trace_arg,
    };
-    ZigLLVMBuildCall(g->builder, fn_val, args, 2, llvm_cc, ZigLLVM_FnInlineAuto, "");
+    ZigLLVMBuildCall(g->builder, fn_val, args, 2, llvm_cc, ZigLLVM_CallAttrAuto, "");
    if (!stack_trace_is_llvm_alloca) {
        // The stack trace argument is not in the stack of the caller, so
        // we'd like to set tail call here, but because slices (the type of msg_arg) are
@@ -1201,7 +1201,8 @@ static LLVMValueRef get_return_err_fn(CodeGen *g) {

    LLVMPositionBuilderAtEnd(g->builder, dest_non_null_block);
    LLVMValueRef args[] = { err_ret_trace_ptr, return_address };
-    ZigLLVMBuildCall(g->builder, add_error_return_trace_addr_fn_val, args, 2, get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAlways, "");
+    ZigLLVMBuildCall(g->builder, add_error_return_trace_addr_fn_val, args, 2,
+            get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_CallAttrAlwaysInline, "");
    LLVMBuildRetVoid(g->builder);

    LLVMPositionBuilderAtEnd(g->builder, prev_block);
@@ -1370,13 +1371,13 @@ static void gen_safety_crash_for_err(CodeGen *g, LLVMValueRef err_val, Scope *sc
            err_val,
        };
        call_instruction = ZigLLVMBuildCall(g->builder, safety_crash_err_fn, args, 2,
-                get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
+                get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_CallAttrAuto, "");
    } else {
        LLVMValueRef args[] = {
            err_val,
        };
        call_instruction = ZigLLVMBuildCall(g->builder, safety_crash_err_fn, args, 1,
-                get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
+                get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_CallAttrAuto, "");
    }
    if (!is_llvm_alloca) {
        LLVMSetTailCall(call_instruction, true);
@@ -2216,7 +2217,7 @@ static LLVMValueRef get_merge_err_ret_traces_fn_val(CodeGen *g) {
    LLVMValueRef addr_ptr = LLVMBuildInBoundsGEP(g->builder, src_ptr_val, &ptr_index, 1, "");
    LLVMValueRef this_addr_val = LLVMBuildLoad(g->builder, addr_ptr, "");
    LLVMValueRef args[] = {dest_stack_trace_ptr, this_addr_val};
-    ZigLLVMBuildCall(g->builder, add_error_return_trace_addr_fn_val, args, 2, get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAlways, "");
+    ZigLLVMBuildCall(g->builder, add_error_return_trace_addr_fn_val, args, 2, get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_CallAttrAlwaysInline, "");
    LLVMValueRef prev_frames_left = LLVMBuildLoad(g->builder, frames_left_ptr, "");
    LLVMValueRef new_frames_left = LLVMBuildNUWSub(g->builder, prev_frames_left, usize_one, "");
    LLVMValueRef done_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, new_frames_left, usize_zero, "");
@@ -2253,7 +2254,7 @@ static LLVMValueRef ir_render_save_err_ret_addr(CodeGen *g, IrExecutable *execut
    LLVMValueRef my_err_trace_val = get_cur_err_ret_trace_val(g, save_err_ret_addr_instruction->base.scope,
            &is_llvm_alloca);
    ZigLLVMBuildCall(g->builder, return_err_fn, &my_err_trace_val, 1,
-            get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
+            get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_CallAttrAuto, "");

    ZigType *ret_type = g->cur_fn->type_entry->data.fn.fn_type_id.return_type;
    if (fn_is_async(g->cur_fn) && codegen_fn_has_err_ret_tracing_arg(g, ret_type)) {
@@ -2297,7 +2298,7 @@ static LLVMValueRef gen_resume(CodeGen *g, LLVMValueRef fn_val, LLVMValueRef tar
    LLVMValueRef arg_val = LLVMConstSub(LLVMConstAllOnes(usize_type_ref),
            LLVMConstInt(usize_type_ref, resume_id, false));
    LLVMValueRef args[] = {target_frame_ptr, arg_val};
-    return ZigLLVMBuildCall(g->builder, fn_val, args, 2, LLVMFastCallConv, ZigLLVM_FnInlineAuto, "");
+    return ZigLLVMBuildCall(g->builder, fn_val, args, 2, LLVMFastCallConv, ZigLLVM_CallAttrAuto, "");
}

static LLVMBasicBlockRef gen_suspend_begin(CodeGen *g, const char *name_hint) {
@@ -2424,7 +2425,7 @@ static void gen_async_return(CodeGen *g, IrInstructionReturn *instruction) {
        LLVMValueRef my_err_trace_val = get_cur_err_ret_trace_val(g, instruction->base.scope, &is_llvm_alloca);
        LLVMValueRef args[] = { dest_trace_ptr, my_err_trace_val };
        ZigLLVMBuildCall(g->builder, get_merge_err_ret_traces_fn_val(g), args, 2,
-                get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
+                get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_CallAttrAuto, "");
    }
}

@@ -3061,7 +3062,7 @@ static LLVMValueRef ir_render_cast(CodeGen *g, IrExecutable *executable,
    ZigType *actual_type = cast_instruction->value->value->type;
    ZigType *wanted_type = cast_instruction->base.value->type;
    LLVMValueRef expr_val = ir_llvm_value(g, cast_instruction->value);
-    assert(expr_val);
+    ir_assert(expr_val, &cast_instruction->base);

    switch (cast_instruction->cast_op) {
        case CastOpNoCast:
@@ -4142,16 +4143,28 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
    fn_walk.data.call.gen_param_types = &gen_param_types;
    walk_function_params(g, fn_type, &fn_walk);

-    ZigLLVM_FnInline fn_inline;
-    switch (instruction->fn_inline) {
-        case FnInlineAuto:
-            fn_inline = ZigLLVM_FnInlineAuto;
+    ZigLLVM_CallAttr call_attr;
+    switch (instruction->modifier) {
+        case CallModifierBuiltin:
+        case CallModifierCompileTime:
+            zig_unreachable();
+        case CallModifierNone:
+        case CallModifierNoAsync:
+        case CallModifierAsync:
+            call_attr = ZigLLVM_CallAttrAuto;
            break;
-        case FnInlineAlways:
-            fn_inline = (instruction->fn_entry == nullptr) ? ZigLLVM_FnInlineAuto : ZigLLVM_FnInlineAlways;
+        case CallModifierNeverTail:
+            call_attr = ZigLLVM_CallAttrNeverTail;
            break;
-        case FnInlineNever:
-            fn_inline = ZigLLVM_FnInlineNever;
+        case CallModifierNeverInline:
+            call_attr = ZigLLVM_CallAttrNeverInline;
            break;
+        case CallModifierAlwaysTail:
+            call_attr = ZigLLVM_CallAttrAlwaysTail;
+            break;
+        case CallModifierAlwaysInline:
+            ir_assert(instruction->fn_entry != nullptr, &instruction->base);
+            call_attr = ZigLLVM_CallAttrAlwaysInline;
+            break;
    }

@@ -4257,7 +4270,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr

    if (instruction->new_stack == nullptr || instruction->is_async_call_builtin) {
        result = ZigLLVMBuildCall(g->builder, fn_val,
-                gen_param_values.items, (unsigned)gen_param_values.length, llvm_cc, fn_inline, "");
+                gen_param_values.items, (unsigned)gen_param_values.length, llvm_cc, call_attr, "");
    } else if (instruction->modifier == CallModifierAsync) {
        zig_panic("TODO @asyncCall of non-async function");
    } else {
@@ -4269,7 +4282,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
        }
        gen_set_stack_pointer(g, new_stack_addr);
        result = ZigLLVMBuildCall(g->builder, fn_val,
-                gen_param_values.items, (unsigned)gen_param_values.length, llvm_cc, fn_inline, "");
+                gen_param_values.items, (unsigned)gen_param_values.length, llvm_cc, call_attr, "");
        if (src_return_type->id != ZigTypeIdUnreachable) {
            LLVMValueRef stackrestore_fn_val = get_stackrestore_fn_val(g);
            LLVMBuildCall(g->builder, stackrestore_fn_val, &old_stack_ref, 1, "");
@@ -4317,8 +4330,17 @@ static LLVMValueRef ir_render_struct_field_ptr(CodeGen *g, IrExecutable *executa
        return struct_ptr;
    }

-    ZigType *struct_type = (struct_ptr_type->id == ZigTypeIdPointer) ?
-        struct_ptr_type->data.pointer.child_type : struct_ptr_type;
+    ZigType *struct_type;
+    if (struct_ptr_type->id == ZigTypeIdPointer) {
+        if (struct_ptr_type->data.pointer.inferred_struct_field != nullptr) {
+            struct_type = struct_ptr_type->data.pointer.inferred_struct_field->inferred_struct_type;
+        } else {
+            struct_type = struct_ptr_type->data.pointer.child_type;
+        }
+    } else {
+        struct_type = struct_ptr_type;
+    }

    if ((err = type_resolve(g, struct_type, ResolveStatusLLVMFull)))
        codegen_report_errors_and_exit(g);

@@ -4947,7 +4969,7 @@ static LLVMValueRef ir_render_enum_tag_name(CodeGen *g, IrExecutable *executable

    LLVMValueRef enum_tag_value = ir_llvm_value(g, instruction->target);
    return ZigLLVMBuildCall(g->builder, enum_name_function, &enum_tag_value, 1,
-            get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
+            get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_CallAttrAuto, "");
}

static LLVMValueRef ir_render_field_parent_ptr(CodeGen *g, IrExecutable *executable,
@@ -5903,7 +5925,7 @@ static LLVMValueRef gen_await_early_return(CodeGen *g, IrInstruction *source_ins
        LLVMValueRef dest_trace_ptr = get_cur_err_ret_trace_val(g, source_instr->scope, &is_llvm_alloca);
        LLVMValueRef args[] = { dest_trace_ptr, src_trace_ptr };
        ZigLLVMBuildCall(g->builder, get_merge_err_ret_traces_fn_val(g), args, 2,
-                get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
+                get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_CallAttrAuto, "");
    }
    if (non_async && type_has_bits(result_type)) {
        LLVMValueRef result_ptr = (result_loc == nullptr) ? their_result_ptr : result_loc;
@@ -6137,7 +6159,9 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
        case IrInstructionIdLoadPtr:
        case IrInstructionIdHasDecl:
        case IrInstructionIdUndeclaredIdent:
+        case IrInstructionIdCallExtra:
        case IrInstructionIdCallSrc:
+        case IrInstructionIdCallSrcArgs:
        case IrInstructionIdAllocaSrc:
        case IrInstructionIdEndExpr:
        case IrInstructionIdImplicitCast:
@@ -8118,8 +8142,6 @@ static void define_builtin_fns(CodeGen *g) {
    create_builtin_fn(g, BuiltinFnIdNearbyInt, "nearbyInt", 2);
    create_builtin_fn(g, BuiltinFnIdRound, "round", 2);
    create_builtin_fn(g, BuiltinFnIdMulAdd, "mulAdd", 4);
-    create_builtin_fn(g, BuiltinFnIdInlineCall, "inlineCall", SIZE_MAX);
-    create_builtin_fn(g, BuiltinFnIdNoInlineCall, "noInlineCall", SIZE_MAX);
-    create_builtin_fn(g, BuiltinFnIdNewStackCall, "newStackCall", SIZE_MAX);
    create_builtin_fn(g, BuiltinFnIdAsyncCall, "asyncCall", SIZE_MAX);
    create_builtin_fn(g, BuiltinFnIdTypeId, "typeId", 1);
@@ -8146,6 +8168,7 @@ static void define_builtin_fns(CodeGen *g) {
    create_builtin_fn(g, BuiltinFnIdFrameAddress, "frameAddress", 0);
    create_builtin_fn(g, BuiltinFnIdFrameSize, "frameSize", 1);
    create_builtin_fn(g, BuiltinFnIdAs, "as", 2);
+    create_builtin_fn(g, BuiltinFnIdCall, "call", 3);
}

static const char *bool_to_str(bool b) {
src/ir.cpp (665 changed lines): file diff suppressed because it is too large.
@@ -92,8 +92,12 @@ const char* ir_instruction_type_str(IrInstructionId id) {
            return "VarPtr";
        case IrInstructionIdReturnPtr:
            return "ReturnPtr";
+        case IrInstructionIdCallExtra:
+            return "CallExtra";
        case IrInstructionIdCallSrc:
            return "CallSrc";
+        case IrInstructionIdCallSrcArgs:
+            return "CallSrcArgs";
        case IrInstructionIdCallGen:
            return "CallGen";
        case IrInstructionIdConst:
@@ -636,15 +640,57 @@ static void ir_print_result_loc(IrPrint *irp, ResultLoc *result_loc) {
    zig_unreachable();
}

+static void ir_print_call_extra(IrPrint *irp, IrInstructionCallExtra *instruction) {
+    fprintf(irp->f, "opts=");
+    ir_print_other_instruction(irp, instruction->options);
+    fprintf(irp->f, ", fn=");
+    ir_print_other_instruction(irp, instruction->fn_ref);
+    fprintf(irp->f, ", args=");
+    ir_print_other_instruction(irp, instruction->args);
+    fprintf(irp->f, ", result=");
+    ir_print_result_loc(irp, instruction->result_loc);
+}
+
+static void ir_print_call_src_args(IrPrint *irp, IrInstructionCallSrcArgs *instruction) {
+    fprintf(irp->f, "opts=");
+    ir_print_other_instruction(irp, instruction->options);
+    fprintf(irp->f, ", fn=");
+    ir_print_other_instruction(irp, instruction->fn_ref);
+    fprintf(irp->f, ", args=(");
+    for (size_t i = 0; i < instruction->args_len; i += 1) {
+        IrInstruction *arg = instruction->args_ptr[i];
+        if (i != 0)
+            fprintf(irp->f, ", ");
+        ir_print_other_instruction(irp, arg);
+    }
+    fprintf(irp->f, "), result=");
+    ir_print_result_loc(irp, instruction->result_loc);
+}
+
static void ir_print_call_src(IrPrint *irp, IrInstructionCallSrc *call_instruction) {
    switch (call_instruction->modifier) {
        case CallModifierNone:
            break;
+        case CallModifierNoAsync:
+            fprintf(irp->f, "noasync ");
+            break;
        case CallModifierAsync:
            fprintf(irp->f, "async ");
            break;
-        case CallModifierNoAsync:
-            fprintf(irp->f, "noasync ");
+        case CallModifierNeverTail:
+            fprintf(irp->f, "notail ");
            break;
+        case CallModifierNeverInline:
+            fprintf(irp->f, "noinline ");
+            break;
+        case CallModifierAlwaysTail:
+            fprintf(irp->f, "tail ");
+            break;
+        case CallModifierAlwaysInline:
+            fprintf(irp->f, "inline ");
+            break;
+        case CallModifierCompileTime:
+            fprintf(irp->f, "comptime ");
+            break;
        case CallModifierBuiltin:
            zig_unreachable();
@@ -670,11 +716,26 @@ static void ir_print_call_gen(IrPrint *irp, IrInstructionCallGen *call_instructi
    switch (call_instruction->modifier) {
        case CallModifierNone:
            break;
+        case CallModifierNoAsync:
+            fprintf(irp->f, "noasync ");
+            break;
        case CallModifierAsync:
            fprintf(irp->f, "async ");
            break;
-        case CallModifierNoAsync:
-            fprintf(irp->f, "noasync ");
+        case CallModifierNeverTail:
+            fprintf(irp->f, "notail ");
            break;
+        case CallModifierNeverInline:
+            fprintf(irp->f, "noinline ");
+            break;
+        case CallModifierAlwaysTail:
+            fprintf(irp->f, "tail ");
+            break;
+        case CallModifierAlwaysInline:
+            fprintf(irp->f, "inline ");
+            break;
+        case CallModifierCompileTime:
+            fprintf(irp->f, "comptime ");
+            break;
        case CallModifierBuiltin:
            zig_unreachable();
@@ -2082,9 +2143,15 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction, bool
        case IrInstructionIdCast:
            ir_print_cast(irp, (IrInstructionCast *)instruction);
            break;
+        case IrInstructionIdCallExtra:
+            ir_print_call_extra(irp, (IrInstructionCallExtra *)instruction);
+            break;
        case IrInstructionIdCallSrc:
            ir_print_call_src(irp, (IrInstructionCallSrc *)instruction);
            break;
+        case IrInstructionIdCallSrcArgs:
+            ir_print_call_src_args(irp, (IrInstructionCallSrcArgs *)instruction);
+            break;
        case IrInstructionIdCallGen:
            ir_print_call_gen(irp, (IrInstructionCallGen *)instruction);
            break;
@@ -269,19 +269,25 @@ ZIG_EXTERN_C LLVMTypeRef ZigLLVMTokenTypeInContext(LLVMContextRef context_ref) {
}

LLVMValueRef ZigLLVMBuildCall(LLVMBuilderRef B, LLVMValueRef Fn, LLVMValueRef *Args,
-        unsigned NumArgs, unsigned CC, ZigLLVM_FnInline fn_inline, const char *Name)
+        unsigned NumArgs, unsigned CC, ZigLLVM_CallAttr attr, const char *Name)
{
    CallInst *call_inst = CallInst::Create(unwrap(Fn), makeArrayRef(unwrap(Args), NumArgs), Name);
    call_inst->setCallingConv(CC);
-    switch (fn_inline) {
-        case ZigLLVM_FnInlineAuto:
+    switch (attr) {
+        case ZigLLVM_CallAttrAuto:
            break;
-        case ZigLLVM_FnInlineAlways:
-            call_inst->addAttribute(AttributeList::FunctionIndex, Attribute::AlwaysInline);
+        case ZigLLVM_CallAttrNeverTail:
+            call_inst->setTailCallKind(CallInst::TCK_NoTail);
            break;
-        case ZigLLVM_FnInlineNever:
+        case ZigLLVM_CallAttrNeverInline:
            call_inst->addAttribute(AttributeList::FunctionIndex, Attribute::NoInline);
            break;
+        case ZigLLVM_CallAttrAlwaysTail:
+            call_inst->setTailCallKind(CallInst::TCK_MustTail);
+            break;
+        case ZigLLVM_CallAttrAlwaysInline:
+            call_inst->addAttribute(AttributeList::FunctionIndex, Attribute::AlwaysInline);
+            break;
    }
    return wrap(unwrap(B)->Insert(call_inst));
}

@@ -64,13 +64,15 @@ ZIG_EXTERN_C LLVMTargetMachineRef ZigLLVMCreateTargetMachine(LLVMTargetRef T, co

ZIG_EXTERN_C LLVMTypeRef ZigLLVMTokenTypeInContext(LLVMContextRef context_ref);

-enum ZigLLVM_FnInline {
-    ZigLLVM_FnInlineAuto,
-    ZigLLVM_FnInlineAlways,
-    ZigLLVM_FnInlineNever,
+enum ZigLLVM_CallAttr {
+    ZigLLVM_CallAttrAuto,
+    ZigLLVM_CallAttrNeverTail,
+    ZigLLVM_CallAttrNeverInline,
+    ZigLLVM_CallAttrAlwaysTail,
+    ZigLLVM_CallAttrAlwaysInline,
};
ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildCall(LLVMBuilderRef B, LLVMValueRef Fn, LLVMValueRef *Args,
-        unsigned NumArgs, unsigned CC, enum ZigLLVM_FnInline fn_inline, const char *Name);
+        unsigned NumArgs, unsigned CC, enum ZigLLVM_CallAttr attr, const char *Name);

ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildMemCpy(LLVMBuilderRef B, LLVMValueRef Dst, unsigned DstAlign,
        LLVMValueRef Src, unsigned SrcAlign, LLVMValueRef Size, bool isVolatile);
@@ -2,6 +2,36 @@ const tests = @import("tests.zig");
const builtin = @import("builtin");

pub fn addCases(cases: *tests.CompileErrorContext) void {
    cases.add(
        "bad usage of @call",
        \\export fn entry1() void {
        \\    @call(.{}, foo, {});
        \\}
        \\export fn entry2() void {
        \\    comptime @call(.{ .modifier = .never_inline }, foo, .{});
        \\}
        \\export fn entry3() void {
        \\    comptime @call(.{ .modifier = .never_tail }, foo, .{});
        \\}
        \\export fn entry4() void {
        \\    @call(.{ .modifier = .never_inline }, bar, .{});
        \\}
        \\export fn entry5(c: bool) void {
        \\    var baz = if (c) baz1 else baz2;
        \\    @call(.{ .modifier = .compile_time }, baz, .{});
        \\}
        \\fn foo() void {}
        \\inline fn bar() void {}
        \\fn baz1() void {}
        \\fn baz2() void {}
    ,
        "tmp.zig:2:21: error: expected tuple or struct, found 'void'",
        "tmp.zig:5:14: error: unable to perform 'never_inline' call at compile-time",
        "tmp.zig:8:14: error: unable to perform 'never_tail' call at compile-time",
        "tmp.zig:11:5: error: no-inline call of inline function",
        "tmp.zig:15:43: error: unable to evaluate constant expression",
    );

    cases.add(
        \\export async fn foo() void {}
    , "tmp.zig:1:1: error: exported function cannot be async");
@@ -14,13 +44,13 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
    );

    cases.addCase(x: {
-        var tc = cases.create("@newStackCall on unsupported target",
+        var tc = cases.create("call with new stack on unsupported target",
-            \\var buf: [10]u8 align(16) = undefined;
            \\export fn entry() void {
+            \\    var buf: [10]u8 align(16) = undefined;
-            \\    @newStackCall(&buf, foo);
+            \\    @call(.{.stack = &buf}, foo, .{});
            \\}
            \\fn foo() void {}
-        , "tmp.zig:3:5: error: target arch 'wasm32' does not support @newStackCall");
+        , "tmp.zig:3:5: error: target arch 'wasm32' does not support calling with a new stack");
        tc.target = tests.Target{
            .Cross = tests.CrossTarget{
                .arch = .wasm32,
@@ -1927,17 +1957,6 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
        "tmp.zig:2:12: error: use of undeclared identifier 'SomeNonexistentType'",
    );

-    cases.add(
-        "@noInlineCall on an inline function",
-        \\inline fn foo() void {}
-        \\
-        \\export fn entry() void {
-        \\    @noInlineCall(foo);
-        \\}
-    ,
-        "tmp.zig:4:5: error: no-inline call of inline function",
-    );
-
    cases.add(
        "comptime continue inside runtime catch",
        \\export fn entry(c: bool) void {
@@ -52,6 +52,7 @@ comptime {
    _ = @import("behavior/bugs/920.zig");
    _ = @import("behavior/byteswap.zig");
    _ = @import("behavior/byval_arg_var.zig");
+    _ = @import("behavior/call.zig");
    _ = @import("behavior/cast.zig");
    _ = @import("behavior/const_slice_child.zig");
    _ = @import("behavior/defer.zig");
test/stage1/behavior/call.zig (new file, 48 lines)
@@ -0,0 +1,48 @@
const std = @import("std");
const expect = std.testing.expect;

test "basic invocations" {
    const foo = struct {
        fn foo() i32 {
            return 1234;
        }
    }.foo;
    expect(@call(.{}, foo, .{}) == 1234);
    comptime {
        // modifiers that allow comptime calls
        expect(@call(.{}, foo, .{}) == 1234);
        expect(@call(.{ .modifier = .no_async }, foo, .{}) == 1234);
        expect(@call(.{ .modifier = .always_tail }, foo, .{}) == 1234);
        expect(@call(.{ .modifier = .always_inline }, foo, .{}) == 1234);
    }
    {
        // comptime call without comptime keyword
        const result = @call(.{ .modifier = .compile_time }, foo, .{}) == 1234;
        comptime expect(result);
    }
}

test "tuple parameters" {
    const add = struct {
        fn add(a: i32, b: i32) i32 {
            return a + b;
        }
    }.add;
    var a: i32 = 12;
    var b: i32 = 34;
    expect(@call(.{}, add, .{ a, 34 }) == 46);
    expect(@call(.{}, add, .{ 12, b }) == 46);
    expect(@call(.{}, add, .{ a, b }) == 46);
    expect(@call(.{}, add, .{ 12, 34 }) == 46);
    comptime expect(@call(.{}, add, .{ 12, 34 }) == 46);
    {
        const separate_args0 = .{ a, b };
        //TODO const separate_args1 = .{ a, 34 };
        const separate_args2 = .{ 12, 34 };
        //TODO const separate_args3 = .{ 12, b };
        expect(@call(.{ .modifier = .always_inline }, add, separate_args0) == 46);
        // TODO expect(@call(.{ .modifier = .always_inline }, add, separate_args1) == 46);
        expect(@call(.{ .modifier = .always_inline }, add, separate_args2) == 46);
        // TODO expect(@call(.{ .modifier = .always_inline }, add, separate_args3) == 46);
    }
}
@@ -96,14 +96,6 @@ fn fn4() u32 {
    return 8;
}

-test "inline function call" {
-    expect(@inlineCall(add, 3, 9) == 12);
-}
-
-fn add(a: i32, b: i32) i32 {
-    return a + b;
-}
-
test "number literal as an argument" {
    numberLiteralArg(3);
    comptime numberLiteralArg(3);
@@ -251,7 +243,7 @@ test "discard the result of a function that returns a struct" {
test "function call with anon list literal" {
    const S = struct {
        fn doTheTest() void {
-            consumeVec(.{9, 8, 7});
+            consumeVec(.{ 9, 8, 7 });
        }

        fn consumeVec(vec: [3]f32) void {
@@ -18,8 +18,8 @@ test "calling a function with a new stack" {

    const arg = 1234;

-    const a = @newStackCall(new_stack_bytes[0..512], targetFunction, arg);
-    const b = @newStackCall(new_stack_bytes[512..], targetFunction, arg);
+    const a = @call(.{ .stack = new_stack_bytes[0..512] }, targetFunction, .{arg});
+    const b = @call(.{ .stack = new_stack_bytes[512..] }, targetFunction, .{arg});
    _ = targetFunction(arg);

    expect(arg == 1234);