Merge pull request #3652 from ziglang/anon-container-lit

implement anonymous struct literals and anonymous list literals
master
Andrew Kelley 2019-11-12 01:40:31 +00:00 committed by GitHub
commit 5502160bd2
18 changed files with 783 additions and 243 deletions
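
For orientation, here is a minimal sketch of the syntax this change enables (illustrative only, not part of the diff):

const std = @import("std");
const assert = std.debug.assert;

const Point = struct { x: i32, y: i32 };

test "anonymous container literals" {
    // Anonymous struct literal: the struct type comes from the result location.
    const p: Point = .{ .x = 1, .y = 2 };
    // Anonymous list literal: the same syntax initializes an array.
    const a: [2]u8 = .{ 3, 4 };
    assert(p.x == 1 and p.y == 2);
    assert(a[0] == 3 and a[1] == 4);
}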

View File

@ -1734,6 +1734,43 @@ test "array initialization with function calls" {
{#code_end#}
{#see_also|for|Slices#}
{#header_open|Anonymous List Literals#}
<p>As with {#link|Enum Literals#} and {#link|Anonymous Struct Literals#},
the type can be omitted from array literals:</p>
{#code_begin|test|anon_list#}
const std = @import("std");
const assert = std.debug.assert;
test "anonymous list literal syntax" {
var array: [4]u8 = .{11, 22, 33, 44};
assert(array[0] == 11);
assert(array[1] == 22);
assert(array[2] == 33);
assert(array[3] == 44);
}
{#code_end#}
<p>
If there is no type in the result location, an anonymous list literal actually
turns into a {#link|struct#} with numbered field names:
</p>
{#code_begin|test|infer_list_literal#}
const std = @import("std");
const assert = std.debug.assert;
test "fully anonymous list literal" {
dump(.{ @as(u32, 1234), @as(f64, 12.34), true, "hi"});
}
fn dump(args: var) void {
assert(args.@"0" == 1234);
assert(args.@"1" == 12.34);
assert(args.@"2");
assert(args.@"3"[0] == 'h');
assert(args.@"3"[1] == 'i');
}
{#code_end#}
{#header_close#}
{#header_open|Multidimensional Arrays#}
<p>
Multidimensional arrays can be created by nesting arrays:
@ -2526,7 +2563,8 @@ test "overaligned pointer to packed struct" {
Don't worry, there will be a good solution for this use case in zig.
</p>
{#header_close#}
{#header_open|struct Naming#}
{#header_open|Struct Naming#}
<p>Since all structs are anonymous, Zig infers the type name based on a few rules.</p>
<ul>
<li>If the struct is in the initialization expression of a variable, it gets named after
@ -2552,6 +2590,53 @@ fn List(comptime T: type) type {
}
{#code_end#}
{#header_close#}
{#header_open|Anonymous Struct Literals#}
<p>
Zig allows omitting the struct type of a literal. When the result is {#link|coerced|Type Coercion#},
the struct literal will directly instantiate the result location, with no copy:
</p>
{#code_begin|test|struct_result#}
const std = @import("std");
const assert = std.debug.assert;
const Point = struct {x: i32, y: i32};
test "anonymous struct literal" {
var pt: Point = .{
.x = 13,
.y = 67,
};
assert(pt.x == 13);
assert(pt.y == 67);
}
{#code_end#}
<p>
The struct type can be inferred. Here the result location does not include a type,
so Zig infers it:
</p>
{#code_begin|test|struct_anon#}
const std = @import("std");
const assert = std.debug.assert;
test "fully anonymous struct" {
dump(.{
.int = @as(u32, 1234),
.float = @as(f64, 12.34),
.b = true,
.s = "hi",
});
}
fn dump(args: var) void {
assert(args.int == 1234);
assert(args.float == 12.34);
assert(args.b);
assert(args.s[0] == 'h');
assert(args.s[1] == 'i');
}
{#code_end#}
{#header_close#}
{#see_also|comptime|@fieldParentPtr#}
{#header_close#}
{#header_open|enum#}
@ -2906,6 +2991,32 @@ test "@tagName" {
<p>A {#syntax#}packed union{#endsyntax#} has well-defined in-memory layout and is eligible
to be in a {#link|packed struct#}.
{#header_close#}
{#header_open|Anonymous Union Literals#}
<p>{#link|Anonymous Struct Literals#} syntax can be used to initialize unions without specifying
the type:</p>
{#code_begin|test|anon_union#}
const std = @import("std");
const assert = std.debug.assert;
const Number = union {
int: i32,
float: f64,
};
test "anonymous union literal syntax" {
var i: Number = .{.int = 42};
var f = makeNumber();
assert(i.int == 42);
assert(f.float == 12.34);
}
fn makeNumber() Number {
return .{.float = 12.34};
}
{#code_end#}
{#header_close#}
{#header_close#}
{#header_open|blocks#}

View File

@ -90,40 +90,11 @@ pub const Mode = enum {
ReleaseSmall,
};
/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const TypeId = enum {
Type,
Void,
Bool,
NoReturn,
Int,
Float,
Pointer,
Array,
Struct,
ComptimeFloat,
ComptimeInt,
Undefined,
Null,
Optional,
ErrorUnion,
ErrorSet,
Enum,
Union,
Fn,
BoundFn,
ArgTuple,
Opaque,
Frame,
AnyFrame,
Vector,
EnumLiteral,
};
pub const TypeId = @TagType(TypeInfo);
/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const TypeInfo = union(TypeId) {
pub const TypeInfo = union(enum) {
Type: void,
Void: void,
Bool: void,

View File

@ -1648,10 +1648,15 @@ pub const Node = struct {
pub const SuffixOp = struct {
base: Node,
lhs: *Node,
lhs: Lhs,
op: Op,
rtoken: TokenIndex,
pub const Lhs = union(enum) {
node: *Node,
dot: TokenIndex,
};
pub const Op = union(enum) {
Call: Call,
ArrayAccess: *Node,
@ -1679,8 +1684,13 @@ pub const Node = struct {
pub fn iterate(self: *SuffixOp, index: usize) ?*Node {
var i = index;
if (i < 1) return self.lhs;
i -= 1;
switch (self.lhs) {
.node => |node| {
if (i == 0) return node;
i -= 1;
},
.dot => {},
}
switch (self.op) {
.Call => |*call_info| {
@ -1721,7 +1731,10 @@ pub const Node = struct {
.Call => |*call_info| if (call_info.async_token) |async_token| return async_token,
else => {},
}
return self.lhs.firstToken();
switch (self.lhs) {
.node => |node| return node.firstToken(),
.dot => |dot| return dot,
}
}
pub fn lastToken(self: *const SuffixOp) TokenIndex {

View File

@ -1026,16 +1026,16 @@ fn parseWhileExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
/// CurlySuffixExpr <- TypeExpr InitList?
fn parseCurlySuffixExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const type_expr = (try parseTypeExpr(arena, it, tree)) orelse return null;
const init_list = (try parseInitList(arena, it, tree)) orelse return type_expr;
init_list.cast(Node.SuffixOp).?.lhs = type_expr;
return init_list;
const suffix_op = (try parseInitList(arena, it, tree)) orelse return type_expr;
suffix_op.lhs.node = type_expr;
return &suffix_op.base;
}
/// InitList
/// <- LBRACE FieldInit (COMMA FieldInit)* COMMA? RBRACE
/// / LBRACE Expr (COMMA Expr)* COMMA? RBRACE
/// / LBRACE RBRACE
fn parseInitList(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
fn parseInitList(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node.SuffixOp {
const lbrace = eatToken(it, .LBrace) orelse return null;
var init_list = Node.SuffixOp.Op.InitList.init(arena);
@ -1064,11 +1064,11 @@ fn parseInitList(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const node = try arena.create(Node.SuffixOp);
node.* = Node.SuffixOp{
.base = Node{ .id = .SuffixOp },
.lhs = undefined, // set by caller
.lhs = .{.node = undefined}, // set by caller
.op = op,
.rtoken = try expectToken(it, tree, .RBrace),
};
return &node.base;
return node;
}
/// TypeExpr <- PrefixTypeOp* ErrorUnionExpr
@ -1117,7 +1117,7 @@ fn parseSuffixExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
while (try parseSuffixOp(arena, it, tree)) |node| {
switch (node.id) {
.SuffixOp => node.cast(Node.SuffixOp).?.lhs = res,
.SuffixOp => node.cast(Node.SuffixOp).?.lhs = .{.node = res},
.InfixOp => node.cast(Node.InfixOp).?.lhs = res,
else => unreachable,
}
@ -1133,7 +1133,7 @@ fn parseSuffixExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const node = try arena.create(Node.SuffixOp);
node.* = Node.SuffixOp{
.base = Node{ .id = .SuffixOp },
.lhs = res,
.lhs = .{.node = res},
.op = Node.SuffixOp.Op{
.Call = Node.SuffixOp.Op.Call{
.params = params.list,
@ -1150,7 +1150,7 @@ fn parseSuffixExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
while (true) {
if (try parseSuffixOp(arena, it, tree)) |node| {
switch (node.id) {
.SuffixOp => node.cast(Node.SuffixOp).?.lhs = res,
.SuffixOp => node.cast(Node.SuffixOp).?.lhs = .{.node = res},
.InfixOp => node.cast(Node.InfixOp).?.lhs = res,
else => unreachable,
}
@ -1161,7 +1161,7 @@ fn parseSuffixExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const call = try arena.create(Node.SuffixOp);
call.* = Node.SuffixOp{
.base = Node{ .id = .SuffixOp },
.lhs = res,
.lhs = .{.node = res},
.op = Node.SuffixOp.Op{
.Call = Node.SuffixOp.Op.Call{
.params = params.list,
@ -1215,7 +1215,7 @@ fn parsePrimaryTypeExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*N
return &node.base;
}
if (try parseContainerDecl(arena, it, tree)) |node| return node;
if (try parseEnumLiteral(arena, it, tree)) |node| return node;
if (try parseAnonLiteral(arena, it, tree)) |node| return node;
if (try parseErrorSetDecl(arena, it, tree)) |node| return node;
if (try parseFloatLiteral(arena, it, tree)) |node| return node;
if (try parseFnProto(arena, it, tree)) |node| return node;
@ -1494,16 +1494,28 @@ fn parseAsmExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
}
/// DOT IDENTIFIER / DOT InitList
fn parseEnumLiteral(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
fn parseAnonLiteral(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const dot = eatToken(it, .Period) orelse return null;
const name = try expectToken(it, tree, .Identifier);
const node = try arena.create(Node.EnumLiteral);
node.* = Node.EnumLiteral{
.base = Node{ .id = .EnumLiteral },
.dot = dot,
.name = name,
};
return &node.base;
// anon enum literal
if (eatToken(it, .Identifier)) |name| {
const node = try arena.create(Node.EnumLiteral);
node.* = Node.EnumLiteral{
.base = Node{ .id = .EnumLiteral },
.dot = dot,
.name = name,
};
return &node.base;
}
// anon container literal
if (try parseInitList(arena, it, tree)) |node| {
node.lhs = .{.dot = dot};
return &node.base;
}
putBackToken(it, dot);
return null;
}
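
As a quick illustration (not part of the diff), these are the two source forms the renamed parseAnonLiteral accepts after the leading dot; any other token causes the dot to be put back:

const Color = enum { Red, Green };
const Point = struct { x: i32, y: i32 };
const color: Color = .Red;                 // DOT IDENTIFIER: enum literal
const point: Point = .{ .x = 1, .y = 2 };  // DOT InitList: anonymous container literal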
/// AsmOutput <- COLON AsmOutputList AsmInput?

View File

@ -1,3 +1,20 @@
test "zig fmt: anon struct literal syntax" {
try testCanonical(
\\const x = .{
\\ .a = b,
\\ .c = d,
\\};
\\
);
}
test "zig fmt: anon list literal syntax" {
try testCanonical(
\\const x = .{ a, b, c };
\\
);
}
test "zig fmt: async function" {
try testCanonical(
\\pub const Server = struct {

View File

@ -538,9 +538,9 @@ fn renderExpression(
try renderToken(tree, stream, async_token, indent, start_col, Space.Space);
}
try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs, Space.None);
try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs.node, Space.None);
const lparen = tree.nextToken(suffix_op.lhs.lastToken());
const lparen = tree.nextToken(suffix_op.lhs.node.lastToken());
if (call_info.params.len == 0) {
try renderToken(tree, stream, lparen, indent, start_col, Space.None);
@ -598,7 +598,7 @@ fn renderExpression(
const lbracket = tree.prevToken(index_expr.firstToken());
const rbracket = tree.nextToken(index_expr.lastToken());
try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs, Space.None);
try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs.node, Space.None);
try renderToken(tree, stream, lbracket, indent, start_col, Space.None); // [
const starts_with_comment = tree.tokens.at(lbracket + 1).id == .LineComment;
@ -616,18 +616,18 @@ fn renderExpression(
},
ast.Node.SuffixOp.Op.Deref => {
try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs, Space.None);
try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs.node, Space.None);
return renderToken(tree, stream, suffix_op.rtoken, indent, start_col, space); // .*
},
ast.Node.SuffixOp.Op.UnwrapOptional => {
try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs, Space.None);
try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs.node, Space.None);
try renderToken(tree, stream, tree.prevToken(suffix_op.rtoken), indent, start_col, Space.None); // .
return renderToken(tree, stream, suffix_op.rtoken, indent, start_col, space); // ?
},
@TagType(ast.Node.SuffixOp.Op).Slice => |range| {
try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs, Space.None);
try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs.node, Space.None);
const lbracket = tree.prevToken(range.start.firstToken());
const dotdot = tree.nextToken(range.start.lastToken());
@ -647,10 +647,16 @@ fn renderExpression(
},
ast.Node.SuffixOp.Op.StructInitializer => |*field_inits| {
const lbrace = tree.nextToken(suffix_op.lhs.lastToken());
const lbrace = switch (suffix_op.lhs) {
.dot => |dot| tree.nextToken(dot),
.node => |node| tree.nextToken(node.lastToken()),
};
if (field_inits.len == 0) {
try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs, Space.None);
switch (suffix_op.lhs) {
.dot => |dot| try renderToken(tree, stream, dot, indent, start_col, Space.None),
.node => |node| try renderExpression(allocator, stream, tree, indent, start_col, node, Space.None),
}
try renderToken(tree, stream, lbrace, indent + indent_delta, start_col, Space.None);
return renderToken(tree, stream, suffix_op.rtoken, indent, start_col, space);
}
@ -691,7 +697,10 @@ fn renderExpression(
break :blk;
}
try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs, Space.None);
switch (suffix_op.lhs) {
.dot => |dot| try renderToken(tree, stream, dot, indent, start_col, Space.None),
.node => |node| try renderExpression(allocator, stream, tree, indent, start_col, node, Space.None),
}
try renderToken(tree, stream, lbrace, indent, start_col, Space.Space);
try renderExpression(allocator, stream, tree, indent, start_col, &field_init.base, Space.Space);
return renderToken(tree, stream, suffix_op.rtoken, indent, start_col, space);
@ -699,7 +708,10 @@ fn renderExpression(
if (!src_has_trailing_comma and src_same_line and expr_outputs_one_line) {
// render all on one line, no trailing comma
try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs, Space.None);
switch (suffix_op.lhs) {
.dot => |dot| try renderToken(tree, stream, dot, indent, start_col, Space.None),
.node => |node| try renderExpression(allocator, stream, tree, indent, start_col, node, Space.None),
}
try renderToken(tree, stream, lbrace, indent, start_col, Space.Space);
var it = field_inits.iterator(0);
@ -719,7 +731,10 @@ fn renderExpression(
const new_indent = indent + indent_delta;
try renderExpression(allocator, stream, tree, new_indent, start_col, suffix_op.lhs, Space.None);
switch (suffix_op.lhs) {
.dot => |dot| try renderToken(tree, stream, dot, new_indent, start_col, Space.None),
.node => |node| try renderExpression(allocator, stream, tree, new_indent, start_col, node, Space.None),
}
try renderToken(tree, stream, lbrace, new_indent, start_col, Space.Newline);
var it = field_inits.iterator(0);
@ -743,23 +758,35 @@ fn renderExpression(
},
ast.Node.SuffixOp.Op.ArrayInitializer => |*exprs| {
const lbrace = tree.nextToken(suffix_op.lhs.lastToken());
const lbrace = switch (suffix_op.lhs) {
.dot => |dot| tree.nextToken(dot),
.node => |node| tree.nextToken(node.lastToken()),
};
if (exprs.len == 0) {
try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs, Space.None);
switch (suffix_op.lhs) {
.dot => |dot| try renderToken(tree, stream, dot, indent, start_col, Space.None),
.node => |node| try renderExpression(allocator, stream, tree, indent, start_col, node, Space.None),
}
try renderToken(tree, stream, lbrace, indent, start_col, Space.None);
return renderToken(tree, stream, suffix_op.rtoken, indent, start_col, space);
}
if (exprs.len == 1 and tree.tokens.at(exprs.at(0).*.lastToken() + 1).id == .RBrace) {
const expr = exprs.at(0).*;
try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs, Space.None);
switch (suffix_op.lhs) {
.dot => |dot| try renderToken(tree, stream, dot, indent, start_col, Space.None),
.node => |node| try renderExpression(allocator, stream, tree, indent, start_col, node, Space.None),
}
try renderToken(tree, stream, lbrace, indent, start_col, Space.None);
try renderExpression(allocator, stream, tree, indent, start_col, expr, Space.None);
return renderToken(tree, stream, suffix_op.rtoken, indent, start_col, space);
}
try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs, Space.None);
switch (suffix_op.lhs) {
.dot => |dot| try renderToken(tree, stream, dot, indent, start_col, Space.None),
.node => |node| try renderExpression(allocator, stream, tree, indent, start_col, node, Space.None),
}
// scan to find row size
const maybe_row_size: ?usize = blk: {

View File

@ -1187,10 +1187,22 @@ bool fn_type_id_eql(FnTypeId *a, FnTypeId *b);
static const uint32_t VECTOR_INDEX_NONE = UINT32_MAX;
static const uint32_t VECTOR_INDEX_RUNTIME = UINT32_MAX - 1;
struct InferredStructField {
ZigType *inferred_struct_type;
Buf *field_name;
};
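
For context (illustrative only, not part of the diff), InferredStructField models the fully anonymous literal case described in the doc changes above: the result location carries no type, so a field pointer is created before the field's type is known, and the type is filled in when the initializing store is analyzed. A minimal Zig example of that situation:

const assert = @import("std").debug.assert;
fn dump(args: var) void {
    assert(args.a == 123);
}
test "fully anonymous struct literal" {
    // No type in the result location: the compiler infers a struct type for `args`
    // and learns the type of field `a` from the stored value.
    dump(.{ .a = @as(u32, 123) });
}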
struct ZigTypePointer {
ZigType *child_type;
ZigType *slice_parent;
// Anonymous struct literal syntax uses this when the result location has
// no type in it. This field is null if this pointer does not refer to
// a field of a currently-being-inferred struct type.
// When this is non-null, the pointer is pointing to the base of the inferred
// struct.
InferredStructField *inferred_struct_field;
PtrLen ptr_len;
uint32_t explicit_alignment; // 0 means use ABI alignment
@ -1237,6 +1249,7 @@ struct TypeStructField {
enum ResolveStatus {
ResolveStatusUnstarted,
ResolveStatusInvalid,
ResolveStatusBeingInferred,
ResolveStatusZeroBitsKnown,
ResolveStatusAlignmentKnown,
ResolveStatusSizeKnown,
@ -1285,6 +1298,7 @@ struct ZigTypeStruct {
bool requires_comptime;
bool resolve_loop_flag_zero_bits;
bool resolve_loop_flag_other;
bool is_inferred;
};
struct ZigTypeOptional {
@ -1741,6 +1755,7 @@ struct TypeId {
union {
struct {
ZigType *child_type;
InferredStructField *inferred_struct_field;
PtrLen ptr_len;
uint32_t alignment;
@ -2812,7 +2827,7 @@ struct IrInstructionElemPtr {
IrInstruction *array_ptr;
IrInstruction *elem_index;
IrInstruction *init_array_type;
AstNode *init_array_type_source_node;
PtrLen ptr_len;
bool safety_check_on;
};
@ -2909,11 +2924,11 @@ struct IrInstructionResizeSlice {
struct IrInstructionContainerInitList {
IrInstruction base;
IrInstruction *container_type;
IrInstruction *elem_type;
size_t item_count;
IrInstruction **elem_result_loc_list;
IrInstruction *result_loc;
AstNode *init_array_type_source_node;
};
struct IrInstructionContainerInitFieldsField {
@ -2926,7 +2941,6 @@ struct IrInstructionContainerInitFieldsField {
struct IrInstructionContainerInitFields {
IrInstruction base;
IrInstruction *container_type;
size_t field_count;
IrInstructionContainerInitFieldsField *fields;
IrInstruction *result_loc;

View File

@ -140,7 +140,6 @@ void init_scope(CodeGen *g, Scope *dest, ScopeId id, AstNode *source_node, Scope
static ScopeDecls *create_decls_scope(CodeGen *g, AstNode *node, Scope *parent, ZigType *container_type,
ZigType *import, Buf *bare_name)
{
assert(node == nullptr || node->type == NodeTypeContainerDecl || node->type == NodeTypeFnCallExpr);
ScopeDecls *scope = allocate<ScopeDecls>(1);
init_scope(g, &scope->base, ScopeIdDecls, node, parent);
scope->decl_table.init(4);
@ -346,6 +345,8 @@ bool type_is_resolved(ZigType *type_entry, ResolveStatus status) {
switch (status) {
case ResolveStatusInvalid:
zig_unreachable();
case ResolveStatusBeingInferred:
zig_unreachable();
case ResolveStatusUnstarted:
case ResolveStatusZeroBitsKnown:
return true;
@ -362,6 +363,8 @@ bool type_is_resolved(ZigType *type_entry, ResolveStatus status) {
switch (status) {
case ResolveStatusInvalid:
zig_unreachable();
case ResolveStatusBeingInferred:
zig_unreachable();
case ResolveStatusUnstarted:
return true;
case ResolveStatusZeroBitsKnown:
@ -483,7 +486,7 @@ ZigType *get_fn_frame_type(CodeGen *g, ZigFn *fn) {
ZigType *get_pointer_to_type_extra2(CodeGen *g, ZigType *child_type, bool is_const,
bool is_volatile, PtrLen ptr_len, uint32_t byte_alignment,
uint32_t bit_offset_in_host, uint32_t host_int_bytes, bool allow_zero,
uint32_t vector_index)
uint32_t vector_index, InferredStructField *inferred_struct_field)
{
assert(ptr_len != PtrLenC || allow_zero);
assert(!type_is_invalid(child_type));
@ -506,7 +509,7 @@ ZigType *get_pointer_to_type_extra2(CodeGen *g, ZigType *child_type, bool is_con
TypeId type_id = {};
ZigType **parent_pointer = nullptr;
if (host_int_bytes != 0 || is_volatile || byte_alignment != 0 || ptr_len != PtrLenSingle ||
allow_zero || vector_index != VECTOR_INDEX_NONE)
allow_zero || vector_index != VECTOR_INDEX_NONE || inferred_struct_field != nullptr)
{
type_id.id = ZigTypeIdPointer;
type_id.data.pointer.child_type = child_type;
@ -518,6 +521,7 @@ ZigType *get_pointer_to_type_extra2(CodeGen *g, ZigType *child_type, bool is_con
type_id.data.pointer.ptr_len = ptr_len;
type_id.data.pointer.allow_zero = allow_zero;
type_id.data.pointer.vector_index = vector_index;
type_id.data.pointer.inferred_struct_field = inferred_struct_field;
auto existing_entry = g->type_table.maybe_get(type_id);
if (existing_entry)
@ -545,8 +549,15 @@ ZigType *get_pointer_to_type_extra2(CodeGen *g, ZigType *child_type, bool is_con
}
buf_resize(&entry->name, 0);
if (host_int_bytes == 0 && byte_alignment == 0 && vector_index == VECTOR_INDEX_NONE) {
buf_appendf(&entry->name, "%s%s%s%s%s",
star_str, const_str, volatile_str, allow_zero_str, buf_ptr(&child_type->name));
if (inferred_struct_field == nullptr) {
buf_appendf(&entry->name, "%s%s%s%s%s",
star_str, const_str, volatile_str, allow_zero_str, buf_ptr(&child_type->name));
} else {
buf_appendf(&entry->name, "(%s%s%s%s field '%s' of %s)",
star_str, const_str, volatile_str, allow_zero_str,
buf_ptr(inferred_struct_field->field_name),
buf_ptr(&inferred_struct_field->inferred_struct_type->name));
}
} else if (host_int_bytes == 0 && vector_index == VECTOR_INDEX_NONE) {
buf_appendf(&entry->name, "%salign(%" PRIu32 ") %s%s%s%s", star_str, byte_alignment,
const_str, volatile_str, allow_zero_str, buf_ptr(&child_type->name));
@ -603,6 +614,7 @@ ZigType *get_pointer_to_type_extra2(CodeGen *g, ZigType *child_type, bool is_con
entry->data.pointer.host_int_bytes = host_int_bytes;
entry->data.pointer.allow_zero = allow_zero;
entry->data.pointer.vector_index = vector_index;
entry->data.pointer.inferred_struct_field = inferred_struct_field;
if (parent_pointer) {
*parent_pointer = entry;
@ -617,12 +629,12 @@ ZigType *get_pointer_to_type_extra(CodeGen *g, ZigType *child_type, bool is_cons
uint32_t bit_offset_in_host, uint32_t host_int_bytes, bool allow_zero)
{
return get_pointer_to_type_extra2(g, child_type, is_const, is_volatile, ptr_len,
byte_alignment, bit_offset_in_host, host_int_bytes, allow_zero, VECTOR_INDEX_NONE);
byte_alignment, bit_offset_in_host, host_int_bytes, allow_zero, VECTOR_INDEX_NONE, nullptr);
}
ZigType *get_pointer_to_type(CodeGen *g, ZigType *child_type, bool is_const) {
return get_pointer_to_type_extra2(g, child_type, is_const, false, PtrLenSingle, 0, 0, 0, false,
VECTOR_INDEX_NONE);
VECTOR_INDEX_NONE, nullptr);
}
ZigType *get_optional_type(CodeGen *g, ZigType *child_type) {
@ -2079,7 +2091,7 @@ static Error resolve_struct_type(CodeGen *g, ZigType *struct_type) {
}
assert(struct_type->data.structure.fields || struct_type->data.structure.src_field_count == 0);
assert(decl_node->type == NodeTypeContainerDecl);
assert(decl_node->type == NodeTypeContainerDecl || decl_node->type == NodeTypeContainerInitExpr);
size_t field_count = struct_type->data.structure.src_field_count;
@ -2667,7 +2679,6 @@ static Error resolve_struct_zero_bits(CodeGen *g, ZigType *struct_type) {
return ErrorNone;
AstNode *decl_node = struct_type->data.structure.decl_node;
assert(decl_node->type == NodeTypeContainerDecl);
if (struct_type->data.structure.resolve_loop_flag_zero_bits) {
if (struct_type->data.structure.resolve_status != ResolveStatusInvalid) {
@ -2678,29 +2689,46 @@ static Error resolve_struct_zero_bits(CodeGen *g, ZigType *struct_type) {
}
return ErrorSemanticAnalyzeFail;
}
struct_type->data.structure.resolve_loop_flag_zero_bits = true;
assert(!struct_type->data.structure.fields);
size_t field_count = decl_node->data.container_decl.fields.length;
struct_type->data.structure.src_field_count = (uint32_t)field_count;
struct_type->data.structure.fields = allocate<TypeStructField>(field_count);
size_t field_count;
if (decl_node->type == NodeTypeContainerDecl) {
field_count = decl_node->data.container_decl.fields.length;
struct_type->data.structure.src_field_count = (uint32_t)field_count;
src_assert(struct_type->data.structure.fields == nullptr, decl_node);
struct_type->data.structure.fields = allocate<TypeStructField>(field_count);
} else if (decl_node->type == NodeTypeContainerInitExpr) {
src_assert(struct_type->data.structure.is_inferred, decl_node);
src_assert(struct_type->data.structure.fields != nullptr, decl_node);
field_count = struct_type->data.structure.src_field_count;
} else zig_unreachable();
struct_type->data.structure.fields_by_name.init(field_count);
Scope *scope = &struct_type->data.structure.decls_scope->base;
size_t gen_field_index = 0;
for (size_t i = 0; i < field_count; i += 1) {
AstNode *field_node = decl_node->data.container_decl.fields.at(i);
TypeStructField *type_struct_field = &struct_type->data.structure.fields[i];
type_struct_field->name = field_node->data.struct_field.name;
type_struct_field->decl_node = field_node;
if (field_node->data.struct_field.type == nullptr) {
add_node_error(g, field_node, buf_sprintf("struct field missing type"));
struct_type->data.structure.resolve_status = ResolveStatusInvalid;
return ErrorSemanticAnalyzeFail;
}
AstNode *field_node;
if (decl_node->type == NodeTypeContainerDecl) {
field_node = decl_node->data.container_decl.fields.at(i);
type_struct_field->name = field_node->data.struct_field.name;
type_struct_field->decl_node = field_node;
if (field_node->data.struct_field.type == nullptr) {
add_node_error(g, field_node, buf_sprintf("struct field missing type"));
struct_type->data.structure.resolve_status = ResolveStatusInvalid;
return ErrorSemanticAnalyzeFail;
}
} else if (decl_node->type == NodeTypeContainerInitExpr) {
field_node = type_struct_field->decl_node;
src_assert(type_struct_field->type_entry != nullptr, field_node);
} else zig_unreachable();
auto field_entry = struct_type->data.structure.fields_by_name.put_unique(type_struct_field->name, type_struct_field);
if (field_entry != nullptr) {
@ -2711,16 +2739,21 @@ static Error resolve_struct_zero_bits(CodeGen *g, ZigType *struct_type) {
return ErrorSemanticAnalyzeFail;
}
ConstExprValue *field_type_val = analyze_const_value(g, scope,
field_node->data.struct_field.type, g->builtin_types.entry_type, nullptr, LazyOkNoUndef);
if (type_is_invalid(field_type_val->type)) {
struct_type->data.structure.resolve_status = ResolveStatusInvalid;
return ErrorSemanticAnalyzeFail;
}
assert(field_type_val->special != ConstValSpecialRuntime);
type_struct_field->type_val = field_type_val;
if (struct_type->data.structure.resolve_status == ResolveStatusInvalid)
return ErrorSemanticAnalyzeFail;
ConstExprValue *field_type_val;
if (decl_node->type == NodeTypeContainerDecl) {
field_type_val = analyze_const_value(g, scope,
field_node->data.struct_field.type, g->builtin_types.entry_type, nullptr, LazyOkNoUndef);
if (type_is_invalid(field_type_val->type)) {
struct_type->data.structure.resolve_status = ResolveStatusInvalid;
return ErrorSemanticAnalyzeFail;
}
assert(field_type_val->special != ConstValSpecialRuntime);
type_struct_field->type_val = field_type_val;
if (struct_type->data.structure.resolve_status == ResolveStatusInvalid)
return ErrorSemanticAnalyzeFail;
} else if (decl_node->type == NodeTypeContainerInitExpr) {
field_type_val = type_struct_field->type_val;
} else zig_unreachable();
bool field_is_opaque_type;
if ((err = type_val_resolve_is_opaque_type(g, field_type_val, &field_is_opaque_type))) {
@ -2804,7 +2837,7 @@ static Error resolve_struct_alignment(CodeGen *g, ZigType *struct_type) {
}
struct_type->data.structure.resolve_loop_flag_other = true;
assert(decl_node->type == NodeTypeContainerDecl);
assert(decl_node->type == NodeTypeContainerDecl || decl_node->type == NodeTypeContainerInitExpr);
size_t field_count = struct_type->data.structure.src_field_count;
bool packed = struct_type->data.structure.layout == ContainerLayoutPacked;
@ -2814,7 +2847,8 @@ static Error resolve_struct_alignment(CodeGen *g, ZigType *struct_type) {
if (field->gen_index == SIZE_MAX)
continue;
AstNode *align_expr = field->decl_node->data.struct_field.align_expr;
AstNode *align_expr = (field->decl_node->type == NodeTypeStructField) ?
field->decl_node->data.struct_field.align_expr : nullptr;
if (align_expr != nullptr) {
if (!analyze_const_align(g, &struct_type->data.structure.decls_scope->base, align_expr,
&field->align))
@ -5413,6 +5447,12 @@ OnePossibleValue type_has_one_possible_value(CodeGen *g, ZigType *type_entry) {
if (type_entry->one_possible_value != OnePossibleValueInvalid)
return type_entry->one_possible_value;
if (type_entry->id == ZigTypeIdStruct &&
type_entry->data.structure.resolve_status == ResolveStatusBeingInferred)
{
return OnePossibleValueNo;
}
Error err;
if ((err = type_resolve(g, type_entry, ResolveStatusZeroBitsKnown)))
return OnePossibleValueInvalid;
@ -6132,6 +6172,8 @@ static Error resolve_async_frame(CodeGen *g, ZigType *frame_type) {
continue;
if (instruction->ref_count == 0)
continue;
if ((err = type_resolve(g, instruction->value.type, ResolveStatusZeroBitsKnown)))
return ErrorSemanticAnalyzeFail;
if (!type_has_bits(instruction->value.type))
continue;
if (scope_needs_spill(instruction->scope)) {
@ -6271,6 +6313,8 @@ Error type_resolve(CodeGen *g, ZigType *ty, ResolveStatus status) {
switch (status) {
case ResolveStatusUnstarted:
return ErrorNone;
case ResolveStatusBeingInferred:
zig_unreachable();
case ResolveStatusInvalid:
zig_unreachable();
case ResolveStatusZeroBitsKnown:
@ -6995,7 +7039,16 @@ bool type_id_eql(TypeId a, TypeId b) {
a.data.pointer.alignment == b.data.pointer.alignment &&
a.data.pointer.bit_offset_in_host == b.data.pointer.bit_offset_in_host &&
a.data.pointer.vector_index == b.data.pointer.vector_index &&
a.data.pointer.host_int_bytes == b.data.pointer.host_int_bytes;
a.data.pointer.host_int_bytes == b.data.pointer.host_int_bytes &&
(
a.data.pointer.inferred_struct_field == b.data.pointer.inferred_struct_field ||
(a.data.pointer.inferred_struct_field != nullptr &&
b.data.pointer.inferred_struct_field != nullptr &&
a.data.pointer.inferred_struct_field->inferred_struct_type ==
b.data.pointer.inferred_struct_field->inferred_struct_type &&
buf_eql_buf(a.data.pointer.inferred_struct_field->field_name,
b.data.pointer.inferred_struct_field->field_name))
);
case ZigTypeIdArray:
return a.data.array.child_type == b.data.array.child_type &&
a.data.array.size == b.data.array.size;
@ -7808,7 +7861,6 @@ static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveS
ZigLLVMDIScope *di_scope;
unsigned line;
if (decl_node != nullptr) {
assert(decl_node->type == NodeTypeContainerDecl);
Scope *scope = &struct_type->data.structure.decls_scope->base;
ZigType *import = get_scope_import(scope);
di_file = import->data.structure.root_struct->di_file;
@ -8011,7 +8063,7 @@ static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveS
}
unsigned line;
if (decl_node != nullptr) {
AstNode *field_node = decl_node->data.container_decl.fields.at(i);
AstNode *field_node = field->decl_node;
line = field_node->line + 1;
} else {
line = 0;
@ -8307,12 +8359,12 @@ static void resolve_llvm_types_pointer(CodeGen *g, ZigType *type, ResolveStatus
if (type->data.pointer.vector_index == VECTOR_INDEX_NONE) {
peer_type = get_pointer_to_type_extra2(g, elem_type, false, false,
PtrLenSingle, 0, 0, type->data.pointer.host_int_bytes, false,
VECTOR_INDEX_NONE);
VECTOR_INDEX_NONE, nullptr);
} else {
uint32_t host_vec_len = type->data.pointer.host_int_bytes;
ZigType *host_vec_type = get_vector_type(g, host_vec_len, elem_type);
peer_type = get_pointer_to_type_extra2(g, host_vec_type, false, false,
PtrLenSingle, 0, 0, 0, false, VECTOR_INDEX_NONE);
PtrLenSingle, 0, 0, 0, false, VECTOR_INDEX_NONE, nullptr);
}
type->llvm_type = get_llvm_type(g, peer_type);
type->llvm_di_type = get_llvm_di_type(g, peer_type);
@ -9038,4 +9090,3 @@ Error analyze_import(CodeGen *g, ZigType *source_import, Buf *import_target_str,
*out_import = add_source_file(g, target_package, resolved_path, import_code, source_kind);
return ErrorNone;
}

View File

@ -24,7 +24,7 @@ ZigType *get_pointer_to_type_extra(CodeGen *g, ZigType *child_type,
ZigType *get_pointer_to_type_extra2(CodeGen *g, ZigType *child_type,
bool is_const, bool is_volatile, PtrLen ptr_len,
uint32_t byte_alignment, uint32_t bit_offset, uint32_t unaligned_bit_count,
bool allow_zero, uint32_t vector_index);
bool allow_zero, uint32_t vector_index, InferredStructField *inferred_struct_field);
uint64_t type_size(CodeGen *g, ZigType *type_entry);
uint64_t type_size_bits(CodeGen *g, ZigType *type_entry);
ZigType *get_int_type(CodeGen *g, bool is_signed, uint32_t size_in_bits);

View File

@ -821,7 +821,9 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) {
break;
}
case NodeTypeContainerInitExpr:
render_node_ungrouped(ar, node->data.container_init_expr.type);
if (node->data.container_init_expr.type != nullptr) {
render_node_ungrouped(ar, node->data.container_init_expr.type);
}
if (node->data.container_init_expr.kind == ContainerInitKindStruct) {
fprintf(ar->f, "{\n");
ar->indent += ar->indent_size;

View File

@ -202,6 +202,10 @@ static Buf *get_anon_type_name(CodeGen *codegen, IrExecutable *exec, const char
Scope *scope, AstNode *source_node, Buf *out_bare_name);
static ResultLocCast *ir_build_cast_result_loc(IrBuilder *irb, IrInstruction *dest_type,
ResultLoc *parent_result_loc);
static IrInstruction *ir_analyze_struct_field_ptr(IrAnalyze *ira, IrInstruction *source_instr,
TypeStructField *field, IrInstruction *struct_ptr, ZigType *struct_type, bool initializing);
static IrInstruction *ir_analyze_inferred_field_ptr(IrAnalyze *ira, Buf *field_name,
IrInstruction *source_instr, IrInstruction *container_ptr, ZigType *container_type);
static ConstExprValue *const_ptr_pointee_unchecked(CodeGen *g, ConstExprValue *const_val) {
assert(get_src_ptr_type(const_val->type) != nullptr);
@ -1350,18 +1354,17 @@ static IrInstruction *ir_build_return_ptr(IrAnalyze *ira, IrInstruction *source_
static IrInstruction *ir_build_elem_ptr(IrBuilder *irb, Scope *scope, AstNode *source_node,
IrInstruction *array_ptr, IrInstruction *elem_index, bool safety_check_on, PtrLen ptr_len,
IrInstruction *init_array_type)
AstNode *init_array_type_source_node)
{
IrInstructionElemPtr *instruction = ir_build_instruction<IrInstructionElemPtr>(irb, scope, source_node);
instruction->array_ptr = array_ptr;
instruction->elem_index = elem_index;
instruction->safety_check_on = safety_check_on;
instruction->ptr_len = ptr_len;
instruction->init_array_type = init_array_type;
instruction->init_array_type_source_node = init_array_type_source_node;
ir_ref_instruction(array_ptr, irb->current_basic_block);
ir_ref_instruction(elem_index, irb->current_basic_block);
if (init_array_type != nullptr) ir_ref_instruction(init_array_type, irb->current_basic_block);
return &instruction->base;
}
@ -1575,17 +1578,16 @@ static IrInstruction *ir_build_un_op(IrBuilder *irb, Scope *scope, AstNode *sour
}
static IrInstruction *ir_build_container_init_list(IrBuilder *irb, Scope *scope, AstNode *source_node,
IrInstruction *container_type, size_t item_count, IrInstruction **elem_result_loc_list,
IrInstruction *result_loc)
size_t item_count, IrInstruction **elem_result_loc_list, IrInstruction *result_loc,
AstNode *init_array_type_source_node)
{
IrInstructionContainerInitList *container_init_list_instruction =
ir_build_instruction<IrInstructionContainerInitList>(irb, scope, source_node);
container_init_list_instruction->container_type = container_type;
container_init_list_instruction->item_count = item_count;
container_init_list_instruction->elem_result_loc_list = elem_result_loc_list;
container_init_list_instruction->result_loc = result_loc;
container_init_list_instruction->init_array_type_source_node = init_array_type_source_node;
ir_ref_instruction(container_type, irb->current_basic_block);
for (size_t i = 0; i < item_count; i += 1) {
ir_ref_instruction(elem_result_loc_list[i], irb->current_basic_block);
}
@ -1595,17 +1597,14 @@ static IrInstruction *ir_build_container_init_list(IrBuilder *irb, Scope *scope,
}
static IrInstruction *ir_build_container_init_fields(IrBuilder *irb, Scope *scope, AstNode *source_node,
IrInstruction *container_type, size_t field_count, IrInstructionContainerInitFieldsField *fields,
IrInstruction *result_loc)
size_t field_count, IrInstructionContainerInitFieldsField *fields, IrInstruction *result_loc)
{
IrInstructionContainerInitFields *container_init_fields_instruction =
ir_build_instruction<IrInstructionContainerInitFields>(irb, scope, source_node);
container_init_fields_instruction->container_type = container_type;
container_init_fields_instruction->field_count = field_count;
container_init_fields_instruction->fields = fields;
container_init_fields_instruction->result_loc = result_loc;
ir_ref_instruction(container_type, irb->current_basic_block);
for (size_t i = 0; i < field_count; i += 1) {
ir_ref_instruction(fields[i].result_loc, irb->current_basic_block);
}
@ -3084,7 +3083,7 @@ static IrInstruction *ir_build_resolve_result(IrBuilder *irb, Scope *scope, AstN
instruction->result_loc = result_loc;
instruction->ty = ty;
ir_ref_instruction(ty, irb->current_basic_block);
if (ty != nullptr) ir_ref_instruction(ty, irb->current_basic_block);
return &instruction->base;
}
@ -6127,28 +6126,46 @@ static IrInstruction *ir_gen_container_init_expr(IrBuilder *irb, Scope *scope, A
AstNodeContainerInitExpr *container_init_expr = &node->data.container_init_expr;
ContainerInitKind kind = container_init_expr->kind;
IrInstruction *container_type = nullptr;
IrInstruction *elem_type = nullptr;
if (container_init_expr->type->type == NodeTypeInferredArrayType) {
elem_type = ir_gen_node(irb, container_init_expr->type->data.inferred_array_type.child_type, scope);
if (elem_type == irb->codegen->invalid_instruction)
return elem_type;
} else {
container_type = ir_gen_node(irb, container_init_expr->type, scope);
if (container_type == irb->codegen->invalid_instruction)
return container_type;
}
switch (kind) {
case ContainerInitKindStruct: {
if (elem_type != nullptr) {
ResultLocCast *result_loc_cast = nullptr;
ResultLoc *child_result_loc;
AstNode *init_array_type_source_node;
if (container_init_expr->type != nullptr) {
IrInstruction *container_type;
if (container_init_expr->type->type == NodeTypeInferredArrayType) {
if (kind == ContainerInitKindStruct) {
add_node_error(irb->codegen, container_init_expr->type,
buf_sprintf("initializing array with struct syntax"));
return irb->codegen->invalid_instruction;
}
IrInstruction *elem_type = ir_gen_node(irb,
container_init_expr->type->data.inferred_array_type.child_type, scope);
if (elem_type == irb->codegen->invalid_instruction)
return elem_type;
size_t item_count = container_init_expr->entries.length;
IrInstruction *item_count_inst = ir_build_const_usize(irb, scope, node, item_count);
container_type = ir_build_array_type(irb, scope, node, item_count_inst, elem_type);
} else {
container_type = ir_gen_node(irb, container_init_expr->type, scope);
if (container_type == irb->codegen->invalid_instruction)
return container_type;
}
IrInstruction *container_ptr = ir_build_resolve_result(irb, scope, node, parent_result_loc,
container_type);
result_loc_cast = ir_build_cast_result_loc(irb, container_type, parent_result_loc);
child_result_loc = &result_loc_cast->base;
init_array_type_source_node = container_type->source_node;
} else {
child_result_loc = parent_result_loc;
if (parent_result_loc->source_instruction != nullptr) {
init_array_type_source_node = parent_result_loc->source_instruction->source_node;
} else {
init_array_type_source_node = node;
}
}
switch (kind) {
case ContainerInitKindStruct: {
IrInstruction *container_ptr = ir_build_resolve_result(irb, scope, node, child_result_loc,
nullptr);
size_t field_count = container_init_expr->entries.length;
IrInstructionContainerInitFieldsField *fields = allocate<IrInstructionContainerInitFieldsField>(field_count);
@ -6176,29 +6193,27 @@ static IrInstruction *ir_gen_container_init_expr(IrBuilder *irb, Scope *scope, A
fields[i].source_node = entry_node;
fields[i].result_loc = field_ptr;
}
IrInstruction *init_fields = ir_build_container_init_fields(irb, scope, node, container_type,
field_count, fields, container_ptr);
IrInstruction *result = ir_build_container_init_fields(irb, scope, node, field_count,
fields, container_ptr);
return ir_lval_wrap(irb, scope, init_fields, lval, parent_result_loc);
if (result_loc_cast != nullptr) {
result = ir_build_implicit_cast(irb, scope, node, result, result_loc_cast);
}
return ir_lval_wrap(irb, scope, result, lval, parent_result_loc);
}
case ContainerInitKindArray: {
size_t item_count = container_init_expr->entries.length;
if (container_type == nullptr) {
IrInstruction *item_count_inst = ir_build_const_usize(irb, scope, node, item_count);
container_type = ir_build_array_type(irb, scope, node, item_count_inst, elem_type);
}
IrInstruction *container_ptr = ir_build_resolve_result(irb, scope, node, parent_result_loc,
container_type);
IrInstruction *container_ptr = ir_build_resolve_result(irb, scope, node, child_result_loc,
nullptr);
IrInstruction **result_locs = allocate<IrInstruction *>(item_count);
for (size_t i = 0; i < item_count; i += 1) {
AstNode *expr_node = container_init_expr->entries.at(i);
IrInstruction *elem_index = ir_build_const_usize(irb, scope, expr_node, i);
IrInstruction *elem_ptr = ir_build_elem_ptr(irb, scope, expr_node, container_ptr, elem_index,
false, PtrLenSingle, container_type);
IrInstruction *elem_ptr = ir_build_elem_ptr(irb, scope, expr_node, container_ptr,
elem_index, false, PtrLenSingle, init_array_type_source_node);
ResultLocInstruction *result_loc_inst = allocate<ResultLocInstruction>(1);
result_loc_inst->base.id = ResultLocIdInstruction;
result_loc_inst->base.source_instruction = elem_ptr;
@ -6213,9 +6228,12 @@ static IrInstruction *ir_gen_container_init_expr(IrBuilder *irb, Scope *scope, A
result_locs[i] = elem_ptr;
}
IrInstruction *init_list = ir_build_container_init_list(irb, scope, node, container_type,
item_count, result_locs, container_ptr);
return ir_lval_wrap(irb, scope, init_list, lval, parent_result_loc);
IrInstruction *result = ir_build_container_init_list(irb, scope, node, item_count,
result_locs, container_ptr, init_array_type_source_node);
if (result_loc_cast != nullptr) {
result = ir_build_implicit_cast(irb, scope, node, result, result_loc_cast);
}
return ir_lval_wrap(irb, scope, result, lval, parent_result_loc);
}
}
zig_unreachable();
@ -7935,14 +7953,14 @@ static bool render_instance_name_recursive(CodeGen *codegen, Buf *name, Scope *o
static Buf *get_anon_type_name(CodeGen *codegen, IrExecutable *exec, const char *kind_name,
Scope *scope, AstNode *source_node, Buf *out_bare_name)
{
if (exec->name) {
if (exec != nullptr && exec->name) {
ZigType *import = get_scope_import(scope);
Buf *namespace_name = buf_alloc();
append_namespace_qualification(codegen, namespace_name, import);
buf_append_buf(namespace_name, exec->name);
buf_init_from_buf(out_bare_name, exec->name);
return namespace_name;
} else if (exec->name_fn != nullptr) {
} else if (exec != nullptr && exec->name_fn != nullptr) {
Buf *name = buf_alloc();
buf_append_buf(name, &exec->name_fn->symbol_name);
buf_appendf(name, "(");
@ -15541,11 +15559,7 @@ static bool ir_result_has_type(ResultLoc *result_loc) {
static IrInstruction *ir_resolve_no_result_loc(IrAnalyze *ira, IrInstruction *suspend_source_instr,
ResultLoc *result_loc, ZigType *value_type, bool force_runtime, bool non_null_comptime)
{
Error err;
IrInstructionAllocaGen *alloca_gen = ir_build_alloca_gen(ira, suspend_source_instr, 0, "");
if ((err = type_resolve(ira->codegen, value_type, ResolveStatusZeroBitsKnown)))
return ira->codegen->invalid_instruction;
alloca_gen->base.value.type = get_pointer_to_type_extra(ira->codegen, value_type, false, false,
PtrLenSingle, 0, 0, 0, false);
set_up_result_loc_for_inferred_comptime(&alloca_gen->base);
@ -15750,6 +15764,7 @@ static IrInstruction *ir_resolve_result_raw(IrAnalyze *ira, IrInstruction *suspe
return casted_value;
}
bool old_parent_result_loc_written = result_cast->parent->written;
IrInstruction *parent_result_loc = ir_resolve_result(ira, suspend_source_instr, result_cast->parent,
dest_type, casted_value, force_runtime, non_null_comptime, true);
if (parent_result_loc == nullptr || type_is_invalid(parent_result_loc->value.type) ||
@ -15775,6 +15790,22 @@ static IrInstruction *ir_resolve_result_raw(IrAnalyze *ira, IrInstruction *suspe
parent_ptr_type->data.pointer.is_const, parent_ptr_type->data.pointer.is_volatile, PtrLenSingle,
parent_ptr_align, 0, 0, parent_ptr_type->data.pointer.allow_zero);
{
// we also need to check that this cast is OK.
ConstCastOnly const_cast_result = types_match_const_cast_only(ira,
parent_result_loc->value.type, ptr_type,
result_cast->base.source_instruction->source_node, false);
if (const_cast_result.id == ConstCastResultIdInvalid)
return ira->codegen->invalid_instruction;
if (const_cast_result.id != ConstCastResultIdOk) {
// We will not be able to provide a result location for this value. Create
// a new result location.
result_cast->parent->written = old_parent_result_loc_written;
return ir_resolve_no_result_loc(ira, suspend_source_instr, result_loc, value_type,
force_runtime, non_null_comptime);
}
}
result_loc->written = true;
result_loc->resolved_loc = ir_analyze_ptr_cast(ira, suspend_source_instr, parent_result_loc,
ptr_type, result_cast->base.source_instruction, false);
@ -15902,10 +15933,37 @@ static IrInstruction *ir_resolve_result(IrAnalyze *ira, IrInstruction *suspend_s
return result_loc;
}
static IrInstruction *ir_analyze_instruction_resolve_result(IrAnalyze *ira, IrInstructionResolveResult *instruction) {
ZigType *implicit_elem_type = ir_resolve_type(ira, instruction->ty->child);
if (type_is_invalid(implicit_elem_type))
return ira->codegen->invalid_instruction;
static IrInstruction *ir_analyze_instruction_resolve_result(IrAnalyze *ira,
IrInstructionResolveResult *instruction)
{
ZigType *implicit_elem_type;
if (instruction->ty == nullptr) {
if (instruction->result_loc->id == ResultLocIdCast) {
implicit_elem_type = ir_resolve_type(ira,
instruction->result_loc->source_instruction->child);
if (type_is_invalid(implicit_elem_type))
return ira->codegen->invalid_instruction;
} else if (instruction->result_loc->id == ResultLocIdReturn) {
implicit_elem_type = ira->explicit_return_type;
if (type_is_invalid(implicit_elem_type))
return ira->codegen->invalid_instruction;
} else {
Buf *bare_name = buf_alloc();
Buf *name = get_anon_type_name(ira->codegen, nullptr, container_string(ContainerKindStruct),
instruction->base.scope, instruction->base.source_node, bare_name);
ZigType *inferred_struct_type = get_partial_container_type(ira->codegen,
instruction->base.scope, ContainerKindStruct, instruction->base.source_node,
buf_ptr(name), bare_name, ContainerLayoutAuto);
inferred_struct_type->data.structure.is_inferred = true;
inferred_struct_type->data.structure.resolve_status = ResolveStatusBeingInferred;
implicit_elem_type = inferred_struct_type;
}
} else {
implicit_elem_type = ir_resolve_type(ira, instruction->ty->child);
if (type_is_invalid(implicit_elem_type))
return ira->codegen->invalid_instruction;
}
IrInstruction *result_loc = ir_resolve_result(ira, &instruction->base, instruction->result_loc,
implicit_elem_type, nullptr, false, true, true);
if (result_loc != nullptr)
@ -16267,13 +16325,78 @@ static IrInstruction *ir_analyze_store_ptr(IrAnalyze *ira, IrInstruction *source
return ir_const_void(ira, source_instr);
}
ZigType *child_type = ptr->value.type->data.pointer.child_type;
InferredStructField *isf = ptr->value.type->data.pointer.inferred_struct_field;
if (allow_write_through_const && isf != nullptr) {
// Now it's time to add the field to the struct type.
uint32_t old_field_count = isf->inferred_struct_type->data.structure.src_field_count;
uint32_t new_field_count = old_field_count + 1;
isf->inferred_struct_type->data.structure.src_field_count = new_field_count;
if (new_field_count > 16) {
// This thing with 16 is a hack to allow this functionality to work without
// modifying the ConstExprValue layout of structs. That reworking needs to be
// done, but this hack lets us do it separately, in the future.
zig_panic("TODO need to rework the layout of ZigTypeStruct. This realloc would have caused invalid pointer references");
}
if (isf->inferred_struct_type->data.structure.fields == nullptr) {
isf->inferred_struct_type->data.structure.fields = allocate<TypeStructField>(16);
}
// This reference can't live long, don't keep it around outside this block.
TypeStructField *field = &isf->inferred_struct_type->data.structure.fields[old_field_count];
field->name = isf->field_name;
field->type_entry = uncasted_value->value.type;
field->type_val = create_const_type(ira->codegen, field->type_entry);
field->src_index = old_field_count;
field->decl_node = uncasted_value->source_node;
ZigType *struct_ptr_type = get_pointer_to_type(ira->codegen, isf->inferred_struct_type, false);
IrInstruction *casted_ptr;
if (instr_is_comptime(ptr)) {
casted_ptr = ir_const(ira, source_instr, struct_ptr_type);
copy_const_val(&casted_ptr->value, &ptr->value, false);
casted_ptr->value.type = struct_ptr_type;
} else {
casted_ptr = ir_build_cast(&ira->new_irb, source_instr->scope,
source_instr->source_node, struct_ptr_type, ptr, CastOpNoop);
casted_ptr->value.type = struct_ptr_type;
}
if (instr_is_comptime(casted_ptr)) {
ConstExprValue *ptr_val = ir_resolve_const(ira, casted_ptr, UndefBad);
if (!ptr_val)
return ira->codegen->invalid_instruction;
if (ptr_val->data.x_ptr.special != ConstPtrSpecialHardCodedAddr) {
ConstExprValue *struct_val = const_ptr_pointee(ira, ira->codegen, ptr_val,
source_instr->source_node);
struct_val->special = ConstValSpecialStatic;
if (new_field_count > 16) {
// This thing with 16 is a hack to allow this functionality to work without
// modifying the ConstExprValue layout of structs. That reworking needs to be
// done, but this hack lets us do it separately, in the future.
zig_panic("TODO need to rework the layout of ConstExprValue for structs. This realloc would have caused invalid pointer references");
}
if (struct_val->data.x_struct.fields == nullptr) {
struct_val->data.x_struct.fields = create_const_vals(16);
}
ConstExprValue *field_val = &struct_val->data.x_struct.fields[old_field_count];
field_val->special = ConstValSpecialUndef;
field_val->type = field->type_entry;
field_val->parent.id = ConstParentIdStruct;
field_val->parent.data.p_struct.struct_val = struct_val;
field_val->parent.data.p_struct.field_index = old_field_count;
}
}
ptr = ir_analyze_struct_field_ptr(ira, source_instr, field, casted_ptr,
isf->inferred_struct_type, true);
}
if (ptr->value.type->data.pointer.is_const && !allow_write_through_const) {
ir_add_error(ira, source_instr, buf_sprintf("cannot assign to constant"));
return ira->codegen->invalid_instruction;
}
ZigType *child_type = ptr->value.type->data.pointer.child_type;
IrInstruction *value = ir_implicit_cast(ira, uncasted_value, child_type);
if (value == ira->codegen->invalid_instruction)
return ira->codegen->invalid_instruction;
@ -17769,6 +17892,19 @@ static IrInstruction *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruct
} else if (array_type->id == ZigTypeIdVector) {
// This depends on whether the element index is comptime, so it is computed later.
return_type = nullptr;
} else if (elem_ptr_instruction->init_array_type_source_node != nullptr &&
array_type->id == ZigTypeIdStruct &&
array_type->data.structure.resolve_status == ResolveStatusBeingInferred)
{
ZigType *usize = ira->codegen->builtin_types.entry_usize;
IrInstruction *casted_elem_index = ir_implicit_cast(ira, elem_index, usize);
if (casted_elem_index == ira->codegen->invalid_instruction)
return ira->codegen->invalid_instruction;
ir_assert(instr_is_comptime(casted_elem_index), &elem_ptr_instruction->base);
Buf *field_name = buf_alloc();
bigint_append_buf(field_name, &casted_elem_index->value.data.x_bigint, 10);
return ir_analyze_inferred_field_ptr(ira, field_name, &elem_ptr_instruction->base,
array_ptr, array_type);
} else {
ir_add_error_node(ira, elem_ptr_instruction->base.source_node,
buf_sprintf("array access of non-array type '%s'", buf_ptr(&array_type->name)));
@ -17799,7 +17935,8 @@ static IrInstruction *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruct
return_type = get_pointer_to_type_extra2(ira->codegen, elem_type,
ptr_type->data.pointer.is_const, ptr_type->data.pointer.is_volatile,
elem_ptr_instruction->ptr_len,
get_ptr_align(ira->codegen, ptr_type), 0, host_vec_len, false, (uint32_t)index);
get_ptr_align(ira->codegen, ptr_type), 0, host_vec_len, false, (uint32_t)index,
nullptr);
} else if (return_type->data.pointer.explicit_alignment != 0) {
// figure out the largest alignment possible
@ -17837,7 +17974,9 @@ static IrInstruction *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruct
if (array_ptr_val == nullptr)
return ira->codegen->invalid_instruction;
if (array_ptr_val->special == ConstValSpecialUndef && elem_ptr_instruction->init_array_type != nullptr) {
if (array_ptr_val->special == ConstValSpecialUndef &&
elem_ptr_instruction->init_array_type_source_node != nullptr)
{
if (array_type->id == ZigTypeIdArray || array_type->id == ZigTypeIdVector) {
array_ptr_val->data.x_array.special = ConstArraySpecialNone;
array_ptr_val->data.x_array.data.s_none.elements = create_const_vals(array_type->data.array.len);
@ -17851,11 +17990,13 @@ static IrInstruction *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruct
elem_val->parent.data.p_array.elem_index = i;
}
} else if (is_slice(array_type)) {
ZigType *actual_array_type = ir_resolve_type(ira, elem_ptr_instruction->init_array_type->child);
ir_assert(array_ptr->value.type->id == ZigTypeIdPointer, &elem_ptr_instruction->base);
ZigType *actual_array_type = array_ptr->value.type->data.pointer.child_type;
if (type_is_invalid(actual_array_type))
return ira->codegen->invalid_instruction;
if (actual_array_type->id != ZigTypeIdArray) {
ir_add_error(ira, elem_ptr_instruction->init_array_type,
ir_add_error_node(ira, elem_ptr_instruction->init_array_type_source_node,
buf_sprintf("expected array type or [_], found slice"));
return ira->codegen->invalid_instruction;
}
@ -17879,7 +18020,7 @@ static IrInstruction *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruct
false);
array_ptr_val->data.x_struct.fields[slice_ptr_index].data.x_ptr.mut = ConstPtrMutInfer;
} else {
ir_add_error(ira, elem_ptr_instruction->init_array_type,
ir_add_error_node(ira, elem_ptr_instruction->init_array_type_source_node,
buf_sprintf("expected array type or [_], found '%s'",
buf_ptr(&array_type->name)));
return ira->codegen->invalid_instruction;
@ -18012,7 +18153,7 @@ static IrInstruction *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruct
if (orig_array_ptr_val->data.x_ptr.mut == ConstPtrMutInfer) {
result = ir_build_elem_ptr(&ira->new_irb, elem_ptr_instruction->base.scope,
elem_ptr_instruction->base.source_node, array_ptr, casted_elem_index,
false, elem_ptr_instruction->ptr_len, elem_ptr_instruction->init_array_type);
false, elem_ptr_instruction->ptr_len, nullptr);
result->value.type = return_type;
result->value.special = ConstValSpecialStatic;
} else {
@ -18036,7 +18177,8 @@ static IrInstruction *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruct
return_type = get_pointer_to_type_extra2(ira->codegen, elem_type,
ptr_type->data.pointer.is_const, ptr_type->data.pointer.is_volatile,
elem_ptr_instruction->ptr_len,
get_ptr_align(ira->codegen, ptr_type), 0, host_vec_len, false, VECTOR_INDEX_RUNTIME);
get_ptr_align(ira->codegen, ptr_type), 0, host_vec_len, false, VECTOR_INDEX_RUNTIME,
nullptr);
} else {
// runtime known element index
switch (type_requires_comptime(ira->codegen, return_type)) {
@ -18073,7 +18215,7 @@ static IrInstruction *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruct
IrInstruction *result = ir_build_elem_ptr(&ira->new_irb, elem_ptr_instruction->base.scope,
elem_ptr_instruction->base.source_node, array_ptr, casted_elem_index, safety_check_on,
elem_ptr_instruction->ptr_len, elem_ptr_instruction->init_array_type);
elem_ptr_instruction->ptr_len, nullptr);
result->value.type = return_type;
return result;
}
@ -18152,31 +18294,34 @@ static IrInstruction *ir_analyze_struct_field_ptr(IrAnalyze *ira, IrInstruction
case OnePossibleValueNo:
break;
}
ResolveStatus needed_resolve_status =
(struct_type->data.structure.layout == ContainerLayoutAuto) ?
ResolveStatusZeroBitsKnown : ResolveStatusSizeKnown;
if ((err = type_resolve(ira->codegen, struct_type, needed_resolve_status)))
return ira->codegen->invalid_instruction;
assert(struct_ptr->value.type->id == ZigTypeIdPointer);
uint32_t ptr_bit_offset = struct_ptr->value.type->data.pointer.bit_offset_in_host;
uint32_t ptr_host_int_bytes = struct_ptr->value.type->data.pointer.host_int_bytes;
uint32_t host_int_bytes_for_result_type = (ptr_host_int_bytes == 0) ?
get_host_int_bytes(ira->codegen, struct_type, field) : ptr_host_int_bytes;
bool is_const = struct_ptr->value.type->data.pointer.is_const;
bool is_volatile = struct_ptr->value.type->data.pointer.is_volatile;
ZigType *ptr_type = get_pointer_to_type_extra(ira->codegen, field_type,
is_const, is_volatile, PtrLenSingle, field->align,
(uint32_t)(ptr_bit_offset + field->bit_offset_in_host),
(uint32_t)host_int_bytes_for_result_type, false);
ZigType *ptr_type;
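// For a struct whose type is still being inferred, field layout is unknown at this point,
// so build a plain single-item pointer; otherwise resolve the struct far enough to know
// alignment and packed-field bit offsets for the field pointer type.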
if (struct_type->data.structure.is_inferred) {
ptr_type = get_pointer_to_type_extra(ira->codegen, field_type,
is_const, is_volatile, PtrLenSingle, 0, 0, 0, false);
} else {
ResolveStatus needed_resolve_status =
(struct_type->data.structure.layout == ContainerLayoutAuto) ?
ResolveStatusZeroBitsKnown : ResolveStatusSizeKnown;
if ((err = type_resolve(ira->codegen, struct_type, needed_resolve_status)))
return ira->codegen->invalid_instruction;
assert(struct_ptr->value.type->id == ZigTypeIdPointer);
uint32_t ptr_bit_offset = struct_ptr->value.type->data.pointer.bit_offset_in_host;
uint32_t ptr_host_int_bytes = struct_ptr->value.type->data.pointer.host_int_bytes;
uint32_t host_int_bytes_for_result_type = (ptr_host_int_bytes == 0) ?
get_host_int_bytes(ira->codegen, struct_type, field) : ptr_host_int_bytes;
ptr_type = get_pointer_to_type_extra(ira->codegen, field_type,
is_const, is_volatile, PtrLenSingle, field->align,
(uint32_t)(ptr_bit_offset + field->bit_offset_in_host),
(uint32_t)host_int_bytes_for_result_type, false);
}
if (instr_is_comptime(struct_ptr)) {
ConstExprValue *ptr_val = ir_resolve_const(ira, struct_ptr, UndefBad);
if (!ptr_val)
return ira->codegen->invalid_instruction;
if (ptr_val->data.x_ptr.special != ConstPtrSpecialHardCodedAddr) {
if ((err = type_resolve(ira->codegen, struct_type, ResolveStatusSizeKnown)))
return ira->codegen->invalid_instruction;
ConstExprValue *struct_val = const_ptr_pointee(ira, ira->codegen, ptr_val, source_instr->source_node);
if (struct_val == nullptr)
return ira->codegen->invalid_instruction;
@ -18188,7 +18333,8 @@ static IrInstruction *ir_analyze_struct_field_ptr(IrAnalyze *ira, IrInstruction
for (size_t i = 0; i < struct_type->data.structure.src_field_count; i += 1) {
ConstExprValue *field_val = &struct_val->data.x_struct.fields[i];
field_val->special = ConstValSpecialUndef;
field_val->type = struct_type->data.structure.fields[i].type_entry;
field_val->type = resolve_struct_field_type(ira->codegen,
&struct_type->data.structure.fields[i]);
field_val->parent.id = ConstParentIdStruct;
field_val->parent.data.p_struct.struct_val = struct_val;
field_val->parent.data.p_struct.field_index = i;
@ -18217,12 +18363,53 @@ static IrInstruction *ir_analyze_struct_field_ptr(IrAnalyze *ira, IrInstruction
return result;
}
static IrInstruction *ir_analyze_inferred_field_ptr(IrAnalyze *ira, Buf *field_name,
IrInstruction *source_instr, IrInstruction *container_ptr, ZigType *container_type)
{
// The type of the field is not available until a store through this pointer happens.
// So, here we create a special pointer type with the inferred struct type and the
// field name encoded in it. Later, when a store through this pointer occurs, the
// field type becomes available and the field is added to the inferred struct.
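//
// For illustration (mirroring the "fully anonymous struct" test in this change; assumed usage,
// not an additional test):
//
//     fn dump(args: var) void { ... }
//     dump(.{ .int = @as(u32, 1234) });
//
// When `.int` is initialized, the result type is still being inferred, so the field
// pointer built below is a `*c_void` carrying an InferredStructField; the store through
// it later supplies the field's actual type.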
ZigType *container_ptr_type = container_ptr->value.type;
ir_assert(container_ptr_type->id == ZigTypeIdPointer, source_instr);
InferredStructField *inferred_struct_field = allocate<InferredStructField>(1, "InferredStructField");
inferred_struct_field->inferred_struct_type = container_type;
inferred_struct_field->field_name = field_name;
ZigType *elem_type = ira->codegen->builtin_types.entry_c_void;
ZigType *field_ptr_type = get_pointer_to_type_extra2(ira->codegen, elem_type,
container_ptr_type->data.pointer.is_const, container_ptr_type->data.pointer.is_volatile,
PtrLenSingle, 0, 0, 0, false, VECTOR_INDEX_NONE, inferred_struct_field);
if (instr_is_comptime(container_ptr)) {
IrInstruction *result = ir_const(ira, source_instr, field_ptr_type);
copy_const_val(&result->value, &container_ptr->value, false);
result->value.type = field_ptr_type;
return result;
}
IrInstruction *result = ir_build_cast(&ira->new_irb, source_instr->scope,
source_instr->source_node, field_ptr_type, container_ptr, CastOpNoop);
result->value.type = field_ptr_type;
return result;
}
static IrInstruction *ir_analyze_container_field_ptr(IrAnalyze *ira, Buf *field_name,
IrInstruction *source_instr, IrInstruction *container_ptr, ZigType *container_type, bool initializing)
{
Error err;
ZigType *bare_type = container_ref_type(container_type);
if (initializing && bare_type->id == ZigTypeIdStruct &&
bare_type->data.structure.resolve_status == ResolveStatusBeingInferred)
{
return ir_analyze_inferred_field_ptr(ira, field_name, source_instr, container_ptr, bare_type);
}
if ((err = type_resolve(ira->codegen, bare_type, ResolveStatusZeroBitsKnown)))
return ira->codegen->invalid_instruction;
@ -19997,6 +20184,11 @@ static IrInstruction *ir_analyze_container_init_fields(IrAnalyze *ira, IrInstruc
return ira->codegen->invalid_instruction;
}
if (container_type->data.structure.resolve_status == ResolveStatusBeingInferred) {
// We're now done inferring the type.
container_type->data.structure.resolve_status = ResolveStatusUnstarted;
}
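// With every field now recorded, the struct can go through normal resolution below.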
if ((err = type_resolve(ira->codegen, container_type, ResolveStatusSizeKnown)))
return ira->codegen->invalid_instruction;
@ -20066,8 +20258,12 @@ static IrInstruction *ir_analyze_container_init_fields(IrAnalyze *ira, IrInstruc
TypeStructField *field = &container_type->data.structure.fields[i];
if (field->init_val == nullptr) {
// it's not memoized. time to go analyze it
assert(field->decl_node->type == NodeTypeStructField);
AstNode *init_node = field->decl_node->data.struct_field.value;
AstNode *init_node;
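// Only a field declared in source (NodeTypeStructField) can carry a default value;
// fields of an inferred struct have no such declaration node.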
if (field->decl_node->type == NodeTypeStructField) {
init_node = field->decl_node->data.struct_field.value;
} else {
init_node = nullptr;
}
if (init_node == nullptr) {
ir_add_error_node(ira, instruction->source_node,
buf_sprintf("missing field: '%s'", buf_ptr(container_type->data.structure.fields[i].name)));
@ -20124,14 +20320,18 @@ static IrInstruction *ir_analyze_container_init_fields(IrAnalyze *ira, IrInstruc
static IrInstruction *ir_analyze_instruction_container_init_list(IrAnalyze *ira,
IrInstructionContainerInitList *instruction)
{
ZigType *container_type = ir_resolve_type(ira, instruction->container_type->child);
if (type_is_invalid(container_type))
return ira->codegen->invalid_instruction;
ir_assert(instruction->result_loc != nullptr, &instruction->base);
IrInstruction *result_loc = instruction->result_loc->child;
if (type_is_invalid(result_loc->value.type))
return result_loc;
ir_assert(result_loc->value.type->id == ZigTypeIdPointer, &instruction->base);
ZigType *container_type = result_loc->value.type->data.pointer.child_type;
size_t elem_count = instruction->item_count;
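// A slice result location is a pointer plus length, not storage that a list literal can
// fill in place, so an array type (or `[_]`) is required instead.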
if (is_slice(container_type)) {
ir_add_error(ira, instruction->container_type,
ir_add_error_node(ira, instruction->init_array_type_source_node,
buf_sprintf("expected array type or [_], found slice"));
return ira->codegen->invalid_instruction;
}
@ -20153,29 +20353,28 @@ static IrInstruction *ir_analyze_instruction_container_init_list(IrAnalyze *ira,
return ir_analyze_container_init_fields(ira, &instruction->base, container_type, 0, nullptr, result_loc);
}
if (container_type->id != ZigTypeIdArray) {
if (container_type->id == ZigTypeIdArray) {
ZigType *child_type = container_type->data.array.child_type;
if (container_type->data.array.len != elem_count) {
ZigType *literal_type = get_array_type(ira->codegen, child_type, elem_count);
ir_add_error(ira, &instruction->base,
buf_sprintf("expected %s literal, found %s literal",
buf_ptr(&container_type->name), buf_ptr(&literal_type->name)));
return ira->codegen->invalid_instruction;
}
} else if (container_type->id == ZigTypeIdStruct &&
container_type->data.structure.resolve_status == ResolveStatusBeingInferred)
{
// We're now done inferring the type.
container_type->data.structure.resolve_status = ResolveStatusUnstarted;
} else {
ir_add_error_node(ira, instruction->base.source_node,
buf_sprintf("type '%s' does not support array initialization",
buf_ptr(&container_type->name)));
return ira->codegen->invalid_instruction;
}
ir_assert(instruction->result_loc != nullptr, &instruction->base);
IrInstruction *result_loc = instruction->result_loc->child;
if (type_is_invalid(result_loc->value.type))
return result_loc;
ir_assert(result_loc->value.type->id == ZigTypeIdPointer, &instruction->base);
ZigType *child_type = container_type->data.array.child_type;
if (container_type->data.array.len != elem_count) {
ZigType *literal_type = get_array_type(ira->codegen, child_type, elem_count);
ir_add_error(ira, &instruction->base,
buf_sprintf("expected %s literal, found %s literal",
buf_ptr(&container_type->name), buf_ptr(&literal_type->name)));
return ira->codegen->invalid_instruction;
}
switch (type_has_one_possible_value(ira->codegen, container_type)) {
case OnePossibleValueInvalid:
return ira->codegen->invalid_instruction;
@ -20262,16 +20461,14 @@ static IrInstruction *ir_analyze_instruction_container_init_list(IrAnalyze *ira,
static IrInstruction *ir_analyze_instruction_container_init_fields(IrAnalyze *ira,
IrInstructionContainerInitFields *instruction)
{
IrInstruction *container_type_value = instruction->container_type->child;
ZigType *container_type = ir_resolve_type(ira, container_type_value);
if (type_is_invalid(container_type))
return ira->codegen->invalid_instruction;
ir_assert(instruction->result_loc != nullptr, &instruction->base);
IrInstruction *result_loc = instruction->result_loc->child;
if (type_is_invalid(result_loc->value.type))
return result_loc;
ir_assert(result_loc->value.type->id == ZigTypeIdPointer, &instruction->base);
ZigType *container_type = result_loc->value.type->data.pointer.child_type;
return ir_analyze_container_init_fields(ira, &instruction->base, container_type,
instruction->field_count, instruction->fields, result_loc);
}
@ -24607,6 +24804,10 @@ static IrInstruction *ir_analyze_ptr_cast(IrAnalyze *ira, IrInstruction *source_
ZigType *src_type = ptr->value.type;
assert(!type_is_invalid(src_type));
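// Casting to the identical pointer type is a no-op; return the operand unchanged.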
if (src_type == dest_type) {
return ptr;
}
// We have a check for zero bits later so we use get_src_ptr_type to
// validate src_type and dest_type.
@ -24656,6 +24857,9 @@ static IrInstruction *ir_analyze_ptr_cast(IrAnalyze *ira, IrInstruction *source_
IrInstruction *result;
if (ptr->value.data.x_ptr.mut == ConstPtrMutInfer) {
result = ir_build_ptr_cast_gen(ira, source_instr, dest_type, ptr, safety_check_on);
if ((err = type_resolve(ira->codegen, dest_type, ResolveStatusZeroBitsKnown)))
return ira->codegen->invalid_instruction;
} else {
result = ir_const(ira, source_instr, dest_type);
}


@ -731,7 +731,6 @@ static void ir_print_phi(IrPrint *irp, IrInstructionPhi *phi_instruction) {
}
static void ir_print_container_init_list(IrPrint *irp, IrInstructionContainerInitList *instruction) {
ir_print_other_instruction(irp, instruction->container_type);
fprintf(irp->f, "{");
if (instruction->item_count > 50) {
fprintf(irp->f, "...(%" ZIG_PRI_usize " items)...", instruction->item_count);
@ -743,11 +742,11 @@ static void ir_print_container_init_list(IrPrint *irp, IrInstructionContainerIni
ir_print_other_instruction(irp, result_loc);
}
}
fprintf(irp->f, "}");
fprintf(irp->f, "}result=");
ir_print_other_instruction(irp, instruction->result_loc);
}
static void ir_print_container_init_fields(IrPrint *irp, IrInstructionContainerInitFields *instruction) {
ir_print_other_instruction(irp, instruction->container_type);
fprintf(irp->f, "{");
for (size_t i = 0; i < instruction->field_count; i += 1) {
IrInstructionContainerInitFieldsField *field = &instruction->fields[i];
@ -755,7 +754,8 @@ static void ir_print_container_init_fields(IrPrint *irp, IrInstructionContainerI
fprintf(irp->f, "%s.%s = ", comma, buf_ptr(field->name));
ir_print_other_instruction(irp, field->result_loc);
}
fprintf(irp->f, "} // container init");
fprintf(irp->f, "}result=");
ir_print_other_instruction(irp, instruction->result_loc);
}
static void ir_print_unreachable(IrPrint *irp, IrInstructionUnreachable *instruction) {


@ -81,7 +81,7 @@ static AstNode *ast_parse_for_type_expr(ParseContext *pc);
static AstNode *ast_parse_while_type_expr(ParseContext *pc);
static AstNode *ast_parse_switch_expr(ParseContext *pc);
static AstNode *ast_parse_asm_expr(ParseContext *pc);
static AstNode *ast_parse_enum_lit(ParseContext *pc);
static AstNode *ast_parse_anon_lit(ParseContext *pc);
static AstNode *ast_parse_asm_output(ParseContext *pc);
static AsmOutput *ast_parse_asm_output_item(ParseContext *pc);
static AstNode *ast_parse_asm_input(ParseContext *pc);
@ -1600,9 +1600,9 @@ static AstNode *ast_parse_primary_type_expr(ParseContext *pc) {
if (container_decl != nullptr)
return container_decl;
AstNode *enum_lit = ast_parse_enum_lit(pc);
if (enum_lit != nullptr)
return enum_lit;
AstNode *anon_lit = ast_parse_anon_lit(pc);
if (anon_lit != nullptr)
return anon_lit;
AstNode *error_set_decl = ast_parse_error_set_decl(pc);
if (error_set_decl != nullptr)
@ -1876,16 +1876,22 @@ static AstNode *ast_parse_asm_expr(ParseContext *pc) {
return res;
}
static AstNode *ast_parse_enum_lit(ParseContext *pc) {
static AstNode *ast_parse_anon_lit(ParseContext *pc) {
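// Parses either `.Identifier` (an enum literal) or `.{ ... }` (an anonymous container literal).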
Token *period = eat_token_if(pc, TokenIdDot);
if (period == nullptr)
return nullptr;
Token *identifier = expect_token(pc, TokenIdSymbol);
AstNode *res = ast_create_node(pc, NodeTypeEnumLiteral, period);
res->data.enum_literal.period = period;
res->data.enum_literal.identifier = identifier;
return res;
// anon enum literal
Token *identifier = eat_token_if(pc, TokenIdSymbol);
if (identifier != nullptr) {
AstNode *res = ast_create_node(pc, NodeTypeEnumLiteral, period);
res->data.enum_literal.period = period;
res->data.enum_literal.identifier = identifier;
return res;
}
// anon container literal
return ast_parse_init_list(pc);
}
// AsmOutput <- COLON AsmOutputList AsmInput?


@ -2,6 +2,23 @@ const tests = @import("tests.zig");
const builtin = @import("builtin");
pub fn addCases(cases: *tests.CompileErrorContext) void {
cases.add(
"missing const in slice with nested array type",
\\const Geo3DTex2D = struct { vertices: [][2]f32 };
\\pub fn getGeo3DTex2D() Geo3DTex2D {
\\ return Geo3DTex2D{
\\ .vertices = [_][2]f32{
\\ [_]f32{ -0.5, -0.5},
\\ },
\\ };
\\}
\\export fn entry() void {
\\ var geo_data = getGeo3DTex2D();
\\}
,
"tmp.zig:4:30: error: expected type '[][2]f32', found '[1][2]f32'",
);
cases.add(
"slicing of global undefined pointer",
\\var buf: *[1]u8 = undefined;
@ -216,9 +233,9 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\ const obj = AstObject{ .lhsExpr = lhsExpr };
\\}
,
"tmp.zig:4:19: error: union 'AstObject' depends on itself",
"tmp.zig:2:5: note: while checking this field",
"tmp.zig:1:17: error: struct 'LhsExpr' depends on itself",
"tmp.zig:5:5: note: while checking this field",
"tmp.zig:2:5: note: while checking this field",
);
cases.add(


@ -298,3 +298,17 @@ test "implicit cast zero sized array ptr to slice" {
const c: []const u8 = &b;
expect(c.len == 0);
}
test "anonymous list literal syntax" {
const S = struct {
fn doTheTest() void {
var array: [4]u8 = .{1, 2, 3, 4};
expect(array[0] == 1);
expect(array[1] == 2);
expect(array[2] == 3);
expect(array[3] == 4);
}
};
S.doTheTest();
comptime S.doTheTest();
}


@ -1214,7 +1214,7 @@ test "spill target expr in a for loop" {
}
const Foo = struct {
slice: []i32,
slice: []const i32,
};
fn atest(foo: *Foo) i32 {
@ -1245,7 +1245,7 @@ test "spill target expr in a for loop, with a var decl in the loop body" {
}
const Foo = struct {
slice: []i32,
slice: []const i32,
};
fn atest(foo: *Foo) i32 {


@ -709,3 +709,62 @@ test "packed struct field passed to generic function" {
var loaded = S.genericReadPackedField(&p.b);
expect(loaded == 29);
}
test "anonymous struct literal syntax" {
const S = struct {
const Point = struct {
x: i32,
y: i32,
};
fn doTheTest() void {
var p: Point = .{
.x = 1,
.y = 2,
};
expect(p.x == 1);
expect(p.y == 2);
}
};
S.doTheTest();
comptime S.doTheTest();
}
test "fully anonymous struct" {
const S = struct {
fn doTheTest() void {
dump(.{
.int = @as(u32, 1234),
.float = @as(f64, 12.34),
.b = true,
.s = "hi",
});
}
fn dump(args: var) void {
expect(args.int == 1234);
expect(args.float == 12.34);
expect(args.b);
expect(args.s[0] == 'h');
expect(args.s[1] == 'i');
}
};
S.doTheTest();
comptime S.doTheTest();
}
test "fully anonymous list literal" {
const S = struct {
fn doTheTest() void {
dump(.{ @as(u32, 1234), @as(f64, 12.34), true, "hi"});
}
fn dump(args: var) void {
expect(args.@"0" == 1234);
expect(args.@"1" == 12.34);
expect(args.@"2");
expect(args.@"3"[0] == 'h');
expect(args.@"3"[1] == 'i');
}
};
S.doTheTest();
comptime S.doTheTest();
}


@ -549,3 +549,25 @@ test "initialize global array of union" {
expect(glbl_array[0].U0 == 1);
expect(glbl_array[1].U1 == 2);
}
test "anonymous union literal syntax" {
const S = struct {
const Number = union {
int: i32,
float: f64,
};
fn doTheTest() void {
var i: Number = .{.int = 42};
var f = makeNumber();
expect(i.int == 42);
expect(f.float == 12.34);
}
fn makeNumber() Number {
return .{.float = 12.34};
}
};
S.doTheTest();
comptime S.doTheTest();
}