stage2 parser: token ids in their own array
To reduce cache misses, token ids go in their own array, and the start/end offsets go in a separate one.

perf measurement before:
    2,667,914      cache-misses:u
    2,139,139,935  instructions:u
    894,167,331    cycles:u

perf measurement after:
    1,757,723      cache-misses:u
    2,069,932,298  instructions:u
    858,105,570    cycles:u
parent 295bca9b5f
commit 8df0841d6a
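
The change is the classic array-of-structs to struct-of-arrays split: instead of a single tokens: []const Token slice whose elements carry both the token id and its start/end offsets, the tree keeps two parallel slices, token_ids and token_locs, indexed by the same TokenIndex. Below is a minimal sketch of the idea, not the actual std.zig code; the trimmed-down Token definition and the counting helper are illustrative only.

    // Sketch only: a reduced Token with the two pieces the parser cares about.
    const Token = struct {
        id: Id,
        loc: Loc,

        pub const Id = enum { Identifier, LineComment, Eof };
        pub const Loc = struct { start: usize, end: usize };
    };

    // Before: array of structs. Scanning ids also drags the offsets through the cache.
    const TreeAoS = struct {
        tokens: []const Token,
    };

    // After: struct of arrays. Both slices use the same index, and id-only loops
    // (the common case while parsing) walk a dense array of small enum values.
    const TreeSoA = struct {
        token_ids: []const Token.Id,
        token_locs: []const Token.Loc,
    };

    fn countLineComments(tree: TreeSoA) usize {
        var n: usize = 0;
        for (tree.token_ids) |id| {
            if (id == .LineComment) n += 1; // token_locs is never touched here
        }
        return n;
    }

The diff below applies that split throughout: tokens[i].id becomes token_ids[i], and tokens[i].start / tokens[i].end become token_locs[i].start / token_locs[i].end.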
@@ -10,7 +10,8 @@ pub const NodeIndex = usize;
 pub const Tree = struct {
     /// Reference to externally-owned data.
     source: []const u8,
-    tokens: []const Token,
+    token_ids: []const Token.Id,
+    token_locs: []const Token.Loc,
     errors: []const Error,
     /// undefined on parse error (when errors field is not empty)
     root_node: *Node.Root,
@@ -23,26 +24,27 @@ pub const Tree = struct {
     generated: bool = false,

     pub fn deinit(self: *Tree) void {
-        self.gpa.free(self.tokens);
+        self.gpa.free(self.token_ids);
+        self.gpa.free(self.token_locs);
         self.gpa.free(self.errors);
         self.arena.promote(self.gpa).deinit();
     }

     pub fn renderError(self: *Tree, parse_error: *const Error, stream: var) !void {
-        return parse_error.render(self.tokens, stream);
+        return parse_error.render(self.token_ids, stream);
     }

     pub fn tokenSlice(self: *Tree, token_index: TokenIndex) []const u8 {
-        return self.tokenSlicePtr(self.tokens[token_index]);
+        return self.tokenSliceLoc(self.token_locs[token_index]);
     }

-    pub fn tokenSlicePtr(self: *Tree, token: Token) []const u8 {
+    pub fn tokenSliceLoc(self: *Tree, token: Token.Loc) []const u8 {
         return self.source[token.start..token.end];
     }

     pub fn getNodeSource(self: *const Tree, node: *const Node) []const u8 {
-        const first_token = self.tokens[node.firstToken()];
-        const last_token = self.tokens[node.lastToken()];
+        const first_token = self.token_locs[node.firstToken()];
+        const last_token = self.token_locs[node.lastToken()];
         return self.source[first_token.start..last_token.end];
     }

@@ -54,7 +56,7 @@ pub const Tree = struct {
     };

     /// Return the Location of the token relative to the offset specified by `start_index`.
-    pub fn tokenLocationPtr(self: *Tree, start_index: usize, token: Token) Location {
+    pub fn tokenLocationLoc(self: *Tree, start_index: usize, token: Token.Loc) Location {
         var loc = Location{
             .line = 0,
             .column = 0,
@@ -82,14 +84,14 @@ pub const Tree = struct {
     }

     pub fn tokenLocation(self: *Tree, start_index: usize, token_index: TokenIndex) Location {
-        return self.tokenLocationPtr(start_index, self.tokens[token_index]);
+        return self.tokenLocationLoc(start_index, self.token_locs[token_index]);
     }

     pub fn tokensOnSameLine(self: *Tree, token1_index: TokenIndex, token2_index: TokenIndex) bool {
-        return self.tokensOnSameLinePtr(self.tokens[token1_index], self.tokens[token2_index]);
+        return self.tokensOnSameLineLoc(self.token_locs[token1_index], self.token_locs[token2_index]);
     }

-    pub fn tokensOnSameLinePtr(self: *Tree, token1: Token, token2: Token) bool {
+    pub fn tokensOnSameLineLoc(self: *Tree, token1: Token.Loc, token2: Token.Loc) bool {
         return mem.indexOfScalar(u8, self.source[token1.end..token2.start], '\n') == null;
     }

@@ -100,7 +102,7 @@ pub const Tree = struct {
     /// Skips over comments
     pub fn prevToken(self: *Tree, token_index: TokenIndex) TokenIndex {
         var index = token_index - 1;
-        while (self.tokens[index].id == Token.Id.LineComment) {
+        while (self.token_ids[index] == Token.Id.LineComment) {
             index -= 1;
         }
         return index;
@@ -109,7 +111,7 @@ pub const Tree = struct {
     /// Skips over comments
     pub fn nextToken(self: *Tree, token_index: TokenIndex) TokenIndex {
         var index = token_index + 1;
-        while (self.tokens[index].id == Token.Id.LineComment) {
+        while (self.token_ids[index] == Token.Id.LineComment) {
             index += 1;
         }
         return index;
@@ -166,7 +168,7 @@ pub const Error = union(enum) {
     DeclBetweenFields: DeclBetweenFields,
     InvalidAnd: InvalidAnd,

-    pub fn render(self: *const Error, tokens: []const Token, stream: var) !void {
+    pub fn render(self: *const Error, tokens: []const Token.Id, stream: var) !void {
         switch (self.*) {
             .InvalidToken => |*x| return x.render(tokens, stream),
             .ExpectedContainerMembers => |*x| return x.render(tokens, stream),
@@ -321,7 +323,7 @@ pub const Error = union(enum) {
     pub const ExpectedCall = struct {
         node: *Node,

-        pub fn render(self: *const ExpectedCall, tokens: []const Token, stream: var) !void {
+        pub fn render(self: *const ExpectedCall, tokens: []const Token.Id, stream: var) !void {
             return stream.print("expected " ++ @tagName(Node.Id.Call) ++ ", found {}", .{
                 @tagName(self.node.id),
             });
@@ -331,7 +333,7 @@ pub const Error = union(enum) {
     pub const ExpectedCallOrFnProto = struct {
         node: *Node,

-        pub fn render(self: *const ExpectedCallOrFnProto, tokens: []const Token, stream: var) !void {
+        pub fn render(self: *const ExpectedCallOrFnProto, tokens: []const Token.Id, stream: var) !void {
             return stream.print("expected " ++ @tagName(Node.Id.Call) ++ " or " ++
                 @tagName(Node.Id.FnProto) ++ ", found {}", .{@tagName(self.node.id)});
         }
@@ -341,14 +343,14 @@ pub const Error = union(enum) {
         token: TokenIndex,
         expected_id: Token.Id,

-        pub fn render(self: *const ExpectedToken, tokens: []const Token, stream: var) !void {
+        pub fn render(self: *const ExpectedToken, tokens: []const Token.Id, stream: var) !void {
             const found_token = tokens[self.token];
-            switch (found_token.id) {
+            switch (found_token) {
                 .Invalid => {
                     return stream.print("expected '{}', found invalid bytes", .{self.expected_id.symbol()});
                 },
                 else => {
-                    const token_name = found_token.id.symbol();
+                    const token_name = found_token.symbol();
                     return stream.print("expected '{}', found '{}'", .{ self.expected_id.symbol(), token_name });
                 },
             }
@@ -359,11 +361,11 @@ pub const Error = union(enum) {
         token: TokenIndex,
         end_id: Token.Id,

-        pub fn render(self: *const ExpectedCommaOrEnd, tokens: []const Token, stream: var) !void {
+        pub fn render(self: *const ExpectedCommaOrEnd, tokens: []const Token.Id, stream: var) !void {
             const actual_token = tokens[self.token];
             return stream.print("expected ',' or '{}', found '{}'", .{
                 self.end_id.symbol(),
-                actual_token.id.symbol(),
+                actual_token.symbol(),
             });
         }
     };
@@ -374,9 +376,9 @@ pub const Error = union(enum) {

             token: TokenIndex,

-            pub fn render(self: *const ThisError, tokens: []const Token, stream: var) !void {
+            pub fn render(self: *const ThisError, tokens: []const Token.Id, stream: var) !void {
                 const actual_token = tokens[self.token];
-                return stream.print(msg, .{actual_token.id.symbol()});
+                return stream.print(msg, .{actual_token.symbol()});
             }
         };
     }
@@ -387,7 +389,7 @@ pub const Error = union(enum) {

             token: TokenIndex,

-            pub fn render(self: *const ThisError, tokens: []const Token, stream: var) !void {
+            pub fn render(self: *const ThisError, tokens: []const Token.Id, stream: var) !void {
                 return stream.writeAll(msg);
             }
         };
@@ -16,28 +16,32 @@ pub const Error = error{ParseError} || Allocator.Error;
 pub fn parse(gpa: *Allocator, source: []const u8) Allocator.Error!*Tree {
     // TODO optimization idea: ensureCapacity on the tokens list and
     // then appendAssumeCapacity inside the loop.
-    var tokens = std.ArrayList(Token).init(gpa);
-    defer tokens.deinit();
+    var token_ids = std.ArrayList(Token.Id).init(gpa);
+    defer token_ids.deinit();
+    var token_locs = std.ArrayList(Token.Loc).init(gpa);
+    defer token_locs.deinit();

     var tokenizer = std.zig.Tokenizer.init(source);
     while (true) {
-        const tree_token = try tokens.addOne();
-        tree_token.* = tokenizer.next();
-        if (tree_token.id == .Eof) break;
+        const token = tokenizer.next();
+        try token_ids.append(token.id);
+        try token_locs.append(token.loc);
+        if (token.id == .Eof) break;
     }

     var parser: Parser = .{
         .source = source,
         .arena = std.heap.ArenaAllocator.init(gpa),
         .gpa = gpa,
-        .tokens = tokens.items,
+        .token_ids = token_ids.items,
+        .token_locs = token_locs.items,
         .errors = .{},
         .tok_i = 0,
     };
     defer parser.errors.deinit(gpa);
     errdefer parser.arena.deinit();

-    while (tokens.items[parser.tok_i].id == .LineComment) parser.tok_i += 1;
+    while (token_ids.items[parser.tok_i] == .LineComment) parser.tok_i += 1;

     const root_node = try parser.parseRoot();

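The TODO at the top of parse (ensureCapacity on the token lists, then appendAssumeCapacity inside the loop) is left unimplemented by this commit. A rough sketch of that idea, reusing the locals from the loop above, follows; the source.len + 1 capacity bound and the loop shape are assumptions for illustration, not part of the diff.

    // Hypothetical follow-up to the TODO above, not in this commit.
    // Every token except the final Eof consumes at least one source byte
    // (an assumption), so source.len + 1 bounds the token count and the
    // appends can skip their per-call grow check.
    try token_ids.ensureCapacity(source.len + 1);
    try token_locs.ensureCapacity(source.len + 1);
    while (true) {
        const token = tokenizer.next();
        token_ids.appendAssumeCapacity(token.id);
        token_locs.appendAssumeCapacity(token.loc);
        if (token.id == .Eof) break;
    }
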
@@ -45,7 +49,8 @@ pub fn parse(gpa: *Allocator, source: []const u8) Allocator.Error!*Tree {
     tree.* = .{
         .gpa = gpa,
         .source = source,
-        .tokens = tokens.toOwnedSlice(),
+        .token_ids = token_ids.toOwnedSlice(),
+        .token_locs = token_locs.toOwnedSlice(),
         .errors = parser.errors.toOwnedSlice(gpa),
         .root_node = root_node,
         .arena = parser.arena.state,
@@ -58,9 +63,8 @@ const Parser = struct {
     arena: std.heap.ArenaAllocator,
     gpa: *Allocator,
     source: []const u8,
-    /// TODO: Optimization idea: have this be several arrays of the token fields rather
-    /// than an array of structs.
-    tokens: []const Token,
+    token_ids: []const Token.Id,
+    token_locs: []const Token.Loc,
     tok_i: TokenIndex,
     errors: std.ArrayListUnmanaged(AstError),

@@ -80,19 +84,6 @@ const Parser = struct {
         return node;
     }

-    /// Helper function for appending elements to a singly linked list.
-    fn llpush(
-        p: *Parser,
-        comptime T: type,
-        it: *?*std.SinglyLinkedList(T).Node,
-        data: T,
-    ) !*?*std.SinglyLinkedList(T).Node {
-        const llnode = try p.arena.allocator.create(std.SinglyLinkedList(T).Node);
-        llnode.* = .{ .data = data };
-        it.* = llnode;
-        return &llnode.next;
-    }
-
     /// ContainerMembers
     /// <- TestDecl ContainerMembers
     /// / TopLevelComptime ContainerMembers
@ -228,7 +219,7 @@ const Parser = struct {
|
|||||||
// try to continue parsing
|
// try to continue parsing
|
||||||
const index = p.tok_i;
|
const index = p.tok_i;
|
||||||
p.findNextContainerMember();
|
p.findNextContainerMember();
|
||||||
const next = p.tokens[p.tok_i].id;
|
const next = p.token_ids[p.tok_i];
|
||||||
switch (next) {
|
switch (next) {
|
||||||
.Eof => break,
|
.Eof => break,
|
||||||
else => {
|
else => {
|
||||||
@ -257,7 +248,7 @@ const Parser = struct {
|
|||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
const next = p.tokens[p.tok_i].id;
|
const next = p.token_ids[p.tok_i];
|
||||||
switch (next) {
|
switch (next) {
|
||||||
.Eof => break,
|
.Eof => break,
|
||||||
.Keyword_comptime => {
|
.Keyword_comptime => {
|
||||||
@ -291,7 +282,7 @@ const Parser = struct {
|
|||||||
var level: u32 = 0;
|
var level: u32 = 0;
|
||||||
while (true) {
|
while (true) {
|
||||||
const tok = p.nextToken();
|
const tok = p.nextToken();
|
||||||
switch (tok.ptr.id) {
|
switch (p.token_ids[tok]) {
|
||||||
// any of these can start a new top level declaration
|
// any of these can start a new top level declaration
|
||||||
.Keyword_test,
|
.Keyword_test,
|
||||||
.Keyword_comptime,
|
.Keyword_comptime,
|
||||||
@ -308,7 +299,7 @@ const Parser = struct {
|
|||||||
.Identifier,
|
.Identifier,
|
||||||
=> {
|
=> {
|
||||||
if (level == 0) {
|
if (level == 0) {
|
||||||
p.putBackToken(tok.index);
|
p.putBackToken(tok);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@ -325,13 +316,13 @@ const Parser = struct {
|
|||||||
.RBrace => {
|
.RBrace => {
|
||||||
if (level == 0) {
|
if (level == 0) {
|
||||||
// end of container, exit
|
// end of container, exit
|
||||||
p.putBackToken(tok.index);
|
p.putBackToken(tok);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
level -= 1;
|
level -= 1;
|
||||||
},
|
},
|
||||||
.Eof => {
|
.Eof => {
|
||||||
p.putBackToken(tok.index);
|
p.putBackToken(tok);
|
||||||
return;
|
return;
|
||||||
},
|
},
|
||||||
else => {},
|
else => {},
|
||||||
@ -344,11 +335,11 @@ const Parser = struct {
|
|||||||
var level: u32 = 0;
|
var level: u32 = 0;
|
||||||
while (true) {
|
while (true) {
|
||||||
const tok = p.nextToken();
|
const tok = p.nextToken();
|
||||||
switch (tok.ptr.id) {
|
switch (p.token_ids[tok]) {
|
||||||
.LBrace => level += 1,
|
.LBrace => level += 1,
|
||||||
.RBrace => {
|
.RBrace => {
|
||||||
if (level == 0) {
|
if (level == 0) {
|
||||||
p.putBackToken(tok.index);
|
p.putBackToken(tok);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
level -= 1;
|
level -= 1;
|
||||||
@ -359,7 +350,7 @@ const Parser = struct {
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
.Eof => {
|
.Eof => {
|
||||||
p.putBackToken(tok.index);
|
p.putBackToken(tok);
|
||||||
return;
|
return;
|
||||||
},
|
},
|
||||||
else => {},
|
else => {},
|
||||||
@ -454,8 +445,8 @@ const Parser = struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (extern_export_inline_token) |token| {
|
if (extern_export_inline_token) |token| {
|
||||||
if (p.tokens[token].id == .Keyword_inline or
|
if (p.token_ids[token] == .Keyword_inline or
|
||||||
p.tokens[token].id == .Keyword_noinline)
|
p.token_ids[token] == .Keyword_noinline)
|
||||||
{
|
{
|
||||||
try p.errors.append(p.gpa, .{
|
try p.errors.append(p.gpa, .{
|
||||||
.ExpectedFn = .{ .token = p.tok_i },
|
.ExpectedFn = .{ .token = p.tok_i },
|
||||||
@ -722,7 +713,7 @@ const Parser = struct {
|
|||||||
|
|
||||||
const defer_token = p.eatToken(.Keyword_defer) orelse p.eatToken(.Keyword_errdefer);
|
const defer_token = p.eatToken(.Keyword_defer) orelse p.eatToken(.Keyword_errdefer);
|
||||||
if (defer_token) |token| {
|
if (defer_token) |token| {
|
||||||
const payload = if (p.tokens[token].id == .Keyword_errdefer)
|
const payload = if (p.token_ids[token] == .Keyword_errdefer)
|
||||||
try p.parsePayload()
|
try p.parsePayload()
|
||||||
else
|
else
|
||||||
null;
|
null;
|
||||||
@ -2269,7 +2260,7 @@ const Parser = struct {
|
|||||||
/// / EQUAL
|
/// / EQUAL
|
||||||
fn parseAssignOp(p: *Parser) !?*Node {
|
fn parseAssignOp(p: *Parser) !?*Node {
|
||||||
const token = p.nextToken();
|
const token = p.nextToken();
|
||||||
const op: Node.InfixOp.Op = switch (token.ptr.id) {
|
const op: Node.InfixOp.Op = switch (p.token_ids[token]) {
|
||||||
.AsteriskEqual => .AssignMul,
|
.AsteriskEqual => .AssignMul,
|
||||||
.SlashEqual => .AssignDiv,
|
.SlashEqual => .AssignDiv,
|
||||||
.PercentEqual => .AssignMod,
|
.PercentEqual => .AssignMod,
|
||||||
@ -2285,14 +2276,14 @@ const Parser = struct {
|
|||||||
.MinusPercentEqual => .AssignSubWrap,
|
.MinusPercentEqual => .AssignSubWrap,
|
||||||
.Equal => .Assign,
|
.Equal => .Assign,
|
||||||
else => {
|
else => {
|
||||||
p.putBackToken(token.index);
|
p.putBackToken(token);
|
||||||
return null;
|
return null;
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
const node = try p.arena.allocator.create(Node.InfixOp);
|
const node = try p.arena.allocator.create(Node.InfixOp);
|
||||||
node.* = .{
|
node.* = .{
|
||||||
.op_token = token.index,
|
.op_token = token,
|
||||||
.lhs = undefined, // set by caller
|
.lhs = undefined, // set by caller
|
||||||
.op = op,
|
.op = op,
|
||||||
.rhs = undefined, // set by caller
|
.rhs = undefined, // set by caller
|
||||||
@ -2309,7 +2300,7 @@ const Parser = struct {
|
|||||||
/// / RARROWEQUAL
|
/// / RARROWEQUAL
|
||||||
fn parseCompareOp(p: *Parser) !?*Node {
|
fn parseCompareOp(p: *Parser) !?*Node {
|
||||||
const token = p.nextToken();
|
const token = p.nextToken();
|
||||||
const op: Node.InfixOp.Op = switch (token.ptr.id) {
|
const op: Node.InfixOp.Op = switch (p.token_ids[token]) {
|
||||||
.EqualEqual => .EqualEqual,
|
.EqualEqual => .EqualEqual,
|
||||||
.BangEqual => .BangEqual,
|
.BangEqual => .BangEqual,
|
||||||
.AngleBracketLeft => .LessThan,
|
.AngleBracketLeft => .LessThan,
|
||||||
@ -2317,12 +2308,12 @@ const Parser = struct {
|
|||||||
.AngleBracketLeftEqual => .LessOrEqual,
|
.AngleBracketLeftEqual => .LessOrEqual,
|
||||||
.AngleBracketRightEqual => .GreaterOrEqual,
|
.AngleBracketRightEqual => .GreaterOrEqual,
|
||||||
else => {
|
else => {
|
||||||
p.putBackToken(token.index);
|
p.putBackToken(token);
|
||||||
return null;
|
return null;
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
return p.createInfixOp(token.index, op);
|
return p.createInfixOp(token, op);
|
||||||
}
|
}
|
||||||
|
|
||||||
/// BitwiseOp
|
/// BitwiseOp
|
||||||
@ -2333,19 +2324,19 @@ const Parser = struct {
|
|||||||
/// / KEYWORD_catch Payload?
|
/// / KEYWORD_catch Payload?
|
||||||
fn parseBitwiseOp(p: *Parser) !?*Node {
|
fn parseBitwiseOp(p: *Parser) !?*Node {
|
||||||
const token = p.nextToken();
|
const token = p.nextToken();
|
||||||
const op: Node.InfixOp.Op = switch (token.ptr.id) {
|
const op: Node.InfixOp.Op = switch (p.token_ids[token]) {
|
||||||
.Ampersand => .BitAnd,
|
.Ampersand => .BitAnd,
|
||||||
.Caret => .BitXor,
|
.Caret => .BitXor,
|
||||||
.Pipe => .BitOr,
|
.Pipe => .BitOr,
|
||||||
.Keyword_orelse => .UnwrapOptional,
|
.Keyword_orelse => .UnwrapOptional,
|
||||||
.Keyword_catch => .{ .Catch = try p.parsePayload() },
|
.Keyword_catch => .{ .Catch = try p.parsePayload() },
|
||||||
else => {
|
else => {
|
||||||
p.putBackToken(token.index);
|
p.putBackToken(token);
|
||||||
return null;
|
return null;
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
return p.createInfixOp(token.index, op);
|
return p.createInfixOp(token, op);
|
||||||
}
|
}
|
||||||
|
|
||||||
/// BitShiftOp
|
/// BitShiftOp
|
||||||
@ -2353,16 +2344,16 @@ const Parser = struct {
|
|||||||
/// / RARROW2
|
/// / RARROW2
|
||||||
fn parseBitShiftOp(p: *Parser) !?*Node {
|
fn parseBitShiftOp(p: *Parser) !?*Node {
|
||||||
const token = p.nextToken();
|
const token = p.nextToken();
|
||||||
const op: Node.InfixOp.Op = switch (token.ptr.id) {
|
const op: Node.InfixOp.Op = switch (p.token_ids[token]) {
|
||||||
.AngleBracketAngleBracketLeft => .BitShiftLeft,
|
.AngleBracketAngleBracketLeft => .BitShiftLeft,
|
||||||
.AngleBracketAngleBracketRight => .BitShiftRight,
|
.AngleBracketAngleBracketRight => .BitShiftRight,
|
||||||
else => {
|
else => {
|
||||||
p.putBackToken(token.index);
|
p.putBackToken(token);
|
||||||
return null;
|
return null;
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
return p.createInfixOp(token.index, op);
|
return p.createInfixOp(token, op);
|
||||||
}
|
}
|
||||||
|
|
||||||
/// AdditionOp
|
/// AdditionOp
|
||||||
@ -2373,19 +2364,19 @@ const Parser = struct {
|
|||||||
/// / MINUSPERCENT
|
/// / MINUSPERCENT
|
||||||
fn parseAdditionOp(p: *Parser) !?*Node {
|
fn parseAdditionOp(p: *Parser) !?*Node {
|
||||||
const token = p.nextToken();
|
const token = p.nextToken();
|
||||||
const op: Node.InfixOp.Op = switch (token.ptr.id) {
|
const op: Node.InfixOp.Op = switch (p.token_ids[token]) {
|
||||||
.Plus => .Add,
|
.Plus => .Add,
|
||||||
.Minus => .Sub,
|
.Minus => .Sub,
|
||||||
.PlusPlus => .ArrayCat,
|
.PlusPlus => .ArrayCat,
|
||||||
.PlusPercent => .AddWrap,
|
.PlusPercent => .AddWrap,
|
||||||
.MinusPercent => .SubWrap,
|
.MinusPercent => .SubWrap,
|
||||||
else => {
|
else => {
|
||||||
p.putBackToken(token.index);
|
p.putBackToken(token);
|
||||||
return null;
|
return null;
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
return p.createInfixOp(token.index, op);
|
return p.createInfixOp(token, op);
|
||||||
}
|
}
|
||||||
|
|
||||||
/// MultiplyOp
|
/// MultiplyOp
|
||||||
@ -2397,7 +2388,7 @@ const Parser = struct {
|
|||||||
/// / ASTERISKPERCENT
|
/// / ASTERISKPERCENT
|
||||||
fn parseMultiplyOp(p: *Parser) !?*Node {
|
fn parseMultiplyOp(p: *Parser) !?*Node {
|
||||||
const token = p.nextToken();
|
const token = p.nextToken();
|
||||||
const op: Node.InfixOp.Op = switch (token.ptr.id) {
|
const op: Node.InfixOp.Op = switch (p.token_ids[token]) {
|
||||||
.PipePipe => .MergeErrorSets,
|
.PipePipe => .MergeErrorSets,
|
||||||
.Asterisk => .Mul,
|
.Asterisk => .Mul,
|
||||||
.Slash => .Div,
|
.Slash => .Div,
|
||||||
@ -2405,12 +2396,12 @@ const Parser = struct {
|
|||||||
.AsteriskAsterisk => .ArrayMult,
|
.AsteriskAsterisk => .ArrayMult,
|
||||||
.AsteriskPercent => .MulWrap,
|
.AsteriskPercent => .MulWrap,
|
||||||
else => {
|
else => {
|
||||||
p.putBackToken(token.index);
|
p.putBackToken(token);
|
||||||
return null;
|
return null;
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
return p.createInfixOp(token.index, op);
|
return p.createInfixOp(token, op);
|
||||||
}
|
}
|
||||||
|
|
||||||
/// PrefixOp
|
/// PrefixOp
|
||||||
@ -2423,7 +2414,7 @@ const Parser = struct {
|
|||||||
/// / KEYWORD_await
|
/// / KEYWORD_await
|
||||||
fn parsePrefixOp(p: *Parser) !?*Node {
|
fn parsePrefixOp(p: *Parser) !?*Node {
|
||||||
const token = p.nextToken();
|
const token = p.nextToken();
|
||||||
const op: Node.PrefixOp.Op = switch (token.ptr.id) {
|
const op: Node.PrefixOp.Op = switch (p.token_ids[token]) {
|
||||||
.Bang => .BoolNot,
|
.Bang => .BoolNot,
|
||||||
.Minus => .Negation,
|
.Minus => .Negation,
|
||||||
.Tilde => .BitNot,
|
.Tilde => .BitNot,
|
||||||
@ -2432,14 +2423,14 @@ const Parser = struct {
|
|||||||
.Keyword_try => .Try,
|
.Keyword_try => .Try,
|
||||||
.Keyword_await => .Await,
|
.Keyword_await => .Await,
|
||||||
else => {
|
else => {
|
||||||
p.putBackToken(token.index);
|
p.putBackToken(token);
|
||||||
return null;
|
return null;
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
const node = try p.arena.allocator.create(Node.PrefixOp);
|
const node = try p.arena.allocator.create(Node.PrefixOp);
|
||||||
node.* = .{
|
node.* = .{
|
||||||
.op_token = token.index,
|
.op_token = token,
|
||||||
.op = op,
|
.op = op,
|
||||||
.rhs = undefined, // set by caller
|
.rhs = undefined, // set by caller
|
||||||
};
|
};
|
||||||
@ -2493,7 +2484,7 @@ const Parser = struct {
|
|||||||
// If the token encountered was **, there will be two nodes instead of one.
|
// If the token encountered was **, there will be two nodes instead of one.
|
||||||
// The attributes should be applied to the rightmost operator.
|
// The attributes should be applied to the rightmost operator.
|
||||||
const prefix_op = node.cast(Node.PrefixOp).?;
|
const prefix_op = node.cast(Node.PrefixOp).?;
|
||||||
var ptr_info = if (p.tokens[prefix_op.op_token].id == .AsteriskAsterisk)
|
var ptr_info = if (p.token_ids[prefix_op.op_token] == .AsteriskAsterisk)
|
||||||
&prefix_op.rhs.cast(Node.PrefixOp).?.op.PtrType
|
&prefix_op.rhs.cast(Node.PrefixOp).?.op.PtrType
|
||||||
else
|
else
|
||||||
&prefix_op.op.PtrType;
|
&prefix_op.op.PtrType;
|
||||||
@ -2812,7 +2803,8 @@ const Parser = struct {
|
|||||||
return null;
|
return null;
|
||||||
};
|
};
|
||||||
if (p.eatToken(.Identifier)) |ident| {
|
if (p.eatToken(.Identifier)) |ident| {
|
||||||
const token_slice = p.source[p.tokens[ident].start..p.tokens[ident].end];
|
const token_loc = p.token_locs[ident];
|
||||||
|
const token_slice = p.source[token_loc.start..token_loc.end];
|
||||||
if (!std.mem.eql(u8, token_slice, "c")) {
|
if (!std.mem.eql(u8, token_slice, "c")) {
|
||||||
p.putBackToken(ident);
|
p.putBackToken(ident);
|
||||||
} else {
|
} else {
|
||||||
@ -2879,7 +2871,7 @@ const Parser = struct {
|
|||||||
fn parseContainerDeclType(p: *Parser) !?ContainerDeclType {
|
fn parseContainerDeclType(p: *Parser) !?ContainerDeclType {
|
||||||
const kind_token = p.nextToken();
|
const kind_token = p.nextToken();
|
||||||
|
|
||||||
const init_arg_expr = switch (kind_token.ptr.id) {
|
const init_arg_expr = switch (p.token_ids[kind_token]) {
|
||||||
.Keyword_struct => Node.ContainerDecl.InitArg{ .None = {} },
|
.Keyword_struct => Node.ContainerDecl.InitArg{ .None = {} },
|
||||||
.Keyword_enum => blk: {
|
.Keyword_enum => blk: {
|
||||||
if (p.eatToken(.LParen) != null) {
|
if (p.eatToken(.LParen) != null) {
|
||||||
@ -2914,13 +2906,13 @@ const Parser = struct {
|
|||||||
break :blk Node.ContainerDecl.InitArg{ .None = {} };
|
break :blk Node.ContainerDecl.InitArg{ .None = {} };
|
||||||
},
|
},
|
||||||
else => {
|
else => {
|
||||||
p.putBackToken(kind_token.index);
|
p.putBackToken(kind_token);
|
||||||
return null;
|
return null;
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
return ContainerDeclType{
|
return ContainerDeclType{
|
||||||
.kind_token = kind_token.index,
|
.kind_token = kind_token,
|
||||||
.init_arg_expr = init_arg_expr,
|
.init_arg_expr = init_arg_expr,
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
@ -2973,7 +2965,7 @@ const Parser = struct {
|
|||||||
while (try nodeParseFn(p)) |item| {
|
while (try nodeParseFn(p)) |item| {
|
||||||
try list.append(item);
|
try list.append(item);
|
||||||
|
|
||||||
switch (p.tokens[p.tok_i].id) {
|
switch (p.token_ids[p.tok_i]) {
|
||||||
.Comma => _ = p.nextToken(),
|
.Comma => _ = p.nextToken(),
|
||||||
// all possible delimiters
|
// all possible delimiters
|
||||||
.Colon, .RParen, .RBrace, .RBracket => break,
|
.Colon, .RParen, .RBrace, .RBracket => break,
|
||||||
@ -2994,13 +2986,13 @@ const Parser = struct {
|
|||||||
fn SimpleBinOpParseFn(comptime token: Token.Id, comptime op: Node.InfixOp.Op) NodeParseFn {
|
fn SimpleBinOpParseFn(comptime token: Token.Id, comptime op: Node.InfixOp.Op) NodeParseFn {
|
||||||
return struct {
|
return struct {
|
||||||
pub fn parse(p: *Parser) Error!?*Node {
|
pub fn parse(p: *Parser) Error!?*Node {
|
||||||
const op_token = if (token == .Keyword_and) switch (p.tokens[p.tok_i].id) {
|
const op_token = if (token == .Keyword_and) switch (p.token_ids[p.tok_i]) {
|
||||||
.Keyword_and => p.nextToken().index,
|
.Keyword_and => p.nextToken(),
|
||||||
.Invalid_ampersands => blk: {
|
.Invalid_ampersands => blk: {
|
||||||
try p.errors.append(p.gpa, .{
|
try p.errors.append(p.gpa, .{
|
||||||
.InvalidAnd = .{ .token = p.tok_i },
|
.InvalidAnd = .{ .token = p.tok_i },
|
||||||
});
|
});
|
||||||
break :blk p.nextToken().index;
|
break :blk p.nextToken();
|
||||||
},
|
},
|
||||||
else => return null,
|
else => return null,
|
||||||
} else p.eatToken(token) orelse return null;
|
} else p.eatToken(token) orelse return null;
|
||||||
@ -3104,7 +3096,7 @@ const Parser = struct {
|
|||||||
var tok_i = start_tok_i;
|
var tok_i = start_tok_i;
|
||||||
var count: usize = 1; // including first_line
|
var count: usize = 1; // including first_line
|
||||||
while (true) : (tok_i += 1) {
|
while (true) : (tok_i += 1) {
|
||||||
switch (p.tokens[tok_i].id) {
|
switch (p.token_ids[tok_i]) {
|
||||||
.LineComment => continue,
|
.LineComment => continue,
|
||||||
.MultilineStringLiteralLine => count += 1,
|
.MultilineStringLiteralLine => count += 1,
|
||||||
else => break,
|
else => break,
|
||||||
@ -3118,7 +3110,7 @@ const Parser = struct {
|
|||||||
lines[0] = first_line;
|
lines[0] = first_line;
|
||||||
count = 1;
|
count = 1;
|
||||||
while (true) : (tok_i += 1) {
|
while (true) : (tok_i += 1) {
|
||||||
switch (p.tokens[tok_i].id) {
|
switch (p.token_ids[tok_i]) {
|
||||||
.LineComment => continue,
|
.LineComment => continue,
|
||||||
.MultilineStringLiteralLine => {
|
.MultilineStringLiteralLine => {
|
||||||
lines[count] = tok_i;
|
lines[count] = tok_i;
|
||||||
@ -3215,7 +3207,7 @@ const Parser = struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn tokensOnSameLine(p: *Parser, token1: TokenIndex, token2: TokenIndex) bool {
|
fn tokensOnSameLine(p: *Parser, token1: TokenIndex, token2: TokenIndex) bool {
|
||||||
return std.mem.indexOfScalar(u8, p.source[p.tokens[token1].end..p.tokens[token2].start], '\n') == null;
|
return std.mem.indexOfScalar(u8, p.source[p.token_locs[token1].end..p.token_locs[token2].start], '\n') == null;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Eat a single-line doc comment on the same line as another node
|
/// Eat a single-line doc comment on the same line as another node
|
||||||
@ -3239,7 +3231,7 @@ const Parser = struct {
|
|||||||
.PrefixOp => {
|
.PrefixOp => {
|
||||||
var prefix_op = rightmost_op.cast(Node.PrefixOp).?;
|
var prefix_op = rightmost_op.cast(Node.PrefixOp).?;
|
||||||
// If the token encountered was **, there will be two nodes
|
// If the token encountered was **, there will be two nodes
|
||||||
if (p.tokens[prefix_op.op_token].id == .AsteriskAsterisk) {
|
if (p.token_ids[prefix_op.op_token] == .AsteriskAsterisk) {
|
||||||
rightmost_op = prefix_op.rhs;
|
rightmost_op = prefix_op.rhs;
|
||||||
prefix_op = rightmost_op.cast(Node.PrefixOp).?;
|
prefix_op = rightmost_op.cast(Node.PrefixOp).?;
|
||||||
}
|
}
|
||||||
@ -3328,11 +3320,7 @@ const Parser = struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn eatToken(p: *Parser, id: Token.Id) ?TokenIndex {
|
fn eatToken(p: *Parser, id: Token.Id) ?TokenIndex {
|
||||||
return if (p.eatAnnotatedToken(id)) |token| token.index else null;
|
return if (p.token_ids[p.tok_i] == id) p.nextToken() else null;
|
||||||
}
|
|
||||||
|
|
||||||
fn eatAnnotatedToken(p: *Parser, id: Token.Id) ?AnnotatedToken {
|
|
||||||
return if (p.tokens[p.tok_i].id == id) p.nextToken() else null;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn expectToken(p: *Parser, id: Token.Id) Error!TokenIndex {
|
fn expectToken(p: *Parser, id: Token.Id) Error!TokenIndex {
|
||||||
@ -3341,29 +3329,25 @@ const Parser = struct {
|
|||||||
|
|
||||||
fn expectTokenRecoverable(p: *Parser, id: Token.Id) !?TokenIndex {
|
fn expectTokenRecoverable(p: *Parser, id: Token.Id) !?TokenIndex {
|
||||||
const token = p.nextToken();
|
const token = p.nextToken();
|
||||||
if (token.ptr.id != id) {
|
if (p.token_ids[token] != id) {
|
||||||
try p.errors.append(p.gpa, .{
|
try p.errors.append(p.gpa, .{
|
||||||
.ExpectedToken = .{ .token = token.index, .expected_id = id },
|
.ExpectedToken = .{ .token = token, .expected_id = id },
|
||||||
});
|
});
|
||||||
// go back so that we can recover properly
|
// go back so that we can recover properly
|
||||||
p.putBackToken(token.index);
|
p.putBackToken(token);
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
return token.index;
|
return token;
|
||||||
}
|
}
|
||||||
|
|
||||||
fn nextToken(p: *Parser) AnnotatedToken {
|
fn nextToken(p: *Parser) TokenIndex {
|
||||||
const result = AnnotatedToken{
|
const result = p.tok_i;
|
||||||
.index = p.tok_i,
|
|
||||||
.ptr = &p.tokens[p.tok_i],
|
|
||||||
};
|
|
||||||
p.tok_i += 1;
|
p.tok_i += 1;
|
||||||
assert(result.ptr.id != .LineComment);
|
assert(p.token_ids[result] != .LineComment);
|
||||||
if (p.tok_i >= p.tokens.len) return result;
|
if (p.tok_i >= p.token_ids.len) return result;
|
||||||
|
|
||||||
while (true) {
|
while (true) {
|
||||||
const next_tok = p.tokens[p.tok_i];
|
if (p.token_ids[p.tok_i] != .LineComment) return result;
|
||||||
if (next_tok.id != .LineComment) return result;
|
|
||||||
p.tok_i += 1;
|
p.tok_i += 1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -3371,18 +3355,12 @@ const Parser = struct {
|
|||||||
fn putBackToken(p: *Parser, putting_back: TokenIndex) void {
|
fn putBackToken(p: *Parser, putting_back: TokenIndex) void {
|
||||||
while (p.tok_i > 0) {
|
while (p.tok_i > 0) {
|
||||||
p.tok_i -= 1;
|
p.tok_i -= 1;
|
||||||
const prev_tok = p.tokens[p.tok_i];
|
if (p.token_ids[p.tok_i] == .LineComment) continue;
|
||||||
if (prev_tok.id == .LineComment) continue;
|
|
||||||
assert(putting_back == p.tok_i);
|
assert(putting_back == p.tok_i);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
const AnnotatedToken = struct {
|
|
||||||
index: TokenIndex,
|
|
||||||
ptr: *const Token,
|
|
||||||
};
|
|
||||||
|
|
||||||
fn expectNode(
|
fn expectNode(
|
||||||
p: *Parser,
|
p: *Parser,
|
||||||
parseFn: NodeParseFn,
|
parseFn: NodeParseFn,
|
||||||
|
@ -3181,7 +3181,7 @@ fn testParse(source: []const u8, allocator: *mem.Allocator, anything_changed: *b
|
|||||||
defer tree.deinit();
|
defer tree.deinit();
|
||||||
|
|
||||||
for (tree.errors) |*parse_error| {
|
for (tree.errors) |*parse_error| {
|
||||||
const token = tree.tokens[parse_error.loc()];
|
const token = tree.token_locs[parse_error.loc()];
|
||||||
const loc = tree.tokenLocation(0, parse_error.loc());
|
const loc = tree.tokenLocation(0, parse_error.loc());
|
||||||
try stderr.print("(memory buffer):{}:{}: error: ", .{ loc.line + 1, loc.column + 1 });
|
try stderr.print("(memory buffer):{}:{}: error: ", .{ loc.line + 1, loc.column + 1 });
|
||||||
try tree.renderError(parse_error, stderr);
|
try tree.renderError(parse_error, stderr);
|
||||||
|
@ -68,11 +68,12 @@ fn renderRoot(
|
|||||||
tree: *ast.Tree,
|
tree: *ast.Tree,
|
||||||
) (@TypeOf(stream).Error || Error)!void {
|
) (@TypeOf(stream).Error || Error)!void {
|
||||||
// render all the line comments at the beginning of the file
|
// render all the line comments at the beginning of the file
|
||||||
for (tree.tokens) |token, i| {
|
for (tree.token_ids) |token_id, i| {
|
||||||
if (token.id != .LineComment) break;
|
if (token_id != .LineComment) break;
|
||||||
try stream.print("{}\n", .{mem.trimRight(u8, tree.tokenSlicePtr(token), " ")});
|
const token_loc = tree.token_locs[i];
|
||||||
const next_token = &tree.tokens[i + 1];
|
try stream.print("{}\n", .{mem.trimRight(u8, tree.tokenSliceLoc(token_loc), " ")});
|
||||||
const loc = tree.tokenLocationPtr(token.end, next_token.*);
|
const next_token = tree.token_locs[i + 1];
|
||||||
|
const loc = tree.tokenLocationLoc(token_loc.end, next_token);
|
||||||
if (loc.line >= 2) {
|
if (loc.line >= 2) {
|
||||||
try stream.writeByte('\n');
|
try stream.writeByte('\n');
|
||||||
}
|
}
|
||||||
@ -101,8 +102,8 @@ fn renderRoot(
|
|||||||
|
|
||||||
while (token_index != 0) {
|
while (token_index != 0) {
|
||||||
token_index -= 1;
|
token_index -= 1;
|
||||||
const token = tree.tokens[token_index];
|
const token_id = tree.token_ids[token_index];
|
||||||
switch (token.id) {
|
switch (token_id) {
|
||||||
.LineComment => {},
|
.LineComment => {},
|
||||||
.DocComment => {
|
.DocComment => {
|
||||||
copy_start_token_index = token_index;
|
copy_start_token_index = token_index;
|
||||||
@ -111,12 +112,13 @@ fn renderRoot(
|
|||||||
else => break,
|
else => break,
|
||||||
}
|
}
|
||||||
|
|
||||||
if (mem.eql(u8, mem.trim(u8, tree.tokenSlicePtr(token)[2..], " "), "zig fmt: off")) {
|
const token_loc = tree.token_locs[token_index];
|
||||||
|
if (mem.eql(u8, mem.trim(u8, tree.tokenSliceLoc(token_loc)[2..], " "), "zig fmt: off")) {
|
||||||
if (!found_fmt_directive) {
|
if (!found_fmt_directive) {
|
||||||
fmt_active = false;
|
fmt_active = false;
|
||||||
found_fmt_directive = true;
|
found_fmt_directive = true;
|
||||||
}
|
}
|
||||||
} else if (mem.eql(u8, mem.trim(u8, tree.tokenSlicePtr(token)[2..], " "), "zig fmt: on")) {
|
} else if (mem.eql(u8, mem.trim(u8, tree.tokenSliceLoc(token_loc)[2..], " "), "zig fmt: on")) {
|
||||||
if (!found_fmt_directive) {
|
if (!found_fmt_directive) {
|
||||||
fmt_active = true;
|
fmt_active = true;
|
||||||
found_fmt_directive = true;
|
found_fmt_directive = true;
|
||||||
@ -135,7 +137,7 @@ fn renderRoot(
|
|||||||
if (decl_i >= root_decls.len) {
|
if (decl_i >= root_decls.len) {
|
||||||
// If there's no next reformatted `decl`, just copy the
|
// If there's no next reformatted `decl`, just copy the
|
||||||
// remaining input tokens and bail out.
|
// remaining input tokens and bail out.
|
||||||
const start = tree.tokens[copy_start_token_index].start;
|
const start = tree.token_locs[copy_start_token_index].start;
|
||||||
try copyFixingWhitespace(stream, tree.source[start..]);
|
try copyFixingWhitespace(stream, tree.source[start..]);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
@ -143,15 +145,16 @@ fn renderRoot(
|
|||||||
var decl_first_token_index = decl.firstToken();
|
var decl_first_token_index = decl.firstToken();
|
||||||
|
|
||||||
while (token_index < decl_first_token_index) : (token_index += 1) {
|
while (token_index < decl_first_token_index) : (token_index += 1) {
|
||||||
const token = tree.tokens[token_index];
|
const token_id = tree.token_ids[token_index];
|
||||||
switch (token.id) {
|
switch (token_id) {
|
||||||
.LineComment => {},
|
.LineComment => {},
|
||||||
.Eof => unreachable,
|
.Eof => unreachable,
|
||||||
else => continue,
|
else => continue,
|
||||||
}
|
}
|
||||||
if (mem.eql(u8, mem.trim(u8, tree.tokenSlicePtr(token)[2..], " "), "zig fmt: on")) {
|
const token_loc = tree.token_locs[token_index];
|
||||||
|
if (mem.eql(u8, mem.trim(u8, tree.tokenSliceLoc(token_loc)[2..], " "), "zig fmt: on")) {
|
||||||
fmt_active = true;
|
fmt_active = true;
|
||||||
} else if (mem.eql(u8, mem.trim(u8, tree.tokenSlicePtr(token)[2..], " "), "zig fmt: off")) {
|
} else if (mem.eql(u8, mem.trim(u8, tree.tokenSliceLoc(token_loc)[2..], " "), "zig fmt: off")) {
|
||||||
fmt_active = false;
|
fmt_active = false;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -163,8 +166,8 @@ fn renderRoot(
|
|||||||
token_index = copy_end_token_index;
|
token_index = copy_end_token_index;
|
||||||
while (token_index != 0) {
|
while (token_index != 0) {
|
||||||
token_index -= 1;
|
token_index -= 1;
|
||||||
const token = tree.tokens[token_index];
|
const token_id = tree.token_ids[token_index];
|
||||||
switch (token.id) {
|
switch (token_id) {
|
||||||
.LineComment => {},
|
.LineComment => {},
|
||||||
.DocComment => {
|
.DocComment => {
|
||||||
copy_end_token_index = token_index;
|
copy_end_token_index = token_index;
|
||||||
@ -174,8 +177,8 @@ fn renderRoot(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
const start = tree.tokens[copy_start_token_index].start;
|
const start = tree.token_locs[copy_start_token_index].start;
|
||||||
const end = tree.tokens[copy_end_token_index].start;
|
const end = tree.token_locs[copy_end_token_index].start;
|
||||||
try copyFixingWhitespace(stream, tree.source[start..end]);
|
try copyFixingWhitespace(stream, tree.source[start..end]);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -194,13 +197,13 @@ fn renderExtraNewlineToken(tree: *ast.Tree, stream: var, start_col: *usize, firs
|
|||||||
var prev_token = first_token;
|
var prev_token = first_token;
|
||||||
if (prev_token == 0) return;
|
if (prev_token == 0) return;
|
||||||
var newline_threshold: usize = 2;
|
var newline_threshold: usize = 2;
|
||||||
while (tree.tokens[prev_token - 1].id == .DocComment) {
|
while (tree.token_ids[prev_token - 1] == .DocComment) {
|
||||||
if (tree.tokenLocation(tree.tokens[prev_token - 1].end, prev_token).line == 1) {
|
if (tree.tokenLocation(tree.token_locs[prev_token - 1].end, prev_token).line == 1) {
|
||||||
newline_threshold += 1;
|
newline_threshold += 1;
|
||||||
}
|
}
|
||||||
prev_token -= 1;
|
prev_token -= 1;
|
||||||
}
|
}
|
||||||
const prev_token_end = tree.tokens[prev_token - 1].end;
|
const prev_token_end = tree.token_locs[prev_token - 1].end;
|
||||||
const loc = tree.tokenLocation(prev_token_end, first_token);
|
const loc = tree.tokenLocation(prev_token_end, first_token);
|
||||||
if (loc.line >= newline_threshold) {
|
if (loc.line >= newline_threshold) {
|
||||||
try stream.writeByte('\n');
|
try stream.writeByte('\n');
|
||||||
@ -265,7 +268,7 @@ fn renderContainerDecl(allocator: *mem.Allocator, stream: var, tree: *ast.Tree,
|
|||||||
|
|
||||||
const src_has_trailing_comma = blk: {
|
const src_has_trailing_comma = blk: {
|
||||||
const maybe_comma = tree.nextToken(field.lastToken());
|
const maybe_comma = tree.nextToken(field.lastToken());
|
||||||
break :blk tree.tokens[maybe_comma].id == .Comma;
|
break :blk tree.token_ids[maybe_comma] == .Comma;
|
||||||
};
|
};
|
||||||
|
|
||||||
// The trailing comma is emitted at the end, but if it's not present
|
// The trailing comma is emitted at the end, but if it's not present
|
||||||
@ -327,11 +330,11 @@ fn renderContainerDecl(allocator: *mem.Allocator, stream: var, tree: *ast.Tree,
|
|||||||
|
|
||||||
.DocComment => {
|
.DocComment => {
|
||||||
const comment = @fieldParentPtr(ast.Node.DocComment, "base", decl);
|
const comment = @fieldParentPtr(ast.Node.DocComment, "base", decl);
|
||||||
const kind = tree.tokens[comment.first_line].id;
|
const kind = tree.token_ids[comment.first_line];
|
||||||
try renderToken(tree, stream, comment.first_line, indent, start_col, .Newline);
|
try renderToken(tree, stream, comment.first_line, indent, start_col, .Newline);
|
||||||
var tok_i = comment.first_line + 1;
|
var tok_i = comment.first_line + 1;
|
||||||
while (true) : (tok_i += 1) {
|
while (true) : (tok_i += 1) {
|
||||||
const tok_id = tree.tokens[tok_i].id;
|
const tok_id = tree.token_ids[tok_i];
|
||||||
if (tok_id == kind) {
|
if (tok_id == kind) {
|
||||||
try stream.writeByteNTimes(' ', indent);
|
try stream.writeByteNTimes(' ', indent);
|
||||||
try renderToken(tree, stream, tok_i, indent, start_col, .Newline);
|
try renderToken(tree, stream, tok_i, indent, start_col, .Newline);
|
||||||
@ -436,13 +439,13 @@ fn renderExpression(
|
|||||||
try renderExpression(allocator, stream, tree, indent, start_col, infix_op_node.lhs, op_space);
|
try renderExpression(allocator, stream, tree, indent, start_col, infix_op_node.lhs, op_space);
|
||||||
|
|
||||||
const after_op_space = blk: {
|
const after_op_space = blk: {
|
||||||
const loc = tree.tokenLocation(tree.tokens[infix_op_node.op_token].end, tree.nextToken(infix_op_node.op_token));
|
const loc = tree.tokenLocation(tree.token_locs[infix_op_node.op_token].end, tree.nextToken(infix_op_node.op_token));
|
||||||
break :blk if (loc.line == 0) op_space else Space.Newline;
|
break :blk if (loc.line == 0) op_space else Space.Newline;
|
||||||
};
|
};
|
||||||
|
|
||||||
try renderToken(tree, stream, infix_op_node.op_token, indent, start_col, after_op_space);
|
try renderToken(tree, stream, infix_op_node.op_token, indent, start_col, after_op_space);
|
||||||
if (after_op_space == Space.Newline and
|
if (after_op_space == Space.Newline and
|
||||||
tree.tokens[tree.nextToken(infix_op_node.op_token)].id != .MultilineStringLiteralLine)
|
tree.token_ids[tree.nextToken(infix_op_node.op_token)] != .MultilineStringLiteralLine)
|
||||||
{
|
{
|
||||||
try stream.writeByteNTimes(' ', indent + indent_delta);
|
try stream.writeByteNTimes(' ', indent + indent_delta);
|
||||||
start_col.* = indent + indent_delta;
|
start_col.* = indent + indent_delta;
|
||||||
@ -463,10 +466,10 @@ fn renderExpression(
|
|||||||
|
|
||||||
switch (prefix_op_node.op) {
|
switch (prefix_op_node.op) {
|
||||||
.PtrType => |ptr_info| {
|
.PtrType => |ptr_info| {
|
||||||
const op_tok_id = tree.tokens[prefix_op_node.op_token].id;
|
const op_tok_id = tree.token_ids[prefix_op_node.op_token];
|
||||||
switch (op_tok_id) {
|
switch (op_tok_id) {
|
||||||
.Asterisk, .AsteriskAsterisk => try stream.writeByte('*'),
|
.Asterisk, .AsteriskAsterisk => try stream.writeByte('*'),
|
||||||
.LBracket => if (tree.tokens[prefix_op_node.op_token + 2].id == .Identifier)
|
.LBracket => if (tree.token_ids[prefix_op_node.op_token + 2] == .Identifier)
|
||||||
try stream.writeAll("[*c")
|
try stream.writeAll("[*c")
|
||||||
else
|
else
|
||||||
try stream.writeAll("[*"),
|
try stream.writeAll("[*"),
|
||||||
@ -578,8 +581,8 @@ fn renderExpression(
|
|||||||
|
|
||||||
try renderToken(tree, stream, lbracket, indent, start_col, Space.None); // [
|
try renderToken(tree, stream, lbracket, indent, start_col, Space.None); // [
|
||||||
|
|
||||||
const starts_with_comment = tree.tokens[lbracket + 1].id == .LineComment;
|
const starts_with_comment = tree.token_ids[lbracket + 1] == .LineComment;
|
||||||
const ends_with_comment = tree.tokens[rbracket - 1].id == .LineComment;
|
const ends_with_comment = tree.token_ids[rbracket - 1] == .LineComment;
|
||||||
const new_indent = if (ends_with_comment) indent + indent_delta else indent;
|
const new_indent = if (ends_with_comment) indent + indent_delta else indent;
|
||||||
const new_space = if (ends_with_comment) Space.Newline else Space.None;
|
const new_space = if (ends_with_comment) Space.Newline else Space.None;
|
||||||
try renderExpression(allocator, stream, tree, new_indent, start_col, array_info.len_expr, new_space);
|
try renderExpression(allocator, stream, tree, new_indent, start_col, array_info.len_expr, new_space);
|
||||||
@ -653,7 +656,7 @@ fn renderExpression(
|
|||||||
return renderToken(tree, stream, rtoken, indent, start_col, space);
|
return renderToken(tree, stream, rtoken, indent, start_col, space);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (exprs.len == 1 and tree.tokens[exprs[0].lastToken() + 1].id == .RBrace) {
|
if (exprs.len == 1 and tree.token_ids[exprs[0].lastToken() + 1] == .RBrace) {
|
||||||
const expr = exprs[0];
|
const expr = exprs[0];
|
||||||
switch (lhs) {
|
switch (lhs) {
|
||||||
.dot => |dot| try renderToken(tree, stream, dot, indent, start_col, Space.None),
|
.dot => |dot| try renderToken(tree, stream, dot, indent, start_col, Space.None),
|
||||||
@ -675,17 +678,17 @@ fn renderExpression(
|
|||||||
for (exprs) |expr, i| {
|
for (exprs) |expr, i| {
|
||||||
if (i + 1 < exprs.len) {
|
if (i + 1 < exprs.len) {
|
||||||
const expr_last_token = expr.lastToken() + 1;
|
const expr_last_token = expr.lastToken() + 1;
|
||||||
const loc = tree.tokenLocation(tree.tokens[expr_last_token].end, exprs[i+1].firstToken());
|
const loc = tree.tokenLocation(tree.token_locs[expr_last_token].end, exprs[i+1].firstToken());
|
||||||
if (loc.line != 0) break :blk count;
|
if (loc.line != 0) break :blk count;
|
||||||
count += 1;
|
count += 1;
|
||||||
} else {
|
} else {
|
||||||
const expr_last_token = expr.lastToken();
|
const expr_last_token = expr.lastToken();
|
||||||
const loc = tree.tokenLocation(tree.tokens[expr_last_token].end, rtoken);
|
const loc = tree.tokenLocation(tree.token_locs[expr_last_token].end, rtoken);
|
||||||
if (loc.line == 0) {
|
if (loc.line == 0) {
|
||||||
// all on one line
|
// all on one line
|
||||||
const src_has_trailing_comma = trailblk: {
|
const src_has_trailing_comma = trailblk: {
|
||||||
const maybe_comma = tree.prevToken(rtoken);
|
const maybe_comma = tree.prevToken(rtoken);
|
||||||
break :trailblk tree.tokens[maybe_comma].id == .Comma;
|
break :trailblk tree.token_ids[maybe_comma] == .Comma;
|
||||||
};
|
};
|
||||||
if (src_has_trailing_comma) {
|
if (src_has_trailing_comma) {
|
||||||
break :blk 1; // force row size 1
|
break :blk 1; // force row size 1
|
||||||
@ -723,7 +726,7 @@ fn renderExpression(
|
|||||||
|
|
||||||
var new_indent = indent + indent_delta;
|
var new_indent = indent + indent_delta;
|
||||||
|
|
||||||
if (tree.tokens[tree.nextToken(lbrace)].id != .MultilineStringLiteralLine) {
|
if (tree.token_ids[tree.nextToken(lbrace)] != .MultilineStringLiteralLine) {
|
||||||
try renderToken(tree, stream, lbrace, new_indent, start_col, Space.Newline);
|
try renderToken(tree, stream, lbrace, new_indent, start_col, Space.Newline);
|
||||||
try stream.writeByteNTimes(' ', new_indent);
|
try stream.writeByteNTimes(' ', new_indent);
|
||||||
} else {
|
} else {
|
||||||
@ -750,7 +753,7 @@ fn renderExpression(
|
|||||||
}
|
}
|
||||||
col = 1;
|
col = 1;
|
||||||
|
|
||||||
if (tree.tokens[tree.nextToken(comma)].id != .MultilineStringLiteralLine) {
|
if (tree.token_ids[tree.nextToken(comma)] != .MultilineStringLiteralLine) {
|
||||||
try renderToken(tree, stream, comma, new_indent, start_col, Space.Newline); // ,
|
try renderToken(tree, stream, comma, new_indent, start_col, Space.Newline); // ,
|
||||||
} else {
|
} else {
|
||||||
try renderToken(tree, stream, comma, new_indent, start_col, Space.None); // ,
|
try renderToken(tree, stream, comma, new_indent, start_col, Space.None); // ,
|
||||||
@ -819,11 +822,11 @@ fn renderExpression(
|
|||||||
|
|
||||||
const src_has_trailing_comma = blk: {
|
const src_has_trailing_comma = blk: {
|
||||||
const maybe_comma = tree.prevToken(rtoken);
|
const maybe_comma = tree.prevToken(rtoken);
|
||||||
break :blk tree.tokens[maybe_comma].id == .Comma;
|
break :blk tree.token_ids[maybe_comma] == .Comma;
|
||||||
};
|
};
|
||||||
|
|
||||||
const src_same_line = blk: {
|
const src_same_line = blk: {
|
||||||
const loc = tree.tokenLocation(tree.tokens[lbrace].end, rtoken);
|
const loc = tree.tokenLocation(tree.token_locs[lbrace].end, rtoken);
|
||||||
break :blk loc.line == 0;
|
break :blk loc.line == 0;
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -929,7 +932,7 @@ fn renderExpression(
|
|||||||
|
|
||||||
const src_has_trailing_comma = blk: {
|
const src_has_trailing_comma = blk: {
|
||||||
const maybe_comma = tree.prevToken(call.rtoken);
|
const maybe_comma = tree.prevToken(call.rtoken);
|
||||||
break :blk tree.tokens[maybe_comma].id == .Comma;
|
break :blk tree.token_ids[maybe_comma] == .Comma;
|
||||||
};
|
};
|
||||||
|
|
||||||
if (src_has_trailing_comma) {
|
if (src_has_trailing_comma) {
|
||||||
@ -983,8 +986,8 @@ fn renderExpression(
|
|||||||
 try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs, Space.None);
 try renderToken(tree, stream, lbracket, indent, start_col, Space.None); // [

-const starts_with_comment = tree.tokens[lbracket + 1].id == .LineComment;
-const ends_with_comment = tree.tokens[rbracket - 1].id == .LineComment;
+const starts_with_comment = tree.token_ids[lbracket + 1] == .LineComment;
+const ends_with_comment = tree.token_ids[rbracket - 1] == .LineComment;
 const new_indent = if (ends_with_comment) indent + indent_delta else indent;
 const new_space = if (ends_with_comment) Space.Newline else Space.None;
 try renderExpression(allocator, stream, tree, new_indent, start_col, index_expr, new_space);

@@ -1226,9 +1229,9 @@ fn renderExpression(
 var maybe_comma = tree.prevToken(container_decl.lastToken());
 // Doc comments for a field may also appear after the comma, eg.
 // field_name: T, // comment attached to field_name
-if (tree.tokens[maybe_comma].id == .DocComment)
+if (tree.token_ids[maybe_comma] == .DocComment)
 maybe_comma = tree.prevToken(maybe_comma);
-break :blk tree.tokens[maybe_comma].id == .Comma;
+break :blk tree.token_ids[maybe_comma] == .Comma;
 };

 const fields_and_decls = container_decl.fieldsAndDecls();

@@ -1321,7 +1324,7 @@ fn renderExpression(

 const src_has_trailing_comma = blk: {
 const maybe_comma = tree.prevToken(err_set_decl.rbrace_token);
-break :blk tree.tokens[maybe_comma].id == .Comma;
+break :blk tree.token_ids[maybe_comma] == .Comma;
 };

 if (src_has_trailing_comma) {

@@ -1353,7 +1356,7 @@ fn renderExpression(
 try renderExpression(allocator, stream, tree, indent, start_col, node, Space.None);

 const comma_token = tree.nextToken(node.lastToken());
-assert(tree.tokens[comma_token].id == .Comma);
+assert(tree.token_ids[comma_token] == .Comma);
 try renderToken(tree, stream, comma_token, indent, start_col, Space.Space); // ,
 try renderExtraNewline(tree, stream, start_col, decls[i + 1]);
 } else {

@@ -1378,7 +1381,7 @@ fn renderExpression(
 const multiline_str_literal = @fieldParentPtr(ast.Node.MultilineStringLiteral, "base", base);

 var skip_first_indent = true;
-if (tree.tokens[multiline_str_literal.firstToken() - 1].id != .LineComment) {
+if (tree.token_ids[multiline_str_literal.firstToken() - 1] != .LineComment) {
 try stream.print("\n", .{});
 skip_first_indent = false;
 }

@@ -1406,7 +1409,7 @@ fn renderExpression(
 if (builtin_call.params_len < 2) break :blk false;
 const last_node = builtin_call.params()[builtin_call.params_len - 1];
 const maybe_comma = tree.nextToken(last_node.lastToken());
-break :blk tree.tokens[maybe_comma].id == .Comma;
+break :blk tree.token_ids[maybe_comma] == .Comma;
 };

 const lparen = tree.nextToken(builtin_call.builtin_token);
@@ -1443,8 +1446,8 @@ fn renderExpression(
 const fn_proto = @fieldParentPtr(ast.Node.FnProto, "base", base);

 if (fn_proto.visib_token) |visib_token_index| {
-const visib_token = tree.tokens[visib_token_index];
-assert(visib_token.id == .Keyword_pub or visib_token.id == .Keyword_export);
+const visib_token = tree.token_ids[visib_token_index];
+assert(visib_token == .Keyword_pub or visib_token == .Keyword_export);

 try renderToken(tree, stream, visib_token_index, indent, start_col, Space.Space); // pub
 }

@@ -1466,7 +1469,7 @@ fn renderExpression(
 try renderToken(tree, stream, fn_proto.fn_token, indent, start_col, Space.Space); // fn
 break :blk tree.nextToken(fn_proto.fn_token);
 };
-assert(tree.tokens[lparen].id == .LParen);
+assert(tree.token_ids[lparen] == .LParen);

 const rparen = tree.prevToken(
 // the first token for the annotation expressions is the left

@@ -1482,10 +1485,10 @@ fn renderExpression(
 .InferErrorSet => |node| tree.prevToken(node.firstToken()),
 .Invalid => unreachable,
 });
-assert(tree.tokens[rparen].id == .RParen);
+assert(tree.token_ids[rparen] == .RParen);

 const src_params_trailing_comma = blk: {
-const maybe_comma = tree.tokens[rparen - 1].id;
+const maybe_comma = tree.token_ids[rparen - 1];
 break :blk maybe_comma == .Comma or maybe_comma == .LineComment;
 };

@@ -1622,7 +1625,7 @@ fn renderExpression(
 const src_has_trailing_comma = blk: {
 const last_node = switch_case.items()[switch_case.items_len - 1];
 const maybe_comma = tree.nextToken(last_node.lastToken());
-break :blk tree.tokens[maybe_comma].id == .Comma;
+break :blk tree.token_ids[maybe_comma] == .Comma;
 };

 if (switch_case.items_len == 1 or !src_has_trailing_comma) {
@@ -1967,7 +1970,7 @@ fn renderExpression(
 try renderAsmOutput(allocator, stream, tree, indent_extra, start_col, asm_output, Space.Newline);
 try stream.writeByteNTimes(' ', indent_once);
 const comma_or_colon = tree.nextToken(asm_output.lastToken());
-break :blk switch (tree.tokens[comma_or_colon].id) {
+break :blk switch (tree.token_ids[comma_or_colon]) {
 .Comma => tree.nextToken(comma_or_colon),
 else => comma_or_colon,
 };

@@ -2002,7 +2005,7 @@ fn renderExpression(
 try renderAsmInput(allocator, stream, tree, indent_extra, start_col, asm_input, Space.Newline);
 try stream.writeByteNTimes(' ', indent_once);
 const comma_or_colon = tree.nextToken(asm_input.lastToken());
-break :blk switch (tree.tokens[comma_or_colon].id) {
+break :blk switch (tree.token_ids[comma_or_colon]) {
 .Comma => tree.nextToken(comma_or_colon),
 else => comma_or_colon,
 };

@@ -2205,7 +2208,7 @@ fn renderStatement(
 try renderExpression(allocator, stream, tree, indent, start_col, base, Space.None);

 const semicolon_index = tree.nextToken(base.lastToken());
-assert(tree.tokens[semicolon_index].id == .Semicolon);
+assert(tree.token_ids[semicolon_index] == .Semicolon);
 try renderToken(tree, stream, semicolon_index, indent, start_col, Space.Newline);
 } else {
 try renderExpression(allocator, stream, tree, indent, start_col, base, Space.Newline);
@@ -2243,22 +2246,25 @@ fn renderTokenOffset(
 return;
 }

-var token = tree.tokens[token_index];
-try stream.writeAll(mem.trimRight(u8, tree.tokenSlicePtr(token)[token_skip_bytes..], " "));
+var token_loc = tree.token_locs[token_index];
+try stream.writeAll(mem.trimRight(u8, tree.tokenSliceLoc(token_loc)[token_skip_bytes..], " "));

 if (space == Space.NoComment)
 return;

-var next_token = tree.tokens[token_index + 1];
+var next_token_id = tree.token_ids[token_index + 1];
+var next_token_loc = tree.token_locs[token_index + 1];

-if (space == Space.Comma) switch (next_token.id) {
+if (space == Space.Comma) switch (next_token_id) {
 .Comma => return renderToken(tree, stream, token_index + 1, indent, start_col, Space.Newline),
 .LineComment => {
 try stream.writeAll(", ");
 return renderToken(tree, stream, token_index + 1, indent, start_col, Space.Newline);
 },
 else => {
-if (token_index + 2 < tree.tokens.len and tree.tokens[token_index + 2].id == .MultilineStringLiteralLine) {
+if (token_index + 2 < tree.token_ids.len and
+tree.token_ids[token_index + 2] == .MultilineStringLiteralLine)
+{
 try stream.writeAll(",");
 return;
 } else {

@@ -2271,19 +2277,20 @@ fn renderTokenOffset(

 // Skip over same line doc comments
 var offset: usize = 1;
-if (next_token.id == .DocComment) {
-const loc = tree.tokenLocationPtr(token.end, next_token);
+if (next_token_id == .DocComment) {
+const loc = tree.tokenLocationLoc(token_loc.end, next_token_loc);
 if (loc.line == 0) {
 offset += 1;
-next_token = tree.tokens[token_index + offset];
+next_token_id = tree.token_ids[token_index + offset];
+next_token_loc = tree.token_locs[token_index + offset];
 }
 }

-if (next_token.id != .LineComment) blk: {
+if (next_token_id != .LineComment) blk: {
 switch (space) {
 Space.None, Space.NoNewline => return,
 Space.Newline => {
-if (next_token.id == .MultilineStringLiteralLine) {
+if (next_token_id == .MultilineStringLiteralLine) {
 return;
 } else {
 try stream.writeAll("\n");
@@ -2292,7 +2299,7 @@ fn renderTokenOffset(
 }
 },
 Space.Space, Space.SpaceOrOutdent => {
-if (next_token.id == .MultilineStringLiteralLine)
+if (next_token_id == .MultilineStringLiteralLine)
 return;
 try stream.writeByte(' ');
 return;

@@ -2302,14 +2309,15 @@ fn renderTokenOffset(
 }

 while (true) {
-const comment_is_empty = mem.trimRight(u8, tree.tokenSlicePtr(next_token), " ").len == 2;
+const comment_is_empty = mem.trimRight(u8, tree.tokenSliceLoc(next_token_loc), " ").len == 2;
 if (comment_is_empty) {
 switch (space) {
 Space.Newline => {
 offset += 1;
-token = next_token;
-next_token = tree.tokens[token_index + offset];
-if (next_token.id != .LineComment) {
+token_loc = next_token_loc;
+next_token_id = tree.token_ids[token_index + offset];
+next_token_loc = tree.token_locs[token_index + offset];
+if (next_token_id != .LineComment) {
 try stream.writeByte('\n');
 start_col.* = 0;
 return;

@@ -2322,18 +2330,19 @@ fn renderTokenOffset(
 }
 }

-var loc = tree.tokenLocationPtr(token.end, next_token);
+var loc = tree.tokenLocationLoc(token_loc.end, next_token_loc);
 if (loc.line == 0) {
-try stream.print(" {}", .{mem.trimRight(u8, tree.tokenSlicePtr(next_token), " ")});
+try stream.print(" {}", .{mem.trimRight(u8, tree.tokenSliceLoc(next_token_loc), " ")});
 offset = 2;
-token = next_token;
-next_token = tree.tokens[token_index + offset];
-if (next_token.id != .LineComment) {
+token_loc = next_token_loc;
+next_token_loc = tree.token_locs[token_index + offset];
+next_token_id = tree.token_ids[token_index + offset];
+if (next_token_id != .LineComment) {
 switch (space) {
 Space.None, Space.Space => {
 try stream.writeByte('\n');
-const after_comment_token = tree.tokens[token_index + offset];
-const next_line_indent = switch (after_comment_token.id) {
+const after_comment_token = tree.token_ids[token_index + offset];
+const next_line_indent = switch (after_comment_token) {
 .RParen, .RBrace, .RBracket => indent,
 else => indent + indent_delta,
 };
@@ -2346,7 +2355,7 @@ fn renderTokenOffset(
 start_col.* = indent;
 },
 Space.Newline => {
-if (next_token.id == .MultilineStringLiteralLine) {
+if (next_token_id == .MultilineStringLiteralLine) {
 return;
 } else {
 try stream.writeAll("\n");

@@ -2359,7 +2368,7 @@ fn renderTokenOffset(
 }
 return;
 }
-loc = tree.tokenLocationPtr(token.end, next_token);
+loc = tree.tokenLocationLoc(token_loc.end, next_token_loc);
 }

 while (true) {

@@ -2369,15 +2378,16 @@ fn renderTokenOffset(
 const newline_count = if (loc.line <= 1) @as(u8, 1) else @as(u8, 2);
 try stream.writeByteNTimes('\n', newline_count);
 try stream.writeByteNTimes(' ', indent);
-try stream.writeAll(mem.trimRight(u8, tree.tokenSlicePtr(next_token), " "));
+try stream.writeAll(mem.trimRight(u8, tree.tokenSliceLoc(next_token_loc), " "));

 offset += 1;
-token = next_token;
-next_token = tree.tokens[token_index + offset];
-if (next_token.id != .LineComment) {
+token_loc = next_token_loc;
+next_token_loc = tree.token_locs[token_index + offset];
+next_token_id = tree.token_ids[token_index + offset];
+if (next_token_id != .LineComment) {
 switch (space) {
 Space.Newline => {
-if (next_token.id == .MultilineStringLiteralLine) {
+if (next_token_id == .MultilineStringLiteralLine) {
 return;
 } else {
 try stream.writeAll("\n");
@@ -2388,8 +2398,8 @@ fn renderTokenOffset(
 Space.None, Space.Space => {
 try stream.writeByte('\n');

-const after_comment_token = tree.tokens[token_index + offset];
-const next_line_indent = switch (after_comment_token.id) {
+const after_comment_token = tree.token_ids[token_index + offset];
+const next_line_indent = switch (after_comment_token) {
 .RParen, .RBrace, .RBracket => blk: {
 if (indent > indent_delta) {
 break :blk indent - indent_delta;

@@ -2412,7 +2422,7 @@ fn renderTokenOffset(
 }
 return;
 }
-loc = tree.tokenLocationPtr(token.end, next_token);
+loc = tree.tokenLocationLoc(token_loc.end, next_token_loc);
 }
 }

@@ -2448,7 +2458,7 @@ fn renderDocCommentsToken(
 ) (@TypeOf(stream).Error || Error)!void {
 var tok_i = comment.first_line;
 while (true) : (tok_i += 1) {
-switch (tree.tokens[tok_i].id) {
+switch (tree.token_ids[tok_i]) {
 .DocComment, .ContainerDocComment => {
 if (comment.first_line < first_token) {
 try renderToken(tree, stream, tok_i, indent, start_col, Space.Newline);
@@ -3,8 +3,12 @@ const mem = std.mem;

 pub const Token = struct {
 id: Id,
-start: usize,
-end: usize,
+loc: Loc,
+
+pub const Loc = struct {
+start: usize,
+end: usize,
+};

 pub const Keyword = struct {
 bytes: []const u8,

@@ -426,8 +430,10 @@ pub const Tokenizer = struct {
 var state: State = .start;
 var result = Token{
 .id = .Eof,
-.start = self.index,
-.end = undefined,
+.loc = .{
+.start = self.index,
+.end = undefined,
+},
 };
 var seen_escape_digits: usize = undefined;
 var remaining_code_units: usize = undefined;
@@ -436,7 +442,7 @@ pub const Tokenizer = struct {
 switch (state) {
 .start => switch (c) {
 ' ', '\n', '\t', '\r' => {
-result.start = self.index + 1;
+result.loc.start = self.index + 1;
 },
 '"' => {
 state = .string_literal;

@@ -686,7 +692,7 @@ pub const Tokenizer = struct {
 .identifier => switch (c) {
 'a'...'z', 'A'...'Z', '_', '0'...'9' => {},
 else => {
-if (Token.getKeyword(self.buffer[result.start..self.index])) |id| {
+if (Token.getKeyword(self.buffer[result.loc.start..self.index])) |id| {
 result.id = id;
 }
 break;

@@ -1313,7 +1319,7 @@ pub const Tokenizer = struct {
 => {},

 .identifier => {
-if (Token.getKeyword(self.buffer[result.start..self.index])) |id| {
+if (Token.getKeyword(self.buffer[result.loc.start..self.index])) |id| {
 result.id = id;
 }
 },

@@ -1420,7 +1426,7 @@ pub const Tokenizer = struct {
 }
 }

-result.end = self.index;
+result.loc.end = self.index;
 return result;
 }

@@ -1430,8 +1436,10 @@ pub const Tokenizer = struct {
 if (invalid_length == 0) return;
 self.pending_invalid_token = .{
 .id = .Invalid,
-.start = self.index,
-.end = self.index + invalid_length,
+.loc = .{
+.start = self.index,
+.end = self.index + invalid_length,
+},
 };
 }

@@ -247,27 +247,6 @@ pub const Context = struct
 }
 };

-/// Helper function to append items to a singly linked list.
-fn llpusher(c: *Context, list: *std.SinglyLinkedList(*ast.Node)) LinkedListPusher {
-assert(list.first == null);
-return .{
-.c = c,
-.it = &list.first,
-};
-}
-
-fn llpush(
-c: *Context,
-comptime T: type,
-it: *?*std.SinglyLinkedList(T).Node,
-data: T,
-) !*?*std.SinglyLinkedList(T).Node {
-const llnode = try c.arena.create(std.SinglyLinkedList(T).Node);
-llnode.* = .{ .data = data };
-it.* = llnode;
-return &llnode.next;
-}
-
 fn getMangle(c: *Context) u32 {
 c.mangle_count += 1;
 return c.mangle_count;
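
For readers skimming the hunks above, here is a minimal, self-contained sketch of the call-site pattern the diff converges on. It is not code from this commit; ExampleTree, TokenId, and TokenLoc are illustrative stand-ins for the real Tree and Token.Id/Token.Loc types. Token ids and source locations are kept in two parallel slices indexed by the same token index, so id comparisons read one array while lexeme slicing reads the other.

const std = @import("std");
const assert = std.debug.assert;

// Illustrative stand-ins for the real Token.Id and Token.Loc types.
const TokenId = enum { Identifier, Comma, Eof };
const TokenLoc = struct { start: usize, end: usize };

const ExampleTree = struct {
    source: []const u8,
    token_ids: []const TokenId,
    token_locs: []const TokenLoc,

    // Lexeme lookup touches only the locations array.
    fn tokenSlice(self: ExampleTree, i: usize) []const u8 {
        const loc = self.token_locs[i];
        return self.source[loc.start..loc.end];
    }
};

pub fn main() void {
    const tree = ExampleTree{
        .source = "a , b",
        .token_ids = &[_]TokenId{ .Identifier, .Comma, .Identifier },
        .token_locs = &[_]TokenLoc{
            .{ .start = 0, .end = 1 },
            .{ .start = 2, .end = 3 },
            .{ .start = 4, .end = 5 },
        },
    };
    // Id checks scan only the compact ids array; location data is read
    // only when the token's text or position is actually needed.
    assert(tree.token_ids[1] == .Comma);
    assert(std.mem.eql(u8, tree.tokenSlice(0), "a"));
}

The render hunks above follow the same split: tree.token_ids[...] for id checks, and tree.token_locs[...] or tree.tokenSliceLoc(...) only when the token's text or position is needed.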