run zig fmt on the codebase

Author: Andrew Kelley
Date: 2018-05-30 16:09:11 -04:00
Parent: b082cd4580
Commit: ea58f4a5a9

48 changed files with 140 additions and 140 deletions
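For readers skimming the hunks: in each changed pair below, the first line is the code before formatting and the second is what `zig fmt` produced. Two whitespace rules account for every change shown here: function types gain a space after the `fn` keyword, and `..` in a slice gains surrounding spaces when a bound is an arithmetic expression. A minimal sketch of both rules follows, written in the same pre-0.3 syntax (`&T` pointers) the diff uses; `window` is an illustrative name, not a function from this commit.

// Rule 1: function *types* get a space after `fn` (named fn definitions already have one).
//   before: fillFn: fn(r: &Random, buf: []u8) void,
//   after:
const Random = struct {
    fillFn: fn (r: &Random, buf: []u8) void,
};

// Rule 2: `..` gets surrounding spaces when a slice bound contains arithmetic;
// simple bounds such as `d.buf[0..]` keep the tight form.
//   before: return b[off..off + len];
//   after:
fn window(b: []const u8, off: usize, len: usize) []const u8 {
    return b[off .. off + len];
}

Both forms compile identically; the pass touches whitespace only, which is why the additions and deletions balance exactly at 140 lines each.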

View File

@ -41,7 +41,7 @@ const usage =
const Command = struct {
name: []const u8,
exec: fn(&Allocator, []const []const u8) error!void,
exec: fn (&Allocator, []const []const u8) error!void,
};
pub fn main() !void {
@ -862,7 +862,7 @@ fn cmdRun(allocator: &Allocator, args: []const []const u8) !void {
for (args) |argv, i| {
if (mem.eql(u8, argv, "--")) {
compile_args = args[0..i];
runtime_args = args[i + 1..];
runtime_args = args[i + 1 ..];
break;
}
}

View File

@ -71,7 +71,7 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
try l.ensureCapacity(l.len + 1);
l.len += 1;
mem.copy(T, l.items[n + 1..l.len], l.items[n..l.len - 1]);
mem.copy(T, l.items[n + 1 .. l.len], l.items[n .. l.len - 1]);
l.items[n] = item.*;
}
@ -79,8 +79,8 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
try l.ensureCapacity(l.len + items.len);
l.len += items.len;
mem.copy(T, l.items[n + items.len..l.len], l.items[n..l.len - items.len]);
mem.copy(T, l.items[n..n + items.len], items);
mem.copy(T, l.items[n + items.len .. l.len], l.items[n .. l.len - items.len]);
mem.copy(T, l.items[n .. n + items.len], items);
}
pub fn append(l: &Self, item: &const T) !void {

View File

@ -450,7 +450,7 @@ fn testError(encoded: []const u8, expected_err: error) !void {
fn testOutputTooSmallError(encoded: []const u8) !void {
const standard_decoder_ignore_space = Base64DecoderWithIgnore.init(standard_alphabet_chars, standard_pad_char, " ");
var buffer: [0x100]u8 = undefined;
var decoded = buffer[0..calcDecodedSizeExactUnsafe(encoded, standard_pad_char) - 1];
var decoded = buffer[0 .. calcDecodedSizeExactUnsafe(encoded, standard_pad_char) - 1];
if (standard_decoder_ignore_space.decode(decoded, encoded)) |_| {
return error.ExpectedError;
} else |err| if (err != error.OutputTooSmall) return err;

View File

@ -1912,12 +1912,12 @@ pub const RemoveDirStep = struct {
pub const Step = struct {
name: []const u8,
makeFn: fn(self: &Step) error!void,
makeFn: fn (self: &Step) error!void,
dependencies: ArrayList(&Step),
loop_flag: bool,
done_flag: bool,
pub fn init(name: []const u8, allocator: &Allocator, makeFn: fn(&Step) error!void) Step {
pub fn init(name: []const u8, allocator: &Allocator, makeFn: fn (&Step) error!void) Step {
return Step{
.name = name,
.makeFn = makeFn,

View File

@ -60,7 +60,7 @@ pub const sigset_t = u32;
/// Renamed from `sigaction` to `Sigaction` to avoid conflict with function name.
pub const Sigaction = extern struct {
handler: extern fn(c_int) void,
handler: extern fn (c_int) void,
sa_mask: sigset_t,
sa_flags: c_int,
};

View File

@ -52,7 +52,7 @@ pub extern "c" fn realloc(&c_void, usize) ?&c_void;
pub extern "c" fn free(&c_void) void;
pub extern "c" fn posix_memalign(memptr: &&c_void, alignment: usize, size: usize) c_int;
pub extern "pthread" fn pthread_create(noalias newthread: &pthread_t, noalias attr: ?&const pthread_attr_t, start_routine: extern fn(?&c_void) ?&c_void, noalias arg: ?&c_void) c_int;
pub extern "pthread" fn pthread_create(noalias newthread: &pthread_t, noalias attr: ?&const pthread_attr_t, start_routine: extern fn (?&c_void) ?&c_void, noalias arg: ?&c_void) c_int;
pub extern "pthread" fn pthread_attr_init(attr: &pthread_attr_t) c_int;
pub extern "pthread" fn pthread_attr_setstack(attr: &pthread_attr_t, stackaddr: &c_void, stacksize: usize) c_int;
pub extern "pthread" fn pthread_attr_destroy(attr: &pthread_attr_t) c_int;

View File

@ -105,7 +105,7 @@ fn Blake2s(comptime out_len: usize) type {
// Full middle blocks.
while (off + 64 <= b.len) : (off += 64) {
d.t += 64;
d.round(b[off..off + 64], false);
d.round(b[off .. off + 64], false);
}
// Copy any remainder for next pass.
@ -120,10 +120,10 @@ fn Blake2s(comptime out_len: usize) type {
d.t += d.buf_len;
d.round(d.buf[0..], true);
const rr = d.h[0..out_len / 32];
const rr = d.h[0 .. out_len / 32];
for (rr) |s, j| {
mem.writeInt(out[4 * j..4 * j + 4], s, builtin.Endian.Little);
mem.writeInt(out[4 * j .. 4 * j + 4], s, builtin.Endian.Little);
}
}
@ -134,7 +134,7 @@ fn Blake2s(comptime out_len: usize) type {
var v: [16]u32 = undefined;
for (m) |*r, i| {
r.* = mem.readIntLE(u32, b[4 * i..4 * i + 4]);
r.* = mem.readIntLE(u32, b[4 * i .. 4 * i + 4]);
}
var k: usize = 0;
@ -340,7 +340,7 @@ fn Blake2b(comptime out_len: usize) type {
// Full middle blocks.
while (off + 128 <= b.len) : (off += 128) {
d.t += 128;
d.round(b[off..off + 128], false);
d.round(b[off .. off + 128], false);
}
// Copy any remainder for next pass.
@ -353,10 +353,10 @@ fn Blake2b(comptime out_len: usize) type {
d.t += d.buf_len;
d.round(d.buf[0..], true);
const rr = d.h[0..out_len / 64];
const rr = d.h[0 .. out_len / 64];
for (rr) |s, j| {
mem.writeInt(out[8 * j..8 * j + 8], s, builtin.Endian.Little);
mem.writeInt(out[8 * j .. 8 * j + 8], s, builtin.Endian.Little);
}
}
@ -367,7 +367,7 @@ fn Blake2b(comptime out_len: usize) type {
var v: [16]u64 = undefined;
for (m) |*r, i| {
r.* = mem.readIntLE(u64, b[8 * i..8 * i + 8]);
r.* = mem.readIntLE(u64, b[8 * i .. 8 * i + 8]);
}
var k: usize = 0;

View File

@ -73,7 +73,7 @@ pub const Md5 = struct {
// Full middle blocks.
while (off + 64 <= b.len) : (off += 64) {
d.round(b[off..off + 64]);
d.round(b[off .. off + 64]);
}
// Copy any remainder for next pass.
@ -112,7 +112,7 @@ pub const Md5 = struct {
d.round(d.buf[0..]);
for (d.s) |s, j| {
mem.writeInt(out[4 * j..4 * j + 4], s, builtin.Endian.Little);
mem.writeInt(out[4 * j .. 4 * j + 4], s, builtin.Endian.Little);
}
}

View File

@ -73,7 +73,7 @@ pub const Sha1 = struct {
// Full middle blocks.
while (off + 64 <= b.len) : (off += 64) {
d.round(b[off..off + 64]);
d.round(b[off .. off + 64]);
}
// Copy any remainder for next pass.
@ -111,7 +111,7 @@ pub const Sha1 = struct {
d.round(d.buf[0..]);
for (d.s) |s, j| {
mem.writeInt(out[4 * j..4 * j + 4], s, builtin.Endian.Big);
mem.writeInt(out[4 * j .. 4 * j + 4], s, builtin.Endian.Big);
}
}

View File

@ -126,7 +126,7 @@ fn Sha2_32(comptime params: Sha2Params32) type {
// Full middle blocks.
while (off + 64 <= b.len) : (off += 64) {
d.round(b[off..off + 64]);
d.round(b[off .. off + 64]);
}
// Copy any remainder for next pass.
@ -164,10 +164,10 @@ fn Sha2_32(comptime params: Sha2Params32) type {
d.round(d.buf[0..]);
// May truncate for possible 224 output
const rr = d.s[0..params.out_len / 32];
const rr = d.s[0 .. params.out_len / 32];
for (rr) |s, j| {
mem.writeInt(out[4 * j..4 * j + 4], s, builtin.Endian.Big);
mem.writeInt(out[4 * j .. 4 * j + 4], s, builtin.Endian.Big);
}
}
@ -467,7 +467,7 @@ fn Sha2_64(comptime params: Sha2Params64) type {
// Full middle blocks.
while (off + 128 <= b.len) : (off += 128) {
d.round(b[off..off + 128]);
d.round(b[off .. off + 128]);
}
// Copy any remainder for next pass.
@ -505,10 +505,10 @@ fn Sha2_64(comptime params: Sha2Params64) type {
d.round(d.buf[0..]);
// May truncate for possible 384 output
const rr = d.s[0..params.out_len / 64];
const rr = d.s[0 .. params.out_len / 64];
for (rr) |s, j| {
mem.writeInt(out[8 * j..8 * j + 8], s, builtin.Endian.Big);
mem.writeInt(out[8 * j .. 8 * j + 8], s, builtin.Endian.Big);
}
}

View File

@ -46,7 +46,7 @@ fn Keccak(comptime bits: usize, comptime delim: u8) type {
// absorb
while (len >= rate) {
for (d.s[offset..offset + rate]) |*r, i|
for (d.s[offset .. offset + rate]) |*r, i|
r.* ^= b[ip..][i];
keccak_f(1600, d.s[0..]);
@ -57,7 +57,7 @@ fn Keccak(comptime bits: usize, comptime delim: u8) type {
offset = 0;
}
for (d.s[offset..offset + len]) |*r, i|
for (d.s[offset .. offset + len]) |*r, i|
r.* ^= b[ip..][i];
d.offset = offset + len;
@ -193,7 +193,7 @@ fn keccak_f(comptime F: usize, d: []u8) void {
var c = []const u64{0} ** 5;
for (s) |*r, i| {
r.* = mem.readIntLE(u64, d[8 * i..8 * i + 8]);
r.* = mem.readIntLE(u64, d[8 * i .. 8 * i + 8]);
}
comptime var x: usize = 0;
@ -240,7 +240,7 @@ fn keccak_f(comptime F: usize, d: []u8) void {
}
for (s) |r, i| {
mem.writeInt(d[8 * i..8 * i + 8], r, builtin.Endian.Little);
mem.writeInt(d[8 * i .. 8 * i + 8], r, builtin.Endian.Little);
}
}

View File

@ -14,7 +14,7 @@ pub fn assertEqualHash(comptime Hasher: var, comptime expected: []const u8, inpu
pub fn assertEqual(comptime expected: []const u8, input: []const u8) void {
var expected_bytes: [expected.len / 2]u8 = undefined;
for (expected_bytes) |*r, i| {
r.* = fmt.parseInt(u8, expected[2 * i..2 * i + 2], 16) catch unreachable;
r.* = fmt.parseInt(u8, expected[2 * i .. 2 * i + 2], 16) catch unreachable;
}
debug.assert(mem.eql(u8, expected_bytes, input));

View File

@ -6,7 +6,7 @@ const mem = std.mem;
const posix = std.os.posix;
pub const TcpServer = struct {
handleRequestFn: async<&mem.Allocator> fn(&TcpServer, &const std.net.Address, &const std.os.File) void,
handleRequestFn: async<&mem.Allocator> fn (&TcpServer, &const std.net.Address, &const std.os.File) void,
loop: &Loop,
sockfd: i32,
@ -32,7 +32,7 @@ pub const TcpServer = struct {
};
}
pub fn listen(self: &TcpServer, address: &const std.net.Address, handleRequestFn: async<&mem.Allocator> fn(&TcpServer, &const std.net.Address, &const std.os.File) void) !void {
pub fn listen(self: &TcpServer, address: &const std.net.Address, handleRequestFn: async<&mem.Allocator> fn (&TcpServer, &const std.net.Address, &const std.os.File) void) !void {
self.handleRequestFn = handleRequestFn;
try std.os.posixBind(self.sockfd, &address.os_addr);

View File

@ -60,7 +60,7 @@ pub fn roundToPrecision(float_decimal: &FloatDecimal, precision: usize, mode: Ro
// Re-size the buffer to use the reserved leading byte.
const one_before = @intToPtr(&u8, @ptrToInt(&float_decimal.digits[0]) - 1);
float_decimal.digits = one_before[0..float_decimal.digits.len + 1];
float_decimal.digits = one_before[0 .. float_decimal.digits.len + 1];
float_decimal.digits[0] = '1';
return;
}
@ -84,7 +84,7 @@ pub fn errol3(value: f64, buffer: []u8) FloatDecimal {
const i = tableLowerBound(bits);
if (i < enum3.len and enum3[i] == bits) {
const data = enum3_data[i];
const digits = buffer[1..data.str.len + 1];
const digits = buffer[1 .. data.str.len + 1];
mem.copy(u8, digits, data.str);
return FloatDecimal{
.digits = digits,

View File

@ -11,7 +11,7 @@ const max_int_digits = 65;
/// Renders fmt string with args, calling output with slices of bytes.
/// If `output` returns an error, the error is returned from `format` and
/// `output` is not called again.
pub fn format(context: var, comptime Errors: type, output: fn(@typeOf(context), []const u8) Errors!void, comptime fmt: []const u8, args: ...) Errors!void {
pub fn format(context: var, comptime Errors: type, output: fn (@typeOf(context), []const u8) Errors!void, comptime fmt: []const u8, args: ...) Errors!void {
const State = enum {
Start,
OpenBrace,
@ -268,7 +268,7 @@ pub fn format(context: var, comptime Errors: type, output: fn(@typeOf(context),
}
}
pub fn formatValue(value: var, context: var, comptime Errors: type, output: fn(@typeOf(context), []const u8) Errors!void) Errors!void {
pub fn formatValue(value: var, context: var, comptime Errors: type, output: fn (@typeOf(context), []const u8) Errors!void) Errors!void {
const T = @typeOf(value);
switch (@typeId(T)) {
builtin.TypeId.Int => {
@ -317,11 +317,11 @@ pub fn formatValue(value: var, context: var, comptime Errors: type, output: fn(@
}
}
pub fn formatAsciiChar(c: u8, context: var, comptime Errors: type, output: fn(@typeOf(context), []const u8) Errors!void) Errors!void {
pub fn formatAsciiChar(c: u8, context: var, comptime Errors: type, output: fn (@typeOf(context), []const u8) Errors!void) Errors!void {
return output(context, (&c)[0..1]);
}
pub fn formatBuf(buf: []const u8, width: usize, context: var, comptime Errors: type, output: fn(@typeOf(context), []const u8) Errors!void) Errors!void {
pub fn formatBuf(buf: []const u8, width: usize, context: var, comptime Errors: type, output: fn (@typeOf(context), []const u8) Errors!void) Errors!void {
try output(context, buf);
var leftover_padding = if (width > buf.len) (width - buf.len) else return;
@ -334,7 +334,7 @@ pub fn formatBuf(buf: []const u8, width: usize, context: var, comptime Errors: t
// Print a float in scientific notation to the specified precision. Null uses full precision.
// It should be the case that every full precision, printed value can be re-parsed back to the
// same type unambiguously.
pub fn formatFloatScientific(value: var, maybe_precision: ?usize, context: var, comptime Errors: type, output: fn(@typeOf(context), []const u8) Errors!void) Errors!void {
pub fn formatFloatScientific(value: var, maybe_precision: ?usize, context: var, comptime Errors: type, output: fn (@typeOf(context), []const u8) Errors!void) Errors!void {
var x = f64(value);
// Errol doesn't handle these special cases.
@ -423,7 +423,7 @@ pub fn formatFloatScientific(value: var, maybe_precision: ?usize, context: var,
// Print a float of the format x.yyyyy where the number of y is specified by the precision argument.
// By default floats are printed at full precision (no rounding).
pub fn formatFloatDecimal(value: var, maybe_precision: ?usize, context: var, comptime Errors: type, output: fn(@typeOf(context), []const u8) Errors!void) Errors!void {
pub fn formatFloatDecimal(value: var, maybe_precision: ?usize, context: var, comptime Errors: type, output: fn (@typeOf(context), []const u8) Errors!void) Errors!void {
var x = f64(value);
// Errol doesn't handle these special cases.
@ -512,7 +512,7 @@ pub fn formatFloatDecimal(value: var, maybe_precision: ?usize, context: var, com
// Remaining fractional portion, zero-padding if insufficient.
debug.assert(precision >= printed);
if (num_digits_whole_no_pad + precision - printed < float_decimal.digits.len) {
try output(context, float_decimal.digits[num_digits_whole_no_pad..num_digits_whole_no_pad + precision - printed]);
try output(context, float_decimal.digits[num_digits_whole_no_pad .. num_digits_whole_no_pad + precision - printed]);
return;
} else {
try output(context, float_decimal.digits[num_digits_whole_no_pad..]);
@ -568,7 +568,7 @@ pub fn formatBytes(
comptime radix: usize,
context: var,
comptime Errors: type,
output: fn(@typeOf(context), []const u8) Errors!void,
output: fn (@typeOf(context), []const u8) Errors!void,
) Errors!void {
if (value == 0) {
return output(context, "0B");
@ -604,7 +604,7 @@ pub fn formatInt(
width: usize,
context: var,
comptime Errors: type,
output: fn(@typeOf(context), []const u8) Errors!void,
output: fn (@typeOf(context), []const u8) Errors!void,
) Errors!void {
if (@typeOf(value).is_signed) {
return formatIntSigned(value, base, uppercase, width, context, Errors, output);
@ -613,7 +613,7 @@ pub fn formatInt(
}
}
fn formatIntSigned(value: var, base: u8, uppercase: bool, width: usize, context: var, comptime Errors: type, output: fn(@typeOf(context), []const u8) Errors!void) Errors!void {
fn formatIntSigned(value: var, base: u8, uppercase: bool, width: usize, context: var, comptime Errors: type, output: fn (@typeOf(context), []const u8) Errors!void) Errors!void {
const uint = @IntType(false, @typeOf(value).bit_count);
if (value < 0) {
const minus_sign: u8 = '-';
@ -632,7 +632,7 @@ fn formatIntSigned(value: var, base: u8, uppercase: bool, width: usize, context:
}
}
fn formatIntUnsigned(value: var, base: u8, uppercase: bool, width: usize, context: var, comptime Errors: type, output: fn(@typeOf(context), []const u8) Errors!void) Errors!void {
fn formatIntUnsigned(value: var, base: u8, uppercase: bool, width: usize, context: var, comptime Errors: type, output: fn (@typeOf(context), []const u8) Errors!void) Errors!void {
// max_int_digits accounts for the minus sign. when printing an unsigned
// number we don't need to do that.
var buf: [max_int_digits - 1]u8 = undefined;
@ -661,7 +661,7 @@ fn formatIntUnsigned(value: var, base: u8, uppercase: bool, width: usize, contex
mem.set(u8, buf[0..index], '0');
return output(context, buf);
} else {
const padded_buf = buf[index - padding..];
const padded_buf = buf[index - padding ..];
mem.set(u8, padded_buf[0..padding], '0');
return output(context, padded_buf);
}
@ -760,7 +760,7 @@ fn bufPrintWrite(context: &BufPrintContext, bytes: []const u8) !void {
pub fn bufPrint(buf: []u8, comptime fmt: []const u8, args: ...) ![]u8 {
var context = BufPrintContext{ .remaining = buf };
try format(&context, error{BufferTooSmall}, bufPrintWrite, fmt, args);
return buf[0..buf.len - context.remaining.len];
return buf[0 .. buf.len - context.remaining.len];
}
pub fn allocPrint(allocator: &mem.Allocator, comptime fmt: []const u8, args: ...) ![]u8 {

View File

@ -61,7 +61,7 @@ pub fn Crc32WithPoly(comptime poly: u32) type {
pub fn update(self: &Self, input: []const u8) void {
var i: usize = 0;
while (i + 8 <= input.len) : (i += 8) {
const p = input[i..i + 8];
const p = input[i .. i + 8];
// Unrolling this way gives ~50Mb/s increase
self.crc ^= (u32(p[0]) << 0);

View File

@ -76,7 +76,7 @@ fn SipHash(comptime T: type, comptime c_rounds: usize, comptime d_rounds: usize)
// Full middle blocks.
while (off + 8 <= b.len) : (off += 8) {
d.round(b[off..off + 8]);
d.round(b[off .. off + 8]);
}
// Remainder for next pass.

View File

@ -9,7 +9,7 @@ const builtin = @import("builtin");
const want_modification_safety = builtin.mode != builtin.Mode.ReleaseFast;
const debug_u32 = if (want_modification_safety) u32 else void;
pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn(key: K) u32, comptime eql: fn(a: K, b: K) bool) type {
pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u32, comptime eql: fn (a: K, b: K) bool) type {
return struct {
entries: []Entry,
size: usize,

View File

@ -82,7 +82,7 @@ pub fn InStream(comptime ReadError: type) type {
/// Return the number of bytes read. If the number read is smaller than buf.len, it
/// means the stream reached the end. Reaching the end of a stream is not an error
/// condition.
readFn: fn(self: &Self, buffer: []u8) Error!usize,
readFn: fn (self: &Self, buffer: []u8) Error!usize,
/// Replaces `buffer` contents by reading from the stream until it is finished.
/// If `buffer.len()` would exceed `max_size`, `error.StreamTooLong` is returned and
@ -208,7 +208,7 @@ pub fn OutStream(comptime WriteError: type) type {
const Self = this;
pub const Error = WriteError;
writeFn: fn(self: &Self, bytes: []const u8) Error!void,
writeFn: fn (self: &Self, bytes: []const u8) Error!void,
pub fn print(self: &Self, comptime format: []const u8, args: ...) !void {
return std.fmt.format(self, Error, self.writeFn, format, args);
@ -369,7 +369,7 @@ pub fn BufferedOutStreamCustom(comptime buffer_size: usize, comptime OutStreamEr
while (src_index < bytes.len) {
const dest_space_left = self.buffer.len - self.index;
const copy_amt = math.min(dest_space_left, bytes.len - src_index);
mem.copy(u8, self.buffer[self.index..], bytes[src_index..src_index + copy_amt]);
mem.copy(u8, self.buffer[self.index..], bytes[src_index .. src_index + copy_amt]);
self.index += copy_amt;
assert(self.index <= self.buffer.len);
if (self.index == self.buffer.len) {

View File

@ -41,8 +41,8 @@ test "write a file, read it, then delete it" {
defer allocator.free(contents);
assert(mem.eql(u8, contents[0.."begin".len], "begin"));
assert(mem.eql(u8, contents["begin".len..contents.len - "end".len], data));
assert(mem.eql(u8, contents[contents.len - "end".len..], "end"));
assert(mem.eql(u8, contents["begin".len .. contents.len - "end".len], data));
assert(mem.eql(u8, contents[contents.len - "end".len ..], "end"));
}
try os.deleteFile(allocator, tmp_file_name);
}

View File

@ -77,7 +77,7 @@ pub const Token = struct {
// Slice into the underlying input string.
pub fn slice(self: &const Token, input: []const u8, i: usize) []const u8 {
return input[i + self.offset - self.count..i + self.offset];
return input[i + self.offset - self.count .. i + self.offset];
}
};

View File

@ -13,7 +13,7 @@ pub const Allocator = struct {
/// The returned newly allocated memory is undefined.
/// `alignment` is guaranteed to be >= 1
/// `alignment` is guaranteed to be a power of 2
allocFn: fn(self: &Allocator, byte_count: usize, alignment: u29) Error![]u8,
allocFn: fn (self: &Allocator, byte_count: usize, alignment: u29) Error![]u8,
/// If `new_byte_count > old_mem.len`:
/// * `old_mem.len` is the same as what was returned from allocFn or reallocFn.
@ -26,10 +26,10 @@ pub const Allocator = struct {
/// The returned newly allocated memory is undefined.
/// `alignment` is guaranteed to be >= 1
/// `alignment` is guaranteed to be a power of 2
reallocFn: fn(self: &Allocator, old_mem: []u8, new_byte_count: usize, alignment: u29) Error![]u8,
reallocFn: fn (self: &Allocator, old_mem: []u8, new_byte_count: usize, alignment: u29) Error![]u8,
/// Guaranteed: `old_mem.len` is the same as what was returned from `allocFn` or `reallocFn`
freeFn: fn(self: &Allocator, old_mem: []u8) void,
freeFn: fn (self: &Allocator, old_mem: []u8) void,
fn create(self: &Allocator, comptime T: type) !&T {
if (@sizeOf(T) == 0) return &{};
@ -282,7 +282,7 @@ pub fn lastIndexOf(comptime T: type, haystack: []const T, needle: []const T) ?us
var i: usize = haystack.len - needle.len;
while (true) : (i -= 1) {
if (mem.eql(T, haystack[i..i + needle.len], needle)) return i;
if (mem.eql(T, haystack[i .. i + needle.len], needle)) return i;
if (i == 0) return null;
}
}
@ -294,7 +294,7 @@ pub fn indexOfPos(comptime T: type, haystack: []const T, start_index: usize, nee
var i: usize = start_index;
const end = haystack.len - needle.len;
while (i <= end) : (i += 1) {
if (eql(T, haystack[i..i + needle.len], needle)) return i;
if (eql(T, haystack[i .. i + needle.len], needle)) return i;
}
return null;
}
@ -444,7 +444,7 @@ test "mem.startsWith" {
}
pub fn endsWith(comptime T: type, haystack: []const T, needle: []const T) bool {
return if (needle.len > haystack.len) false else eql(T, haystack[haystack.len - needle.len..], needle);
return if (needle.len > haystack.len) false else eql(T, haystack[haystack.len - needle.len ..], needle);
}
test "mem.endsWith" {

View File

@ -437,7 +437,7 @@ pub fn sigaction(sig: u5, noalias act: &const Sigaction, noalias oact: ?&Sigacti
assert(sig != SIGKILL);
assert(sig != SIGSTOP);
var cact = c.Sigaction{
.handler = @ptrCast(extern fn(c_int) void, act.handler),
.handler = @ptrCast(extern fn (c_int) void, act.handler),
.sa_flags = @bitCast(c_int, act.flags),
.sa_mask = act.mask,
};
@ -448,7 +448,7 @@ pub fn sigaction(sig: u5, noalias act: &const Sigaction, noalias oact: ?&Sigacti
}
if (oact) |old| {
old.* = Sigaction{
.handler = @ptrCast(extern fn(i32) void, coact.handler),
.handler = @ptrCast(extern fn (i32) void, coact.handler),
.flags = @bitCast(u32, coact.sa_flags),
.mask = coact.sa_mask,
};
@ -468,7 +468,7 @@ pub const sockaddr = c.sockaddr;
/// Renamed from `sigaction` to `Sigaction` to avoid conflict with the syscall.
pub const Sigaction = struct {
handler: extern fn(i32) void,
handler: extern fn (i32) void,
mask: sigset_t,
flags: u32,
};

View File

@ -399,7 +399,7 @@ pub fn createNullDelimitedEnvMap(allocator: &Allocator, env_map: &const BufMap)
pub fn freeNullDelimitedEnvMap(allocator: &Allocator, envp_buf: []?&u8) void {
for (envp_buf) |env| {
const env_buf = if (env) |ptr| ptr[0..cstr.len(ptr) + 1] else break;
const env_buf = if (env) |ptr| ptr[0 .. cstr.len(ptr) + 1] else break;
allocator.free(env_buf);
}
allocator.free(envp_buf);
@ -449,7 +449,7 @@ pub fn posixExecve(argv: []const []const u8, env_map: &const BufMap, allocator:
while (it.next()) |search_path| {
mem.copy(u8, path_buf, search_path);
path_buf[search_path.len] = '/';
mem.copy(u8, path_buf[search_path.len + 1..], exe_path);
mem.copy(u8, path_buf[search_path.len + 1 ..], exe_path);
path_buf[search_path.len + exe_path.len + 1] = 0;
err = posix.getErrno(posix.execve(path_buf.ptr, argv_buf.ptr, envp_buf.ptr));
assert(err > 0);
@ -532,7 +532,7 @@ pub fn getEnvMap(allocator: &Allocator) !BufMap {
var end_i: usize = line_i;
while (ptr[end_i] != 0) : (end_i += 1) {}
const value = ptr[line_i + 1..end_i];
const value = ptr[line_i + 1 .. end_i];
try result.set(key, value);
}
@ -549,7 +549,7 @@ pub fn getEnvPosix(key: []const u8) ?[]const u8 {
var end_i: usize = line_i;
while (ptr[end_i] != 0) : (end_i += 1) {}
const this_value = ptr[line_i + 1..end_i];
const this_value = ptr[line_i + 1 .. end_i];
return this_value;
}
@ -691,7 +691,7 @@ pub fn symLinkPosix(allocator: &Allocator, existing_path: []const u8, new_path:
mem.copy(u8, existing_buf, existing_path);
existing_buf[existing_path.len] = 0;
const new_buf = full_buf[existing_path.len + 1..];
const new_buf = full_buf[existing_path.len + 1 ..];
mem.copy(u8, new_buf, new_path);
new_buf[new_path.len] = 0;
@ -735,7 +735,7 @@ pub fn atomicSymLink(allocator: &Allocator, existing_path: []const u8, new_path:
tmp_path[dirname.len] = os.path.sep;
while (true) {
try getRandomBytes(rand_buf[0..]);
b64_fs_encoder.encode(tmp_path[dirname.len + 1..], rand_buf);
b64_fs_encoder.encode(tmp_path[dirname.len + 1 ..], rand_buf);
if (symLink(allocator, existing_path, tmp_path)) {
return rename(allocator, tmp_path, new_path);
@ -914,7 +914,7 @@ pub fn rename(allocator: &Allocator, old_path: []const u8, new_path: []const u8)
mem.copy(u8, old_buf, old_path);
old_buf[old_path.len] = 0;
const new_buf = full_buf[old_path.len + 1..];
const new_buf = full_buf[old_path.len + 1 ..];
mem.copy(u8, new_buf, new_path);
new_buf[new_path.len] = 0;
@ -1141,7 +1141,7 @@ pub fn deleteTree(allocator: &Allocator, full_path: []const u8) DeleteTreeError!
const full_entry_path = full_entry_buf.toSlice();
mem.copy(u8, full_entry_path, full_path);
full_entry_path[full_path.len] = '/';
mem.copy(u8, full_entry_path[full_path.len + 1..], entry.name);
mem.copy(u8, full_entry_path[full_path.len + 1 ..], entry.name);
try deleteTree(allocator, full_entry_path);
}

View File

@ -939,7 +939,7 @@ pub fn sigaction(sig: u6, noalias act: &const Sigaction, noalias oact: ?&Sigacti
.handler = act.handler,
.flags = act.flags | SA_RESTORER,
.mask = undefined,
.restorer = @ptrCast(extern fn() void, restore_rt),
.restorer = @ptrCast(extern fn () void, restore_rt),
};
var ksa_old: k_sigaction = undefined;
@memcpy(@ptrCast(&u8, &ksa.mask), @ptrCast(&const u8, &act.mask), 8);
@ -962,22 +962,22 @@ const all_mask = []usize{@maxValue(usize)};
const app_mask = []usize{0xfffffffc7fffffff};
const k_sigaction = extern struct {
handler: extern fn(i32) void,
handler: extern fn (i32) void,
flags: usize,
restorer: extern fn() void,
restorer: extern fn () void,
mask: [2]u32,
};
/// Renamed from `sigaction` to `Sigaction` to avoid conflict with the syscall.
pub const Sigaction = struct {
handler: extern fn(i32) void,
handler: extern fn (i32) void,
mask: sigset_t,
flags: u32,
};
pub const SIG_ERR = @intToPtr(extern fn(i32) void, @maxValue(usize));
pub const SIG_DFL = @intToPtr(extern fn(i32) void, 0);
pub const SIG_IGN = @intToPtr(extern fn(i32) void, 1);
pub const SIG_ERR = @intToPtr(extern fn (i32) void, @maxValue(usize));
pub const SIG_DFL = @intToPtr(extern fn (i32) void, 0);
pub const SIG_IGN = @intToPtr(extern fn (i32) void, 1);
pub const empty_sigset = []usize{0} ** sigset_t.len;
pub fn raise(sig: i32) usize {

View File

@ -463,7 +463,7 @@ pub fn syscall6(
}
/// This matches the libc clone function.
pub extern fn clone(func: extern fn(arg: usize) u8, stack: usize, flags: usize, arg: usize, ptid: &i32, tls: usize, ctid: &i32) usize;
pub extern fn clone(func: extern fn (arg: usize) u8, stack: usize, flags: usize, arg: usize, ptid: &i32, tls: usize, ctid: &i32) usize;
pub nakedcc fn restore_rt() void {
return asm volatile ("syscall"

View File

@ -793,7 +793,7 @@ pub fn basenamePosix(path: []const u8) []const u8 {
start_index -= 1;
}
return path[start_index + 1..end_index];
return path[start_index + 1 .. end_index];
}
pub fn basenameWindows(path: []const u8) []const u8 {
@ -825,7 +825,7 @@ pub fn basenameWindows(path: []const u8) []const u8 {
start_index -= 1;
}
return path[start_index + 1..end_index];
return path[start_index + 1 .. end_index];
}
test "os.path.basename" {
@ -999,7 +999,7 @@ pub fn relativePosix(allocator: &Allocator, from: []const u8, to: []const u8) ![
}
if (to_rest.len == 0) {
// shave off the trailing slash
return result[0..result_index - 1];
return result[0 .. result_index - 1];
}
mem.copy(u8, result[result_index..], to_rest);

View File

@ -369,7 +369,7 @@ pub const HEAP_CREATE_ENABLE_EXECUTE = 0x00040000;
pub const HEAP_GENERATE_EXCEPTIONS = 0x00000004;
pub const HEAP_NO_SERIALIZE = 0x00000001;
pub const PTHREAD_START_ROUTINE = extern fn(LPVOID) DWORD;
pub const PTHREAD_START_ROUTINE = extern fn (LPVOID) DWORD;
pub const LPTHREAD_START_ROUTINE = PTHREAD_START_ROUTINE;
test "import" {

View File

@ -73,7 +73,7 @@ pub fn windowsIsCygwinPty(handle: windows.HANDLE) bool {
}
const name_info = @ptrCast(&const windows.FILE_NAME_INFO, &name_info_bytes[0]);
const name_bytes = name_info_bytes[size..size + usize(name_info.FileNameLength)];
const name_bytes = name_info_bytes[size .. size + usize(name_info.FileNameLength)];
const name_wide = ([]u16)(name_bytes);
return mem.indexOf(u16, name_wide, []u16{ 'm', 's', 'y', 's', '-' }) != null or
mem.indexOf(u16, name_wide, []u16{ '-', 'p', 't', 'y' }) != null;

View File

@ -153,7 +153,7 @@ pub fn map(v_addr: usize, p_addr: usize, size: usize, writable: bool) bool {
return syscall4(Syscall.map, v_addr, p_addr, size, usize(writable)) != 0;
}
pub fn createThread(function: fn() void) u16 {
pub fn createThread(function: fn () void) u16 {
return u16(syscall1(Syscall.createThread, @ptrToInt(function)));
}

View File

@ -28,7 +28,7 @@ pub const DefaultPrng = Xoroshiro128;
pub const DefaultCsprng = Isaac64;
pub const Random = struct {
fillFn: fn(r: &Random, buf: []u8) void,
fillFn: fn (r: &Random, buf: []u8) void,
/// Read random bytes into the specified buffer until fill.
pub fn bytes(r: &Random, buf: []u8) void {

View File

@ -56,11 +56,11 @@ pub const ZigTable = struct {
f: [257]f64,
// probability density function used as a fallback
pdf: fn(f64) f64,
pdf: fn (f64) f64,
// whether the distribution is symmetric
is_symmetric: bool,
// fallback calculation in the case we are in the 0 block
zero_case: fn(&Random, f64) f64,
zero_case: fn (&Random, f64) f64,
};
// zigNorInit
@ -68,9 +68,9 @@ fn ZigTableGen(
comptime is_symmetric: bool,
comptime r: f64,
comptime v: f64,
comptime f: fn(f64) f64,
comptime f_inv: fn(f64) f64,
comptime zero_case: fn(&Random, f64) f64,
comptime f: fn (f64) f64,
comptime f_inv: fn (f64) f64,
comptime zero_case: fn (&Random, f64) f64,
) ZigTable {
var tables: ZigTable = undefined;

View File

@ -5,7 +5,7 @@ const math = std.math;
const builtin = @import("builtin");
/// Stable in-place sort. O(n) best case, O(pow(n, 2)) worst case. O(1) memory (no allocator required).
pub fn insertionSort(comptime T: type, items: []T, lessThan: fn(lhs: &const T, rhs: &const T) bool) void {
pub fn insertionSort(comptime T: type, items: []T, lessThan: fn (lhs: &const T, rhs: &const T) bool) void {
{
var i: usize = 1;
while (i < items.len) : (i += 1) {
@ -108,7 +108,7 @@ const Pull = struct {
/// Stable in-place sort. O(n) best case, O(n*log(n)) worst case and average case. O(1) memory (no allocator required).
/// Currently implemented as block sort.
pub fn sort(comptime T: type, items: []T, lessThan: fn(lhs: &const T, rhs: &const T) bool) void {
pub fn sort(comptime T: type, items: []T, lessThan: fn (lhs: &const T, rhs: &const T) bool) void {
// Implementation ported from https://github.com/BonzaiThePenguin/WikiSort/blob/master/WikiSort.c
var cache: [512]T = undefined;
@ -257,7 +257,7 @@ pub fn sort(comptime T: type, items: []T, lessThan: fn(lhs: &const T, rhs: &cons
// merge A2 and B2 into the cache
if (lessThan(items[B2.end - 1], items[A2.start])) {
// the two ranges are in reverse order, so copy them in reverse order into the cache
mem.copy(T, cache[A1.length() + B2.length()..], items[A2.start..A2.end]);
mem.copy(T, cache[A1.length() + B2.length() ..], items[A2.start..A2.end]);
mem.copy(T, cache[A1.length()..], items[B2.start..B2.end]);
} else if (lessThan(items[B2.start], items[A2.end - 1])) {
// these two ranges weren't already in order, so merge them into the cache
@ -265,7 +265,7 @@ pub fn sort(comptime T: type, items: []T, lessThan: fn(lhs: &const T, rhs: &cons
} else {
// copy A2 and B2 into the cache in the same order
mem.copy(T, cache[A1.length()..], items[A2.start..A2.end]);
mem.copy(T, cache[A1.length() + A2.length()..], items[B2.start..B2.end]);
mem.copy(T, cache[A1.length() + A2.length() ..], items[B2.start..B2.end]);
}
A2 = Range.init(A2.start, B2.end);
@ -275,7 +275,7 @@ pub fn sort(comptime T: type, items: []T, lessThan: fn(lhs: &const T, rhs: &cons
if (lessThan(cache[B3.end - 1], cache[A3.start])) {
// the two ranges are in reverse order, so copy them in reverse order into the items
mem.copy(T, items[A1.start + A2.length()..], cache[A3.start..A3.end]);
mem.copy(T, items[A1.start + A2.length() ..], cache[A3.start..A3.end]);
mem.copy(T, items[A1.start..], cache[B3.start..B3.end]);
} else if (lessThan(cache[B3.start], cache[A3.end - 1])) {
// these two ranges weren't already in order, so merge them back into the items
@ -283,7 +283,7 @@ pub fn sort(comptime T: type, items: []T, lessThan: fn(lhs: &const T, rhs: &cons
} else {
// copy A3 and B3 into the items in the same order
mem.copy(T, items[A1.start..], cache[A3.start..A3.end]);
mem.copy(T, items[A1.start + A1.length()..], cache[B3.start..B3.end]);
mem.copy(T, items[A1.start + A1.length() ..], cache[B3.start..B3.end]);
}
}
@ -640,7 +640,7 @@ pub fn sort(comptime T: type, items: []T, lessThan: fn(lhs: &const T, rhs: &cons
if (buffer2.length() > 0 or block_size <= cache.len) {
// copy the previous A block into the cache or buffer2, since that's where we need it to be when we go to merge it anyway
if (block_size <= cache.len) {
mem.copy(T, cache[0..], items[blockA.start..blockA.start + block_size]);
mem.copy(T, cache[0..], items[blockA.start .. blockA.start + block_size]);
} else {
blockSwap(T, items, blockA.start, buffer2.start, block_size);
}
@ -651,7 +651,7 @@ pub fn sort(comptime T: type, items: []T, lessThan: fn(lhs: &const T, rhs: &cons
blockSwap(T, items, B_split, blockA.start + block_size - B_remaining, B_remaining);
} else {
// we are unable to use the 'buffer2' trick to speed up the rotation operation since buffer2 doesn't exist, so perform a normal rotation
mem.rotate(T, items[B_split..blockA.start + block_size], blockA.start - B_split);
mem.rotate(T, items[B_split .. blockA.start + block_size], blockA.start - B_split);
}
// update the range for the remaining A blocks, and the range remaining from the B block after it was split
@ -741,7 +741,7 @@ pub fn sort(comptime T: type, items: []T, lessThan: fn(lhs: &const T, rhs: &cons
}
// merge operation without a buffer
fn mergeInPlace(comptime T: type, items: []T, A_arg: &const Range, B_arg: &const Range, lessThan: fn(&const T, &const T) bool) void {
fn mergeInPlace(comptime T: type, items: []T, A_arg: &const Range, B_arg: &const Range, lessThan: fn (&const T, &const T) bool) void {
if (A_arg.length() == 0 or B_arg.length() == 0) return;
// this just repeatedly binary searches into B and rotates A into position.
@ -783,7 +783,7 @@ fn mergeInPlace(comptime T: type, items: []T, A_arg: &const Range, B_arg: &const
}
// merge operation using an internal buffer
fn mergeInternal(comptime T: type, items: []T, A: &const Range, B: &const Range, lessThan: fn(&const T, &const T) bool, buffer: &const Range) void {
fn mergeInternal(comptime T: type, items: []T, A: &const Range, B: &const Range, lessThan: fn (&const T, &const T) bool, buffer: &const Range) void {
// whenever we find a value to add to the final array, swap it with the value that's already in that spot
// when this algorithm is finished, 'buffer' will contain its original contents, but in a different order
var A_count: usize = 0;
@ -819,7 +819,7 @@ fn blockSwap(comptime T: type, items: []T, start1: usize, start2: usize, block_s
// combine a linear search with a binary search to reduce the number of comparisons in situations
// where have some idea as to how many unique values there are and where the next value might be
fn findFirstForward(comptime T: type, items: []T, value: &const T, range: &const Range, lessThan: fn(&const T, &const T) bool, unique: usize) usize {
fn findFirstForward(comptime T: type, items: []T, value: &const T, range: &const Range, lessThan: fn (&const T, &const T) bool, unique: usize) usize {
if (range.length() == 0) return range.start;
const skip = math.max(range.length() / unique, usize(1));
@ -833,7 +833,7 @@ fn findFirstForward(comptime T: type, items: []T, value: &const T, range: &const
return binaryFirst(T, items, value, Range.init(index - skip, index), lessThan);
}
fn findFirstBackward(comptime T: type, items: []T, value: &const T, range: &const Range, lessThan: fn(&const T, &const T) bool, unique: usize) usize {
fn findFirstBackward(comptime T: type, items: []T, value: &const T, range: &const Range, lessThan: fn (&const T, &const T) bool, unique: usize) usize {
if (range.length() == 0) return range.start;
const skip = math.max(range.length() / unique, usize(1));
@ -847,7 +847,7 @@ fn findFirstBackward(comptime T: type, items: []T, value: &const T, range: &cons
return binaryFirst(T, items, value, Range.init(index, index + skip), lessThan);
}
fn findLastForward(comptime T: type, items: []T, value: &const T, range: &const Range, lessThan: fn(&const T, &const T) bool, unique: usize) usize {
fn findLastForward(comptime T: type, items: []T, value: &const T, range: &const Range, lessThan: fn (&const T, &const T) bool, unique: usize) usize {
if (range.length() == 0) return range.start;
const skip = math.max(range.length() / unique, usize(1));
@ -861,7 +861,7 @@ fn findLastForward(comptime T: type, items: []T, value: &const T, range: &const
return binaryLast(T, items, value, Range.init(index - skip, index), lessThan);
}
fn findLastBackward(comptime T: type, items: []T, value: &const T, range: &const Range, lessThan: fn(&const T, &const T) bool, unique: usize) usize {
fn findLastBackward(comptime T: type, items: []T, value: &const T, range: &const Range, lessThan: fn (&const T, &const T) bool, unique: usize) usize {
if (range.length() == 0) return range.start;
const skip = math.max(range.length() / unique, usize(1));
@ -875,7 +875,7 @@ fn findLastBackward(comptime T: type, items: []T, value: &const T, range: &const
return binaryLast(T, items, value, Range.init(index, index + skip), lessThan);
}
fn binaryFirst(comptime T: type, items: []T, value: &const T, range: &const Range, lessThan: fn(&const T, &const T) bool) usize {
fn binaryFirst(comptime T: type, items: []T, value: &const T, range: &const Range, lessThan: fn (&const T, &const T) bool) usize {
var start = range.start;
var end = range.end - 1;
if (range.start >= range.end) return range.end;
@ -893,7 +893,7 @@ fn binaryFirst(comptime T: type, items: []T, value: &const T, range: &const Rang
return start;
}
fn binaryLast(comptime T: type, items: []T, value: &const T, range: &const Range, lessThan: fn(&const T, &const T) bool) usize {
fn binaryLast(comptime T: type, items: []T, value: &const T, range: &const Range, lessThan: fn (&const T, &const T) bool) usize {
var start = range.start;
var end = range.end - 1;
if (range.start >= range.end) return range.end;
@ -911,7 +911,7 @@ fn binaryLast(comptime T: type, items: []T, value: &const T, range: &const Range
return start;
}
fn mergeInto(comptime T: type, from: []T, A: &const Range, B: &const Range, lessThan: fn(&const T, &const T) bool, into: []T) void {
fn mergeInto(comptime T: type, from: []T, A: &const Range, B: &const Range, lessThan: fn (&const T, &const T) bool, into: []T) void {
var A_index: usize = A.start;
var B_index: usize = B.start;
const A_last = A.end;
@ -941,7 +941,7 @@ fn mergeInto(comptime T: type, from: []T, A: &const Range, B: &const Range, less
}
}
fn mergeExternal(comptime T: type, items: []T, A: &const Range, B: &const Range, lessThan: fn(&const T, &const T) bool, cache: []T) void {
fn mergeExternal(comptime T: type, items: []T, A: &const Range, B: &const Range, lessThan: fn (&const T, &const T) bool, cache: []T) void {
// A fits into the cache, so use that instead of the internal buffer
var A_index: usize = 0;
var B_index: usize = B.start;
@ -969,7 +969,7 @@ fn mergeExternal(comptime T: type, items: []T, A: &const Range, B: &const Range,
mem.copy(T, items[insert_index..], cache[A_index..A_last]);
}
fn swap(comptime T: type, items: []T, lessThan: fn(lhs: &const T, rhs: &const T) bool, order: &[8]u8, x: usize, y: usize) void {
fn swap(comptime T: type, items: []T, lessThan: fn (lhs: &const T, rhs: &const T) bool, order: &[8]u8, x: usize, y: usize) void {
if (lessThan(items[y], items[x]) or ((order.*)[x] > (order.*)[y] and !lessThan(items[x], items[y]))) {
mem.swap(T, &items[x], &items[y]);
mem.swap(u8, &(order.*)[x], &(order.*)[y]);
@ -1345,7 +1345,7 @@ fn fuzzTest(rng: &std.rand.Random) void {
}
}
pub fn min(comptime T: type, items: []T, lessThan: fn(lhs: &const T, rhs: &const T) bool) T {
pub fn min(comptime T: type, items: []T, lessThan: fn (lhs: &const T, rhs: &const T) bool) T {
var i: usize = 0;
var smallest = items[0];
for (items[1..]) |item| {
@ -1356,7 +1356,7 @@ pub fn min(comptime T: type, items: []T, lessThan: fn(lhs: &const T, rhs: &const
return smallest;
}
pub fn max(comptime T: type, items: []T, lessThan: fn(lhs: &const T, rhs: &const T) bool) T {
pub fn max(comptime T: type, items: []T, lessThan: fn (lhs: &const T, rhs: &const T) bool) T {
var i: usize = 0;
var biggest = items[0];
for (items[1..]) |item| {

View File

@ -71,7 +71,7 @@ pub fn main() !void {
}
if (mem.indexOfScalar(u8, option_contents, '=')) |name_end| {
const option_name = option_contents[0..name_end];
const option_value = option_contents[name_end + 1..];
const option_value = option_contents[name_end + 1 ..];
if (builder.addUserInputOption(option_name, option_value))
return usageAndErr(&builder, false, try stderr_stream);
} else {

View File

@ -151,7 +151,7 @@ pub fn utf8ValidateSlice(s: []const u8) bool {
return false;
}
if (utf8Decode(s[i..i + cp_len])) |_| {} else |_| {
if (utf8Decode(s[i .. i + cp_len])) |_| {} else |_| {
return false;
}
i += cp_len;
@ -216,7 +216,7 @@ const Utf8Iterator = struct {
const cp_len = utf8ByteSequenceLength(it.bytes[it.i]) catch unreachable;
it.i += cp_len;
return it.bytes[it.i - cp_len..it.i];
return it.bytes[it.i - cp_len .. it.i];
}
pub fn nextCodepoint(it: &Utf8Iterator) ?u32 {

View File

@ -1116,7 +1116,7 @@ pub const Tokenizer = struct {
if (self.index + length > self.buffer.len) {
return u3(self.buffer.len - self.index);
}
const bytes = self.buffer[self.index..self.index + length];
const bytes = self.buffer[self.index .. self.index + length];
switch (length) {
2 => {
const value = std.unicode.utf8Decode2(bytes) catch return length;

View File

@ -18,8 +18,8 @@ fn noop4() align(4) void {}
test "function alignment" {
assert(derp() == 1234);
assert(@typeOf(noop1) == fn() align(1) void);
assert(@typeOf(noop4) == fn() align(4) void);
assert(@typeOf(noop1) == fn () align(1) void);
assert(@typeOf(noop4) == fn () align(4) void);
noop1();
noop4();
}
@ -127,7 +127,7 @@ test "implicitly decreasing fn alignment" {
testImplicitlyDecreaseFnAlign(alignedBig, 5678);
}
fn testImplicitlyDecreaseFnAlign(ptr: fn() align(1) i32, answer: i32) void {
fn testImplicitlyDecreaseFnAlign(ptr: fn () align(1) i32, answer: i32) void {
assert(ptr() == answer);
}
@ -141,10 +141,10 @@ fn alignedBig() align(16) i32 {
test "@alignCast functions" {
assert(fnExpectsOnly1(simple4) == 0x19);
}
fn fnExpectsOnly1(ptr: fn() align(1) i32) i32 {
fn fnExpectsOnly1(ptr: fn () align(1) i32) i32 {
return fnExpects4(@alignCast(4, ptr));
}
fn fnExpects4(ptr: fn() align(4) i32) i32 {
fn fnExpects4(ptr: fn () align(4) i32) i32 {
return ptr();
}
fn simple4() align(4) i32 {

View File

@ -7,12 +7,12 @@ const ZigTable = struct {
x: [257]f64,
f: [257]f64,
pdf: fn(f64) f64,
pdf: fn (f64) f64,
is_symmetric: bool,
zero_case: fn(&Random, f64) f64,
zero_case: fn (&Random, f64) f64,
};
fn ZigTableGen(comptime is_symmetric: bool, comptime r: f64, comptime v: f64, comptime f: fn(f64) f64, comptime f_inv: fn(f64) f64, comptime zero_case: fn(&Random, f64) f64) ZigTable {
fn ZigTableGen(comptime is_symmetric: bool, comptime r: f64, comptime v: f64, comptime f: fn (f64) f64, comptime f_inv: fn (f64) f64, comptime zero_case: fn (&Random, f64) f64) ZigTable {
var tables: ZigTable = undefined;
tables.is_symmetric = is_symmetric;

View File

@ -154,7 +154,7 @@ test "async function with dot syntax" {
test "async fn pointer in a struct field" {
var data: i32 = 1;
const Foo = struct {
bar: async<&std.mem.Allocator> fn(&i32) void,
bar: async<&std.mem.Allocator> fn (&i32) void,
};
var foo = Foo{ .bar = simpleAsyncFn2 };
const p = (async<std.debug.global_allocator> foo.bar(&data)) catch unreachable;

View File

@ -193,7 +193,7 @@ fn entry() void {
foo2(bar2);
}
fn foo2(f: fn() error!void) void {
fn foo2(f: fn () error!void) void {
const x = f();
}

View File

@ -215,7 +215,7 @@ test "inlined block and runtime block phi" {
const CmdFn = struct {
name: []const u8,
func: fn(i32) i32,
func: fn (i32) i32,
};
const cmd_fns = []CmdFn{

View File

@ -66,7 +66,7 @@ test "implicit cast function unreachable return" {
wantsFnWithVoid(fnWithUnreachable);
}
fn wantsFnWithVoid(f: fn() void) void {}
fn wantsFnWithVoid(f: fn () void) void {}
fn fnWithUnreachable() noreturn {
unreachable;

View File

@ -1,6 +1,6 @@
const assert = @import("std").debug.assert;
fn get_foo() fn(&u8) usize {
fn get_foo() fn (&u8) usize {
comptime {
return struct {
fn func(ptr: &u8) usize {

View File

@ -133,7 +133,7 @@ fn getFirstByte(comptime T: type, mem: []const T) u8 {
return getByte(@ptrCast(&const u8, &mem[0]));
}
const foos = []fn(var) bool{
const foos = []fn (var) bool{
foo1,
foo2,
};

View File

@ -511,7 +511,7 @@ test "@typeId" {
assert(@typeId(@typeOf(AUnionEnum.One)) == Tid.Enum);
assert(@typeId(AUnionEnum) == Tid.Union);
assert(@typeId(AUnion) == Tid.Union);
assert(@typeId(fn() void) == Tid.Fn);
assert(@typeId(fn () void) == Tid.Fn);
assert(@typeId(@typeOf(builtin)) == Tid.Namespace);
assert(@typeId(@typeOf(x: {
break :x this;

View File

@ -105,7 +105,7 @@ test "fn call of struct field" {
}
const Foo = struct {
ptr: fn() i32,
ptr: fn () i32,
};
fn aFunc() i32 {
@ -302,7 +302,7 @@ test "packed array 24bits" {
var bytes = []u8{0} ** (@sizeOf(FooArray24Bits) + 1);
bytes[bytes.len - 1] = 0xaa;
const ptr = &([]FooArray24Bits)(bytes[0..bytes.len - 1])[0];
const ptr = &([]FooArray24Bits)(bytes[0 .. bytes.len - 1])[0];
assert(ptr.a == 0);
assert(ptr.b[0].field == 0);
assert(ptr.b[1].field == 0);

View File

@ -196,7 +196,7 @@ fn testStruct() void {
assert(!struct_info.Struct.defs[0].data.Fn.is_extern);
assert(struct_info.Struct.defs[0].data.Fn.lib_name == null);
assert(struct_info.Struct.defs[0].data.Fn.return_type == void);
assert(struct_info.Struct.defs[0].data.Fn.fn_type == fn(&const TestStruct) void);
assert(struct_info.Struct.defs[0].data.Fn.fn_type == fn (&const TestStruct) void);
}
const TestStruct = packed struct {

View File

@ -58,7 +58,7 @@ fn extraFn(extra: u32, args: ...) usize {
return args.len;
}
const foos = []fn(...) bool{
const foos = []fn (...) bool{
foo1,
foo2,
};