make OutStream and InStream take an error set param

Andrew Kelley 2018-02-05 07:38:24 -05:00
parent 893f1088df
commit b7bc259093
3 changed files with 210 additions and 185 deletions
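The pattern this commit introduces: io.InStream and io.OutStream become functions that return a struct parameterized by an error set, and each implementation declares its own `Error` plus an embedded `Stream = OutStream(Error)` field. A minimal sketch of that shape, mirroring the FileOutStream changes below (MyOutStream and its DeviceBusy error are hypothetical names, not part of this commit):

const std = @import("std");
const io = std.io;

const MyOutStream = struct {
    written: usize,
    stream: Stream,

    pub const Error = error { DeviceBusy };
    pub const Stream = io.OutStream(Error);

    pub fn init() MyOutStream {
        return MyOutStream {
            .written = 0,
            .stream = Stream { .writeFn = writeFn },
        };
    }

    fn writeFn(out_stream: &Stream, bytes: []const u8) Error!void {
        const self = @fieldParentPtr(MyOutStream, "stream", out_stream);
        // a real implementation would hand `bytes` to a device here and
        // return one of the errors declared in `Error` on failure
        self.written += bytes.len;
    }
};

A caller holds a MyOutStream and passes `&my_out.stream` wherever an `&io.OutStream(MyOutStream.Error)` (or a `var` parameter) is expected.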

View File

@@ -7174,32 +7174,37 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
return ira->codegen->builtin_types.entry_invalid;
}
if (err_set_type == nullptr) {
err_set_type = cur_type;
if (prev_type->id == TypeTableEntryIdErrorUnion) {
err_set_type = prev_type->data.error_union.err_set_type;
} else {
err_set_type = cur_type;
}
errors = allocate<ErrorTableEntry *>(ira->codegen->errors_by_index.length);
for (uint32_t i = 0; i < err_set_type->data.error_set.err_count; i += 1) {
ErrorTableEntry *error_entry = err_set_type->data.error_set.errors[i];
errors[error_entry->value] = error_entry;
}
continue;
} else {
// check if the cur type error set is a subset
bool prev_is_superset = true;
for (uint32_t i = 0; i < cur_type->data.error_set.err_count; i += 1) {
ErrorTableEntry *contained_error_entry = cur_type->data.error_set.errors[i];
ErrorTableEntry *error_entry = errors[contained_error_entry->value];
if (error_entry == nullptr) {
prev_is_superset = false;
break;
}
}
if (prev_is_superset) {
if (err_set_type == cur_type) {
continue;
}
// not a subset. invent new error set type, union of both of them
err_set_type = get_error_set_union(ira->codegen, errors, err_set_type, cur_type);
assert(errors != nullptr);
}
// check if the cur type error set is a subset
bool prev_is_superset = true;
for (uint32_t i = 0; i < cur_type->data.error_set.err_count; i += 1) {
ErrorTableEntry *contained_error_entry = cur_type->data.error_set.errors[i];
ErrorTableEntry *error_entry = errors[contained_error_entry->value];
if (error_entry == nullptr) {
prev_is_superset = false;
break;
}
}
if (prev_is_superset) {
continue;
}
// not a subset. invent new error set type, union of both of them
err_set_type = get_error_set_union(ira->codegen, errors, err_set_type, cur_type);
assert(errors != nullptr);
continue;
}
if (prev_type->id == TypeTableEntryIdErrorUnion && cur_type->id == TypeTableEntryIdErrorUnion) {
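Source-level sketch (my own illustration, not code from this commit, and it may not have compiled at exactly this point in history) of the behavior the resolver hunk above serves: when peers carry different error sets, the resolved type must use an error set covering both, which is what get_error_set_union builds when neither set contains the other.

const std = @import("std");
const assert = std.debug.assert;

const SetA = error { NotFound };
const SetB = error { Busy };

fn a() SetA!i32 { return 1; }
fn b() SetB!i32 { return error.Busy; }

test "peer resolution unions differing error sets" {
    var use_a = true;
    // the branches have types SetA!i32 and SetB!i32, so the peer-resolved
    // type of `x` needs an error set covering at least SetA || SetB
    const x = if (use_a) a() else b();
    const value = x catch -1;
    assert(value == 1);
}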

View File

@@ -15,12 +15,12 @@ pub const FailingAllocator = @import("failing_allocator.zig").FailingAllocator;
/// TODO atomic/multithread support
var stderr_file: io.File = undefined;
var stderr_file_out_stream: io.FileOutStream = undefined;
var stderr_stream: ?&io.OutStream = null;
var stderr_stream: ?&io.OutStream(io.FileOutStream.Error) = null;
pub fn warn(comptime fmt: []const u8, args: ...) void {
const stderr = getStderrStream() catch return;
stderr.print(fmt, args) catch return;
}
fn getStderrStream() !&io.OutStream {
fn getStderrStream() !&io.OutStream(io.FileOutStream.Error) {
if (stderr_stream) |st| {
return st;
} else {
@@ -140,7 +140,7 @@ const WHITE = "\x1b[37;1m";
const DIM = "\x1b[2m";
const RESET = "\x1b[0m";
pub fn writeStackTrace(stack_trace: &const builtin.StackTrace, out_stream: &io.OutStream, allocator: &mem.Allocator,
pub fn writeStackTrace(stack_trace: &const builtin.StackTrace, out_stream: var, allocator: &mem.Allocator,
debug_info: &ElfStackTrace, tty_color: bool) !void
{
var frame_index: usize = undefined;
@@ -162,7 +162,7 @@ pub fn writeStackTrace(stack_trace: &const builtin.StackTrace, out_stream: &io.O
}
}
pub fn writeCurrentStackTrace(out_stream: &io.OutStream, allocator: &mem.Allocator,
pub fn writeCurrentStackTrace(out_stream: var, allocator: &mem.Allocator,
debug_info: &ElfStackTrace, tty_color: bool, ignore_frame_count: usize) !void
{
var ignored_count: usize = 0;
@@ -179,7 +179,7 @@ pub fn writeCurrentStackTrace(out_stream: &io.OutStream, allocator: &mem.Allocat
}
}
fn printSourceAtAddress(debug_info: &ElfStackTrace, out_stream: &io.OutStream, address: usize) !void {
fn printSourceAtAddress(debug_info: &ElfStackTrace, out_stream: var, address: usize) !void {
if (builtin.os == builtin.Os.windows) {
return error.UnsupportedDebugInfo;
}
@@ -532,7 +532,7 @@ const LineNumberProgram = struct {
}
};
fn readStringRaw(allocator: &mem.Allocator, in_stream: &io.InStream) ![]u8 {
fn readStringRaw(allocator: &mem.Allocator, in_stream: var) ![]u8 {
var buf = ArrayList(u8).init(allocator);
while (true) {
const byte = try in_stream.readByte();
@@ -549,54 +549,62 @@ fn getString(st: &ElfStackTrace, offset: u64) ![]u8 {
return st.readString();
}
fn readAllocBytes(allocator: &mem.Allocator, in_stream: &io.InStream, size: usize) ![]u8 {
fn readAllocBytes(allocator: &mem.Allocator, in_stream: var, size: usize) ![]u8 {
const buf = try global_allocator.alloc(u8, size);
errdefer global_allocator.free(buf);
if ((try in_stream.read(buf)) < size) return error.EndOfFile;
return buf;
}
fn parseFormValueBlockLen(allocator: &mem.Allocator, in_stream: &io.InStream, size: usize) !FormValue {
fn parseFormValueBlockLen(allocator: &mem.Allocator, in_stream: var, size: usize) !FormValue {
const buf = try readAllocBytes(allocator, in_stream, size);
return FormValue { .Block = buf };
}
fn parseFormValueBlock(allocator: &mem.Allocator, in_stream: &io.InStream, size: usize) !FormValue {
fn parseFormValueBlock(allocator: &mem.Allocator, in_stream: var, size: usize) !FormValue {
const block_len = try in_stream.readVarInt(builtin.Endian.Little, usize, size);
return parseFormValueBlockLen(allocator, in_stream, block_len);
}
fn parseFormValueConstant(allocator: &mem.Allocator, in_stream: &io.InStream, signed: bool, size: usize) !FormValue {
fn parseFormValueConstant(allocator: &mem.Allocator, in_stream: var, signed: bool, size: usize) !FormValue {
return FormValue { .Const = Constant {
.signed = signed,
.payload = try readAllocBytes(allocator, in_stream, size),
}};
}
fn parseFormValueDwarfOffsetSize(in_stream: &io.InStream, is_64: bool) !u64 {
fn parseFormValueDwarfOffsetSize(in_stream: var, is_64: bool) !u64 {
return if (is_64) try in_stream.readIntLe(u64)
else u64(try in_stream.readIntLe(u32)) ;
}
fn parseFormValueTargetAddrSize(in_stream: &io.InStream) !u64 {
fn parseFormValueTargetAddrSize(in_stream: var) !u64 {
return if (@sizeOf(usize) == 4) u64(try in_stream.readIntLe(u32))
else if (@sizeOf(usize) == 8) try in_stream.readIntLe(u64)
else unreachable;
}
fn parseFormValueRefLen(allocator: &mem.Allocator, in_stream: &io.InStream, size: usize) !FormValue {
fn parseFormValueRefLen(allocator: &mem.Allocator, in_stream: var, size: usize) !FormValue {
const buf = try readAllocBytes(allocator, in_stream, size);
return FormValue { .Ref = buf };
}
fn parseFormValueRef(allocator: &mem.Allocator, in_stream: &io.InStream, comptime T: type) !FormValue {
fn parseFormValueRef(allocator: &mem.Allocator, in_stream: var, comptime T: type) !FormValue {
const block_len = try in_stream.readIntLe(T);
return parseFormValueRefLen(allocator, in_stream, block_len);
}
const ParseFormValueError = error {};
const ParseFormValueError = error {
EndOfStream,
Io,
BadFd,
Unexpected,
InvalidDebugInfo,
EndOfFile,
OutOfMemory,
};
fn parseFormValue(allocator: &mem.Allocator, in_stream: &io.InStream, form_id: u64, is_64: bool)
fn parseFormValue(allocator: &mem.Allocator, in_stream: var, form_id: u64, is_64: bool)
ParseFormValueError!FormValue
{
return switch (form_id) {
@@ -739,7 +747,7 @@ fn getLineNumberInfo(st: &ElfStackTrace, compile_unit: &const CompileUnit, targe
try in_file.seekTo(this_offset);
var is_64: bool = undefined;
const unit_length = try readInitialLength(in_stream, &is_64);
const unit_length = try readInitialLength(@typeOf(in_stream.readFn).ReturnType.ErrorSet, in_stream, &is_64);
if (unit_length == 0)
return error.MissingDebugInfo;
const next_offset = unit_length + (if (is_64) usize(12) else usize(4));
@@ -914,7 +922,7 @@ fn scanAllCompileUnits(st: &ElfStackTrace) !void {
try st.self_exe_file.seekTo(this_unit_offset);
var is_64: bool = undefined;
const unit_length = try readInitialLength(in_stream, &is_64);
const unit_length = try readInitialLength(@typeOf(in_stream.readFn).ReturnType.ErrorSet, in_stream, &is_64);
if (unit_length == 0)
return;
const next_offset = unit_length + (if (is_64) usize(12) else usize(4));
@@ -1014,7 +1022,7 @@ fn findCompileUnit(st: &ElfStackTrace, target_address: u64) !&const CompileUnit
return error.MissingDebugInfo;
}
fn readInitialLength(in_stream: &io.InStream, is_64: &bool) !u64 {
fn readInitialLength(comptime E: type, in_stream: &io.InStream(E), is_64: &bool) !u64 {
const first_32_bits = try in_stream.readIntLe(u32);
*is_64 = (first_32_bits == 0xffffffff);
if (*is_64) {
@@ -1025,7 +1033,7 @@ fn readInitialLength(in_stream: &io.InStream, is_64: &bool) !u64 {
}
}
fn readULeb128(in_stream: &io.InStream) !u64 {
fn readULeb128(in_stream: var) !u64 {
var result: u64 = 0;
var shift: usize = 0;
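Because OutStream is now a family of types rather than a single struct, the std/debug helpers above switch their stream parameters to `var`, i.e. compile-time duck typing. A tiny sketch of that style (`writeGreeting` is an illustrative name, not from the commit):

// accepts &FileOutStream.Stream, a buffered stream, or any other value
// that exposes a compatible `write` method
fn writeGreeting(out_stream: var) !void {
    try out_stream.write("hello from a duck-typed stream\n");
}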

View File

@@ -61,18 +61,21 @@ pub fn getStdIn() GetStdIoErrs!File {
/// Implementation of InStream trait for File
pub const FileInStream = struct {
file: &File,
stream: InStream,
stream: Stream,
pub const Error = @typeOf(File.read).ReturnType.ErrorSet;
pub const Stream = InStream(Error);
pub fn init(file: &File) FileInStream {
return FileInStream {
.file = file,
.stream = InStream {
.stream = Stream {
.readFn = readFn,
},
};
}
fn readFn(in_stream: &InStream, buffer: []u8) !usize {
fn readFn(in_stream: &Stream, buffer: []u8) Error!usize {
const self = @fieldParentPtr(FileInStream, "stream", in_stream);
return self.file.read(buffer);
}
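A hedged usage sketch for the read side (`readHeaderLen` is illustrative, not part of the diff): construct the implementation struct and read through its embedded, error-set-typed stream.

const std = @import("std");
const io = std.io;

fn readHeaderLen(file: &io.File) !u32 {
    var file_in_stream = io.FileInStream.init(file);
    // `in` has type &io.InStream(io.FileInStream.Error)
    const in = &file_in_stream.stream;
    return try in.readIntLe(u32);
}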
@@ -81,18 +84,21 @@ pub const FileInStream = struct {
/// Implementation of OutStream trait for File
pub const FileOutStream = struct {
file: &File,
stream: OutStream,
stream: Stream,
pub const Error = File.WriteError;
pub const Stream = OutStream(Error);
pub fn init(file: &File) FileOutStream {
return FileOutStream {
.file = file,
.stream = OutStream {
.stream = Stream {
.writeFn = writeFn,
},
};
}
fn writeFn(out_stream: &OutStream, bytes: []const u8) !void {
fn writeFn(out_stream: &Stream, bytes: []const u8) !void {
const self = @fieldParentPtr(FileOutStream, "stream", out_stream);
return self.file.write(bytes);
}
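And the corresponding write-side sketch (`demoPrint` is illustrative, not part of the diff), the same shape std/debug uses for stderr_file_out_stream above:

const std = @import("std");
const io = std.io;

fn demoPrint(file: &io.File) !void {
    var file_out_stream = io.FileOutStream.init(file);
    // `out` has type &io.OutStream(io.FileOutStream.Error)
    const out = &file_out_stream.stream;
    try out.print("exit code: {}\n", i32(42));
}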
@@ -298,6 +304,8 @@ pub const File = struct {
}
}
pub const ReadError = error {};
pub fn read(self: &File, buffer: []u8) !usize {
if (is_posix) {
var index: usize = 0;
@@ -340,7 +348,7 @@
}
}
const WriteError = os.WindowsWriteError || os.PosixWriteError;
pub const WriteError = os.WindowsWriteError || os.PosixWriteError;
fn write(self: &File, bytes: []const u8) WriteError!void {
if (is_posix) {
@@ -353,161 +361,165 @@
}
};
pub const InStream = struct {
// TODO allow specifying the error set
/// Return the number of bytes read. If the number read is smaller than buf.len, it
/// means the stream reached the end. Reaching the end of a stream is not an error
/// condition.
readFn: fn(self: &InStream, buffer: []u8) error!usize,
pub fn InStream(comptime Error: type) type {
return struct {
const Self = this;
/// Replaces `buffer` contents by reading from the stream until it is finished.
/// If `buffer.len()` would exceed `max_size`, `error.StreamTooLong` is returned and
/// the contents read from the stream are lost.
pub fn readAllBuffer(self: &InStream, buffer: &Buffer, max_size: usize) !void {
try buffer.resize(0);
/// Return the number of bytes read. If the number read is smaller than buf.len, it
/// means the stream reached the end. Reaching the end of a stream is not an error
/// condition.
readFn: fn(self: &Self, buffer: []u8) Error!usize,
var actual_buf_len: usize = 0;
while (true) {
const dest_slice = buffer.toSlice()[actual_buf_len..];
const bytes_read = try self.readFn(self, dest_slice);
actual_buf_len += bytes_read;
/// Replaces `buffer` contents by reading from the stream until it is finished.
/// If `buffer.len()` would exceed `max_size`, `error.StreamTooLong` is returned and
/// the contents read from the stream are lost.
pub fn readAllBuffer(self: &Self, buffer: &Buffer, max_size: usize) !void {
try buffer.resize(0);
if (bytes_read != dest_slice.len) {
buffer.shrink(actual_buf_len);
return;
var actual_buf_len: usize = 0;
while (true) {
const dest_slice = buffer.toSlice()[actual_buf_len..];
const bytes_read = try self.readFn(self, dest_slice);
actual_buf_len += bytes_read;
if (bytes_read != dest_slice.len) {
buffer.shrink(actual_buf_len);
return;
}
const new_buf_size = math.min(max_size, actual_buf_len + os.page_size);
if (new_buf_size == actual_buf_len)
return error.StreamTooLong;
try buffer.resize(new_buf_size);
}
const new_buf_size = math.min(max_size, actual_buf_len + os.page_size);
if (new_buf_size == actual_buf_len)
return error.StreamTooLong;
try buffer.resize(new_buf_size);
}
}
/// Allocates enough memory to hold all the contents of the stream. If the allocated
/// memory would be greater than `max_size`, returns `error.StreamTooLong`.
/// Caller owns returned memory.
/// If this function returns an error, the contents from the stream read so far are lost.
pub fn readAllAlloc(self: &InStream, allocator: &mem.Allocator, max_size: usize) ![]u8 {
var buf = Buffer.initNull(allocator);
defer buf.deinit();
/// Allocates enough memory to hold all the contents of the stream. If the allocated
/// memory would be greater than `max_size`, returns `error.StreamTooLong`.
/// Caller owns returned memory.
/// If this function returns an error, the contents from the stream read so far are lost.
pub fn readAllAlloc(self: &Self, allocator: &mem.Allocator, max_size: usize) ![]u8 {
var buf = Buffer.initNull(allocator);
defer buf.deinit();
try self.readAllBuffer(&buf, max_size);
return buf.toOwnedSlice();
}
try self.readAllBuffer(&buf, max_size);
return buf.toOwnedSlice();
}
/// Replaces `buffer` contents by reading from the stream until `delimiter` is found.
/// Does not include the delimiter in the result.
/// If `buffer.len()` would exceed `max_size`, `error.StreamTooLong` is returned and the contents
/// read from the stream so far are lost.
pub fn readUntilDelimiterBuffer(self: &InStream, buffer: &Buffer, delimiter: u8, max_size: usize) !void {
try buf.resize(0);
/// Replaces `buffer` contents by reading from the stream until `delimiter` is found.
/// Does not include the delimiter in the result.
/// If `buffer.len()` would exceed `max_size`, `error.StreamTooLong` is returned and the contents
/// read from the stream so far are lost.
pub fn readUntilDelimiterBuffer(self: &Self, buffer: &Buffer, delimiter: u8, max_size: usize) !void {
try buf.resize(0);
while (true) {
var byte: u8 = try self.readByte();
while (true) {
var byte: u8 = try self.readByte();
if (byte == delimiter) {
return;
if (byte == delimiter) {
return;
}
if (buf.len() == max_size) {
return error.StreamTooLong;
}
try buf.appendByte(byte);
}
}
if (buf.len() == max_size) {
return error.StreamTooLong;
/// Allocates enough memory to read until `delimiter`. If the allocated
/// memory would be greater than `max_size`, returns `error.StreamTooLong`.
/// Caller owns returned memory.
/// If this function returns an error, the contents from the stream read so far are lost.
pub fn readUntilDelimiterAlloc(self: &Self, allocator: &mem.Allocator,
delimiter: u8, max_size: usize) ![]u8
{
var buf = Buffer.initNull(allocator);
defer buf.deinit();
try self.readUntilDelimiterBuffer(self, &buf, delimiter, max_size);
return buf.toOwnedSlice();
}
/// Returns the number of bytes read. If the number read is smaller than buf.len, it
/// means the stream reached the end. Reaching the end of a stream is not an error
/// condition.
pub fn read(self: &Self, buffer: []u8) !usize {
return self.readFn(self, buffer);
}
/// Same as `read` but end of stream returns `error.EndOfStream`.
pub fn readNoEof(self: &Self, buf: []u8) !void {
const amt_read = try self.read(buf);
if (amt_read < buf.len) return error.EndOfStream;
}
/// Reads 1 byte from the stream or returns `error.EndOfStream`.
pub fn readByte(self: &Self) !u8 {
var result: [1]u8 = undefined;
try self.readNoEof(result[0..]);
return result[0];
}
/// Same as `readByte` except the returned byte is signed.
pub fn readByteSigned(self: &Self) !i8 {
return @bitCast(i8, try self.readByte());
}
pub fn readIntLe(self: &Self, comptime T: type) !T {
return self.readInt(builtin.Endian.Little, T);
}
pub fn readIntBe(self: &Self, comptime T: type) !T {
return self.readInt(builtin.Endian.Big, T);
}
pub fn readInt(self: &Self, endian: builtin.Endian, comptime T: type) !T {
var bytes: [@sizeOf(T)]u8 = undefined;
try self.readNoEof(bytes[0..]);
return mem.readInt(bytes, T, endian);
}
pub fn readVarInt(self: &Self, endian: builtin.Endian, comptime T: type, size: usize) !T {
assert(size <= @sizeOf(T));
assert(size <= 8);
var input_buf: [8]u8 = undefined;
const input_slice = input_buf[0..size];
try self.readNoEof(input_slice);
return mem.readInt(input_slice, T, endian);
}
};
}
pub fn OutStream(comptime Error: type) type {
return struct {
const Self = this;
writeFn: fn(self: &Self, bytes: []const u8) Error!void,
pub fn print(self: &Self, comptime format: []const u8, args: ...) !void {
return std.fmt.format(self, error, self.writeFn, format, args);
}
pub fn write(self: &Self, bytes: []const u8) !void {
return self.writeFn(self, bytes);
}
pub fn writeByte(self: &Self, byte: u8) !void {
const slice = (&byte)[0..1];
return self.writeFn(self, slice);
}
pub fn writeByteNTimes(self: &Self, byte: u8, n: usize) !void {
const slice = (&byte)[0..1];
var i: usize = 0;
while (i < n) : (i += 1) {
try self.writeFn(self, slice);
}
try buf.appendByte(byte);
}
}
/// Allocates enough memory to read until `delimiter`. If the allocated
/// memory would be greater than `max_size`, returns `error.StreamTooLong`.
/// Caller owns returned memory.
/// If this function returns an error, the contents from the stream read so far are lost.
pub fn readUntilDelimiterAlloc(self: &InStream, allocator: &mem.Allocator,
delimiter: u8, max_size: usize) ![]u8
{
var buf = Buffer.initNull(allocator);
defer buf.deinit();
try self.readUntilDelimiterBuffer(self, &buf, delimiter, max_size);
return buf.toOwnedSlice();
}
/// Returns the number of bytes read. If the number read is smaller than buf.len, it
/// means the stream reached the end. Reaching the end of a stream is not an error
/// condition.
pub fn read(self: &InStream, buffer: []u8) !usize {
return self.readFn(self, buffer);
}
/// Same as `read` but end of stream returns `error.EndOfStream`.
pub fn readNoEof(self: &InStream, buf: []u8) !void {
const amt_read = try self.read(buf);
if (amt_read < buf.len) return error.EndOfStream;
}
/// Reads 1 byte from the stream or returns `error.EndOfStream`.
pub fn readByte(self: &InStream) !u8 {
var result: [1]u8 = undefined;
try self.readNoEof(result[0..]);
return result[0];
}
/// Same as `readByte` except the returned byte is signed.
pub fn readByteSigned(self: &InStream) !i8 {
return @bitCast(i8, try self.readByte());
}
pub fn readIntLe(self: &InStream, comptime T: type) !T {
return self.readInt(builtin.Endian.Little, T);
}
pub fn readIntBe(self: &InStream, comptime T: type) !T {
return self.readInt(builtin.Endian.Big, T);
}
pub fn readInt(self: &InStream, endian: builtin.Endian, comptime T: type) !T {
var bytes: [@sizeOf(T)]u8 = undefined;
try self.readNoEof(bytes[0..]);
return mem.readInt(bytes, T, endian);
}
pub fn readVarInt(self: &InStream, endian: builtin.Endian, comptime T: type, size: usize) !T {
assert(size <= @sizeOf(T));
assert(size <= 8);
var input_buf: [8]u8 = undefined;
const input_slice = input_buf[0..size];
try self.readNoEof(input_slice);
return mem.readInt(input_slice, T, endian);
}
};
pub const OutStream = struct {
// TODO allow specifying the error set
writeFn: fn(self: &OutStream, bytes: []const u8) error!void,
pub fn print(self: &OutStream, comptime format: []const u8, args: ...) !void {
return std.fmt.format(self, error, self.writeFn, format, args);
}
pub fn write(self: &OutStream, bytes: []const u8) !void {
return self.writeFn(self, bytes);
}
pub fn writeByte(self: &OutStream, byte: u8) !void {
const slice = (&byte)[0..1];
return self.writeFn(self, slice);
}
pub fn writeByteNTimes(self: &OutStream, byte: u8, n: usize) !void {
const slice = (&byte)[0..1];
var i: usize = 0;
while (i < n) : (i += 1) {
try self.writeFn(self, slice);
}
}
};
};
}
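When a helper cannot take a `var` parameter but still needs to name the stream type, the error set can be threaded through as a comptime parameter, as readInitialLength in std/debug does above. A sketch of that pattern (`readPairLe` is illustrative, not from the commit):

const io = @import("std").io;

fn readPairLe(comptime E: type, in_stream: &io.InStream(E)) ![2]u32 {
    const a = try in_stream.readIntLe(u32);
    const b = try in_stream.readIntLe(u32);
    return []u32 { a, b };
}

// a call site can recover E from the stream's own readFn, e.g.
// readPairLe(@typeOf(in_stream.readFn).ReturnType.ErrorSet, in_stream)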
/// `path` may need to be copied in memory to add a null terminating byte. In this case
/// a fixed size buffer of size `std.os.max_noalloc_path_len` is an attempted solution. If the fixed