bitfields support for array of non-store-aligned packed structs
commit d794549985
parent cf5108f222
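For context, a minimal sketch of the case this commit enables: a packed struct such as Foo24Bits fills exactly 24 bits (3 bytes), but LLVM's store size for i24 is 4 bytes, so an array of such structs cannot be lowered as a plain LLVM array type and element pointers have to be computed as byte offsets into the packed data. The snippet below mirrors the new packedArray24Bits test further down and uses the same pre-0.1 Zig style; the u24/Foo24Bits definitions, the assert import, and the checkSizes name are illustrative assumptions, not part of this diff.

// Assumed helpers, mirroring the existing test files (not part of this diff).
const assert = @import("std").debug.assert;
const u24 = @intType(false, 24);

// 24 bits of payload: @sizeOf == 3 bytes, but LLVM stores an i24 in 4 bytes,
// so this element type is byte-aligned yet not store-aligned.
const Foo24Bits = packed struct {
    field: u24,
};

const FooArray24Bits = packed struct {
    a: u16,
    b: [2]Foo24Bits, // array of non-store-aligned packed structs
    c: u16,
};

fn checkSizes() {
    @setFnTest(this);

    comptime {
        // Each element occupies exactly 3 bytes in the packed layout, so the
        // compiler must index b with byte offsets rather than an array GEP.
        assert(@sizeOf(Foo24Bits) == 3);
        assert(@sizeOf(FooArray24Bits) == 2 + 2 * 3 + 2);
    }
}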
@@ -163,6 +163,7 @@ static TypeTableEntry *new_container_type_entry(TypeTableEntryId id, AstNode *so
 }


+// TODO no reason to limit to 8/16/32/64
 static size_t bits_needed_for_unsigned(uint64_t x) {
     if (x <= UINT8_MAX) {
         return 8;
@@ -262,6 +263,14 @@ uint64_t type_size(CodeGen *g, TypeTableEntry *type_entry) {
     if (canon_type->id == TypeTableEntryIdStruct && canon_type->data.structure.layout == ContainerLayoutPacked) {
         uint64_t size_in_bits = type_size_bits(g, type_entry);
         return (size_in_bits + 7) / 8;
+    } else if (canon_type->id == TypeTableEntryIdArray) {
+        TypeTableEntry *canon_child_type = get_underlying_type(canon_type->data.array.child_type);
+        if (canon_child_type->id == TypeTableEntryIdStruct &&
+            canon_child_type->data.structure.layout == ContainerLayoutPacked)
+        {
+            uint64_t size_in_bits = type_size_bits(g, type_entry);
+            return (size_in_bits + 7) / 8;
+        }
     }

     return LLVMStoreSizeOfType(g->target_data_ref, type_entry->type_ref);
@@ -280,6 +289,13 @@ uint64_t type_size_bits(CodeGen *g, TypeTableEntry *type_entry) {
             result += type_size_bits(g, canon_type->data.structure.fields[i].type_entry);
         }
         return result;
+    } else if (canon_type->id == TypeTableEntryIdArray) {
+        TypeTableEntry *canon_child_type = get_underlying_type(canon_type->data.array.child_type);
+        if (canon_child_type->id == TypeTableEntryIdStruct &&
+            canon_child_type->data.structure.layout == ContainerLayoutPacked)
+        {
+            return canon_type->data.array.len * type_size_bits(g, canon_child_type);
+        }
     }

     return LLVMSizeOfTypeInBits(g->target_data_ref, canon_type->type_ref);
@@ -537,14 +553,6 @@ TypeTableEntry *get_array_type(CodeGen *g, TypeTableEntry *child_type, uint64_t

     ensure_complete_type(g, child_type);

-    TypeTableEntry *canon_child_type = get_underlying_type(child_type);
-    if (canon_child_type->id == TypeTableEntryIdStruct &&
-        canon_child_type->data.structure.layout == ContainerLayoutPacked &&
-        type_size_bits(g, canon_child_type) != 8 * type_size(g, canon_child_type))
-    {
-        zig_panic("TODO array of packed struct with unaligned size");
-    }
-
     TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdArray);
     entry->zero_bits = (array_size == 0) || child_type->zero_bits;

@@ -1459,15 +1467,16 @@ static void resolve_struct_type(CodeGen *g, TypeTableEntry *struct_type) {
                 type_struct_field->packed_bits_offset = packed_bits_offset - first_packed_bits_offset_misalign;
                 type_struct_field->unaligned_bit_count = field_size_in_bits;

-                if (next_packed_bits_offset % 8 == 0) {
-                    // next field recovers byte alignment
-                    size_t full_bit_count = next_packed_bits_offset - first_packed_bits_offset_misalign;
-                    element_types[gen_field_index] = LLVMIntType(full_bit_count);
+                size_t full_bit_count = next_packed_bits_offset - first_packed_bits_offset_misalign;
+                LLVMTypeRef int_type_ref = LLVMIntType(full_bit_count);
+                if (8 * LLVMStoreSizeOfType(g->target_data_ref, int_type_ref) == full_bit_count) {
+                    // next field recovers store alignment
+                    element_types[gen_field_index] = int_type_ref;
                     gen_field_index += 1;

                     first_packed_bits_offset_misalign = SIZE_MAX;
                 }
-            } else if (next_packed_bits_offset % 8 != 0) {
+            } else if (8 * LLVMStoreSizeOfType(g->target_data_ref, field_type->type_ref) != field_size_in_bits) {
                 first_packed_bits_offset_misalign = packed_bits_offset;
                 type_struct_field->packed_bits_offset = 0;
                 type_struct_field->unaligned_bit_count = field_size_in_bits;
@@ -1489,7 +1498,9 @@ static void resolve_struct_type(CodeGen *g, TypeTableEntry *struct_type) {
         }
         if (first_packed_bits_offset_misalign != SIZE_MAX) {
             size_t full_bit_count = packed_bits_offset - first_packed_bits_offset_misalign;
-            element_types[gen_field_index] = LLVMIntType(full_bit_count);
+            LLVMTypeRef int_type_ref = LLVMIntType(full_bit_count);
+            size_t store_bit_count = 8 * LLVMStoreSizeOfType(g->target_data_ref, int_type_ref);
+            element_types[gen_field_index] = LLVMIntType(store_bit_count);
             gen_field_index += 1;
         }

@@ -3620,33 +3631,22 @@ bool const_values_equal(ConstExprValue *a, ConstExprValue *b) {
     zig_unreachable();
 }

-static uint64_t max_unsigned_val(TypeTableEntry *type_entry) {
+uint64_t max_unsigned_val(TypeTableEntry *type_entry) {
     assert(type_entry->id == TypeTableEntryIdInt);
     if (type_entry->data.integral.bit_count == 64) {
         return UINT64_MAX;
-    } else if (type_entry->data.integral.bit_count == 32) {
-        return UINT32_MAX;
-    } else if (type_entry->data.integral.bit_count == 16) {
-        return UINT16_MAX;
-    } else if (type_entry->data.integral.bit_count == 8) {
-        return UINT8_MAX;
     } else {
-        zig_unreachable();
+        return (((uint64_t)1) << type_entry->data.integral.bit_count) - 1;
     }
 }

 static int64_t max_signed_val(TypeTableEntry *type_entry) {
     assert(type_entry->id == TypeTableEntryIdInt);
+
     if (type_entry->data.integral.bit_count == 64) {
         return INT64_MAX;
-    } else if (type_entry->data.integral.bit_count == 32) {
-        return INT32_MAX;
-    } else if (type_entry->data.integral.bit_count == 16) {
-        return INT16_MAX;
-    } else if (type_entry->data.integral.bit_count == 8) {
-        return INT8_MAX;
     } else {
-        zig_unreachable();
+        return (((uint64_t)1) << (type_entry->data.integral.bit_count - 1)) - 1;
     }
 }

@@ -3654,14 +3654,8 @@ int64_t min_signed_val(TypeTableEntry *type_entry) {
     assert(type_entry->id == TypeTableEntryIdInt);
     if (type_entry->data.integral.bit_count == 64) {
         return INT64_MIN;
-    } else if (type_entry->data.integral.bit_count == 32) {
-        return INT32_MIN;
-    } else if (type_entry->data.integral.bit_count == 16) {
-        return INT16_MIN;
-    } else if (type_entry->data.integral.bit_count == 8) {
-        return INT8_MIN;
     } else {
-        zig_unreachable();
+        return -((int64_t)(((uint64_t)1) << (type_entry->data.integral.bit_count - 1)));
     }
 }

@@ -84,6 +84,7 @@ bool ir_get_var_is_comptime(VariableTableEntry *var);
 bool const_values_equal(ConstExprValue *a, ConstExprValue *b);
 void eval_min_max_value(CodeGen *g, TypeTableEntry *type_entry, ConstExprValue *const_val, bool is_max);
 int64_t min_signed_val(TypeTableEntry *type_entry);
+uint64_t max_unsigned_val(TypeTableEntry *type_entry);

 void render_const_value(Buf *buf, ConstExprValue *const_val);
 void define_local_param_variables(CodeGen *g, FnTableEntry *fn_table_entry, VariableTableEntry **arg_vars);
@@ -1466,6 +1466,27 @@ static LLVMValueRef ir_render_elem_ptr(CodeGen *g, IrExecutable *executable, IrI
                 array_type->data.array.len, false);
         add_bounds_check(g, subscript_value, LLVMIntEQ, nullptr, LLVMIntULT, end);
     }
+    if (array_ptr_type->data.pointer.unaligned_bit_count != 0) {
+        return array_ptr_ptr;
+    }
+    TypeTableEntry *canon_child_type = get_underlying_type(array_type->data.array.child_type);
+    if (canon_child_type->id == TypeTableEntryIdStruct &&
+        canon_child_type->data.structure.layout == ContainerLayoutPacked)
+    {
+        LLVMTypeRef ptr_u8_type_ref = LLVMPointerType(LLVMInt8Type(), 0);
+        LLVMValueRef u8_array_ptr = LLVMBuildBitCast(g->builder, array_ptr, ptr_u8_type_ref, "");
+        size_t unaligned_bit_count = instruction->base.value.type->data.pointer.unaligned_bit_count;
+        assert(unaligned_bit_count != 0);
+        assert(unaligned_bit_count % 8 == 0);
+        LLVMValueRef elem_size_bytes = LLVMConstInt(g->builtin_types.entry_usize->type_ref,
+                unaligned_bit_count / 8, false);
+        LLVMValueRef byte_offset = LLVMBuildNUWMul(g->builder, subscript_value, elem_size_bytes, "");
+        LLVMValueRef indices[] = {
+            byte_offset
+        };
+        LLVMValueRef elem_byte_ptr = LLVMBuildInBoundsGEP(g->builder, u8_array_ptr, indices, 1, "");
+        return LLVMBuildBitCast(g->builder, elem_byte_ptr, LLVMPointerType(canon_child_type->type_ref, 0), "");
+    }
     LLVMValueRef indices[] = {
         LLVMConstNull(g->builtin_types.entry_usize->type_ref),
         subscript_value
@@ -1552,11 +1573,19 @@ static LLVMValueRef ir_render_struct_field_ptr(CodeGen *g, IrExecutable *executa
         IrInstructionStructFieldPtr *instruction)
 {
     LLVMValueRef struct_ptr = ir_llvm_value(g, instruction->struct_ptr);
+    // not necessarily a pointer. could be TypeTableEntryIdStruct
+    TypeTableEntry *struct_ptr_type = instruction->struct_ptr->value.type;
     TypeStructField *field = instruction->field;

     if (!type_has_bits(field->type_entry))
         return nullptr;

+    if (struct_ptr_type->id == TypeTableEntryIdPointer &&
+        struct_ptr_type->data.pointer.unaligned_bit_count != 0)
+    {
+        return struct_ptr;
+    }
+
     assert(field->gen_index != SIZE_MAX);
     return LLVMBuildStructGEP(g->builder, struct_ptr, field->gen_index, "");
 }

src/ir.cpp
@@ -7412,21 +7412,6 @@ static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp
     return ira->codegen->builtin_types.entry_bool;
 }

-static uint64_t max_unsigned_val(TypeTableEntry *type_entry) {
-    assert(type_entry->id == TypeTableEntryIdInt);
-    if (type_entry->data.integral.bit_count == 64) {
-        return UINT64_MAX;
-    } else if (type_entry->data.integral.bit_count == 32) {
-        return UINT32_MAX;
-    } else if (type_entry->data.integral.bit_count == 16) {
-        return UINT16_MAX;
-    } else if (type_entry->data.integral.bit_count == 8) {
-        return UINT8_MAX;
-    } else {
-        zig_unreachable();
-    }
-}
-
 static int ir_eval_bignum(ConstExprValue *op1_val, ConstExprValue *op2_val,
         ConstExprValue *out_val, bool (*bignum_fn)(BigNum *, BigNum *, BigNum *),
         TypeTableEntry *type, bool wrapping_op)
@@ -8844,8 +8829,21 @@ static TypeTableEntry *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruc
                     buf_sprintf("index 0 outside array of size 0"));
         }
         TypeTableEntry *child_type = array_type->data.array.child_type;
-        return_type = get_pointer_to_type_extra(ira->codegen, child_type,
-                ptr_type->data.pointer.is_const, ptr_type->data.pointer.is_volatile, 0, 0);
+        if (ptr_type->data.pointer.unaligned_bit_count == 0) {
+            return_type = get_pointer_to_type_extra(ira->codegen, child_type,
+                    ptr_type->data.pointer.is_const, ptr_type->data.pointer.is_volatile, 0, 0);
+        } else {
+            ConstExprValue *elem_val = ir_resolve_const(ira, elem_index, UndefBad);
+            if (!elem_val)
+                return ira->codegen->builtin_types.entry_invalid;
+
+            size_t bit_width = type_size_bits(ira->codegen, child_type);
+            size_t bit_offset = bit_width * elem_val->data.x_bignum.data.x_uint;
+
+            return_type = get_pointer_to_type_extra(ira->codegen, child_type,
+                    ptr_type->data.pointer.is_const, ptr_type->data.pointer.is_volatile,
+                    bit_offset, bit_width);
+        }
     } else if (array_type->id == TypeTableEntryIdPointer) {
         return_type = array_type;
     } else if (is_slice(array_type)) {
@@ -9072,9 +9070,13 @@ static TypeTableEntry *ir_analyze_container_field_ptr(IrAnalyze *ira, Buf *field
                 return ptr_type;
             }
         }
+        size_t ptr_bit_offset = container_ptr->value.type->data.pointer.bit_offset;
+        size_t ptr_unaligned_bit_count = container_ptr->value.type->data.pointer.unaligned_bit_count;
+        size_t unaligned_bit_count_for_result_type = (ptr_unaligned_bit_count == 0) ?
+            field->unaligned_bit_count : type_size_bits(ira->codegen, field->type_entry);
         ir_build_struct_field_ptr_from(&ira->new_irb, &field_ptr_instruction->base, container_ptr, field);
-        return get_pointer_to_type_extra(ira->codegen, field->type_entry, is_const,
-                is_volatile, field->packed_bits_offset, field->unaligned_bit_count);
+        return get_pointer_to_type_extra(ira->codegen, field->type_entry, is_const, is_volatile,
+                ptr_bit_offset + field->packed_bits_offset, unaligned_bit_count_for_result_type);
     } else {
         return ir_analyze_container_member_access_inner(ira, bare_type, field_name,
                 field_ptr_instruction, container_ptr, container_type);
@@ -50,27 +50,39 @@ fn intTypeBuiltin() {
     assert(!usize.is_signed);
 }

+const u1 = @intType(false, 1);
+const u63 = @intType(false, 63);
+const i1 = @intType(true, 1);
+const i63 = @intType(true, 63);
+
 fn minValueAndMaxValue() {
     @setFnTest(this);

+    assert(@maxValue(u1) == 1);
     assert(@maxValue(u8) == 255);
     assert(@maxValue(u16) == 65535);
     assert(@maxValue(u32) == 4294967295);
     assert(@maxValue(u64) == 18446744073709551615);

+    assert(@maxValue(i1) == 0);
     assert(@maxValue(i8) == 127);
     assert(@maxValue(i16) == 32767);
     assert(@maxValue(i32) == 2147483647);
+    assert(@maxValue(i63) == 4611686018427387903);
     assert(@maxValue(i64) == 9223372036854775807);

+    assert(@minValue(u1) == 0);
     assert(@minValue(u8) == 0);
     assert(@minValue(u16) == 0);
     assert(@minValue(u32) == 0);
+    assert(@minValue(u63) == 0);
     assert(@minValue(u64) == 0);

+    assert(@minValue(i1) == -1);
     assert(@minValue(i8) == -128);
     assert(@minValue(i16) == -32768);
     assert(@minValue(i32) == -2147483648);
+    assert(@minValue(i63) == -4611686018427387904);
     assert(@minValue(i64) == -9223372036854775808);
 }

@@ -285,8 +285,10 @@ const Foo96Bits = packed struct {
 fn packedStruct24Bits() {
     @setFnTest(this);

-    comptime assert(@sizeOf(Foo24Bits) == 3);
-    comptime assert(@sizeOf(Foo96Bits) == 12);
+    comptime {
+        assert(@sizeOf(Foo24Bits) == 3);
+        assert(@sizeOf(Foo96Bits) == 12);
+    }

     var value = Foo96Bits {
         .a = 0,
@@ -318,3 +320,52 @@ fn packedStruct24Bits() {
     assert(value.c == 1);
     assert(value.d == 1);
 }
+
+const FooArray24Bits = packed struct {
+    a: u16,
+    b: [2]Foo24Bits,
+    c: u16,
+};
+
+fn packedArray24Bits() {
+    @setFnTest(this);
+
+    comptime {
+        assert(@sizeOf([9]Foo24Bits) == 9 * 3);
+        assert(@sizeOf(FooArray24Bits) == 2 + 2 * 3 + 2);
+    }
+
+    var bytes = []u8{0} ** (@sizeOf(FooArray24Bits) + 1);
+    bytes[bytes.len - 1] = 0xaa;
+    const ptr = &([]FooArray24Bits)(bytes[0...bytes.len - 1])[0];
+    assert(ptr.a == 0);
+    assert(ptr.b[0].field == 0);
+    assert(ptr.b[1].field == 0);
+    assert(ptr.c == 0);
+
+    ptr.a = @maxValue(u16);
+    assert(ptr.a == @maxValue(u16));
+    assert(ptr.b[0].field == 0);
+    assert(ptr.b[1].field == 0);
+    assert(ptr.c == 0);
+
+    ptr.b[0].field = @maxValue(u24);
+    assert(ptr.a == @maxValue(u16));
+    assert(ptr.b[0].field == @maxValue(u24));
+    assert(ptr.b[1].field == 0);
+    assert(ptr.c == 0);
+
+    ptr.b[1].field = @maxValue(u24);
+    assert(ptr.a == @maxValue(u16));
+    assert(ptr.b[0].field == @maxValue(u24));
+    assert(ptr.b[1].field == @maxValue(u24));
+    assert(ptr.c == 0);
+
+    ptr.c = @maxValue(u16);
+    assert(ptr.a == @maxValue(u16));
+    assert(ptr.b[0].field == @maxValue(u24));
+    assert(ptr.b[1].field == @maxValue(u24));
+    assert(ptr.c == @maxValue(u16));
+
+    assert(bytes[bytes.len - 1] == 0xaa);
+}