Remove useless wrappers around f32/f64 aeabi builtins

master
LemonBoy 2020-01-18 23:05:17 +01:00
parent fa52c9e36e
commit 7d94e712f1
2 changed files with 22 additions and 152 deletions

@@ -2,94 +2,29 @@
 //
 // https://github.com/llvm/llvm-project/commit/d674d96bc56c0f377879d01c9d8dfdaaa7859cdb/compiler-rt/lib/builtins/arm/aeabi_dcmp.S
-const ConditionalOperator = enum {
-    Eq,
-    Lt,
-    Le,
-    Ge,
-    Gt,
-};
+const comparedf2 = @import("../comparedf2.zig");
-pub fn __aeabi_dcmpeq() callconv(.Naked) noreturn {
+pub fn __aeabi_dcmpeq(a: f64, b: f64) callconv(.AAPCS) i32 {
     @setRuntimeSafety(false);
-    @call(.{ .modifier = .always_inline }, aeabi_dcmp, .{.Eq});
-    unreachable;
+    return @boolToInt(@call(.{ .modifier = .always_inline }, comparedf2.__eqdf2, .{ a, b }) == 0);
 }
-pub fn __aeabi_dcmplt() callconv(.Naked) noreturn {
+pub fn __aeabi_dcmplt(a: f64, b: f64) callconv(.AAPCS) i32 {
     @setRuntimeSafety(false);
-    @call(.{ .modifier = .always_inline }, aeabi_dcmp, .{.Lt});
-    unreachable;
+    return @boolToInt(@call(.{ .modifier = .always_inline }, comparedf2.__ltdf2, .{ a, b }) < 0);
 }
-pub fn __aeabi_dcmple() callconv(.Naked) noreturn {
+pub fn __aeabi_dcmple(a: f64, b: f64) callconv(.AAPCS) i32 {
     @setRuntimeSafety(false);
-    @call(.{ .modifier = .always_inline }, aeabi_dcmp, .{.Le});
-    unreachable;
+    return @boolToInt(@call(.{ .modifier = .always_inline }, comparedf2.__ledf2, .{ a, b }) <= 0);
 }
-pub fn __aeabi_dcmpge() callconv(.Naked) noreturn {
+pub fn __aeabi_dcmpge(a: f64, b: f64) callconv(.AAPCS) i32 {
     @setRuntimeSafety(false);
-    @call(.{ .modifier = .always_inline }, aeabi_dcmp, .{.Ge});
-    unreachable;
+    return @boolToInt(@call(.{ .modifier = .always_inline }, comparedf2.__gedf2, .{ a, b }) >= 0);
 }
-pub fn __aeabi_dcmpgt() callconv(.Naked) noreturn {
+pub fn __aeabi_dcmpgt(a: f64, b: f64) callconv(.AAPCS) i32 {
     @setRuntimeSafety(false);
-    @call(.{ .modifier = .always_inline }, aeabi_dcmp, .{.Gt});
-    unreachable;
-}
-fn aeabi_dcmp(comptime cond: ConditionalOperator) void {
-    @setRuntimeSafety(false);
-    asm volatile (
-        \\ push { r4, lr }
-    );
-    switch (cond) {
-        .Eq => asm volatile (
-            \\ bl __eqdf2
-            \\ cmp r0, #0
-            \\ beq 1f
-            \\ movs r0, #0
-            \\ pop { r4, pc }
-            \\ 1:
-        ),
-        .Lt => asm volatile (
-            \\ bl __ltdf2
-            \\ cmp r0, #0
-            \\ blt 1f
-            \\ movs r0, #0
-            \\ pop { r4, pc }
-            \\ 1:
-        ),
-        .Le => asm volatile (
-            \\ bl __ledf2
-            \\ cmp r0, #0
-            \\ ble 1f
-            \\ movs r0, #0
-            \\ pop { r4, pc }
-            \\ 1:
-        ),
-        .Ge => asm volatile (
-            \\ bl __ltdf2
-            \\ cmp r0, #0
-            \\ bge 1f
-            \\ movs r0, #0
-            \\ pop { r4, pc }
-            \\ 1:
-        ),
-        .Gt => asm volatile (
-            \\ bl __gtdf2
-            \\ cmp r0, #0
-            \\ bgt 1f
-            \\ movs r0, #0
-            \\ pop { r4, pc }
-            \\ 1:
-        ),
-    }
-    asm volatile (
-        \\ movs r0, #1
-        \\ pop { r4, pc }
-    );
+    return @boolToInt(@call(.{ .modifier = .always_inline }, comparedf2.__gtdf2, .{ a, b }) > 0);
 }
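
The new f64 entry points follow the AEABI run-time ABI's boolean convention: each returns 1 when the relation holds and 0 otherwise, which is exactly what the @boolToInt conversion above produces. A minimal sketch of how they could be exercised, assuming an ARM EABI target where compiler-rt provides these exports; the extern declarations below exist only for this sketch and are not part of the commit:

const assert = @import("std").debug.assert;

// Signatures copied from the new code in the diff above.
extern fn __aeabi_dcmpeq(a: f64, b: f64) callconv(.AAPCS) i32;
extern fn __aeabi_dcmplt(a: f64, b: f64) callconv(.AAPCS) i32;
extern fn __aeabi_dcmpge(a: f64, b: f64) callconv(.AAPCS) i32;

test "aeabi f64 comparisons return 1 when the relation holds" {
    assert(__aeabi_dcmpeq(1.0, 1.0) == 1);
    assert(__aeabi_dcmpeq(1.0, 2.0) == 0);
    assert(__aeabi_dcmplt(1.0, 2.0) == 1);
    assert(__aeabi_dcmpge(2.0, 1.0) == 1);
}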

@@ -2,94 +2,29 @@
 //
 // https://github.com/llvm/llvm-project/commit/d674d96bc56c0f377879d01c9d8dfdaaa7859cdb/compiler-rt/lib/builtins/arm/aeabi_fcmp.S
-const ConditionalOperator = enum {
-    Eq,
-    Lt,
-    Le,
-    Ge,
-    Gt,
-};
+const comparesf2 = @import("../comparesf2.zig");
-pub fn __aeabi_fcmpeq() callconv(.Naked) noreturn {
+pub fn __aeabi_fcmpeq(a: f32, b: f32) callconv(.AAPCS) i32 {
     @setRuntimeSafety(false);
-    @call(.{ .modifier = .always_inline }, aeabi_fcmp, .{.Eq});
-    unreachable;
+    return @boolToInt(@call(.{ .modifier = .always_inline }, comparesf2.__eqsf2, .{ a, b }) == 0);
 }
-pub fn __aeabi_fcmplt() callconv(.Naked) noreturn {
+pub fn __aeabi_fcmplt(a: f32, b: f32) callconv(.AAPCS) i32 {
     @setRuntimeSafety(false);
-    @call(.{ .modifier = .always_inline }, aeabi_fcmp, .{.Lt});
-    unreachable;
+    return @boolToInt(@call(.{ .modifier = .always_inline }, comparesf2.__ltsf2, .{ a, b }) < 0);
 }
-pub fn __aeabi_fcmple() callconv(.Naked) noreturn {
+pub fn __aeabi_fcmple(a: f32, b: f32) callconv(.AAPCS) i32 {
     @setRuntimeSafety(false);
-    @call(.{ .modifier = .always_inline }, aeabi_fcmp, .{.Le});
-    unreachable;
+    return @boolToInt(@call(.{ .modifier = .always_inline }, comparesf2.__lesf2, .{ a, b }) <= 0);
 }
-pub fn __aeabi_fcmpge() callconv(.Naked) noreturn {
+pub fn __aeabi_fcmpge(a: f32, b: f32) callconv(.AAPCS) i32 {
     @setRuntimeSafety(false);
-    @call(.{ .modifier = .always_inline }, aeabi_fcmp, .{.Ge});
-    unreachable;
+    return @boolToInt(@call(.{ .modifier = .always_inline }, comparesf2.__gesf2, .{ a, b }) >= 0);
 }
-pub fn __aeabi_fcmpgt() callconv(.Naked) noreturn {
+pub fn __aeabi_fcmpgt(a: f32, b: f32) callconv(.AAPCS) i32 {
     @setRuntimeSafety(false);
-    @call(.{ .modifier = .always_inline }, aeabi_fcmp, .{.Gt});
-    unreachable;
-}
-fn aeabi_fcmp(comptime cond: ConditionalOperator) void {
-    @setRuntimeSafety(false);
-    asm volatile (
-        \\ push { r4, lr }
-    );
-    switch (cond) {
-        .Eq => asm volatile (
-            \\ bl __eqsf2
-            \\ cmp r0, #0
-            \\ beq 1f
-            \\ movs r0, #0
-            \\ pop { r4, pc }
-            \\ 1:
-        ),
-        .Lt => asm volatile (
-            \\ bl __ltsf2
-            \\ cmp r0, #0
-            \\ blt 1f
-            \\ movs r0, #0
-            \\ pop { r4, pc }
-            \\ 1:
-        ),
-        .Le => asm volatile (
-            \\ bl __lesf2
-            \\ cmp r0, #0
-            \\ ble 1f
-            \\ movs r0, #0
-            \\ pop { r4, pc }
-            \\ 1:
-        ),
-        .Ge => asm volatile (
-            \\ bl __ltsf2
-            \\ cmp r0, #0
-            \\ bge 1f
-            \\ movs r0, #0
-            \\ pop { r4, pc }
-            \\ 1:
-        ),
-        .Gt => asm volatile (
-            \\ bl __gtsf2
-            \\ cmp r0, #0
-            \\ bgt 1f
-            \\ movs r0, #0
-            \\ pop { r4, pc }
-            \\ 1:
-        ),
-    }
-    asm volatile (
-        \\ movs r0, #1
-        \\ pop { r4, pc }
-    );
+    return @boolToInt(@call(.{ .modifier = .always_inline }, comparesf2.__gtsf2, .{ a, b }) > 0);
 }
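
The f32 entry points behave the same way. One consequence worth spelling out: in the usual compiler-rt convention, __eqsf2/__ltsf2/__lesf2 report unordered operands with a positive result and __gesf2/__gtsf2 with a negative one, so every one of these predicates yields 0 whenever either operand is NaN. A small sketch under the same assumptions as above (ARM EABI target with compiler-rt linked in; the extern declarations are for illustration only):

const std = @import("std");

// Signatures copied from the new code in the diff above.
extern fn __aeabi_fcmpeq(a: f32, b: f32) callconv(.AAPCS) i32;
extern fn __aeabi_fcmplt(a: f32, b: f32) callconv(.AAPCS) i32;
extern fn __aeabi_fcmpgt(a: f32, b: f32) callconv(.AAPCS) i32;

test "aeabi f32 comparisons treat NaN as 'relation does not hold'" {
    const nan = std.math.nan(f32);
    std.debug.assert(__aeabi_fcmpeq(nan, nan) == 0);
    std.debug.assert(__aeabi_fcmplt(nan, 1.0) == 0);
    std.debug.assert(__aeabi_fcmpgt(1.0, nan) == 0);
    std.debug.assert(__aeabi_fcmplt(1.0, 2.0) == 1);
}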