compiler-rt: Add __addtf3, __subtf3 and __truncdfhf2

Allows addition/subtraction of f128 values and narrowing casts from
f64 to f16.
Marc Tiehuis, 2019-02-13 23:24:52 +13:00
parent 8e68d43ad3, commit be861a85c8 (master)
6 changed files with 353 additions and 0 deletions
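These routines are rarely called directly; the compiler lowers ordinary float operations to them on targets without hardware f128/f16 support. A minimal sketch of what the commit enables (2019-era Zig syntax; the expected bit pattern 0x4248 comes from the truncdfhf2 test below):

const assert = @import("std").debug.assert;

test "f128 add/sub and f64 -> f16 narrowing" {
    const a: f128 = 1.5;
    const b: f128 = 2.25;
    assert(a + b == 3.75); // lowered to __addtf3
    assert(a - b == -0.75); // lowered to __subtf3

    const pi: f64 = 3.1415926535;
    const h = @floatCast(f16, pi); // lowered to __truncdfhf2
    assert(@bitCast(u16, h) == 0x4248);
}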

CMakeLists.txt

@@ -608,6 +608,7 @@ set(ZIG_STD_FILES
"special/bootstrap_lib.zig"
"special/build_runner.zig"
"special/builtin.zig"
"special/compiler_rt/addXf3.zig"
"special/compiler_rt/aulldiv.zig"
"special/compiler_rt/aullrem.zig"
"special/compiler_rt/comparetf2.zig"

std/special/compiler_rt/addXf3.zig

@@ -0,0 +1,191 @@
// Ported from:
//
// https://github.com/llvm-mirror/compiler-rt/blob/92f7768ce940f6437b32ecc0985a1446cd040f7a/lib/builtins/fp_add_impl.inc
const std = @import("std");
const builtin = @import("builtin");
const compiler_rt = @import("index.zig");
pub extern fn __addtf3(a: f128, b: f128) f128 {
return addXf3(f128, a, b);
}
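// a - b == a + (-b): negate b by flipping its sign bit (bit 127).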
pub extern fn __subtf3(a: f128, b: f128) f128 {
const neg_b = @bitCast(f128, @bitCast(u128, b) ^ (u128(1) << 127));
return addXf3(f128, a, neg_b);
}
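// Normalize a denormal significand: shift it left until the implicit-bit
// position is occupied, and return the corresponding effective exponent.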
inline fn normalize(comptime T: type, significand: *@IntType(false, T.bit_count)) i32 {
const Z = @IntType(false, T.bit_count);
const significandBits = std.math.floatMantissaBits(T);
const implicitBit = Z(1) << significandBits;
const shift = @clz(significand.*) - @clz(implicitBit);
significand.* <<= @intCast(u7, shift);
return 1 - shift;
}
inline fn addXf3(comptime T: type, a: T, b: T) T {
const Z = @IntType(false, T.bit_count);
const typeWidth = T.bit_count;
const significandBits = std.math.floatMantissaBits(T);
const exponentBits = std.math.floatExponentBits(T);
const signBit = (Z(1) << (significandBits + exponentBits));
const maxExponent = ((1 << exponentBits) - 1);
const exponentBias = (maxExponent >> 1);
const implicitBit = (Z(1) << significandBits);
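// The top significand bit distinguishes quiet NaNs (set) from signaling NaNs.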
const quietBit = implicitBit >> 1;
const significandMask = implicitBit - 1;
const absMask = signBit - 1;
const exponentMask = absMask ^ significandMask;
const qnanRep = exponentMask | quietBit;
var aRep = @bitCast(Z, a);
var bRep = @bitCast(Z, b);
const aAbs = aRep & absMask;
const bAbs = bRep & absMask;
const negative = (aRep & signBit) != 0;
const exponent = @intCast(i32, aAbs >> significandBits) - exponentBias;
const significand = (aAbs & significandMask) | implicitBit;
const infRep = @bitCast(Z, std.math.inf(T));
// Detect if a or b is zero, infinity, or NaN.
if (aAbs - Z(1) >= infRep - Z(1) or
bAbs - Z(1) >= infRep - Z(1))
{
// NaN + anything = qNaN
if (aAbs > infRep) return @bitCast(T, @bitCast(Z, a) | quietBit);
// anything + NaN = qNaN
if (bAbs > infRep) return @bitCast(T, @bitCast(Z, b) | quietBit);
if (aAbs == infRep) {
// +/-infinity + -/+infinity = qNaN
if ((@bitCast(Z, a) ^ @bitCast(Z, b)) == signBit) {
return @bitCast(T, qnanRep);
}
// +/-infinity + anything remaining = +/- infinity
else {
return a;
}
}
// anything remaining + +/-infinity = +/-infinity
if (bAbs == infRep) return b;
// zero + anything = anything
if (aAbs == 0) {
// but we need to get the sign right for zero + zero
if (bAbs == 0) {
return @bitCast(T, @bitCast(Z, a) & @bitCast(Z, b));
} else {
return b;
}
}
// anything + zero = anything
if (bAbs == 0) return a;
}
// Swap a and b if necessary so that a has the larger absolute value.
if (bAbs > aAbs) {
const temp = aRep;
aRep = bRep;
bRep = temp;
}
// Extract the exponent and significand from the (possibly swapped) a and b.
var aExponent = @intCast(i32, (aRep >> significandBits) & maxExponent);
var bExponent = @intCast(i32, (bRep >> significandBits) & maxExponent);
var aSignificand = aRep & significandMask;
var bSignificand = bRep & significandMask;
// Normalize any denormals, and adjust the exponent accordingly.
if (aExponent == 0) aExponent = normalize(T, &aSignificand);
if (bExponent == 0) bExponent = normalize(T, &bSignificand);
// The sign of the result is the sign of the larger operand, a. If they
// have opposite signs, we are performing a subtraction; otherwise addition.
const resultSign = aRep & signBit;
const subtraction = (aRep ^ bRep) & signBit != 0;
// Shift the significands to give us round, guard and sticky, and or in the
// implicit significand bit. (If we fell through from the denormal path it
// was already set by normalize( ), but setting it twice won't hurt
// anything.)
aSignificand = (aSignificand | implicitBit) << 3;
bSignificand = (bSignificand | implicitBit) << 3;
// Shift the significand of b by the difference in exponents, with a sticky
// bottom bit to get rounding correct.
const @"align" = @intCast(Z, aExponent - bExponent);
if (@"align" != 0) {
if (@"align" < typeWidth) {
const sticky = if (bSignificand << @intCast(u7, typeWidth - @"align") != 0) Z(1) else 0;
bSignificand = (bSignificand >> @truncate(u7, @"align")) | sticky;
} else {
bSignificand = 1; // sticky; b is known to be non-zero.
}
}
if (subtraction) {
aSignificand -= bSignificand;
// If a == -b, return +zero.
if (aSignificand == 0) return @bitCast(T, Z(0));
// If partial cancellation occurred, we need to left-shift the result
// and adjust the exponent:
if (aSignificand < implicitBit << 3) {
const shift = @intCast(i32, @clz(aSignificand)) - @intCast(i32, @clz(implicitBit << 3));
aSignificand <<= @intCast(u7, shift);
aExponent -= shift;
}
} else { // addition
aSignificand += bSignificand;
// If the addition carried up, we need to right-shift the result and
// adjust the exponent:
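// (The significands carry three extra round/guard/sticky bits, so a carry
// out of the sum appears at implicitBit << 4.)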
if (aSignificand & (implicitBit << 4) != 0) {
const sticky = aSignificand & 1;
aSignificand = aSignificand >> 1 | sticky;
aExponent += 1;
}
}
// If we have overflowed the type, return +/- infinity:
if (aExponent >= maxExponent) return @bitCast(T, infRep | resultSign);
if (aExponent <= 0) {
// Result is denormal before rounding; the exponent is zero and we
// need to shift the significand.
const shift = @intCast(Z, 1 - aExponent);
const sticky = if (aSignificand << @intCast(u7, typeWidth - shift) != 0) Z(1) else 0;
aSignificand = (aSignificand >> @intCast(u7, shift)) | sticky;
aExponent = 0;
}
// Low three bits are round, guard, and sticky.
const roundGuardSticky = aSignificand & 0x7;
// Shift the significand into place, and mask off the implicit bit.
var result = (aSignificand >> 3) & significandMask;
// Insert the exponent and sign.
result |= @intCast(Z, aExponent) << significandBits;
result |= resultSign;
// Final rounding. The result may overflow to infinity, but that is the
// correct result in that case.
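// Round to nearest, ties to even: round up when the round/guard/sticky bits
// exceed halfway (0b100); on an exact tie, round up only if the result is odd.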
if (roundGuardSticky > 0x4) result += 1;
if (roundGuardSticky == 0x4) result += result & 1;
return @bitCast(T, result);
}
test "import addXf3" {
_ = @import("addXf3_test.zig");
}

std/special/compiler_rt/addXf3_test.zig

@@ -0,0 +1,85 @@
// Ported from:
//
// https://github.com/llvm-mirror/compiler-rt/blob/92f7768ce940f6437b32ecc0985a1446cd040f7a/test/builtins/Unit/addtf3_test.c
// https://github.com/llvm-mirror/compiler-rt/blob/92f7768ce940f6437b32ecc0985a1446cd040f7a/test/builtins/Unit/subtf3_test.c
const qnan128 = @bitCast(f128, u128(0x7fff800000000000) << 64);
const inf128 = @bitCast(f128, u128(0x7fff000000000000) << 64);
const __addtf3 = @import("addXf3.zig").__addtf3;
fn test__addtf3(a: f128, b: f128, expected_hi: u64, expected_lo: u64) void {
const x = __addtf3(a, b);
const rep = @bitCast(u128, x);
const hi = @intCast(u64, rep >> 64);
const lo = @truncate(u64, rep);
if (hi == expected_hi and lo == expected_lo) {
return;
}
// test other possible NaN representation (signaling NaN)
else if (expected_hi == 0x7fff800000000000 and expected_lo == 0x0) {
if ((hi & 0x7fff000000000000) == 0x7fff000000000000 and
((hi & 0xffffffffffff) > 0 or lo > 0))
{
return;
}
}
@panic("__addtf3 test failure");
}
test "addtf3" {
test__addtf3(qnan128, 0x1.23456789abcdefp+5, 0x7fff800000000000, 0x0);
// NaN + any = NaN
test__addtf3(@bitCast(f128, (u128(0x7fff000000000000) << 64) | u128(0x800030000000)), 0x1.23456789abcdefp+5, 0x7fff800000000000, 0x0);
// inf + inf = inf
test__addtf3(inf128, inf128, 0x7fff000000000000, 0x0);
// inf + any = inf
test__addtf3(inf128, 0x1.2335653452436234723489432abcdefp+5, 0x7fff000000000000, 0x0);
// any + any
test__addtf3(0x1.23456734245345543849abcdefp+5, 0x1.edcba52449872455634654321fp-1, 0x40042afc95c8b579, 0x61e58dd6c51eb77c);
}
const __subtf3 = @import("addXf3.zig").__subtf3;
fn test__subtf3(a: f128, b: f128, expected_hi: u64, expected_lo: u64) void {
const x = __subtf3(a, b);
const rep = @bitCast(u128, x);
const hi = @intCast(u64, rep >> 64);
const lo = @truncate(u64, rep);
if (hi == expected_hi and lo == expected_lo) {
return;
}
// test other possible NaN representation (signaling NaN)
else if (expected_hi == 0x7fff800000000000 and expected_lo == 0x0) {
if ((hi & 0x7fff000000000000) == 0x7fff000000000000 and
((hi & 0xffffffffffff) > 0 or lo > 0))
{
return;
}
}
@panic("__subtf3 test failure");
}
test "subtf3" {
// qNaN - any = qNaN
test__subtf3(qnan128, 0x1.23456789abcdefp+5, 0x7fff800000000000, 0x0);
// NaN + any = NaN
test__subtf3(@bitCast(f128, (u128(0x7fff000000000000) << 64) | u128(0x800030000000)), 0x1.23456789abcdefp+5, 0x7fff800000000000, 0x0);
// inf - any = inf
test__subtf3(inf128, 0x1.23456789abcdefp+5, 0x7fff000000000000, 0x0);
// any + any
test__subtf3(0x1.234567829a3bcdef5678ade36734p+5, 0x1.ee9d7c52354a6936ab8d7654321fp-1, 0x40041b8af1915166, 0xa44a7bca780a166c);
}

std/special/compiler_rt/index.zig

@@ -21,6 +21,9 @@ comptime {
@export("__unordtf2", @import("comparetf2.zig").__unordtf2, linkage);
@export("__addtf3", @import("addXf3.zig").__addtf3, linkage);
@export("__subtf3", @import("addXf3.zig").__subtf3, linkage);
@export("__floattitf", @import("floattitf.zig").__floattitf, linkage);
@export("__floattidf", @import("floattidf.zig").__floattidf, linkage);
@export("__floattisf", @import("floattisf.zig").__floattisf, linkage);
@@ -37,6 +40,7 @@ comptime {
@export("__extendhfsf2", @import("extendXfYf2.zig").__extendhfsf2, linkage);
@export("__truncsfhf2", @import("truncXfYf2.zig").__truncsfhf2, linkage);
@export("__truncdfhf2", @import("truncXfYf2.zig").__truncdfhf2, linkage);
@export("__trunctfdf2", @import("truncXfYf2.zig").__trunctfdf2, linkage);
@export("__trunctfsf2", @import("truncXfYf2.zig").__trunctfsf2, linkage);

std/special/compiler_rt/truncXfYf2.zig

@@ -4,6 +4,10 @@ pub extern fn __truncsfhf2(a: f32) u16 {
return @bitCast(u16, truncXfYf2(f16, f32, a));
}
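// The f16 result is returned as a raw u16 bit pattern; compiler-rt's
// half-precision helpers historically return uint16_t rather than a float type.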
pub extern fn __truncdfhf2(a: f64) u16 {
return @bitCast(u16, truncXfYf2(f16, f64, a));
}
pub extern fn __trunctfsf2(a: f128) f32 {
return truncXfYf2(f32, f128, a);
}

std/special/compiler_rt/truncXfYf2_test.zig

@@ -63,6 +63,74 @@ test "truncsfhf2" {
test__truncsfhf2(0x33000000, 0x0000); // 0x1.0p-25 -> zero
}
const __truncdfhf2 = @import("truncXfYf2.zig").__truncdfhf2;
fn test__truncdfhf2(a: f64, expected: u16) void {
const rep = @bitCast(u16, __truncdfhf2(a));
if (rep == expected) {
return;
}
// test other possible NaN representation (signaling NaN)
else if (expected == 0x7e00) {
if ((rep & 0x7c00) == 0x7c00 and (rep & 0x3ff) > 0) {
return;
}
}
@panic("__truncdfhf2 test failure");
}
fn test__truncdfhf2_raw(a: u64, expected: u16) void {
const actual = __truncdfhf2(@bitCast(f64, a));
if (actual == expected) {
return;
}
@panic("__truncdfhf2 test failure");
}
test "truncdfhf2" {
test__truncdfhf2_raw(0x7ff8000000000000, 0x7e00); // qNaN
test__truncdfhf2_raw(0x7ff0000000008000, 0x7e00); // NaN
test__truncdfhf2_raw(0x7ff0000000000000, 0x7c00); // inf
test__truncdfhf2_raw(0xfff0000000000000, 0xfc00); // -inf
test__truncdfhf2(0.0, 0x0); // zero
test__truncdfhf2_raw(0x80000000 << 32, 0x8000); // -zero
test__truncdfhf2(3.1415926535, 0x4248);
test__truncdfhf2(-3.1415926535, 0xc248);
test__truncdfhf2(0x1.987124876876324p+1000, 0x7c00);
test__truncdfhf2(0x1.987124876876324p+12, 0x6e62);
test__truncdfhf2(0x1.0p+0, 0x3c00);
test__truncdfhf2(0x1.0p-14, 0x0400);
// denormal
test__truncdfhf2(0x1.0p-20, 0x0010);
test__truncdfhf2(0x1.0p-24, 0x0001);
test__truncdfhf2(-0x1.0p-24, 0x8001);
test__truncdfhf2(0x1.5p-25, 0x0001);
// and back to zero
test__truncdfhf2(0x1.0p-25, 0x0000);
test__truncdfhf2(-0x1.0p-25, 0x8000);
// max (precise)
test__truncdfhf2(65504.0, 0x7bff);
// max (rounded)
test__truncdfhf2(65519.0, 0x7bff);
// max (to +inf)
test__truncdfhf2(65520.0, 0x7c00);
test__truncdfhf2(-65520.0, 0xfc00);
test__truncdfhf2(65536.0, 0x7c00);
}
const __trunctfsf2 = @import("truncXfYf2.zig").__trunctfsf2;
fn test__trunctfsf2(a: f128, expected: u32) void {