Fix Noise compiled under clang >= 3.7.x with -O2 or higher

When compiled with optimizations, recent versions of clang appear to
'optimize' out a crucial "and %reg, 0x7FFFFFFF" instruction in noise2d(),
apparently because the compiler assumes the variable n can never exceed
that value.
Indeed, signed integer overflow is undefined behavior in C and C++, so while
this optimization is "correct" in that sense, it breaks a lot of existing code.
Solved by changing n to an unsigned type, making the wraparound, and therefore
the behavior, well-defined.
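As an illustration of the rule at work (a minimal sketch, not code from this
commit; the 60493 / 19990303 / 1376312589 constants are copied from the diff
below): with a signed n the multiplications can overflow int, which is
undefined behavior, so the optimizer may reason as if that never happens;
with an unsigned n the arithmetic wraps modulo 2^32 and the final mask is
applied exactly as written.

// Sketch only -- not Minetest code. Assumes n arrives already masked
// into [0, 0x7fffffff], as in noise2d()/noise3d().
int scramble_signed(int n)
{
	n = (n >> 13) ^ n;
	// n * n * 60493 overflows int for most n: undefined behavior,
	// which is what allows the "& 0x7fffffff" to be elided.
	return (n * (n * n * 60493 + 19990303) + 1376312589) & 0x7fffffff;
}

unsigned scramble_unsigned(unsigned n)
{
	n = (n >> 13) ^ n;
	// Unsigned arithmetic wraps modulo 2^32, so the mask must be honored
	// and the result is guaranteed to land in [0, 0x7fffffff].
	return (n * (n * n * 60493 + 19990303) + 1376312589) & 0x7fffffff;
}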
master
kwolekr 2015-11-01 11:16:18 -05:00
parent 9269a0ecc7
commit d198e420ec
1 changed file with 4 additions and 4 deletions


@@ -158,21 +158,21 @@ s32 PcgRandom::randNormalDist(s32 min, s32 max, int num_trials)
 float noise2d(int x, int y, int seed)
 {
-	int n = (NOISE_MAGIC_X * x + NOISE_MAGIC_Y * y
+	unsigned int n = (NOISE_MAGIC_X * x + NOISE_MAGIC_Y * y
 		+ NOISE_MAGIC_SEED * seed) & 0x7fffffff;
 	n = (n >> 13) ^ n;
 	n = (n * (n * n * 60493 + 19990303) + 1376312589) & 0x7fffffff;
-	return 1.f - (float)n / 0x40000000;
+	return 1.f - (float)(int)n / 0x40000000;
 }
 
 float noise3d(int x, int y, int z, int seed)
 {
-	int n = (NOISE_MAGIC_X * x + NOISE_MAGIC_Y * y + NOISE_MAGIC_Z * z
+	unsigned int n = (NOISE_MAGIC_X * x + NOISE_MAGIC_Y * y + NOISE_MAGIC_Z * z
 		+ NOISE_MAGIC_SEED * seed) & 0x7fffffff;
 	n = (n >> 13) ^ n;
 	n = (n * (n * n * 60493 + 19990303) + 1376312589) & 0x7fffffff;
-	return 1.f - (float)n / 0x40000000;
+	return 1.f - (float)(int)n / 0x40000000;
 }