@RJ-Infinity
Created March 10, 2025 22:44
This is a C3 port of MurmurHash3 that I didn't end up using for a project, but it felt a shame to just delete it, so I put it here.
// This code has been ported from the original (https://github.com/aappleby/smhasher/) to c3
import ntypes;
//-----------------------------------------------------------------------------
// MurmurHash3 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.
// Note - The x86 and x64 versions do _not_ produce the same results, as the
// algorithms are optimized for their respective platforms. You can still
// compile and run any of them on any platform, but your performance with the
// non-native version will be less than optimal.
//-----------------------------------------------------------------------------
// Block read - if your platform needs to do endian-swapping or can only
// handle aligned reads, do the conversion here
macro Un32 getblock32 ( Un32 * p, int i )
{
	return p[i];
}
macro Un64 getblock64 ( Un64 * p, int i )
{
	return p[i];
}
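// Not part of the original port: a hedged sketch of what the endian/alignment
// conversion mentioned above could look like. It rebuilds the little-endian
// word byte by byte, so it also works for unaligned or big-endian reads. The
// name getblock32_le and the byte-wise approach are assumptions, not something
// the upstream code provides.
macro Un32 getblock32_le ( Un8 * p, int i )
{
	Un8 * b = p + i * 4;
	return (Un32)b[0]
		| ((Un32)b[1] << 8)
		| ((Un32)b[2] << 16)
		| ((Un32)b[3] << 24);
}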
//-----------------------------------------------------------------------------
// Finalization mix - force all bits of a hash block to avalanche
macro Un32 fmix32 ( Un32 h )
{
	h ^= h >> 16;
	h *= 0x85ebca6bu32;
	h ^= h >> 13;
	h *= 0xc2b2ae35u32;
	h ^= h >> 16;
	return h;
}
//----------
macro Un64 fmix64 ( Un64 k )
{
	k ^= k >> 33;
	k *= 0xff51afd7ed558ccdu64;
	k ^= k >> 33;
	k *= 0xc4ceb9fe1a85ec53u64;
	k ^= k >> 33;
	return k;
}
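// Not part of the original port: a small, hedged illustration of the avalanche
// property the comment above describes. Flipping a single input bit should
// change roughly half of fmix32's output bits on average. Bit counting is done
// with a plain loop so no particular builtin is assumed.
fn int fmix32_avalanche_bits ( Un32 x, int bit )
{
	Un32 diff = fmix32(x) ^ fmix32(x ^ (1u32 << bit));
	int count = 0;
	while (diff != 0)
	{
		count += (int)(diff & 1u32);
		diff >>= 1;
	}
	return count;
}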
//-----------------------------------------------------------------------------
fn void murmurHash3_x86_32 ( void * key, int len, Un32 seed, void * out )
{
	Un8 * data = (Un8*)key;
	int nblocks = len / 4;
	Un32 h1 = seed;
	Un32 c1 = 0xcc9e2d51u32;
	Un32 c2 = 0x1b873593u32;
	//----------
	// body
	Un32 * blocks = (Un32 *)(data + nblocks*4);
	// blocks points just past the last full block, so the loop walks the
	// blocks front to back using negative indices (-nblocks up to -1).
	for(int i = -nblocks; i; i++)
	{
		Un32 k1 = getblock32(blocks,i);
		k1 *= c1;
		k1 = Un32.rotl(k1,15);
		k1 *= c2;
		h1 ^= k1;
		h1 = Un32.rotl(h1,13);
		h1 = h1*5u32+0xe6546b64u32;
	}
	//----------
	// tail
	Un8 * tail = (Un8*)(data + nblocks*4);
	Un32 k1 = 0;
	switch(len & 3)
	{
		case 3: k1 ^= tail[2] << 16; nextcase;
		case 2: k1 ^= tail[1] << 8; nextcase;
		case 1: k1 ^= tail[0];
			k1 *= c1; k1 = Un32.rotl(k1,15); k1 *= c2; h1 ^= k1; //nextcase;
	};
	//----------
	// finalization
	h1 ^= len;
	h1 = fmix32(h1);
	*(Un32*)out = h1;
}
//-----------------------------------------------------------------------------
fn void murmurHash3_x86_128 ( void * key, int len, Un32 seed, void * out )
{
	Un8 * data = (Un8*)key;
	int nblocks = len / 16;
	Un32 h1 = seed;
	Un32 h2 = seed;
	Un32 h3 = seed;
	Un32 h4 = seed;
	Un32 c1 = 0x239b961bu32;
	Un32 c2 = 0xab0e9789u32;
	Un32 c3 = 0x38b34ae5u32;
	Un32 c4 = 0xa1e38b93u32;
	//----------
	// body
	Un32 * blocks = (Un32 *)(data + nblocks*16);
	for(int i = -nblocks; i; i++)
	{
		Un32 k1 = getblock32(blocks,i*4+0);
		Un32 k2 = getblock32(blocks,i*4+1);
		Un32 k3 = getblock32(blocks,i*4+2);
		Un32 k4 = getblock32(blocks,i*4+3);
		k1 *= c1; k1 = Un32.rotl(k1,15); k1 *= c2; h1 ^= k1;
		h1 = Un32.rotl(h1,19); h1 += h2; h1 = h1*5u32+0x561ccd1bu32;
		k2 *= c2; k2 = Un32.rotl(k2,16); k2 *= c3; h2 ^= k2;
		h2 = Un32.rotl(h2,17); h2 += h3; h2 = h2*5u32+0x0bcaa747u32;
		k3 *= c3; k3 = Un32.rotl(k3,17); k3 *= c4; h3 ^= k3;
		h3 = Un32.rotl(h3,15); h3 += h4; h3 = h3*5u32+0x96cd1c35u32;
		k4 *= c4; k4 = Un32.rotl(k4,18); k4 *= c1; h4 ^= k4;
		h4 = Un32.rotl(h4,13); h4 += h1; h4 = h4*5u32+0x32ac3b17u32;
	}
	//----------
	// tail
	Un8 * tail = (Un8*)(data + nblocks*16);
	Un32 k1 = 0;
	Un32 k2 = 0;
	Un32 k3 = 0;
	Un32 k4 = 0;
	switch(len & 15)
	{
		case 15: k4 ^= tail[14] << 16; nextcase;
		case 14: k4 ^= tail[13] << 8; nextcase;
		case 13: k4 ^= tail[12] << 0;
			k4 *= c4; k4 = Un32.rotl(k4,18); k4 *= c1; h4 ^= k4; nextcase;
		case 12: k3 ^= tail[11] << 24; nextcase;
		case 11: k3 ^= tail[10] << 16; nextcase;
		case 10: k3 ^= tail[ 9] << 8; nextcase;
		case 9: k3 ^= tail[ 8] << 0;
			k3 *= c3; k3 = Un32.rotl(k3,17); k3 *= c4; h3 ^= k3; nextcase;
		case 8: k2 ^= tail[ 7] << 24; nextcase;
		case 7: k2 ^= tail[ 6] << 16; nextcase;
		case 6: k2 ^= tail[ 5] << 8; nextcase;
		case 5: k2 ^= tail[ 4] << 0;
			k2 *= c2; k2 = Un32.rotl(k2,16); k2 *= c3; h2 ^= k2; nextcase;
		case 4: k1 ^= tail[ 3] << 24; nextcase;
		case 3: k1 ^= tail[ 2] << 16; nextcase;
		case 2: k1 ^= tail[ 1] << 8; nextcase;
		case 1: k1 ^= tail[ 0] << 0;
			k1 *= c1; k1 = Un32.rotl(k1,15); k1 *= c2; h1 ^= k1; //nextcase;
	};
	//----------
	// finalization
	h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;
	h1 += h2; h1 += h3; h1 += h4;
	h2 += h1; h3 += h1; h4 += h1;
	h1 = fmix32(h1);
	h2 = fmix32(h2);
	h3 = fmix32(h3);
	h4 = fmix32(h4);
	h1 += h2; h1 += h3; h1 += h4;
	h2 += h1; h3 += h1; h4 += h1;
	((Un32*)out)[0] = h1;
	((Un32*)out)[1] = h2;
	((Un32*)out)[2] = h3;
	((Un32*)out)[3] = h4;
}
//-----------------------------------------------------------------------------
fn void murmurHash3_x64_128 ( void * key, int len, Un32 seed, void * out )
{
	Un8 * data = (Un8*)key;
	int nblocks = len / 16;
	Un64 h1 = seed;
	Un64 h2 = seed;
	Un64 c1 = 0x87c37b91114253d5u64;
	Un64 c2 = 0x4cf5ad432745937fu64;
	//----------
	// body
	Un64 * blocks = (Un64 *)(data);
	for(int i = 0; i < nblocks; i++)
	{
		Un64 k1 = getblock64(blocks,i*2+0);
		Un64 k2 = getblock64(blocks,i*2+1);
		k1 *= c1; k1 = Un64.rotl(k1,31); k1 *= c2; h1 ^= k1;
		h1 = Un64.rotl(h1,27); h1 += h2; h1 = h1*5u32+0x52dce729u32;
		k2 *= c2; k2 = Un64.rotl(k2,33); k2 *= c1; h2 ^= k2;
		h2 = Un64.rotl(h2,31); h2 += h1; h2 = h2*5u32+0x38495ab5u32;
	}
	//----------
	// tail
	Un8 * tail = (Un8*)(data + nblocks*16);
	Un64 k1 = 0;
	Un64 k2 = 0;
	switch(len & 15)
	{
		case 15: k2 ^= ((Un64)tail[14]) << 48; nextcase;
		case 14: k2 ^= ((Un64)tail[13]) << 40; nextcase;
		case 13: k2 ^= ((Un64)tail[12]) << 32; nextcase;
		case 12: k2 ^= ((Un64)tail[11]) << 24; nextcase;
		case 11: k2 ^= ((Un64)tail[10]) << 16; nextcase;
		case 10: k2 ^= ((Un64)tail[ 9]) << 8; nextcase;
		case 9: k2 ^= ((Un64)tail[ 8]) << 0;
			k2 *= c2; k2 = Un64.rotl(k2,33); k2 *= c1; h2 ^= k2; nextcase;
		case 8: k1 ^= ((Un64)tail[ 7]) << 56; nextcase;
		case 7: k1 ^= ((Un64)tail[ 6]) << 48; nextcase;
		case 6: k1 ^= ((Un64)tail[ 5]) << 40; nextcase;
		case 5: k1 ^= ((Un64)tail[ 4]) << 32; nextcase;
		case 4: k1 ^= ((Un64)tail[ 3]) << 24; nextcase;
		case 3: k1 ^= ((Un64)tail[ 2]) << 16; nextcase;
		case 2: k1 ^= ((Un64)tail[ 1]) << 8; nextcase;
		case 1: k1 ^= ((Un64)tail[ 0]) << 0;
			k1 *= c1; k1 = Un64.rotl(k1,31); k1 *= c2; h1 ^= k1; //nextcase;
	};
	//----------
	// finalization
	h1 ^= len; h2 ^= len;
	h1 += h2;
	h2 += h1;
	h1 = fmix64(h1);
	h2 = fmix64(h2);
	h1 += h2;
	h2 += h1;
	((Un64*)out)[0] = h1;
	((Un64*)out)[1] = h2;
}
//-----------------------------------------------------------------------------
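// Not part of the original port: a hedged usage sketch showing how these
// functions might be called on a string key. The function name demo_murmur3,
// the "hello world" key, and the seed of 0 are arbitrary choices for
// illustration; adjust for your project.
fn Un32 demo_murmur3()
{
	String key = "hello world";
	Un32 h32;
	Un64[2] h128;
	murmurHash3_x86_32((void*)key.ptr, (int)key.len, 0, &h32);
	// The 128-bit x64 variant writes two 64-bit words into the out buffer.
	murmurHash3_x64_128((void*)key.ptr, (int)key.len, 0, &h128);
	// As the header comment notes, the x86 and x64 variants are tuned
	// differently, so h32 and h128 are unrelated values.
	return h32;
}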