/*
 * 128-bit unsigned arithmetic helpers for uwide128, a struct carrying a
 * 128-bit value as two 64-bit words (v64[lo64], v64[hi64]) that overlay
 * four 32-bit words (v32[wo32_0] .. v32[wo32_3], least significant first).
 *
 * NOTE(review): the original include lines were garbled ("#include
 * #include" with no header names).  Restored to the standard headers this
 * file visibly uses; the project header that declares uwide128, hi64,
 * lo64 and wo32_0..wo32_3 must be included here as well -- TODO: confirm
 * its name against the original tree.
 */
#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>

#if __SIZEOF_INT128__

/* The compiler provides unsigned __int128: type-pun uwide128 through a
 * union and let native 128-bit arithmetic do the work. */
union u {
	unsigned __int128 x;
	uwide128 s;
};

/* Return -a (two's-complement negation). */
__attribute__((__weak__))
uwide128 __uwide128_neg(uwide128 a)
{
	union u both = { .s = a, };

	both.x = -both.x;
	return both.s;
}

/* Return a + b. */
__attribute__((__weak__))
uwide128 __uwide128_add(uwide128 a, uint8_t b)
{
	union u both = { .s = a, };

	both.x += b;
	return both.s;
}

/* Return a - b. */
__attribute__((__weak__))
uwide128 __uwide128_sub(uwide128 a, uint8_t b)
{
	union u both = { .s = a, };

	both.x -= b;
	return both.s;
}

/* Return a * b. */
__attribute__((__weak__))
uwide128 __uwide128_mul(uwide128 a, uint8_t b)
{
	union u both = { .s = a, };

	both.x *= b;
	return both.s;
}

/* Divide *a by 10 in place; return the remainder (0..9). */
__attribute__((__weak__))
uint8_t __uwide128_div10(uwide128 *a)
{
	union u both = { .s = *a, };
	uint8_t ret = both.x % 10;

	both.x /= 10;
	*a = both.s;
	return ret;
}

/* Divide *a by 2 in place; return the remainder (0..1). */
__attribute__((__weak__))
uint8_t __uwide128_div2(uwide128 *a)
{
	union u both = { .s = *a, };
	uint8_t ret = both.x % 2;

	both.x /= 2;
	*a = both.s;
	return ret;
}

/* Divide *a by 8 in place; return the remainder (0..7). */
__attribute__((__weak__))
uint8_t __uwide128_div8(uwide128 *a)
{
	union u both = { .s = *a, };
	uint8_t ret = both.x % 8;

	both.x /= 8;
	*a = both.s;
	return ret;
}

/* Divide *a by 16 in place; return the remainder (0..15). */
__attribute__((__weak__))
uint8_t __uwide128_div16(uwide128 *a)
{
	union u both = { .s = *a, };
	uint8_t ret = both.x % 16;

	both.x /= 16;
	*a = both.s;
	return ret;
}

/* Return a <= b. */
__attribute__((__weak__))
_Bool __uwide128_le(uwide128 a, uwide128 b)
{
	union u botha = { .s = a, };
	union u bothb = { .s = b, };

	return botha.x <= bothb.x;
}

/* Return a == 0. */
__attribute__((__weak__))
_Bool __uwide128_iszero(uwide128 a)
{
	union u both = { .s = a, };

	return !both.x;
}

/* Fetch the next 128-bit variadic argument from *ap.
 * Fix: fetch it as unsigned __int128 to match the union member it fills;
 * va_arg with the signed variant is only defined when the value is
 * representable in both the signed and the unsigned type. */
uwide128 __uwide128_pop(va_list *ap)
{
	return (union u){ .x = va_arg(*ap, unsigned __int128) }.s;
}

#else /* !__SIZEOF_INT128__ */

/* Fallback implementations built from 64- and 32-bit arithmetic. */

/* Return a <= b: the high words decide unless they tie. */
__attribute__((__weak__))
_Bool __uwide128_le(uwide128 a, uwide128 b)
{
	if (a.v64[hi64] > b.v64[hi64])
		return false;
	if (a.v64[hi64] < b.v64[hi64])
		return true;
	return a.v64[lo64] <= b.v64[lo64];
}

/* Return a == 0. */
__attribute__((__weak__))
_Bool __uwide128_iszero(uwide128 a)
{
	return !a.v64[0] && !a.v64[1];
}

/* Return -a.  Two's-complement negation is ~a + 1.
 * Fix: the original decremented the complement, computing ~a - 1, which
 * is -a - 2 (e.g. it mapped 0 to 0xFF..FE/0xFF..FE instead of 0).
 * Increment instead, carrying into the high word exactly when the low
 * word of a is zero (i.e. when ~a.v64[lo64] is all ones and +1 wraps). */
__attribute__((__weak__))
uwide128 __uwide128_neg(uwide128 a)
{
	uwide128 ret = {
		.v64 = {
			[0] = ~a.v64[0],
			[1] = ~a.v64[1],
		},
	};

	if (!a.v64[lo64])
		ret.v64[hi64]++;
	ret.v64[lo64]++;
	return ret;
}

/* Return a + b, rippling the carry through the four 32-bit words. */
uwide128 __uwide128_add(uwide128 a, uint8_t b)
{
	uwide128 ret;
	uint64_t carry = a.v32[wo32_0];

	carry += b;
	ret.v32[wo32_0] = carry;
	carry >>= 32;
	carry += a.v32[wo32_1];
	ret.v32[wo32_1] = carry;
	carry >>= 32;
	carry += a.v32[wo32_2];
	ret.v32[wo32_2] = carry;
	carry >>= 32;
	carry += a.v32[wo32_3];
	ret.v32[wo32_3] = carry;
	return ret;
}

/* Return a - b, rippling the borrow through the four 32-bit words.
 * Fix: the original propagated the borrow with
 *         carry /= UINT64_C(0x100000000);
 * The unsigned divisor converts a negative carry to uint64_t, so a
 * borrow of -1 became 0xFFFFFFFF and subsequently leaked a spurious +1
 * into the next word (e.g. {5,7,0,0} - 9 corrupted word 2).  An
 * arithmetic right shift yields exactly -1 (borrow) or 0 (no borrow).
 * Right-shifting a negative int64_t is implementation-defined but is
 * arithmetic on GCC/Clang, which this file already requires (it uses
 * __attribute__((__weak__))). */
uwide128 __uwide128_sub(uwide128 a, uint8_t b)
{
	uwide128 ret;
	int64_t carry = a.v32[wo32_0];

	carry -= b;
	ret.v32[wo32_0] = carry;
	carry >>= 32;
	carry += a.v32[wo32_1];
	ret.v32[wo32_1] = carry;
	carry >>= 32;
	carry += a.v32[wo32_2];
	ret.v32[wo32_2] = carry;
	carry >>= 32;
	carry += a.v32[wo32_3];
	ret.v32[wo32_3] = carry;
	return ret;
}

/* Return a * b (b is a small multiplier), one 32-bit word at a time.
 * carry never overflows: it stays below 2^40 for b <= 255. */
uwide128 __uwide128_mul(uwide128 a, uint8_t b)
{
	uwide128 ret;
	uint64_t carry;
	uint64_t prod;

	prod = a.v32[wo32_0];
	prod *= b;
	carry = prod;
	ret.v32[wo32_0] = carry;
	carry >>= 32;

	prod = a.v32[wo32_1];
	prod *= b;
	carry += prod;
	ret.v32[wo32_1] = carry;
	carry >>= 32;

	prod = a.v32[wo32_2];
	prod *= b;
	carry += prod;
	ret.v32[wo32_2] = carry;
	carry >>= 32;

	prod = a.v32[wo32_3];
	prod *= b;
	carry += prod;
	ret.v32[wo32_3] = carry;
	return ret;
}

/* Divide *a in place by the small divisor b (2..16 here) and return the
 * remainder.  Long division: divide the high 64-bit word, then pull the
 * remainder down through the two low 32-bit words; each partial value is
 * below b * 2^32, so every quotient fits its 32-bit slot. */
static uint8_t __uwide128_div(uwide128 *a, uint8_t b)
{
	uint64_t rest = a->v64[hi64] % b;

	a->v64[hi64] /= b;
	rest <<= 32;
	rest |= a->v32[wo32_1];
	a->v32[wo32_1] = rest / b;
	rest %= b;
	rest <<= 32;
	rest |= a->v32[wo32_0];
	a->v32[wo32_0] = rest / b;
	rest %= b;
	return rest;
}

/* Divide *a by 10 in place; return the remainder (0..9). */
uint8_t __uwide128_div10(uwide128 *a)
{
	return __uwide128_div(a, 10);
}

/* Divide *a by 2 in place; return the remainder (0..1). */
uint8_t __uwide128_div2(uwide128 *a)
{
	return __uwide128_div(a, 2);
}

/* Divide *a by 8 in place; return the remainder (0..7). */
uint8_t __uwide128_div8(uwide128 *a)
{
	return __uwide128_div(a, 8);
}

/* Divide *a by 16 in place; return the remainder (0..15). */
uint8_t __uwide128_div16(uwide128 *a)
{
	return __uwide128_div(a, 16);
}

#endif /* __SIZEOF_INT128__ */