// cryprot_core/block/gf128.rs

use super::Block;

/// The irreducible polynomial for gf128 operations.
///
/// Only the low coefficients of `x^128 + x^7 + x^2 + x + 1` are stored:
/// `x^7 + x^2 + x + 1 = 0b1000_0111 = 0x87`. The implicit `x^128` term is
/// handled by the `gf128_reduce` routines below.
const MOD: u64 = 0b10000111; // 0x87

// Runtime detection of the `pclmulqdq` feature (carry-less multiply), used to
// choose between the SIMD and scalar implementations on x86/x86_64.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
cpufeatures::new!(target_feature_pclmulqdq, "pclmulqdq");
8
9impl Block {
10    /// Carryless multiplication of two Blocks as polynomials over GF(2).
11    ///
12    /// Returns (low, high) bits.
13    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
14    #[inline]
15    pub fn clmul(&self, rhs: &Self) -> (Self, Self) {
16        if target_feature_pclmulqdq::get() {
17            // SAFETY: pclmulqdq is available
18            unsafe {
19                let (low, high) = clmul::clmul128(self.into(), rhs.into());
20                (low.into(), high.into())
21            }
22        } else {
23            let (low, high) = scalar::clmul128(self.into(), rhs.into());
24            (low.into(), high.into())
25        }
26    }
27
28    /// Carryless multiplication of two Blocks as polynomials over GF(2).
29    ///
30    /// Returns (low, high) bits.
31    #[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
32    #[inline]
33    pub fn clmul(&self, rhs: &Self) -> (Self, Self) {
34        let (low, high) = scalar::clmul128(self.into(), rhs.into());
35        (low.into(), high.into())
36    }
37
38    /// Multiplication over GF(2^128).
39    ///
40    /// Uses the irreducible polynomial `x^128 + x^7 + x^2 + x + 1.
41    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
42    #[inline]
43    pub fn gf_mul(&self, rhs: &Self) -> Self {
44        if target_feature_pclmulqdq::get() {
45            // SAFETY: pclmulqdq is available
46            unsafe { clmul::gf128_mul(self.into(), rhs.into()).into() }
47        } else {
48            scalar::gf128_mul(self.into(), rhs.into()).into()
49        }
50    }
51
52    /// Multiplication over GF(2^128).
53    ///
54    /// Uses the irreducible polynomial `x^128 + x^7 + x^2 + x + 1`.
55    #[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
56    #[inline]
57    pub fn gf_mul(&self, rhs: &Self) -> Self {
58        scalar::gf128_mul(self.into(), rhs.into()).into()
59    }
60
61    /// Reduce polynomial over GF(2) by `x^128 + x^7 + x^2 + x + 1`.
62    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
63    #[inline]
64    pub fn gf_reduce(low: &Self, high: &Self) -> Self {
65        if target_feature_pclmulqdq::get() {
66            // SAFETY: pclmulqdq is available
67            unsafe { clmul::gf128_reduce(low.into(), high.into()).into() }
68        } else {
69            scalar::gf128_reduce(low.into(), high.into()).into()
70        }
71    }
72
73    /// Reduce polynomial over GF(2) by `x^128 + x^7 + x^2 + x + 1`.
74    #[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
75    #[inline]
76    pub fn gf_reduce(low: &Self, high: &Self) -> Self {
77        scalar::gf128_reduce(low.into(), high.into()).into()
78    }
79
80    #[inline]
81    pub fn gf_pow(&self, mut exp: u64) -> Block {
82        let mut s = Block::ONE;
83        let mut pow2 = *self;
84
85        while exp != 0 {
86            if exp & 1 != 0 {
87                s = s.gf_mul(&pow2);
88            }
89            pow2 = pow2.gf_mul(&pow2);
90            exp >>= 1;
91        }
92        s
93    }
94}
95
96#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
97mod clmul {
98    #[cfg(target_arch = "x86")]
99    use std::arch::x86::*;
100    #[cfg(target_arch = "x86_64")]
101    use std::arch::x86_64::*;
102
103    use super::MOD;
104
105    #[target_feature(enable = "pclmulqdq")]
106    #[inline]
107    pub fn gf128_mul(a: __m128i, b: __m128i) -> __m128i {
108        let (low, high) = clmul128(a, b);
109        gf128_reduce(low, high)
110    }
111
112    /// Carry-less multiply of two 128-bit numbers.
113    ///
114    /// Return (low, high) bits
115    #[target_feature(enable = "pclmulqdq")]
116    #[inline]
117    pub fn clmul128(a: __m128i, b: __m128i) -> (__m128i, __m128i) {
118        // NOTE: I tried using karatsuba but it was slightly slower than the naive
119        // multiplication
120        let ab_low = _mm_clmulepi64_si128::<0x00>(a, b);
121        let ab_high = _mm_clmulepi64_si128::<0x11>(a, b);
122        let ab_lohi1 = _mm_clmulepi64_si128::<0x01>(a, b);
123        let ab_lohi2 = _mm_clmulepi64_si128::<0x10>(a, b);
124        let ab_mid = _mm_xor_si128(ab_lohi1, ab_lohi2);
125        let low = _mm_xor_si128(ab_low, _mm_slli_si128::<8>(ab_mid));
126        let high = _mm_xor_si128(ab_high, _mm_srli_si128::<8>(ab_mid));
127        (low, high)
128    }
129
130    #[target_feature(enable = "pclmulqdq")]
131    #[inline]
132    pub fn gf128_reduce(mut low: __m128i, mut high: __m128i) -> __m128i {
133        // NOTE: I tried a sse shift based reduction but it was slower than the clmul
134        // implementation
135        let modulus = [MOD, 0];
136        // SAFETY: Ptr to modulus is valid and pclmulqdq implies sse2 is enabled
137        let modulus = unsafe { _mm_loadu_si64(modulus.as_ptr().cast()) };
138
139        let tmp = _mm_clmulepi64_si128::<0x01>(high, modulus);
140        let tmp_shifted = _mm_slli_si128::<8>(tmp);
141        low = _mm_xor_si128(low, tmp_shifted);
142        high = _mm_xor_si128(high, tmp_shifted);
143
144        // reduce overflow
145        let tmp = _mm_clmulepi64_si128::<0x01>(tmp, modulus);
146        low = _mm_xor_si128(low, tmp);
147
148        let tmp = _mm_clmulepi64_si128::<0x00>(high, modulus);
149        _mm_xor_si128(low, tmp)
150    }
151
152    #[cfg(all(test, target_feature = "pclmulqdq"))]
153    mod test {
154        use std::{arch::x86_64::__m128i, mem::transmute};
155
156        use crate::block::gf128::clmul::{clmul128, gf128_mul, gf128_reduce};
157
158        #[test]
159        fn test_gf128_mul_zero() {
160            unsafe {
161                let a = transmute(0x19831239123916248127031273012381_u128);
162                let b = transmute(0_u128);
163                let exp = 0_u128;
164                let mul = transmute(gf128_mul(a, b));
165                assert_eq!(exp, mul);
166            }
167        }
168
169        #[test]
170        fn test_gf128_mul_onw() {
171            unsafe {
172                let a = transmute(0x19831239123916248127031273012381_u128);
173                let b = transmute(0x1_u128);
174                let exp = 0x19831239123916248127031273012381_u128;
175                let mul = transmute(gf128_mul(a, b));
176                assert_eq!(exp, mul);
177            }
178        }
179
180        #[test]
181        fn test_gf128_mul() {
182            unsafe {
183                let a = transmute(0x19831239123916248127031273012381_u128);
184                let b = transmute(0xabcdef0123456789abcdef0123456789_u128);
185                let exp = 0x63a033d0ed643e85153c50f4268a7d9_u128;
186                let mul = transmute(gf128_mul(a, b));
187                assert_eq!(exp, mul);
188            }
189        }
190
191        #[test]
192        fn test_clmul128() {
193            unsafe {
194                let a: __m128i = transmute(0x19831239123916248127031273012381_u128);
195                let b: __m128i = transmute(0xabcdef0123456789abcdef0123456789_u128);
196                let (low, high) = clmul128(a, b);
197                let [low, high] = transmute([low, high]);
198                let exp_low: u128 = 0xa5de9b50e6db7b5147e92b99ee261809;
199                let exp_high: u128 = 0xf1d6d37d58114afed2addfedd7c77f7;
200                assert_eq!(exp_low, low);
201                assert_eq!(exp_high, high);
202            }
203        }
204
205        #[test]
206        fn test_gf128_reduce() {
207            unsafe {
208                // test vectors computed using sage
209                let low: __m128i = transmute(0x0123456789abcdef0123456789abcdef_u128);
210                let high: __m128i = transmute(0xabcdef0123456789abcdef0123456789_u128);
211                let exp = 0xb4b548f1c3c23f86b4b548f1c3c21572_u128;
212                let res: u128 = transmute(gf128_reduce(low, high));
213
214                println!("res: {res:b}");
215                println!("exp: {exp:b}");
216                assert_eq!(exp, res);
217            }
218        }
219    }
220
221    #[cfg(all(is_nightly, test, target_feature = "pclmulqdq"))]
222    mod benches {
223        extern crate test;
224
225        use std::{hint::black_box, mem::transmute};
226
227        use rand::{Rng, rng};
228        use test::Bencher;
229
230        #[bench]
231        fn bench_gf128_mul(b: &mut Bencher) {
232            let [low, high] = unsafe { transmute(rng().random::<[u128; 2]>()) };
233            b.iter(|| black_box(unsafe { super::gf128_mul(black_box(low), black_box(high)) }));
234        }
235
236        #[bench]
237        fn bench_gf128_reduce(b: &mut Bencher) {
238            let [low, high] = unsafe { transmute(rng().random::<[u128; 2]>()) };
239            b.iter(|| black_box(unsafe { super::gf128_reduce(black_box(low), black_box(high)) }));
240        }
241    }
242}
243
// used in tests, but if we're not compiling tests these will otherwise be
// flagged as unused
#[allow(dead_code)]
mod scalar {
    /// Multiplication over GF(2^128) in portable scalar code.
    ///
    /// Reduces the 256-bit carry-less product by `x^128 + x^7 + x^2 + x + 1`.
    #[inline]
    pub fn gf128_mul(a: u128, b: u128) -> u128 {
        let (low, high) = clmul128(a, b);
        gf128_reduce(low, high)
    }

    /// Carry-less multiply of two 128-bit numbers.
    ///
    /// Return (low, high) bits
    #[inline]
    pub fn clmul128(a: u128, b: u128) -> (u128, u128) {
        let (a_low, a_high) = (a as u64, (a >> 64) as u64);
        let (b_low, b_high) = (b as u64, (b >> 64) as u64);

        // Use karatsuba multiplication: three 64x64 carry-less products instead
        // of four.
        let ab_low = clmul64(a_low, b_low);
        let ab_high = clmul64(a_high, b_high);
        let ab_mid = clmul64(a_low ^ a_high, b_low ^ b_high) ^ ab_low ^ ab_high;
        let low = ab_low ^ (ab_mid << 64);
        let high = ab_high ^ (ab_mid >> 64);
        (low, high)
    }

    // Adapted from https://github.com/RustCrypto/universal-hashes/blob/802b40974a08bbd2663c63780fc87a23ee931868/polyval/src/backend/soft64.rs#L201C1-L227C2
    // Uses the technique described in https://www.bearssl.org/constanttime.html#ghash-for-gcm
    // but directly outputs the 128 bits without needing the Rev trick.
    // This method is constant time and significantly faster than iterating over the
    // bits of y and xoring shifted x.
    /// Multiplication in GF(2)[X] with “holes”
    /// (sequences of zeroes) to avoid carry spilling.
    ///
    /// When carries do occur, they wind up in a "hole" and are subsequently
    /// masked out of the result.
    #[inline]
    fn clmul64(x: u64, y: u64) -> u128 {
        // Split each operand into four strided sub-words (every 4th bit), so that
        // integer multiplication never propagates a carry into a used bit.
        let x0 = (x & 0x1111_1111_1111_1111) as u128;
        let x1 = (x & 0x2222_2222_2222_2222) as u128;
        let x2 = (x & 0x4444_4444_4444_4444) as u128;
        let x3 = (x & 0x8888_8888_8888_8888) as u128;
        let y0 = (y & 0x1111_1111_1111_1111) as u128;
        let y1 = (y & 0x2222_2222_2222_2222) as u128;
        let y2 = (y & 0x4444_4444_4444_4444) as u128;
        let y3 = (y & 0x8888_8888_8888_8888) as u128;

        let mut z0 = (x0 * y0) ^ (x1 * y3) ^ (x2 * y2) ^ (x3 * y1);
        let mut z1 = (x0 * y1) ^ (x1 * y0) ^ (x2 * y3) ^ (x3 * y2);
        let mut z2 = (x0 * y2) ^ (x1 * y1) ^ (x2 * y0) ^ (x3 * y3);
        let mut z3 = (x0 * y3) ^ (x1 * y2) ^ (x2 * y1) ^ (x3 * y0);

        // Mask out the "holes" where carries may have landed.
        z0 &= 0x1111_1111_1111_1111_1111_1111_1111_1111;
        z1 &= 0x2222_2222_2222_2222_2222_2222_2222_2222;
        z2 &= 0x4444_4444_4444_4444_4444_4444_4444_4444;
        z3 &= 0x8888_8888_8888_8888_8888_8888_8888_8888;

        z0 | z1 | z2 | z3
    }

    /// Generated by ChatGPT o3-mini and reviewed by me.
    /// Reduces a 256-bit value (given as two u128 words, `high` and `low`)
    /// modulo the irreducible polynomial f(x) = x^128 + x^7 + x^2 + x + 1.
    ///
    /// That is, it computes:
    ///      low ^ reduce(high * (x^7 + x^2 + x + 1))
    /// since x^128 ≡ x^7 + x^2 + x + 1 (mod f(x)).
    #[inline]
    pub fn gf128_reduce(low: u128, high: u128) -> u128 {
        // Helper: performs a left shift on a 128-bit word and returns
        // a tuple (overflow, lower) where:
        //    x << shift = (overflow << 128) | lower.
        #[inline]
        fn shift_u128(x: u128, shift: u32) -> (u128, u128) {
            // For 0 < shift < 128.
            let overflow = x >> (128 - shift);
            let lower = x << shift;
            (overflow, lower)
        }

        // For the reduction, note that:
        //   x^128 ≡ x^7 + x^2 + x + 1 (mod f(x)).
        // So the contribution of the high word is:
        //   (high << 7) ^ (high << 2) ^ (high << 1) ^ high,
        // but each shift must be computed as a 256–bit quantity.
        let (ov7, lo7) = shift_u128(high, 7);
        let (ov2, lo2) = shift_u128(high, 2);
        let (ov1, lo1) = shift_u128(high, 1);
        let lo0 = high; // equivalent to shift 0

        // Combine the 128-bit parts of each term.
        let combined_low = lo7 ^ lo2 ^ lo1 ^ lo0;
        // Combine the overflow (upper) parts.
        let combined_overflow = ov7 ^ ov2 ^ ov1;

        // The bits in `combined_overflow` represent extra contributions from bits
        // at positions ≥ 128. Since they are at most 7 bits wide, we can reduce them
        // by multiplying with the reduction polynomial (i.e. shifting and XORing):
        let reduced_overflow = (combined_overflow << 7)
            ^ (combined_overflow << 2)
            ^ (combined_overflow << 1)
            ^ combined_overflow;

        // The full contribution from `high` is then given by the low part
        // combined with the reduced overflow.
        let poly_contrib = combined_low ^ reduced_overflow;

        // Finally, reduce the entire 256-bit value by XORing in the contribution.
        low ^ poly_contrib
    }

    #[cfg(test)]
    mod tests {
        use super::{clmul128, gf128_mul, gf128_reduce};

        #[test]
        fn test_gf128_mul_zero() {
            let a = 0x19831239123916248127031273012381;
            let b = 0;
            let exp = 0;
            let mul = gf128_mul(a, b);
            assert_eq!(exp, mul);
        }

        #[test]
        fn test_gf128_mul_one() {
            let a = 0x19831239123916248127031273012381;
            let b = 1;
            let exp = 0x19831239123916248127031273012381;
            let mul = gf128_mul(a, b);
            assert_eq!(exp, mul);
        }

        #[test]
        fn test_gf128_mul() {
            let a = 0x19831239123916248127031273012381;
            let b = 0xabcdef0123456789abcdef0123456789;
            let exp = 0x63a033d0ed643e85153c50f4268a7d9;
            let mul = gf128_mul(a, b);
            assert_eq!(exp, mul);
        }

        #[test]
        fn test_gf128_reduce_zero() {
            assert_eq!(gf128_reduce(0, 0), 0);
        }

        #[test]
        fn test_gf128_reduce_low_only() {
            assert_eq!(gf128_reduce(1, 0), 1);
            assert_eq!(gf128_reduce(0x87, 0), 0x87); // Reduction polynomial itself.
            assert_eq!(gf128_reduce(0xFFFFFFFFFFFFFFFF, 0), 0xFFFFFFFFFFFFFFFF);
        }

        #[test]
        fn test_gf128_reduce_high_only() {
            // high << 64
            assert_eq!(gf128_reduce(0, 1), 0x87);
            assert_eq!(gf128_reduce(0, 2), 0x87 << 1);
            assert_eq!(gf128_reduce(0, 3), (0x87 << 1) ^ 0x87);

            assert_eq!(gf128_reduce(0, 1 << 63), 0x87 << 63);
        }

        #[test]
        fn test_gf128_reduce_overflow() {
            let high = u128::MAX; // All bits set in high
            let low = u128::MAX; // All bits set in low.
            assert_eq!(gf128_reduce(low, high), 0xffffffffffffffffffffffffffffc071);
        }

        #[test]
        fn tests_gf128_reduce() {
            // test vectors computed using sage
            let low = 0x0123456789abcdef0123456789abcdef;
            let high = 0xabcdef0123456789abcdef0123456789;
            let exp = 0xb4b548f1c3c23f86b4b548f1c3c21572;
            let res = gf128_reduce(low, high);

            println!("res: {res:b}");
            println!("exp: {exp:b}");
            assert_eq!(exp, res);
        }

        #[test]
        fn test_clmul128() {
            let a = 0x19831239123916248127031273012381;
            let b = 0xabcdef0123456789abcdef0123456789;
            let (low, high) = clmul128(a, b);
            let exp_low = 0xa5de9b50e6db7b5147e92b99ee261809;
            let exp_high = 0xf1d6d37d58114afed2addfedd7c77f7;
            assert_eq!(exp_low, low);
            assert_eq!(exp_high, high);
        }
    }

    #[cfg(all(is_nightly, test))]
    mod benches {
        extern crate test;

        use criterion::black_box;
        use rand::{Rng, rng};
        use test::Bencher;

        #[bench]
        fn bench_gf128_mul(b: &mut Bencher) {
            let [low, high] = rng().random::<[u128; 2]>();
            b.iter(|| black_box(super::gf128_mul(black_box(low), black_box(high))));
        }

        #[bench]
        fn bench_gf128_reduce(b: &mut Bencher) {
            let [low, high] = rng().random::<[u128; 2]>();
            b.iter(|| black_box(super::gf128_reduce(black_box(low), black_box(high))));
        }
    }
}
462
/// Test that scalar implementation and clmul implementation produce the same
/// results
#[cfg(all(test, not(miri), target_feature = "pclmulqdq"))]
mod scalar_simd_tests {
    use std::mem::transmute;

    use rand::{Rng, rng};

    use super::{clmul, scalar};

    #[test]
    fn test_clmul128() {
        for _ in 0..1000 {
            let (a, b) = rng().random::<(u128, u128)>();
            unsafe {
                let clmul_res = clmul::clmul128(transmute(a), transmute(b));
                let scalar_res = scalar::clmul128(a, b);
                // Compare both halves of the 256-bit product; previously only
                // the low half was checked.
                assert_eq!(scalar_res.0, transmute(clmul_res.0));
                assert_eq!(scalar_res.1, transmute(clmul_res.1));
            }
        }
    }

    #[test]
    fn test_gf128_reduce() {
        for _ in 0..1000 {
            let (a, b) = rng().random::<(u128, u128)>();
            unsafe {
                let clmul_res = clmul::gf128_reduce(transmute(a), transmute(b));
                let scalar_res = scalar::gf128_reduce(a, b);
                assert_eq!(scalar_res, transmute(clmul_res));
            }
        }
    }

    #[test]
    fn test_gf128_mul() {
        for _ in 0..1000 {
            let (a, b) = rng().random::<(u128, u128)>();
            unsafe {
                let clmul_res = clmul::gf128_mul(transmute(a), transmute(b));
                let scalar_res = scalar::gf128_mul(a, b);
                assert_eq!(scalar_res, transmute(clmul_res));
            }
        }
    }
}
509
#[cfg(test)]
mod tests {
    use crate::Block;

    /// `gf_pow` must agree with repeated `gf_mul` for small exponents.
    #[test]
    fn test_gf_pow() {
        let base: Block = 24646523424323_u128.into();

        // Exponent 0 yields the multiplicative identity.
        assert_eq!(Block::ONE, base.gf_pow(0));
        // Exponent 1 is the base itself.
        assert_eq!(base, base.gf_pow(1));
        // Exponents 2 and 3 match explicit repeated multiplication.
        assert_eq!(base.gf_mul(&base), base.gf_pow(2));
        assert_eq!(base.gf_mul(&base.gf_mul(&base)), base.gf_pow(3));
    }
}