aboutsummaryrefslogtreecommitdiff
path: root/src/lib.rs
diff options
context:
space:
mode:
Diffstat (limited to 'src/lib.rs')
-rw-r--r--src/lib.rs71
1 file changed, 38 insertions, 33 deletions
diff --git a/src/lib.rs b/src/lib.rs
index 7f2e5c3..6e411a0 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,3 +1,4 @@
+#![feature(portable_simd)]
//! AEZ *\[sic!\]* v5 encryption implemented in Rust.
//!
//! # ☣️ Cryptographic hazmat ☣️
@@ -297,7 +298,8 @@ fn append_auth(data_len: usize, buffer: &mut [u8]) {
fn encrypt(aez: &Aez, nonce: &[u8], ad: &[&[u8]], tau: u32, buffer: &mut [u8]) {
// We treat tau as bytes, but according to the spec, tau is actually in bits.
let tau_block = Block::from_int(tau as u128 * 8);
- let mut tweaks = vec![&tau_block.0, nonce];
+ let tau_bytes = tau_block.bytes();
+ let mut tweaks = vec![&tau_bytes, nonce];
tweaks.extend(ad);
assert!(buffer.len() >= tau as usize);
if buffer.len() == tau as usize {
@@ -321,7 +323,8 @@ fn decrypt<'a>(
}
let tau_block = Block::from_int(tau * 8);
- let mut tweaks = vec![&tau_block.0, nonce];
+ let tau_bytes = tau_block.bytes();
+ let mut tweaks = vec![&tau_bytes, nonce];
tweaks.extend(ad);
if ciphertext.len() == tau as usize {
@@ -387,12 +390,12 @@ fn encipher_aez_tiny(aez: &Aez, tweaks: Tweak, message: &mut [u8]) {
(left, right) = (right, right_);
}
if n % 8 == 0 {
- message[..n / 8].copy_from_slice(&right.0[..n / 8]);
- message[n / 8..].copy_from_slice(&left.0[..n / 8]);
+ message[..n / 8].copy_from_slice(&right.bytes()[..n / 8]);
+ message[n / 8..].copy_from_slice(&left.bytes()[..n / 8]);
} else {
let mut index = n / 8;
- message[..index + 1].copy_from_slice(&right.0[..index + 1]);
- for byte in &left.0[..n / 8 + 1] {
+ message[..index + 1].copy_from_slice(&right.bytes()[..index + 1]);
+ for byte in &left.bytes()[..n / 8 + 1] {
message[index] |= byte >> 4;
if index < message.len() - 1 {
message[index + 1] = (byte & 0x0f) << 4;
@@ -402,8 +405,8 @@ fn encipher_aez_tiny(aez: &Aez, tweaks: Tweak, message: &mut [u8]) {
}
if mu < 128 {
let mut c = Block::from_slice(&message);
- c = c ^ (e(0, 3, aez, delta ^ (c | Block::ONE)) & Block::ONE);
- message.copy_from_slice(&c.0[..mu / 8]);
+ c = c ^ (e(0, 3, aez, delta ^ (c | Block::one())) & Block::one());
+ message.copy_from_slice(&c.bytes()[..mu / 8]);
}
}
@@ -420,7 +423,7 @@ fn encipher_aez_core(aez: &Aez, tweaks: Tweak, message: &mut [u8]) {
);
let len_v = d.saturating_sub(128);
- let mut x = Block::NULL;
+ let mut x = Block::null();
let mut e1_eval = E::new(1, 0, aez);
let e0_eval = E::new(0, 0, aez);
@@ -431,8 +434,8 @@ fn encipher_aez_core(aez: &Aez, tweaks: Tweak, message: &mut [u8]) {
let wi = mi ^ e1_eval.eval(mi_);
let xi = mi_ ^ e0_eval.eval(wi);
- *raw_mi = wi.0;
- *raw_mi_ = xi.0;
+ wi.write_to(raw_mi);
+ xi.write_to(raw_mi_);
x = x ^ xi;
}
@@ -452,7 +455,7 @@ fn encipher_aez_core(aez: &Aez, tweaks: Tweak, message: &mut [u8]) {
let s_y = m_y ^ e(-1, 1, aez, s_x);
let s = s_x ^ s_y;
- let mut y = Block::NULL;
+ let mut y = Block::null();
let mut e2_eval = E::new(2, 0, aez);
let mut e1_eval = E::new(1, 0, aez);
let e0_eval = E::new(0, 0, aez);
@@ -467,8 +470,8 @@ fn encipher_aez_core(aez: &Aez, tweaks: Tweak, message: &mut [u8]) {
let ci_ = yi ^ e0_eval.eval(zi);
let ci = zi ^ e1_eval.eval(ci_);
- *raw_wi = ci.0;
- *raw_xi = ci_.0;
+ ci.write_to(raw_wi);
+ ci_.write_to(raw_xi);
y = y ^ yi;
}
@@ -520,8 +523,8 @@ fn decipher_aez_tiny(aez: &Aez, tweaks: Tweak, buffer: &mut [u8]) {
if mu < 128 {
let mut c = Block::from_slice(buffer);
- c = c ^ (e(0, 3, aez, delta ^ (c | Block::ONE)) & Block::ONE);
- buffer.copy_from_slice(&c.0[..mu / 8]);
+ c = c ^ (e(0, 3, aez, delta ^ (c | Block::one())) & Block::one());
+ buffer.copy_from_slice(&c.bytes()[..mu / 8]);
}
let (mut left, mut right);
@@ -540,12 +543,12 @@ fn decipher_aez_tiny(aez: &Aez, tweaks: Tweak, buffer: &mut [u8]) {
}
if n % 8 == 0 {
- buffer[..n / 8].copy_from_slice(&right.0[..n / 8]);
- buffer[n / 8..].copy_from_slice(&left.0[..n / 8]);
+ buffer[..n / 8].copy_from_slice(&right.bytes()[..n / 8]);
+ buffer[n / 8..].copy_from_slice(&left.bytes()[..n / 8]);
} else {
let mut index = n / 8;
- buffer[..index + 1].copy_from_slice(&right.0[..index + 1]);
- for byte in &left.0[..n / 8 + 1] {
+ buffer[..index + 1].copy_from_slice(&right.bytes()[..index + 1]);
+ for byte in &left.bytes()[..n / 8 + 1] {
buffer[index] |= byte >> 4;
if index < buffer.len() - 1 {
buffer[index + 1] = (byte & 0x0f) << 4;
@@ -568,7 +571,7 @@ fn decipher_aez_core(aez: &Aez, tweaks: Tweak, buffer: &mut [u8]) {
);
let len_v = d.saturating_sub(128);
- let mut y = Block::NULL;
+ let mut y = Block::null();
let mut e1_eval = E::new(1, 0, aez);
let e0_eval = E::new(0, 0, aez);
for (raw_ci, raw_ci_) in blocks.pairs_mut() {
@@ -578,8 +581,8 @@ fn decipher_aez_core(aez: &Aez, tweaks: Tweak, buffer: &mut [u8]) {
let wi = ci ^ e1_eval.eval(ci_);
let yi = ci_ ^ e0_eval.eval(wi);
- *raw_ci = wi.0;
- *raw_ci_ = yi.0;
+ *raw_ci = wi.bytes();
+ *raw_ci_ = yi.bytes();
y = y ^ yi;
}
@@ -599,7 +602,7 @@ fn decipher_aez_core(aez: &Aez, tweaks: Tweak, buffer: &mut [u8]) {
let s_y = c_y ^ e(-1, 2, aez, s_x);
let s = s_x ^ s_y;
- let mut x = Block::NULL;
+ let mut x = Block::null();
let mut e2_eval = E::new(2, 0, aez);
let mut e1_eval = E::new(1, 0, aez);
let e0_eval = E::new(0, 0, aez);
@@ -614,8 +617,8 @@ fn decipher_aez_core(aez: &Aez, tweaks: Tweak, buffer: &mut [u8]) {
let mi_ = xi ^ e0_eval.eval(zi);
let mi = zi ^ e1_eval.eval(mi_);
- *raw_wi = mi.0;
- *raw_yi = mi_.0;
+ *raw_wi = mi.bytes();
+ *raw_yi = mi_.bytes();
x = x ^ xi;
}
@@ -659,7 +662,7 @@ fn pad_to_blocks(value: &[u8]) -> Vec<Block> {
}
fn aez_hash(aez: &Aez, tweaks: Tweak) -> Block {
- let mut hash = Block::NULL;
+ let mut hash = Block::null();
for (i, tweak) in tweaks.iter().enumerate() {
// Adjust for zero-based vs one-based indexing
let j = i + 2 + 1;
@@ -667,7 +670,7 @@ fn aez_hash(aez: &Aez, tweaks: Tweak) -> Block {
// set l = 1 and then xor E_K^{j, 0}(10*). We could modify the last if branch to cover this
// as well, but then we need to fiddle with getting an empty chunk from an empty iterator.
if tweak.is_empty() {
- hash = hash ^ e(j.try_into().unwrap(), 0, aez, Block::ONE);
+ hash = hash ^ e(j.try_into().unwrap(), 0, aez, Block::one());
} else if tweak.len() % 16 == 0 {
for (l, chunk) in tweak.chunks(16).enumerate() {
hash = hash
@@ -704,7 +707,7 @@ fn aez_prf(aez: &Aez, tweaks: Tweak, buffer: &mut [u8]) {
let delta = aez_hash(aez, tweaks);
for chunk in buffer.chunks_mut(16) {
let block = e(-1, 3, aez, delta ^ Block::from_int(index));
- for (a, b) in chunk.iter_mut().zip(block.0.iter()) {
+ for (a, b) in chunk.iter_mut().zip(block.bytes().iter()) {
*a ^= b;
}
index += 1;
@@ -749,7 +752,9 @@ impl<'a> E<'a> {
// We need to advance ki_p_i if exponent = old_exponent + 1
// This happens exactly when the old exponent was just a multiple of 8, because the
// next exponent is then not a multiple anymore and will be rounded *up*.
- if self.i % 8 == 0 { self.ki_p_i = self.ki_p_i * 2 };
+ if self.i % 8 == 0 {
+ self.ki_p_i = self.ki_p_i * 2
+ };
self.i += 1;
}
}
@@ -796,7 +801,7 @@ mod test {
let a = hex::decode(a).unwrap();
let a = Block::from_slice(&a);
let b = hex::decode(b).unwrap();
- assert_eq!(&e(*j, *i, &aez, a).0, b.as_slice(), "{name}");
+ assert_eq!(&e(*j, *i, &aez, a).bytes(), b.as_slice(), "{name}");
}
}
@@ -808,13 +813,13 @@ mod test {
let aez = Aez::new(k.as_slice());
let v = hex::decode(v).unwrap();
- let mut tweaks = vec![Vec::from(Block::from_int(*tau).0)];
+ let mut tweaks = vec![Vec::from(Block::from_int(*tau).bytes())];
for t in *tw {
tweaks.push(hex::decode(t).unwrap());
}
let tweaks = tweaks.iter().map(Vec::as_slice).collect::<Vec<_>>();
- assert_eq!(&aez_hash(&aez, &tweaks).0, v.as_slice(), "{name}");
+ assert_eq!(&aez_hash(&aez, &tweaks).bytes(), v.as_slice(), "{name}");
}
}