var v_2829: vec2<u32> = vec2(0x04BE4294u, 0xE07C2654u);
var v_3031: vec2<u32> = vec2(0x137E2179u, 0x5BE0CD19u);
- var v24: u32 = 0xADE682F9u;
- var v25: u32 = 0x510E527Fu;
- var v26: u32 = 0x2B3E6C1Fu;
- var v27: u32 = 0x9B05688Cu;
- var v28: u32 = 0x04BE4294u;
- var v29: u32 = 0xE07C2654u;
- var v30: u32 = 0x137E2179u;
- var v31: u32 = 0x5BE0CD19u;
-
/**
 * Twelve rounds of G mixing as part of BLAKE2b compression step.
 * Each sigma r index correlates with the reference implementation, but each
 * [NOTE(review): the rest of this sentence was lost when this file was
 * corrupted by an unapplied diff; the unterminated comment delimiter has
 * been restored below — recover the original wording from version control.]
 */
v_01 = v_01 + vec2(m0, m1) + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_01.x + m0 < v_01.x);
// d = rotr64(d ^ a, 32)
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
xor = v_2425 ^ v_01;
v_2425 = vec2(xor.y, xor.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_1617 = v_1617 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2425.x < v_1617.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 24)
xor = v_89 ^ v_1617;
v_01 = v_01 + vec2(m2, m3) + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_01.x + m2 < v_01.x);
// d = rotr64(d ^ a, 16)
- v_2425.x = v24;
- v_2425.y = v25;
+
+
xor = v_2425 ^ v_01;
v_2425 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_1617 = v_1617 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2425.x < v_1617.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 63)
xor = v_89 ^ v_1617;
v_23 = v_23 + vec2(m4, m5) + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_23.x + m4 < v_23.x);
// d = rotr64(d ^ a, 32)
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
xor = v_2627 ^ v_23;
v_2627 = vec2(xor.y, xor.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1819 = v_1819 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2627.x < v_1819.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 24)
xor = v_1011 ^ v_1819;
v_23 = v_23 + vec2(m6, m7) + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_23.x + m6 < v_23.x);
// d = rotr64(d ^ a, 16)
- v_2627.x = v26;
- v_2627.y = v27;
+
+
xor = v_2627 ^ v_23;
v_2627 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1819 = v_1819 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2627.x < v_1819.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 63)
xor = v_1011 ^ v_1819;
v_45 = v_45 + vec2(m8, m9) + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_45.x + m8 < v_45.x);
// d = rotr64(d ^ a, 32)
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
xor = v_2829 ^ v_45;
v_2829 = vec2(xor.y, xor.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_2021 = v_2021 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_2829.x < v_2021.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 24)
xor = v_1213 ^ v_2021;
// skip since adding 0u does nothing
// d = rotr64(d ^ a, 16)
- v_2829.x = v28;
- v_2829.y = v29;
+
+
xor = v_2829 ^ v_45;
v_2829 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_2021 = v_2021 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_2829.x < v_2021.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 63)
xor = v_1213 ^ v_2021;
// skip since adding 0u does nothing
// d = rotr64(d ^ a, 32)
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
xor = v_3031 ^ v_67;
v_3031 = vec2(xor.y, xor.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2223 = v_2223 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_3031.x < v_2223.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 24)
xor = v_1415 ^ v_2223;
// skip since adding 0u does nothing
// d = rotr64(d ^ a, 16)
- v_3031.x = v30;
- v_3031.y = v31;
+
+
xor = v_3031 ^ v_67;
v_3031 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2223 = v_2223 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_3031.x < v_2223.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 63)
xor = v_1415 ^ v_2223;
// skip since adding 0u does nothing
// d = rotr64(d ^ a, 32)
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
xor = v_3031 ^ v_01;
v_3031 = vec2(xor.y, xor.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2021 = v_2021 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_3031.x < v_2021.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 24)
xor = v_1011 ^ v_2021;
// skip since adding 0u does nothing
// d = rotr64(d ^ a, 16)
- v_3031.x = v30;
- v_3031.y = v31;
+
+
xor = v_3031 ^ v_01;
v_3031 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2021 = v_2021 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_3031.x < v_2021.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 63)
xor = v_1011 ^ v_2021;
// skip since adding 0u does nothing
// d = rotr64(d ^ a, 32)
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
xor = v_2425 ^ v_23;
v_2425 = vec2(xor.y, xor.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_2223 = v_2223 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_2425.x < v_2223.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 24)
xor = v_1213 ^ v_2223;
// skip since adding 0u does nothing
// d = rotr64(d ^ a, 16)
- v_2425.x = v24;
- v_2425.y = v25;
+
+
xor = v_2425 ^ v_23;
v_2425 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_2223 = v_2223 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_2425.x < v_2223.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 63)
xor = v_1213 ^ v_2223;
// skip since adding 0u does nothing
// d = rotr64(d ^ a, 32)
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
xor = v_2627 ^ v_45;
v_2627 = vec2(xor.y, xor.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1617 = v_1617 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2627.x < v_1617.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 24)
xor = v_1415 ^ v_1617;
// skip since adding 0u does nothing
// d = rotr64(d ^ a, 16)
- v_2627.x = v26;
- v_2627.y = v27;
+
+
xor = v_2627 ^ v_45;
v_2627 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1617 = v_1617 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2627.x < v_1617.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 63)
xor = v_1415 ^ v_1617;
// skip since adding 0u does nothing
// d = rotr64(d ^ a, 32)
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
xor = v_2829 ^ v_67;
v_2829 = vec2(xor.y, xor.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_1819 = v_1819 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2829.x < v_1819.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 24)
xor = v_89 ^ v_1819;
// skip since adding 0u does nothing
// d = rotr64(d ^ a, 16)
- v_2829.x = v28;
- v_2829.y = v29;
+
+
xor = v_2829 ^ v_67;
v_2829 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_1819 = v_1819 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2829.x < v_1819.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 63)
xor = v_89 ^ v_1819;
// skip since adding 0u does nothing
// d = rotr64(d ^ a, 32)
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
xor = v_2425 ^ v_01;
v_2425 = vec2(xor.y, xor.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_1617 = v_1617 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2425.x < v_1617.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2425.x = v24;
- v_2425.y = v25;
+
+
xor = v_2425 ^ v_01;
v_2425 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_1617 = v_1617 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2425.x < v_1617.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
xor = v_2627 ^ v_23;
v_2627 = vec2(xor.y, xor.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1819 = v_1819 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2627.x < v_1819.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2627.x = v26;
- v_2627.y = v27;
+
+
xor = v_2627 ^ v_23;
v_2627 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1819 = v_1819 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2627.x < v_1819.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
xor = v_2829 ^ v_45;
v_2829 = vec2(xor.y, xor.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_2021 = v_2021 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_2829.x < v_2021.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2829.x = v28;
- v_2829.y = v29;
+
+
xor = v_2829 ^ v_45;
v_2829 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_2021 = v_2021 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_2829.x < v_2021.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
xor = v_3031 ^ v_67;
v_3031 = vec2(xor.y, xor.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2223 = v_2223 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_3031.x < v_2223.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_3031.x = v30;
- v_3031.y = v31;
+
+
xor = v_3031 ^ v_67;
v_3031 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2223 = v_2223 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_3031.x < v_2223.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
xor = v_3031 ^ v_01;
v_3031 = vec2(xor.y, xor.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2021 = v_2021 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_3031.x < v_2021.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_3031.x = v30;
- v_3031.y = v31;
+
+
xor = v_3031 ^ v_01;
v_3031 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2021 = v_2021 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_3031.x < v_2021.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
xor = v_2425 ^ v_23;
v_2425 = vec2(xor.y, xor.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_2223 = v_2223 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_2425.x < v_2223.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2425.x = v24;
- v_2425.y = v25;
+
+
xor = v_2425 ^ v_23;
v_2425 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_2223 = v_2223 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_2425.x < v_2223.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
xor = v_2627 ^ v_45;
v_2627 = vec2(xor.y, xor.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1617 = v_1617 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2627.x < v_1617.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2627.x = v26;
- v_2627.y = v27;
+
+
xor = v_2627 ^ v_45;
v_2627 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1617 = v_1617 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2627.x < v_1617.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
xor = v_2829 ^ v_67;
v_2829 = vec2(xor.y, xor.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_1819 = v_1819 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2829.x < v_1819.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2829.x = v28;
- v_2829.y = v29;
+
+
xor = v_2829 ^ v_67;
v_2829 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_1819 = v_1819 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2829.x < v_1819.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 63)
// skip since adding 0u does nothing
// d = rotr64(d ^ a, 32)
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
xor = v_2425 ^ v_01;
v_2425 = vec2(xor.y, xor.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_1617 = v_1617 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2425.x < v_1617.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2425.x = v24;
- v_2425.y = v25;
+
+
xor = v_2425 ^ v_01;
v_2425 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_1617 = v_1617 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2425.x < v_1617.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
xor = v_2627 ^ v_23;
v_2627 = vec2(xor.y, xor.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1819 = v_1819 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2627.x < v_1819.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2627.x = v26;
- v_2627.y = v27;
+
+
xor = v_2627 ^ v_23;
v_2627 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1819 = v_1819 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2627.x < v_1819.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
xor = v_2829 ^ v_45;
v_2829 = vec2(xor.y, xor.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_2021 = v_2021 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_2829.x < v_2021.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2829.x = v28;
- v_2829.y = v29;
+
+
xor = v_2829 ^ v_45;
v_2829 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_2021 = v_2021 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_2829.x < v_2021.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
xor = v_3031 ^ v_67;
v_3031 = vec2(xor.y, xor.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2223 = v_2223 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_3031.x < v_2223.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_3031.x = v30;
- v_3031.y = v31;
+
+
xor = v_3031 ^ v_67;
v_3031 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2223 = v_2223 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_3031.x < v_2223.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
xor = v_3031 ^ v_01;
v_3031 = vec2(xor.y, xor.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2021 = v_2021 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_3031.x < v_2021.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_3031.x = v30;
- v_3031.y = v31;
+
+
xor = v_3031 ^ v_01;
v_3031 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2021 = v_2021 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_3031.x < v_2021.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
xor = v_2425 ^ v_23;
v_2425 = vec2(xor.y, xor.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_2223 = v_2223 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_2425.x < v_2223.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2425.x = v24;
- v_2425.y = v25;
+
+
xor = v_2425 ^ v_23;
v_2425 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_2223 = v_2223 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_2425.x < v_2223.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
xor = v_2627 ^ v_45;
v_2627 = vec2(xor.y, xor.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1617 = v_1617 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2627.x < v_1617.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2627.x = v26;
- v_2627.y = v27;
+
+
xor = v_2627 ^ v_45;
v_2627 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1617 = v_1617 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2627.x < v_1617.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
xor = v_2829 ^ v_67;
v_2829 = vec2(xor.y, xor.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_1819 = v_1819 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2829.x < v_1819.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2829.x = v28;
- v_2829.y = v29;
+
+
xor = v_2829 ^ v_67;
v_2829 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_1819 = v_1819 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2829.x < v_1819.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 63)
// skip since adding 0u does nothing
// d = rotr64(d ^ a, 32)
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
xor = v_2425 ^ v_01;
v_2425 = vec2(xor.y, xor.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_1617 = v_1617 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2425.x < v_1617.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2425.x = v24;
- v_2425.y = v25;
+
+
xor = v_2425 ^ v_01;
v_2425 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_1617 = v_1617 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2425.x < v_1617.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
xor = v_2627 ^ v_23;
v_2627 = vec2(xor.y, xor.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1819 = v_1819 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2627.x < v_1819.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2627.x = v26;
- v_2627.y = v27;
+
+
xor = v_2627 ^ v_23;
v_2627 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1819 = v_1819 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2627.x < v_1819.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
xor = v_2829 ^ v_45;
v_2829 = vec2(xor.y, xor.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_2021 = v_2021 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_2829.x < v_2021.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2829.x = v28;
- v_2829.y = v29;
+
+
xor = v_2829 ^ v_45;
v_2829 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_2021 = v_2021 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_2829.x < v_2021.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
xor = v_3031 ^ v_67;
v_3031 = vec2(xor.y, xor.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2223 = v_2223 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_3031.x < v_2223.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_3031.x = v30;
- v_3031.y = v31;
+
+
xor = v_3031 ^ v_67;
v_3031 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2223 = v_2223 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_3031.x < v_2223.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
xor = v_3031 ^ v_01;
v_3031 = vec2(xor.y, xor.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2021 = v_2021 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_3031.x < v_2021.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_3031.x = v30;
- v_3031.y = v31;
+
+
xor = v_3031 ^ v_01;
v_3031 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2021 = v_2021 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_3031.x < v_2021.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
xor = v_2425 ^ v_23;
v_2425 = vec2(xor.y, xor.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_2223 = v_2223 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_2425.x < v_2223.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2425.x = v24;
- v_2425.y = v25;
+
+
xor = v_2425 ^ v_23;
v_2425 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_2223 = v_2223 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_2425.x < v_2223.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
xor = v_2627 ^ v_45;
v_2627 = vec2(xor.y, xor.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1617 = v_1617 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2627.x < v_1617.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2627.x = v26;
- v_2627.y = v27;
+
+
xor = v_2627 ^ v_45;
v_2627 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1617 = v_1617 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2627.x < v_1617.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
xor = v_2829 ^ v_67;
v_2829 = vec2(xor.y, xor.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_1819 = v_1819 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2829.x < v_1819.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2829.x = v28;
- v_2829.y = v29;
+
+
xor = v_2829 ^ v_67;
v_2829 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_1819 = v_1819 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2829.x < v_1819.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 63)
// skip since adding 0u does nothing
// d = rotr64(d ^ a, 32)
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
xor = v_2425 ^ v_01;
v_2425 = vec2(xor.y, xor.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_1617 = v_1617 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2425.x < v_1617.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2425.x = v24;
- v_2425.y = v25;
+
+
xor = v_2425 ^ v_01;
v_2425 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_1617 = v_1617 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2425.x < v_1617.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
xor = v_2627 ^ v_23;
v_2627 = vec2(xor.y, xor.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1819 = v_1819 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2627.x < v_1819.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2627.x = v26;
- v_2627.y = v27;
+
+
xor = v_2627 ^ v_23;
v_2627 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1819 = v_1819 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2627.x < v_1819.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
xor = v_2829 ^ v_45;
v_2829 = vec2(xor.y, xor.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_2021 = v_2021 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_2829.x < v_2021.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2829.x = v28;
- v_2829.y = v29;
+
+
xor = v_2829 ^ v_45;
v_2829 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_2021 = v_2021 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_2829.x < v_2021.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
xor = v_3031 ^ v_67;
v_3031 = vec2(xor.y, xor.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2223 = v_2223 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_3031.x < v_2223.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_3031.x = v30;
- v_3031.y = v31;
+
+
xor = v_3031 ^ v_67;
v_3031 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2223 = v_2223 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_3031.x < v_2223.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
xor = v_3031 ^ v_01;
v_3031 = vec2(xor.y, xor.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2021 = v_2021 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_3031.x < v_2021.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_3031.x = v30;
- v_3031.y = v31;
+
+
xor = v_3031 ^ v_01;
v_3031 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2021 = v_2021 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_3031.x < v_2021.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
xor = v_2425 ^ v_23;
v_2425 = vec2(xor.y, xor.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_2223 = v_2223 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_2425.x < v_2223.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2425.x = v24;
- v_2425.y = v25;
+
+
xor = v_2425 ^ v_23;
v_2425 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_2223 = v_2223 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_2425.x < v_2223.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
xor = v_2627 ^ v_45;
v_2627 = vec2(xor.y, xor.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1617 = v_1617 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2627.x < v_1617.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2627.x = v26;
- v_2627.y = v27;
+
+
xor = v_2627 ^ v_45;
v_2627 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1617 = v_1617 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2627.x < v_1617.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
xor = v_2829 ^ v_67;
v_2829 = vec2(xor.y, xor.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_1819 = v_1819 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2829.x < v_1819.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2829.x = v28;
- v_2829.y = v29;
+
+
xor = v_2829 ^ v_67;
v_2829 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_1819 = v_1819 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2829.x < v_1819.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
xor = v_2425 ^ v_01;
v_2425 = vec2(xor.y, xor.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_1617 = v_1617 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2425.x < v_1617.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2425.x = v24;
- v_2425.y = v25;
+
+
xor = v_2425 ^ v_01;
v_2425 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_1617 = v_1617 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2425.x < v_1617.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
xor = v_2627 ^ v_23;
v_2627 = vec2(xor.y, xor.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1819 = v_1819 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2627.x < v_1819.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2627.x = v26;
- v_2627.y = v27;
+
+
xor = v_2627 ^ v_23;
v_2627 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1819 = v_1819 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2627.x < v_1819.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
xor = v_2829 ^ v_45;
v_2829 = vec2(xor.y, xor.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_2021 = v_2021 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_2829.x < v_2021.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2829.x = v28;
- v_2829.y = v29;
+
+
xor = v_2829 ^ v_45;
v_2829 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_2021 = v_2021 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_2829.x < v_2021.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
xor = v_3031 ^ v_67;
v_3031 = vec2(xor.y, xor.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2223 = v_2223 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_3031.x < v_2223.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_3031.x = v30;
- v_3031.y = v31;
+
+
xor = v_3031 ^ v_67;
v_3031 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2223 = v_2223 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_3031.x < v_2223.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
xor = v_3031 ^ v_01;
v_3031 = vec2(xor.y, xor.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2021 = v_2021 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_3031.x < v_2021.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_3031.x = v30;
- v_3031.y = v31;
+
+
xor = v_3031 ^ v_01;
v_3031 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2021 = v_2021 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_3031.x < v_2021.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
xor = v_2425 ^ v_23;
v_2425 = vec2(xor.y, xor.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_2223 = v_2223 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_2425.x < v_2223.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2425.x = v24;
- v_2425.y = v25;
+
+
xor = v_2425 ^ v_23;
v_2425 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_2223 = v_2223 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_2425.x < v_2223.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
xor = v_2627 ^ v_45;
v_2627 = vec2(xor.y, xor.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1617 = v_1617 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2627.x < v_1617.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2627.x = v26;
- v_2627.y = v27;
+
+
xor = v_2627 ^ v_45;
v_2627 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1617 = v_1617 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2627.x < v_1617.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
xor = v_2829 ^ v_67;
v_2829 = vec2(xor.y, xor.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_1819 = v_1819 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2829.x < v_1819.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2829.x = v28;
- v_2829.y = v29;
+
+
xor = v_2829 ^ v_67;
v_2829 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_1819 = v_1819 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2829.x < v_1819.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 63)
// skip since adding 0u does nothing
// d = rotr64(d ^ a, 32)
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
xor = v_2425 ^ v_01;
v_2425 = vec2(xor.y, xor.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_1617 = v_1617 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2425.x < v_1617.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2425.x = v24;
- v_2425.y = v25;
+
+
xor = v_2425 ^ v_01;
v_2425 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_1617 = v_1617 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2425.x < v_1617.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
xor = v_2627 ^ v_23;
v_2627 = vec2(xor.y, xor.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1819 = v_1819 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2627.x < v_1819.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2627.x = v26;
- v_2627.y = v27;
+
+
xor = v_2627 ^ v_23;
v_2627 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1819 = v_1819 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2627.x < v_1819.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
xor = v_2829 ^ v_45;
v_2829 = vec2(xor.y, xor.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_2021 = v_2021 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_2829.x < v_2021.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2829.x = v28;
- v_2829.y = v29;
+
+
xor = v_2829 ^ v_45;
v_2829 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_2021 = v_2021 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_2829.x < v_2021.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
xor = v_3031 ^ v_67;
v_3031 = vec2(xor.y, xor.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2223 = v_2223 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_3031.x < v_2223.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_3031.x = v30;
- v_3031.y = v31;
+
+
xor = v_3031 ^ v_67;
v_3031 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2223 = v_2223 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_3031.x < v_2223.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
xor = v_3031 ^ v_01;
v_3031 = vec2(xor.y, xor.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2021 = v_2021 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_3031.x < v_2021.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_3031.x = v30;
- v_3031.y = v31;
+
+
xor = v_3031 ^ v_01;
v_3031 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2021 = v_2021 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_3031.x < v_2021.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
xor = v_2425 ^ v_23;
v_2425 = vec2(xor.y, xor.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_2223 = v_2223 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_2425.x < v_2223.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2425.x = v24;
- v_2425.y = v25;
+
+
xor = v_2425 ^ v_23;
v_2425 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_2223 = v_2223 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_2425.x < v_2223.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
xor = v_2627 ^ v_45;
v_2627 = vec2(xor.y, xor.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1617 = v_1617 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2627.x < v_1617.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2627.x = v26;
- v_2627.y = v27;
+
+
xor = v_2627 ^ v_45;
v_2627 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1617 = v_1617 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2627.x < v_1617.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
xor = v_2829 ^ v_67;
v_2829 = vec2(xor.y, xor.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_1819 = v_1819 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2829.x < v_1819.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2829.x = v28;
- v_2829.y = v29;
+
+
xor = v_2829 ^ v_67;
v_2829 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_1819 = v_1819 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2829.x < v_1819.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 63)
// skip since adding 0u does nothing
// d = rotr64(d ^ a, 32)
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
xor = v_2425 ^ v_01;
v_2425 = vec2(xor.y, xor.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_1617 = v_1617 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2425.x < v_1617.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2425.x = v24;
- v_2425.y = v25;
+
+
xor = v_2425 ^ v_01;
v_2425 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_1617 = v_1617 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2425.x < v_1617.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
xor = v_2627 ^ v_23;
v_2627 = vec2(xor.y, xor.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1819 = v_1819 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2627.x < v_1819.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2627.x = v26;
- v_2627.y = v27;
+
+
xor = v_2627 ^ v_23;
v_2627 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1819 = v_1819 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2627.x < v_1819.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
xor = v_2829 ^ v_45;
v_2829 = vec2(xor.y, xor.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_2021 = v_2021 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_2829.x < v_2021.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2829.x = v28;
- v_2829.y = v29;
+
+
xor = v_2829 ^ v_45;
v_2829 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_2021 = v_2021 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_2829.x < v_2021.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
xor = v_3031 ^ v_67;
v_3031 = vec2(xor.y, xor.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2223 = v_2223 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_3031.x < v_2223.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_3031.x = v30;
- v_3031.y = v31;
+
+
xor = v_3031 ^ v_67;
v_3031 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2223 = v_2223 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_3031.x < v_2223.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
xor = v_3031 ^ v_01;
v_3031 = vec2(xor.y, xor.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2021 = v_2021 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_3031.x < v_2021.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_3031.x = v30;
- v_3031.y = v31;
+
+
xor = v_3031 ^ v_01;
v_3031 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2021 = v_2021 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_3031.x < v_2021.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
xor = v_2425 ^ v_23;
v_2425 = vec2(xor.y, xor.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_2223 = v_2223 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_2425.x < v_2223.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2425.x = v24;
- v_2425.y = v25;
+
+
xor = v_2425 ^ v_23;
v_2425 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_2223 = v_2223 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_2425.x < v_2223.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
xor = v_2627 ^ v_45;
v_2627 = vec2(xor.y, xor.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1617 = v_1617 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2627.x < v_1617.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2627.x = v26;
- v_2627.y = v27;
+
+
xor = v_2627 ^ v_45;
v_2627 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1617 = v_1617 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2627.x < v_1617.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
xor = v_2829 ^ v_67;
v_2829 = vec2(xor.y, xor.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_1819 = v_1819 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2829.x < v_1819.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2829.x = v28;
- v_2829.y = v29;
+
+
xor = v_2829 ^ v_67;
v_2829 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_1819 = v_1819 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2829.x < v_1819.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 63)
// skip since adding 0u does nothing
// d = rotr64(d ^ a, 32)
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
xor = v_2425 ^ v_01;
v_2425 = vec2(xor.y, xor.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_1617 = v_1617 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2425.x < v_1617.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2425.x = v24;
- v_2425.y = v25;
+
+
xor = v_2425 ^ v_01;
v_2425 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_1617 = v_1617 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2425.x < v_1617.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
xor = v_2627 ^ v_23;
v_2627 = vec2(xor.y, xor.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1819 = v_1819 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2627.x < v_1819.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2627.x = v26;
- v_2627.y = v27;
+
+
xor = v_2627 ^ v_23;
v_2627 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1819 = v_1819 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2627.x < v_1819.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
xor = v_2829 ^ v_45;
v_2829 = vec2(xor.y, xor.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_2021 = v_2021 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_2829.x < v_2021.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2829.x = v28;
- v_2829.y = v29;
+
+
xor = v_2829 ^ v_45;
v_2829 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_2021 = v_2021 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_2829.x < v_2021.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
xor = v_3031 ^ v_67;
v_3031 = vec2(xor.y, xor.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2223 = v_2223 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_3031.x < v_2223.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_3031.x = v30;
- v_3031.y = v31;
+
+
xor = v_3031 ^ v_67;
v_3031 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2223 = v_2223 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_3031.x < v_2223.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
xor = v_3031 ^ v_01;
v_3031 = vec2(xor.y, xor.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2021 = v_2021 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_3031.x < v_2021.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_3031.x = v30;
- v_3031.y = v31;
+
+
xor = v_3031 ^ v_01;
v_3031 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2021 = v_2021 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_3031.x < v_2021.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
xor = v_2425 ^ v_23;
v_2425 = vec2(xor.y, xor.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_2223 = v_2223 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_2425.x < v_2223.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2425.x = v24;
- v_2425.y = v25;
+
+
xor = v_2425 ^ v_23;
v_2425 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_2223 = v_2223 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_2425.x < v_2223.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
xor = v_2627 ^ v_45;
v_2627 = vec2(xor.y, xor.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1617 = v_1617 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2627.x < v_1617.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2627.x = v26;
- v_2627.y = v27;
+
+
xor = v_2627 ^ v_45;
v_2627 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1617 = v_1617 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2627.x < v_1617.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
xor = v_2829 ^ v_67;
v_2829 = vec2(xor.y, xor.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_1819 = v_1819 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2829.x < v_1819.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2829.x = v28;
- v_2829.y = v29;
+
+
xor = v_2829 ^ v_67;
v_2829 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_1819 = v_1819 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2829.x < v_1819.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 63)
// skip since adding 0u does nothing
// d = rotr64(d ^ a, 32)
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
xor = v_2425 ^ v_01;
v_2425 = vec2(xor.y, xor.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_1617 = v_1617 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2425.x < v_1617.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2425.x = v24;
- v_2425.y = v25;
+
+
xor = v_2425 ^ v_01;
v_2425 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_1617 = v_1617 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2425.x < v_1617.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
xor = v_2627 ^ v_23;
v_2627 = vec2(xor.y, xor.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1819 = v_1819 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2627.x < v_1819.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2627.x = v26;
- v_2627.y = v27;
+
+
xor = v_2627 ^ v_23;
v_2627 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1819 = v_1819 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2627.x < v_1819.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
xor = v_2829 ^ v_45;
v_2829 = vec2(xor.y, xor.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_2021 = v_2021 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_2829.x < v_2021.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2829.x = v28;
- v_2829.y = v29;
+
+
xor = v_2829 ^ v_45;
v_2829 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_2021 = v_2021 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_2829.x < v_2021.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
xor = v_3031 ^ v_67;
v_3031 = vec2(xor.y, xor.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2223 = v_2223 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_3031.x < v_2223.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_3031.x = v30;
- v_3031.y = v31;
+
+
xor = v_3031 ^ v_67;
v_3031 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2223 = v_2223 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_3031.x < v_2223.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
xor = v_3031 ^ v_01;
v_3031 = vec2(xor.y, xor.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2021 = v_2021 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_3031.x < v_2021.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_3031.x = v30;
- v_3031.y = v31;
+
+
xor = v_3031 ^ v_01;
v_3031 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2021 = v_2021 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_3031.x < v_2021.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
xor = v_2425 ^ v_23;
v_2425 = vec2(xor.y, xor.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_2223 = v_2223 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_2425.x < v_2223.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2425.x = v24;
- v_2425.y = v25;
+
+
xor = v_2425 ^ v_23;
v_2425 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_2223 = v_2223 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_2425.x < v_2223.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
xor = v_2627 ^ v_45;
v_2627 = vec2(xor.y, xor.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1617 = v_1617 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2627.x < v_1617.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2627.x = v26;
- v_2627.y = v27;
+
+
xor = v_2627 ^ v_45;
v_2627 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1617 = v_1617 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2627.x < v_1617.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
xor = v_2829 ^ v_67;
v_2829 = vec2(xor.y, xor.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_1819 = v_1819 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2829.x < v_1819.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2829.x = v28;
- v_2829.y = v29;
+
+
xor = v_2829 ^ v_67;
v_2829 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_1819 = v_1819 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2829.x < v_1819.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
xor = v_2425 ^ v_01;
v_2425 = vec2(xor.y, xor.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_1617 = v_1617 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2425.x < v_1617.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2425.x = v24;
- v_2425.y = v25;
+
+
xor = v_2425 ^ v_01;
v_2425 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_1617 = v_1617 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2425.x < v_1617.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
xor = v_2627 ^ v_23;
v_2627 = vec2(xor.y, xor.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1819 = v_1819 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2627.x < v_1819.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2627.x = v26;
- v_2627.y = v27;
+
+
xor = v_2627 ^ v_23;
v_2627 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1819 = v_1819 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2627.x < v_1819.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
xor = v_2829 ^ v_45;
v_2829 = vec2(xor.y, xor.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_2021 = v_2021 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_2829.x < v_2021.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2829.x = v28;
- v_2829.y = v29;
+
+
xor = v_2829 ^ v_45;
v_2829 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_2021 = v_2021 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_2829.x < v_2021.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
xor = v_3031 ^ v_67;
v_3031 = vec2(xor.y, xor.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2223 = v_2223 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_3031.x < v_2223.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_3031.x = v30;
- v_3031.y = v31;
+
+
xor = v_3031 ^ v_67;
v_3031 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2223 = v_2223 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_3031.x < v_2223.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
xor = v_3031 ^ v_01;
v_3031 = vec2(xor.y, xor.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2021 = v_2021 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_3031.x < v_2021.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_3031.x = v30;
- v_3031.y = v31;
+
+
xor = v_3031 ^ v_01;
v_3031 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2021 = v_2021 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_3031.x < v_2021.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
xor = v_2425 ^ v_23;
v_2425 = vec2(xor.y, xor.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_2223 = v_2223 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_2425.x < v_2223.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2425.x = v24;
- v_2425.y = v25;
+
+
xor = v_2425 ^ v_23;
v_2425 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_2223 = v_2223 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_2425.x < v_2223.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
xor = v_2627 ^ v_45;
v_2627 = vec2(xor.y, xor.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1617 = v_1617 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2627.x < v_1617.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2627.x = v26;
- v_2627.y = v27;
+
+
xor = v_2627 ^ v_45;
v_2627 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1617 = v_1617 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2627.x < v_1617.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
xor = v_2829 ^ v_67;
v_2829 = vec2(xor.y, xor.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_1819 = v_1819 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2829.x < v_1819.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2829.x = v28;
- v_2829.y = v29;
+
+
xor = v_2829 ^ v_67;
v_2829 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_1819 = v_1819 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2829.x < v_1819.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 63)
// skip since adding 0u does nothing
// d = rotr64(d ^ a, 32)
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
xor = v_2425 ^ v_01;
v_2425 = vec2(xor.y, xor.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_1617 = v_1617 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2425.x < v_1617.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2425.x = v24;
- v_2425.y = v25;
+
+
xor = v_2425 ^ v_01;
v_2425 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_1617 = v_1617 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2425.x < v_1617.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
xor = v_2627 ^ v_23;
v_2627 = vec2(xor.y, xor.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1819 = v_1819 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2627.x < v_1819.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2627.x = v26;
- v_2627.y = v27;
+
+
xor = v_2627 ^ v_23;
v_2627 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1819 = v_1819 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2627.x < v_1819.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
xor = v_2829 ^ v_45;
v_2829 = vec2(xor.y, xor.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_2021 = v_2021 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_2829.x < v_2021.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2829.x = v28;
- v_2829.y = v29;
+
+
xor = v_2829 ^ v_45;
v_2829 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_2021 = v_2021 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_2829.x < v_2021.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
xor = v_3031 ^ v_67;
v_3031 = vec2(xor.y, xor.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2223 = v_2223 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_3031.x < v_2223.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_3031.x = v30;
- v_3031.y = v31;
+
+
xor = v_3031 ^ v_67;
v_3031 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2223 = v_2223 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_3031.x < v_2223.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
xor = v_3031 ^ v_01;
v_3031 = vec2(xor.y, xor.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2021 = v_2021 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_3031.x < v_2021.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_3031.x = v30;
- v_3031.y = v31;
+
+
xor = v_3031 ^ v_01;
v_3031 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// c = c + d
- v_3031.x = v30;
- v_3031.y = v31;
+
+
v_2021 = v_2021 + v_3031 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2021.x + v_3031.x < v_2021.x);
- v30 = v_3031.x;
- v31 = v_3031.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
xor = v_2425 ^ v_23;
v_2425 = vec2(xor.y, xor.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_2223 = v_2223 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_2425.x < v_2223.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2425.x = v24;
- v_2425.y = v25;
+
+
xor = v_2425 ^ v_23;
v_2425 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// c = c + d
- v_2425.x = v24;
- v_2425.y = v25;
+
+
v_2223 = v_2223 + v_2425 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_2223.x + v_2425.x < v_2223.x);
- v24 = v_2425.x;
- v25 = v_2425.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
xor = v_2627 ^ v_45;
v_2627 = vec2(xor.y, xor.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1617 = v_1617 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2627.x < v_1617.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2627.x = v26;
- v_2627.y = v27;
+
+
xor = v_2627 ^ v_45;
v_2627 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// c = c + d
- v_2627.x = v26;
- v_2627.y = v27;
+
+
v_1617 = v_1617 + v_2627 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1617.x + v_2627.x < v_1617.x);
- v26 = v_2627.x;
- v27 = v_2627.y;
+
+
// b = rotr64(b ^ c, 63)
// d = rotr64(d ^ a, 32)
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
xor = v_2829 ^ v_67;
v_2829 = vec2(xor.y, xor.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_1819 = v_1819 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2829.x < v_1819.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 24)
// d = rotr64(d ^ a, 16)
- v_2829.x = v28;
- v_2829.y = v29;
+
+
xor = v_2829 ^ v_67;
v_2829 = vec2((xor.x >> 16u) | (xor.y << 16u), (xor.y >> 16u) | (xor.x << 16u));
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// c = c + d
- v_2829.x = v28;
- v_2829.y = v29;
+
+
v_1819 = v_1819 + v_2829 + select(vec2<u32>(0u), vec2<u32>(0u, 1u), v_1819.x + v_2829.x < v_1819.x);
- v28 = v_2829.x;
- v29 = v_2829.y;
+
+
// b = rotr64(b ^ c, 63)