3939#include "hash-ops.h"
4040#include "oaes_lib.h"
4141#include "variant2_int_sqrt.h"
42+ #include "variant4_random_math.h"
4243
4344#define MEMORY (1 << 21) // 2MB scratchpad
4445#define ITER (1 << 20)
@@ -172,7 +173,7 @@ extern void aesb_pseudo_round(const uint8_t *in, uint8_t *out, const uint8_t *ex
172173 const uint64_t sqrt_input = SWAP64LE(((uint64_t*)(ptr))[0]) + division_result
173174
174175#define VARIANT2_INTEGER_MATH_SSE2 (b , ptr ) \
175- do if (variant >= 2 ) \
176+ do if (( variant == 2) || (variant == 3) ) \
176177 { \
177178 VARIANT2_INTEGER_MATH_DIVISION_STEP(b, ptr); \
178179 VARIANT2_INTEGER_MATH_SQRT_STEP_SSE2(); \
@@ -182,7 +183,7 @@ extern void aesb_pseudo_round(const uint8_t *in, uint8_t *out, const uint8_t *ex
182183#if defined DBL_MANT_DIG && (DBL_MANT_DIG >= 50 )
183184 // double precision floating point type has enough bits of precision on current platform
184185 #define VARIANT2_PORTABLE_INTEGER_MATH (b , ptr ) \
185- do if (variant >= 2 ) \
186+ do if (( variant == 2) || (variant == 3) ) \
186187 { \
187188 VARIANT2_INTEGER_MATH_DIVISION_STEP(b, ptr); \
188189 VARIANT2_INTEGER_MATH_SQRT_STEP_FP64(); \
@@ -192,7 +193,7 @@ extern void aesb_pseudo_round(const uint8_t *in, uint8_t *out, const uint8_t *ex
192193 // double precision floating point type is not good enough on current platform
193194 // fall back to the reference code (integer only)
194195 #define VARIANT2_PORTABLE_INTEGER_MATH (b , ptr ) \
195- do if (variant >= 2 ) \
196+ do if (( variant == 2) || (variant == 3) ) \
196197 { \
197198 VARIANT2_INTEGER_MATH_DIVISION_STEP(b, ptr); \
198199 VARIANT2_INTEGER_MATH_SQRT_STEP_REF(); \
@@ -214,6 +215,47 @@ extern void aesb_pseudo_round(const uint8_t *in, uint8_t *out, const uint8_t *ex
214215 lo ^= SWAP64LE(*(U64(hp_state + (j ^ 0x20)) + 1)); \
215216 } while (0)
216217
/* Load one v4_reg from possibly-unaligned memory and convert it from
 * little-endian to host order. Uses memcpy (not a pointer cast) so the
 * read is alignment- and strict-aliasing-safe; the sizeof comparison is
 * a compile-time constant, so the dead branch is eliminated. */
#define V4_REG_LOAD(reg_ptr, mem_ptr) \
  do { \
    memcpy((reg_ptr), (mem_ptr), sizeof(v4_reg)); \
    if (sizeof(v4_reg) != sizeof(uint32_t)) \
      *(reg_ptr) = SWAP64LE(*(reg_ptr)); \
    else \
      *(reg_ptr) = SWAP32LE(*(reg_ptr)); \
  } while (0)
226+
/* Variant-4 (CryptoNight-R) setup. The declarations of the register file
 * `r` and the random program `code` are intentionally placed BEFORE the
 * do-while so they live in the enclosing function scope and remain
 * visible to later VARIANT4_RANDOM_MATH invocations. For variant >= 4,
 * r[0..3] are seeded from the Keccak state and the height-dependent
 * instruction sequence is generated; r[4..8] are filled per-iteration. */
#define VARIANT4_RANDOM_MATH_INIT() \
  v4_reg r[9]; \
  struct V4_Instruction code[NUM_INSTRUCTIONS_MAX + 1]; \
  do if (variant >= 4) \
  { \
    for (int reg_idx = 0; reg_idx < 4; ++reg_idx) \
      V4_REG_LOAD(&r[reg_idx], (uint8_t*)(state.hs.w + 12) + sizeof(v4_reg) * reg_idx); \
    v4_random_math_init(code, height); \
  } while (0)
236+
/* Variant-4 (CryptoNight-R) per-iteration step: XOR the 64-bit word at
 * `b` with a combination of registers r[0..3] (packed as two 32-bit sums
 * when v4_reg is 32-bit, XORed as two 64-bit sums otherwise), refresh
 * r[4..8] from the current cipher blocks, then run the random program
 * over the register file. memcpy is used for the unaligned 64-bit
 * load/store of *b. `code` is the program declared by
 * VARIANT4_RANDOM_MATH_INIT in the enclosing scope. */
#define VARIANT4_RANDOM_MATH(a, b, r, _b, _b1) \
  do if (variant >= 4) \
  { \
    uint64_t tmp; \
    memcpy(&tmp, b, sizeof(uint64_t)); \
    \
    if (sizeof(v4_reg) == sizeof(uint32_t)) \
      tmp ^= SWAP64LE((r[0] + r[1]) | ((uint64_t)(r[2] + r[3]) << 32)); \
    else \
      tmp ^= SWAP64LE((r[0] + r[1]) ^ (r[2] + r[3])); \
    \
    memcpy(b, &tmp, sizeof(uint64_t)); \
    \
    V4_REG_LOAD(&r[4], a); \
    V4_REG_LOAD(&r[5], (uint64_t*)(a) + 1); \
    V4_REG_LOAD(&r[6], _b); \
    V4_REG_LOAD(&r[7], _b1); \
    V4_REG_LOAD(&r[8], (uint64_t*)(_b1) + 1); \
    \
    v4_random_math(code, r); \
  } while (0)
258+
217259
218260#if !defined NO_AES && (defined(__x86_64__ ) || (defined(_MSC_VER ) && defined(_WIN64 )))
219261// Optimised code below, uses x86-specific intrinsics, SSE2, AES-NI
@@ -298,6 +340,7 @@ extern void aesb_pseudo_round(const uint8_t *in, uint8_t *out, const uint8_t *ex
298340 p = U64(&hp_state[j]); \
299341 b[0] = p[0]; b[1] = p[1]; \
300342 VARIANT2_INTEGER_MATH_SSE2(b, c); \
343+ VARIANT4_RANDOM_MATH(a, b, r, &_b, &_b1); \
301344 __mul(); \
302345 VARIANT2_2(); \
303346 VARIANT2_SHUFFLE_ADD_SSE2(hp_state, j); \
@@ -694,7 +737,7 @@ void slow_hash_free_state(void)
694737 * @param length the length in bytes of the data
695738 * @param hash a pointer to a buffer in which the final 256 bit hash will be stored
696739 */
697- void cn_slow_hash (const void * data , size_t length , char * hash , int variant , int prehashed )
740+ void cn_slow_hash (const void * data , size_t length , char * hash , int variant , int prehashed , uint64_t height )
698741{
699742 RDATA_ALIGN16 uint8_t expandedKey [240 ]; /* These buffers are aligned to use later with SSE functions */
700743
@@ -730,6 +773,7 @@ void cn_slow_hash(const void *data, size_t length, char *hash, int variant, int
730773
731774 VARIANT1_INIT64 ();
732775 VARIANT2_INIT64 ();
776+ VARIANT4_RANDOM_MATH_INIT ();
733777
734778 /* CryptoNight Step 2: Iteratively encrypt the results from Keccak to fill
735779 * the 2MB large random access buffer.
@@ -901,6 +945,7 @@ union cn_slow_hash_state
901945 p = U64(&hp_state[j]); \
902946 b[0] = p[0]; b[1] = p[1]; \
903947 VARIANT2_PORTABLE_INTEGER_MATH(b, c); \
948+ VARIANT4_RANDOM_MATH(a, b, r, &_b, &_b1); \
904949 __mul(); \
905950 VARIANT2_2(); \
906951 VARIANT2_SHUFFLE_ADD_NEON(hp_state, j); \
@@ -1063,7 +1108,7 @@ STATIC INLINE void aligned_free(void *ptr)
10631108}
10641109#endif /* FORCE_USE_HEAP */
10651110
1066- void cn_slow_hash (const void * data , size_t length , char * hash , int variant , int prehashed )
1111+ void cn_slow_hash (const void * data , size_t length , char * hash , int variant , int prehashed , uint64_t height )
10671112{
10681113 RDATA_ALIGN16 uint8_t expandedKey [240 ];
10691114
@@ -1100,6 +1145,7 @@ void cn_slow_hash(const void *data, size_t length, char *hash, int variant, int
11001145
11011146 VARIANT1_INIT64 ();
11021147 VARIANT2_INIT64 ();
1148+ VARIANT4_RANDOM_MATH_INIT ();
11031149
11041150 /* CryptoNight Step 2: Iteratively encrypt the results from Keccak to fill
11051151 * the 2MB large random access buffer.
@@ -1278,7 +1324,7 @@ STATIC INLINE void xor_blocks(uint8_t* a, const uint8_t* b)
12781324 U64 (a )[1 ] ^= U64 (b )[1 ];
12791325}
12801326
1281- void cn_slow_hash (const void * data , size_t length , char * hash , int variant , int prehashed )
1327+ void cn_slow_hash (const void * data , size_t length , char * hash , int variant , int prehashed , uint64_t height )
12821328{
12831329 uint8_t text [INIT_SIZE_BYTE ];
12841330 uint8_t a [AES_BLOCK_SIZE ];
@@ -1317,6 +1363,7 @@ void cn_slow_hash(const void *data, size_t length, char *hash, int variant, int
13171363
13181364 VARIANT1_INIT64 ();
13191365 VARIANT2_INIT64 ();
1366+ VARIANT4_RANDOM_MATH_INIT ();
13201367
13211368 // use aligned data
13221369 memcpy (expandedKey , aes_ctx -> key -> exp_data , aes_ctx -> key -> exp_data_len );
@@ -1353,6 +1400,7 @@ void cn_slow_hash(const void *data, size_t length, char *hash, int variant, int
13531400 copy_block (c , p );
13541401
13551402 VARIANT2_PORTABLE_INTEGER_MATH (c , c1 );
1403+ VARIANT4_RANDOM_MATH (a , c , r , b , b + AES_BLOCK_SIZE );
13561404 mul (c1 , c , d );
13571405 VARIANT2_2_PORTABLE ();
13581406 VARIANT2_PORTABLE_SHUFFLE_ADD (long_state , j );
@@ -1476,7 +1524,7 @@ union cn_slow_hash_state {
14761524};
14771525#pragma pack(pop)
14781526
1479- void cn_slow_hash (const void * data , size_t length , char * hash , int variant , int prehashed ) {
1527+ void cn_slow_hash (const void * data , size_t length , char * hash , int variant , int prehashed , uint64_t height ) {
14801528#ifndef FORCE_USE_HEAP
14811529 uint8_t long_state [MEMORY ];
14821530#else
@@ -1505,6 +1553,7 @@ void cn_slow_hash(const void *data, size_t length, char *hash, int variant, int
15051553
15061554 VARIANT1_PORTABLE_INIT ();
15071555 VARIANT2_PORTABLE_INIT ();
1556+ VARIANT4_RANDOM_MATH_INIT ();
15081557
15091558 oaes_key_import_data (aes_ctx , aes_key , AES_KEY_SIZE );
15101559 for (i = 0 ; i < MEMORY / INIT_SIZE_BYTE ; i ++ ) {
@@ -1537,6 +1586,7 @@ void cn_slow_hash(const void *data, size_t length, char *hash, int variant, int
15371586 j = e2i (c1 , MEMORY / AES_BLOCK_SIZE ) * AES_BLOCK_SIZE ;
15381587 copy_block (c2 , & long_state [j ]);
15391588 VARIANT2_PORTABLE_INTEGER_MATH (c2 , c1 );
1589+ VARIANT4_RANDOM_MATH (a , c2 , r , b , b + AES_BLOCK_SIZE );
15401590 mul (c1 , c2 , d );
15411591 VARIANT2_2_PORTABLE ();
15421592 VARIANT2_PORTABLE_SHUFFLE_ADD (long_state , j );
0 commit comments