mod variant4_random_math
Commit a177241046 · parent e224b4af93
@@ -200,7 +200,6 @@ static inline int v4_random_math_init(struct V4_Instruction* code, const uint64_
 memset(data, 0, sizeof(data));
 uint64_t tmp = SWAP64LE(height);
 memcpy(data, &tmp, sizeof(uint64_t));
-data[20] = -38; // change seed
 
 // Set data_index past the last byte in data
 // to trigger full data update with blake hash
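Note: the removed line perturbed one byte of the seed buffer before its first Blake update, so the generator produced different programs than the reference CryptoNight-R code for the same height. A minimal sketch of the seed setup around this hunk, assuming the 32-byte buffer used by the reference code; the SWAP64LE placeholder below stands in for the real byte-swap macro from the surrounding codebase:

    #include <stdint.h>
    #include <string.h>

    /* Placeholder: the real SWAP64LE is defined elsewhere in the codebase
       and byte-swaps on big-endian hosts; on little-endian hosts it is a no-op. */
    #ifndef SWAP64LE
    #define SWAP64LE(x) (x)
    #endif

    /* Hypothetical helper mirroring the seed setup shown in this hunk. */
    static size_t seed_random_math(int8_t data[32], uint64_t height)
    {
        memset(data, 0, 32);
        uint64_t tmp = SWAP64LE(height);
        memcpy(data, &tmp, sizeof(tmp));
        /* Removed by this commit: data[20] = -38;  (one-byte seed tweak) */

        /* Returning the buffer size matches the "data_index past the last
           byte" trick: the first read then triggers a full Blake re-hash. */
        return 32;
    }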
@@ -211,10 +210,9 @@ static inline int v4_random_math_init(struct V4_Instruction* code, const uint64_
 
 // There is a small chance (1.8%) that register R8 won't be used in the generated program
 // So we keep track of it and try again if it's not used
-bool r8_used;
 do {
-int latency[9];
-int asic_latency[9];
+int latency[8];
+int asic_latency[8];
 
 // Tracks previous instruction and value of the source operand for registers R0-R3 throughout code execution
 // byte 0: current value of the destination register
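Note: latency[] and asic_latency[] hold one projected-latency slot per register, so shrinking them from 9 to 8 entries goes together with dropping the extra constant register R8 that the reference generator keeps alongside R0-R3 (variable) and R4-R7 (constant). A rough sketch of that register model; the names below are chosen for illustration and do not come from this diff:

    /* Illustrative register model: R0-R3 are rewritten by the generated
       program, R4-R7 stay constant for a whole program, and the reference
       design adds one more constant register, R8 (removed here). */
    enum { V4_REG_VARIABLE = 4, V4_REG_CONSTANT = 4 };

    int latency[V4_REG_VARIABLE + V4_REG_CONSTANT];      /* 8 slots after this commit */
    int asic_latency[V4_REG_VARIABLE + V4_REG_CONSTANT]; /* kept the same size */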
@@ -223,7 +221,7 @@ static inline int v4_random_math_init(struct V4_Instruction* code, const uint64_
 //
 // Registers R4-R8 are constant and are treated as having the same value because when we do
 // the same operation twice with two constant source registers, it can be optimized into a single operation
-uint32_t inst_data[9] = { 0, 1, 2, 3, 0xFFFFFF, 0xFFFFFF, 0xFFFFFF, 0xFFFFFF, 0xFFFFFF };
+uint32_t inst_data[8] = { 0, 1, 2, 3, 0xFFFFFF, 0xFFFFFF, 0xFFFFFF, 0xFFFFFF };
 
 bool alu_busy[TOTAL_LATENCY + 1][ALU_COUNT];
 bool is_rotation[V4_INSTRUCTION_COUNT];
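Note: inst_data[] keeps one packed tracking word per register so the generator can reject instruction pairs that an optimizer could fold into one. Byte 0 carries the register's current value, per the comment above; the reference code also packs the opcode and the source value into bytes 1 and 2, which is an assumption here since those comment lines fall outside the shown context. A sketch of that packing:

    #include <stdint.h>

    /* Assumed layout: byte 0 = destination value, byte 1 = opcode,
       byte 2 = source value (bytes 1-2 are not shown in this hunk). */
    static uint32_t pack_inst_data(uint8_t dst_value, uint8_t opcode, uint8_t src_value)
    {
        return (uint32_t)dst_value | ((uint32_t)opcode << 8) | ((uint32_t)src_value << 16);
    }

    /* Constant registers start at 0xFFFFFF so any two of them compare equal:
       repeating an operation with two constant sources would collapse to a
       single instruction, and the equal tracking words let the generator
       detect and skip that case. */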
@@ -242,7 +240,6 @@ static inline int v4_random_math_init(struct V4_Instruction* code, const uint64_
 code_size = 0;
 
 int total_iterations = 0;
-r8_used = false;
 
 // Generate random code to achieve minimal required latency for our abstract CPU
 // Try to get this latency for all 4 registers
@@ -287,8 +284,8 @@ static inline int v4_random_math_init(struct V4_Instruction* code, const uint64_
 if (((opcode == ADD) || (opcode == SUB) || (opcode == XOR)) && (a == b))
 {
 // Use register R8 as source instead
-b = 8;
-src_index = 8;
+b = a + 4;
+src_index = b;
 }
 
 // Don't do rotation with the same destination twice because it's equal to a single rotation
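Note: ADD, SUB and XOR with the same register on both sides are degenerate (x - x and x ^ x give 0, x + x is just a shift), so the generator redirects the source when a == b. The reference code jumped to the dedicated constant register R8; after this change the source becomes the constant register paired with the destination. A short sketch of the post-change redirect, assuming a stays in the 0-3 range so a + 4 always names one of R4-R7:

    /* Redirect the source operand when it collides with the destination. */
    if (((opcode == ADD) || (opcode == SUB) || (opcode == XOR)) && (a == b))
    {
        b = a + 4;       /* use the matching constant register R4..R7 */
        src_index = b;   /* record the redirected source */
    }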
@@ -368,10 +365,6 @@ static inline int v4_random_math_init(struct V4_Instruction* code, const uint64_
 code[code_size].src_index = src_index;
 code[code_size].C = 0;
 
-if (src_index == 8)
-{
-r8_used = true;
-}
 
 if (opcode == ADD)
 {
@@ -402,7 +395,7 @@ static inline int v4_random_math_init(struct V4_Instruction* code, const uint64_
 // We need to add a few more MUL and ROR instructions to achieve minimal required latency for ASIC
 // Get this latency for at least 1 of the 4 registers
 const int prev_code_size = code_size;
-while ((code_size < NUM_INSTRUCTIONS_MAX) && (asic_latency[0] < TOTAL_LATENCY) && (asic_latency[1] < TOTAL_LATENCY) && (asic_latency[2] < TOTAL_LATENCY) && (asic_latency[3] < TOTAL_LATENCY))
+while ((asic_latency[0] < TOTAL_LATENCY) && (asic_latency[1] < TOTAL_LATENCY) && (asic_latency[2] < TOTAL_LATENCY) && (asic_latency[3] < TOTAL_LATENCY))
 {
 int min_idx = 0;
 int max_idx = 0;
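Note: this padding loop appends extra MUL/ROR instructions until all four result registers reach TOTAL_LATENCY on the modelled ASIC; the change drops the code_size < NUM_INSTRUCTIONS_MAX guard, so the loop now terminates only through the latency targets. A skeleton of the loop as it stands after the change (the body is abridged; the min/max selection described in the comment is paraphrased from the reference generator and is not part of this hunk):

    const int prev_code_size = code_size;
    while ((asic_latency[0] < TOTAL_LATENCY) && (asic_latency[1] < TOTAL_LATENCY) &&
           (asic_latency[2] < TOTAL_LATENCY) && (asic_latency[3] < TOTAL_LATENCY))
    {
        /* ...pick the register furthest behind (min_idx) and the one furthest
           ahead (max_idx), then append a MUL or ROR that raises the lagging
           register's ASIC latency... */
    }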
@@ -426,7 +419,7 @@ static inline int v4_random_math_init(struct V4_Instruction* code, const uint64_
 
 // There is ~98.15% chance that loop condition is false, so this loop will execute only 1 iteration most of the time
 // It never does more than 4 iterations for all block heights < 10,000,000
-} while (!r8_used || (code_size < NUM_INSTRUCTIONS_MIN) || (code_size > NUM_INSTRUCTIONS_MAX));
+} while (code_size < NUM_INSTRUCTIONS_MIN);
 
 // It's guaranteed that NUM_INSTRUCTIONS_MIN <= code_size <= NUM_INSTRUCTIONS_MAX here
 // Add final instruction to stop the interpreter
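Note: the retry condition at the bottom of the do/while loop is relaxed here — the reference version regenerated the program when R8 went unused or when code_size fell outside [NUM_INSTRUCTIONS_MIN, NUM_INSTRUCTIONS_MAX], while the new version only retries when the program is too short. The two closing comments refer to the terminator the function appends right after this hunk; a hedged sketch of that continuation, assuming the RET opcode and the V4_Instruction fields from the reference header (those lines fall outside the shown context):

    /* Assumed continuation (not part of this diff): stop the interpreter and
       report how many instructions were generated. */
    code[code_size].opcode = RET;
    code[code_size].dst_index = 0;
    code[code_size].src_index = 0;
    code[code_size].C = 0;
    return code_size;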