def test_recoding_inverse(self):
    """Recoding must be consistent with its inverse operations.

    First checks that a multi-scalar reconstructed from known digit columns
    recodes back to those digit columns; then checks the same property after
    an inverse-decompose / decompose round trip of the multi-scalar.
    """
    expected_digit_columns = np.array([[1, -1, 1], [1, -1, 1], [0, 0, 1],
                                       [0, 0, 1]])
    m = 4  # dimension of the GLV decomposition
    base_point_order = 2**256
    # Number of digit columns of a full recoded matrix: ceil(256 / 4) + 1.
    length = int(math.ceil(math.log(base_point_order, 2) / m)) + 1

    # Build a full valid recoded matrix whose leading columns are the
    # expected ones, and decode it back to a multi-scalar.
    scalars_in_matrix_form = scalar_recoding.get_valid_recoded_matrix(
        expected_digit_columns, length)
    scalar_vals = np.asarray(
        scalar_recoding.matrix_to_scalars(scalars_in_matrix_form),
        dtype=np.uint64)

    # Recoding that multi-scalar must reproduce the expected digit columns.
    glv_sac_matrix = scalar_recoding.recode_multi_scalar_general_unoptimized(
        scalar_vals, base_point_order)
    digit_columns = glv_sac_matrix[:, :expected_digit_columns.shape[1]]
    self.assertTrue(np.array_equal(digit_columns, expected_digit_columns))

    # Reconstruct the original scalar from the multi-scalar, decompose it
    # again, and apply the signed non-zero encoding; the expected digit
    # columns should come out again.
    org_scalar = scalar_decomposition.inverse_decomposition_using_eigen(
        scalar_vals)
    decomp_scalar = np.asarray(
        scalar_decomposition.decompose_scalar(org_scalar), dtype=np.uint64)
    recoded_matrix = scalar_recoding.recode_multi_scalar_general_unoptimized(
        decomp_scalar, base_point_order)
    digit_columns = recoded_matrix[:, :expected_digit_columns.shape[1]]
    # BUG FIX: the comparison result was assigned to a local
    # (matching_digit_columns) but never asserted, so this second half of
    # the test could never fail. Assert it explicitly.
    self.assertTrue(np.array_equal(digit_columns, expected_digit_columns))
def fourq_scalar_mult(base_point: Point, scalar): assert 0 <= scalar < 2**256 # Compute endomorphisms p_phi = apply_endomorphism_phi(base_point) p_psi = apply_endomorphism_psi(base_point) psi_phi_p = apply_endomorphism_psi(p_phi) # Precompute lookup table lookup_table = {} for u in range(8): # u = (u2, u1, u0)_2 u0, u1, u2 = u & 1, (u >> 1) & 1, (u >> 2) & 1 t_u = base_point + u0 * p_phi + u1 * p_psi + u2 * psi_phi_p lookup_table[u] = t_u # Decompose scalar multi_scalar = decompose_scalar(scalar) multi_scalar = np.asarray(multi_scalar, dtype=np.uint64) # Recode scalar base_point_order = 2**256 recoded_matrix = recode_multi_scalar_general_unoptimized( multi_scalar, base_point_order) signs, digit_cols_vals = interpret_recoded_matrix(recoded_matrix) # Main loop q = signs[64] * lookup_table[digit_cols_vals[64]] for i in reversed(range(64)): q = q.dbl() t_i = signs[i] * lookup_table[digit_cols_vals[i]] q = q + t_i return q
def test_recode_example(self): base_point_order = 2**16 multi_scalar = np.asarray([11, 6, 14, 3], dtype=np.uint64) glv_sac_matrix = scalar_recoding.recode_multi_scalar_general_unoptimized( multi_scalar, base_point_order, print_debug=True) expected_result = np.asarray([[1, -1, 1, -1, 1], [1, -1, 0, -1, 0], [1, 0, 0, -1, 0], [0, 0, 1, -1, 1]]) self.assertTrue(np.array_equal(glv_sac_matrix, expected_result))
def test_recode_example_64bit(self): # multi_scalar = self._generate_random_64bit_scalars() multi_scalar = np.asarray([ 11141347229464416257, 14047439610996959232, 4001508484362378240, 1245141304914268672 ], dtype=np.uint64) base_point_order = 2**256 glv_sac_matrix = scalar_recoding.recode_multi_scalar_general_unoptimized( multi_scalar, base_point_order, print_debug=False) print(glv_sac_matrix)
def test_scalar_recoding(self): for scalar_decomp_test_vector in scalar_decomp_test_vectors: scalar = scalar_decomp_test_vector[0] expected_decomposed_scalar = np.asarray( scalar_decomp_test_vector[1], dtype=np.uint64) recoded_matrix = scalar_recoding.recode_multi_scalar_general_unoptimized( expected_decomposed_scalar, 2**256) scalar_vals = np.asarray( scalar_recoding.matrix_to_scalars(recoded_matrix), dtype=np.uint64) self.assertTrue( np.array_equal(scalar_vals, expected_decomposed_scalar))
def test_recode_random(self): for i in range(100): multi_scalar = self._generate_random_64bit_scalars() glv_sac_matrix = scalar_recoding.recode_multi_scalar_general_unoptimized( multi_scalar, 2**256) for row in range(glv_sac_matrix.shape[0]): sign_aligner_val = scalar_recoding.scalar_array_to_decimal( glv_sac_matrix[row]) print(sign_aligner_val) print(glv_sac_matrix) # print(scalar_recoding.scalar_array_to_decimal([])) print("\n")
def test_structure_of_recoded_matrix(self): base_point_order = 2**16 multi_scalar = np.asarray([31, 18, 26, 2], dtype=np.uint64) glv_sac_matrix = scalar_recoding.recode_multi_scalar_general_unoptimized( multi_scalar, base_point_order) print(glv_sac_matrix)
def online_template_attack(base_point,
                           secret_scalar,
                           use_decomposed_scalar=True,
                           average_template_signals=False,
                           max_nr_of_iterations=64,
                           enable_output=True,
                           recapture_target_trace=False,
                           plot_intermediate_templates=False):
    """Run an online template attack against the FourQ hardware implementation.

    This function does the following:
    - load the base point onto the FourQ implementation
    - capture the target trace
    - [determine the offsets of the doubling operations]
    - Attack the key bits:
        * Generate the corresponding templates:
            * Obtain the multi_scalar that belongs to the wanted recoded matrix
            * Obtain the scalar that decomposes into the multi-scalar in the
              previous step
            * Load this scalar in the FourQ implementation
        * Correlate the template (at the correct doubling operation using the
          offsets) with the target trace (again at the correct doubling
          operation using the offsets)
        * Choose the values for the digit-column and sign that give the highest
          correlation and repeat for every iteration in the scalar
          multiplication of FourQ.

    :param base_point: pair of coordinate pairs ((x0, x1), (y0, y1)) written to
        the device.
    :param secret_scalar: the scalar under attack; either the raw scalar or an
        already-decomposed multi-scalar, depending on use_decomposed_scalar.
    :param use_decomposed_scalar: if True, secret_scalar is already decomposed.
    :param average_template_signals: forwarded to attack_digit_column.
    :param max_nr_of_iterations: stop after attacking this many digit columns.
    :param enable_output: print per-iteration diagnostics when True.
    :param recapture_target_trace: capture (and save) a fresh target trace
        instead of loading the stored one.
    :param plot_intermediate_templates: forwarded to attack_digit_column.
    :return: list of ranks of the correct digit column per attacked iteration.
    """
    # Decompose scalar [if necessary] and calculate recoded matrix for
    # verifying column guesses.
    if not use_decomposed_scalar:
        decomposed_secret_scalar = scalar_decomposition.decompose_scalar(
            secret_scalar)
    else:
        decomposed_secret_scalar = secret_scalar
    decomposed_secret_scalar = np.asarray(decomposed_secret_scalar,
                                          dtype=np.uint64)
    recoded_secret_scalar_matrix = scalar_recoding.recode_multi_scalar_general_unoptimized(
        decomposed_secret_scalar, 2**256)

    # Connect to Sakura
    sakura = ftdi_interface.SaseboGii()
    # lecroy_if.save_panel_to_file("lecroy_ota_config.dat")
    # lecroy_if.load_lecroy_cfg()

    # Initialize ROM constants
    fourq_scalar_mult.fourq_initialize_rom(sakura)

    # Load base point
    x0, x1 = base_point[0]
    y0, y1 = base_point[1]
    fourq_scalar_mult.fourq_write_base_point(sakura, x0, x1, y0, y1)

    # Load secret scalar
    load_scalar(sakura, secret_scalar, use_decomposed_scalar)
    # capture_trace(sakura)

    # Capture target trace [if needed]; otherwise reuse the stored one.
    if recapture_target_trace:
        target_trace_interpreted = capture_trace(
            sakura,
            save_to_file=recapture_target_trace,
            file_name="target_trace")
        save_target_trace(target_trace_interpreted)
    else:
        target_trace_interpreted = load_target_trace()

    # Determine offsets from the operation-trigger channel.
    oper_trigger_trace = capture_trace(sakura,
                                       channel="C2",
                                       save_to_file=False,
                                       file_name="oper_trigger_trace")
    # The offsets containing offsets for both the doubling and addition
    # operations.
    offsets = determine_offsets_static(oper_trigger_trace, nth_diff=1)
    # The order of the offsets is: [DBL, ADD, DBL, ..., DBL, ADD]
    # Even elements contain the DBL offsets, odd elements the ADD offsets.
    # There are 64 DBL and 64 ADD operations, giving 128 offsets in total
    # (if the whole main loop was captured).

    rank_per_iter = []
    attacked_digit_columns = None
    # We are now going to attack the digit columns iteratively (starting from
    # digit column 64).
    for iteration in reversed(range(64)):
        # corr_results: list of (template digit column, correlation value).
        corr_results = attack_digit_column(
            sakura,
            iteration,
            offsets,
            target_trace_interpreted,
            attacked_digit_columns,
            use_decomposed_scalar=use_decomposed_scalar,
            average_template_signals=average_template_signals,
            plot_intermediate_templates=plot_intermediate_templates,
            use_points_of_interest=False,
            use_fft=False)

        # Determine which (template digit column, correlation value) had the
        # highest correlation value.
        template_digit_column, max_corr_coeff = max(
            corr_results, key=operator.itemgetter(1))
        if enable_output:
            print("Iteration: {}. Attacking d{}".format(
                iteration, iteration + 1))
            print("Expected digit column: \t{}".format(
                recoded_secret_scalar_matrix[:, 63 - iteration]))
            print("Digit column guess: \t{}".format(
                template_digit_column[:, 0]))
            print("Correlation results (from lowest to highest:")
        # Walk the candidates sorted by correlation to find the rank of the
        # correct digit column among all templates.
        for idx, (tmpl_digit_col, corr_coeff) in enumerate(
                sorted(corr_results, key=operator.itemgetter(1))):
            equals_correct_template = np.array_equal(
                tmpl_digit_col[:, 0],
                recoded_secret_scalar_matrix[:, 63 - iteration])
            if equals_correct_template:
                rank = len(corr_results) - idx
                rank_per_iter.append(rank)
            if enable_output:
                print("{}: \t {}{} ".format(
                    tmpl_digit_col[:, 0], corr_coeff,
                    "*" if equals_correct_template else ""))
        # NOTE(review): `rank` is only bound when some template equals the
        # expected digit column; if none matches, this print raises
        # NameError (or reuses a stale value) — confirm intended behavior.
        if enable_output:
            print("Rank of expected: \t {} from {} templates in total".format(
                rank, len(corr_results)))

        # TODO for testing, we always assume we have guesses the current
        # digit column correctly: the *known* correct column (not the guess)
        # is appended for the next iteration.
        if attacked_digit_columns is not None:
            attacked_digit_columns = np.concatenate(
                (attacked_digit_columns,
                 recoded_secret_scalar_matrix[:, [63 - iteration]]),
                axis=1)
        else:
            attacked_digit_columns = recoded_secret_scalar_matrix[:, [
                63 - iteration
            ]]
        # attacked_digit_columns = np.concatenate((attacked_digit_columns, template_digit_column), axis=1) \
        #     if attacked_digit_columns is not None else template_digit_column

        # Stop early once the requested number of iterations was attacked.
        if 63 - max_nr_of_iterations == iteration:
            break
        # Free the (large) correlation results before the next iteration.
        del corr_results
        gc.collect()

    if enable_output:
        print("Ranks per iteration (starting from i=63: {}".format(
            rank_per_iter))
    # return attacked_digit_columns
    return rank_per_iter