if '~' in PATH_TO_MARGDISTS:
    PATH_TO_MARGDISTS = os.path.expanduser(PATH_TO_MARGDISTS)

key_nodes = ['k{}-K'.format(pad_string_zeros(i + 1)) for i in range(16)]

# Get set of all available marginal distribution files
marginal_distributions_files = [
    f.replace('.npy', '') for f in listdir(PATH_TO_MARGDISTS)
    if isfile(join(PATH_TO_MARGDISTS, f))
    and string_starts_with(f, 'marginaldist_')
]

# Simulate real values for the fixed key and plaintext
sim = lSimF.LeakageSimulatorAESFurious()
sim.fix_key(KEY)
sim.fix_plaintext(PLAINTEXT)
sim.simulate(read_plaintexts=0, print_all=0, random_plaintexts=0,
             affect_with_noise=False, hw_leakage_model=False, real_values=True)
leakage_dict = sim.get_leakage_dictionary()

# Get Default Key Distribution
key_file = [
    k_file for k_file in marginal_distributions_files if '_K_' in k_file
]
if len(key_file) > 1:
    # Assumption: exactly one default key distribution file should exist;
    # the original source is truncated here
    raise ValueError(
        "Expected a single key distribution file, found: {}".format(key_file))
def matching_performance():
    # Match against the extra (attack) data set
    extra_plaintexts = np.load(PLAINTEXT_EXTRA_FILEPATH)
    extra_keys = np.load(KEY_EXTRA_FILEPATH)
    extra_traces = np.transpose(
        load_trace_data(filepath=TRACEDATA_EXTRA_FILEPATH,
                        memory_mapped=MEMORY_MAPPED))
    musig_dict_path = MUSIGMA_FILEPATH
    # 'rb', not 'ro': pickle needs a binary read mode
    musig_dict = pickle.load(open(musig_dict_path, 'rb'))

    # Containers to hold ranks
    rank_dict = {}
    for v, length in variable_dict.iteritems():
        rank_dict[v] = [[] for _ in range(length)]
    all_ranks = []
    trace_average_rank_holder = []

    try:
        for i, (plaintext, key) in enumerate(zip(extra_plaintexts, extra_keys)):
            print "Trace {:5}: {}".format(i, plaintext)
            # Simulate actual values
            sim = lSimF.LeakageSimulatorAESFurious()
            sim.fix_key(key)
            sim.fix_plaintext(plaintext)
            sim.simulate(read_plaintexts=0, print_all=0, random_plaintexts=0,
                         affect_with_noise=False, hw_leakage_model=False,
                         real_values=True)
            leakage_dict = sim.get_leakage_dictionary()

            # For each node in the graph: get its time point, take the
            # corresponding power value from the extra trace data, template
            # match it against the MuSigma pairs to get a probability
            # distribution, then check how highly the actual value ranks
            trace_average_rank_list = []
            for var, musigma_array in sorted(musig_dict.iteritems()):
                # Get var name and var number (var_number is 1-indexed)
                var_name, var_number, _ = split_variable_name(var)
                # Actual value of the node
                actual_value = int(leakage_dict[var_name][0][var_number - 1])
                # Time point of node in trace
                time_points = np.load("{}{}.npy".format(
                    TIMEPOINTS_FOLDER, var_name))
                time_point = time_points[var_number - 1]
                # Power value received
                power_value = extra_traces[i][time_point]
                # Real Value Match
                matched_dist = real_value_match(var, power_value)
                # Rank of the actual value: invert the descending argsort so
                # that ranked_dist[v] is the rank of candidate value v
                # (argsort alone gives value indices in rank order, not ranks)
                temp = np.array(matched_dist).argsort()[::-1]
                ranked_dist = np.empty_like(temp)
                ranked_dist[temp] = np.arange(len(matched_dist))
                rank = ranked_dist[actual_value]
                # Add to Ranked List
                rank_dict[var_name][var_number - 1].append(rank)
                all_ranks.append(rank)
                trace_average_rank_list.append(rank)
            trace_average_rank_holder.append(
                get_average(trace_average_rank_list))
    except KeyboardInterrupt:
        pass
    finally:
        # Print Statistics
        for v, l in rank_dict.iteritems():
            for i, lst in enumerate(l):
                print "{}{}:\n".format(v, pad_string_zeros(i + 1))
                print_statistics(lst)
        # ALL
        print "* ALL NODES RANK STATISTICS *"
        print_statistics(all_ranks)
        print "* AVERAGE RANK PER TRACE STATISTICS *"
        print_statistics(trace_average_rank_holder)
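
# A minimal sketch of the template-matching step above, assuming
# real_value_match evaluates a Gaussian template for each of the 256
# candidate byte values from the stored (mu, sigma) pairs and normalises
# the result into a probability distribution. The helper name
# gaussian_template_match and the (256, 2) musigma_array layout are
# assumptions for illustration, not the project's actual implementation.
def gaussian_template_match(musigma_array, power_value):
    # Assumed layout: musigma_array[v] = (mu, sigma) for candidate value v
    mus = musigma_array[:, 0]
    sigmas = musigma_array[:, 1]
    # Gaussian likelihood of the observed power value under each template
    likelihoods = (1.0 / (sigmas * np.sqrt(2 * np.pi))) * \
        np.exp(-((power_value - mus) ** 2) / (2 * sigmas ** 2))
    # Normalise to a probability distribution over the candidate values
    return likelihoods / np.sum(likelihoods)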
def get_trace_data_and_plaintexts(just_keys_and_plaintexts=False):
    print "+ Trace File: {}\n+ Size: {} bytes\n".format(
        TRACE_FILE, os.path.getsize(TRACE_FILE))

    traces, samples, samplespace, float_coding, data_space, start_offset = parse_header()
    profile_traces, attack_traces, _, _ = load_meta()
    bytes_in_trace = (samples * samplespace) + data_space

    print "Traces: {}\nSamples: {}\nSample Space: {}\nData Space: {}\nFloat Coding: {}\nStart Offset: {}\n".format(
        traces, samples, samplespace, data_space, float_coding, start_offset)

    offset = start_offset
    coding = np.float32 if float_coding else np.int16

    if not just_keys_and_plaintexts:
        if MEMORY_MAPPED:
            all_data = np.memmap(TRACEDATA_FILEPATH,
                                 shape=(profile_traces, samples),
                                 mode='w+', dtype=coding)
            if profile_traces < traces:
                extra_data = np.memmap(TRACEDATA_EXTRA_FILEPATH,
                                       shape=(attack_traces, samples),
                                       mode='w+', dtype=coding)
        else:
            all_data = np.empty([profile_traces, samples], dtype=coding)
            if profile_traces < traces:
                extra_data = np.empty([attack_traces, samples], dtype=coding)

    all_plaintexts = np.empty([profile_traces, 16], dtype=np.int16)
    extra_plaintexts = np.empty([attack_traces, 16], dtype=np.int16)
    all_keys = np.empty([profile_traces, 16], dtype=np.int16)
    extra_keys = np.empty([attack_traces, 16], dtype=np.int16)

    # Guard against division by zero when there are fewer than 100 traces
    percent = max(traces / 100, 1)

    for t in range(traces):
        if PRINT:
            print "*** Trace {} ***".format(t)
            print "Length of File: {}".format(os.path.getsize(TRACE_FILE))
            print "Offset: {}".format(offset)
            final_byte = offset + data_space + (samples * samplespace)
            print "Final Byte: {}".format(final_byte)
            print "Is this ok: {}".format(
                final_byte <= os.path.getsize(TRACE_FILE))

        title_data = read_to_list(offset, data_space)
        if not just_keys_and_plaintexts:
            trace_data = read_to_list(offset + data_space, samples,
                                      number_of_bytes=samplespace,
                                      signedint=True,
                                      float_coding=float_coding)
            if PRINT:
                print "First 100 values of trace data:\n{}\n".format(
                    list(trace_data[:100]))

        if data_space == 32:
            # 32 bytes of title data: plaintext and ciphertext, fixed key
            plaintext = title_data[:16]
            ciphertext = title_data[16:32]
            key = KEY
        elif data_space == 48:
            # 48 bytes of title data: key, plaintext, ciphertext
            key = title_data[:16]
            plaintext = title_data[16:32]
            ciphertext = title_data[32:48]

        if PRINT:
            print "Key: {}".format(key)
            print "Plaintext: {}".format(plaintext)
            print "Ciphertext: {}".format(ciphertext)
            print_new_line()

        # Simulate
        sim = lSimF.LeakageSimulatorAESFurious()
        sim.fix_key(key)
        sim.fix_plaintext(plaintext)
        sim.simulate(read_plaintexts=0, print_all=0, random_plaintexts=0,
                     affect_with_noise=False, hw_leakage_model=False,
                     real_values=True)
        leakage_dict = sim.get_leakage_dictionary()

        simulated_ciphertext = leakage_dict['p'][0][-16:]
        simulated_end_of_round_one = leakage_dict['p'][0][16:32]
        simulated_end_of_g2 = leakage_dict['t'][0][32:48]

        if PRINT:
            print "* SIMULATED *"
            print "Key: {}".format(leakage_dict['k'][:16])
            print "Plaintext: {}".format(leakage_dict['p'][0][:16])
            print "Ciphertext: {}".format(simulated_ciphertext)
            print "Eof Round1: {}".format(simulated_end_of_round_one)
            print "End of G2: {}".format(simulated_end_of_g2)
            print_new_line()

        # Check for correctness
        if CHECK_CORRECTNESS and not (
                (ciphertext == simulated_ciphertext).all() or
                (ciphertext == simulated_end_of_round_one).all() or
                (ciphertext == simulated_end_of_g2).all()):
            print "*** Error in Trace {}: Did not Match!".format(t)
            raise ValueError
        elif PRINT:
            print "+ Checked: Correct!"
        # Add Trace Data to all_data
        if t < profile_traces:
            if not just_keys_and_plaintexts:
                all_data[t] = np.array(trace_data)
            all_plaintexts[t] = np.array(plaintext)
            all_keys[t] = np.array(key)
        else:
            if not just_keys_and_plaintexts:
                extra_data[t - profile_traces] = np.array(trace_data)
            extra_plaintexts[t - profile_traces] = np.array(plaintext)
            extra_keys[t - profile_traces] = np.array(key)

        if (t % percent) == 0:
            print "{}% Complete".format(t / percent)

        if PRINT:
            print "This is what we stored:\n{}\n".format(all_data[t])
            print_new_line()

        # Increment offset to the start of the next trace record
        offset = offset + bytes_in_trace

    if not just_keys_and_plaintexts:
        if MEMORY_MAPPED:
            # Flush the memory maps to disk
            del all_data
            if profile_traces < traces:
                del extra_data
        else:
            # Save the transpose as a file!
            np.save(TRACEDATA_FILEPATH, np.transpose(all_data))
            np.save(TRACEDATA_EXTRA_FILEPATH, np.transpose(extra_data))

    # Save plaintexts as files
    np.save(PLAINTEXT_FILEPATH, all_plaintexts)
    np.save(PLAINTEXT_EXTRA_FILEPATH, extra_plaintexts)
    # Save keys as files
    np.save(KEY_FILEPATH, all_keys)
    np.save(KEY_EXTRA_FILEPATH, extra_keys)

    print "Saved and Completed!"
    print_new_line()
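
# A minimal sketch of the raw record read that read_to_list performs above,
# assuming each trace record is data_space bytes of title data (key /
# plaintext / ciphertext) followed by samples values of samplespace bytes
# each. The helper name read_trace_record and the use of np.fromfile are
# illustrative assumptions, not the project's actual read_to_list.
def read_trace_record(trace_file, offset, data_space, samples, float_coding):
    # Assumption: non-float traces are 16-bit signed samples (samplespace == 2)
    sample_dtype = np.float32 if float_coding else np.int16
    with open(trace_file, 'rb') as f:
        f.seek(offset)
        title_data = np.fromfile(f, dtype=np.uint8, count=data_space)
        trace_data = np.fromfile(f, dtype=sample_dtype, count=samples)
    return title_data, trace_data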
def simulate_data_from_plaintexts():
    # Run once for the profiling data (0) and once for the extra/attack data (1)
    extra = [0, 1]
    for use_extra_data in extra:
        plaintext_filepath = PLAINTEXT_EXTRA_FILEPATH if use_extra_data else PLAINTEXT_FILEPATH
        key_filepath = KEY_EXTRA_FILEPATH if use_extra_data else KEY_FILEPATH

        # Load plaintexts and keys
        plaintexts = np.load(plaintext_filepath, mmap_mode='r')
        keys = np.load(key_filepath, mmap_mode='r')
        traces = plaintexts.shape[0]

        # One row per intermediate value, one column per trace
        k = np.empty([48, traces], dtype=np.uint8)
        p = np.empty([48, traces], dtype=np.uint8)
        t = np.empty([48, traces], dtype=np.uint8)
        s = np.empty([48, traces], dtype=np.uint8)
        mc = np.empty([32, traces], dtype=np.uint8)
        xt = np.empty([32, traces], dtype=np.uint8)
        cm = np.empty([32, traces], dtype=np.uint8)
        h = np.empty([24, traces], dtype=np.uint8)
        sk = np.empty([8, traces], dtype=np.uint8)
        xk = np.empty([2, traces], dtype=np.uint8)

        for i, (plaintext, key) in enumerate(zip(plaintexts, keys)):
            if PRINT:
                print "Trace {}\nPlaintext: {}\nKey: {}".format(
                    i, plaintext, key)

            sim = lSimF.LeakageSimulatorAESFurious()
            sim.fix_key(key)
            sim.fix_plaintext(plaintext)
            sim.simulate(read_plaintexts=0, print_all=0, random_plaintexts=0,
                         affect_with_noise=False, hw_leakage_model=False,
                         real_values=True)
            leakage_dict = sim.get_leakage_dictionary()

            for j in range(48):
                p[j][i] = leakage_dict['p'][0][j]
                t[j][i] = leakage_dict['t'][0][j]
                s[j][i] = leakage_dict['s'][0][j]
                k[j][i] = leakage_dict['k'][j]
                if j < 32:
                    mc[j][i] = leakage_dict['mc'][0][j]
                    xt[j][i] = leakage_dict['xt'][0][j]
                    cm[j][i] = leakage_dict['cm'][0][j]
                if j < 24:
                    h[j][i] = leakage_dict['h'][0][j]
                if j < 8:
                    sk[j][i] = leakage_dict['sk'][j]
                if j < 2:
                    xk[j][i] = leakage_dict['xk'][j]

            if traces < 100:
                print "Finished Trace {}".format(i)
            elif i % (traces // 100) == 0:
                print "{}% Complete".format(i / (traces // 100))

        # Save to files!
        extra_string = "extra_" if use_extra_data == 1 else ""
        np.save(REALVALUES_FOLDER + extra_string + 'k.npy', k)
        np.save(REALVALUES_FOLDER + extra_string + 'p.npy', p)
        np.save(REALVALUES_FOLDER + extra_string + 't.npy', t)
        np.save(REALVALUES_FOLDER + extra_string + 's.npy', s)
        np.save(REALVALUES_FOLDER + extra_string + 'mc.npy', mc)
        np.save(REALVALUES_FOLDER + extra_string + 'xt.npy', xt)
        np.save(REALVALUES_FOLDER + extra_string + 'cm.npy', cm)
        np.save(REALVALUES_FOLDER + extra_string + 'h.npy', h)
        np.save(REALVALUES_FOLDER + extra_string + 'sk.npy', sk)
        np.save(REALVALUES_FOLDER + extra_string + 'xk.npy', xk)

    print "Saved and Completed!"
    print_new_line()
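
# Usage sketch for the files written above: each saved array is indexed
# [variable_index][trace_index], so all values of one intermediate across
# all traces sit in a single row. _example_load_real_values is an
# illustrative helper, not part of the original pipeline.
def _example_load_real_values():
    s_values = np.load(REALVALUES_FOLDER + 's.npy', mmap_mode='r')
    # First SubBytes output byte (s1) across the first five traces
    print "s1, traces 0-4: {}".format(list(s_values[0][:5]))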
def lda_matching_performance(tprange=200):
    # Match against the extra (attack) data set
    extra_plaintexts = np.load(NUMPY_EXTRA_PLAINTEXT_FILE)
    extra_traces = np.transpose(
        load_trace_data(filepath=NUMPY_EXTRA_TRACE_FILE,
                        memory_mapped=MEMORY_MAPPED))
    print extra_traces.shape

    # Containers to hold ranks
    rank_dict = {}
    for v, length in variable_dict.iteritems():
        rank_dict[v] = [[] for _ in range(length)]
    all_ranks = []
    trace_average_rank_holder = []

    try:
        for i, plaintext in enumerate(extra_plaintexts):
            print "Trace {:5}: {}".format(i, plaintext)
            # Simulate actual values
            sim = lSimF.LeakageSimulatorAESFurious()
            print "TODO"  # TODO: function is unfinished; loop exits here
            break
            sim.fix_key(KEY)
            sim.fix_plaintext(plaintext)
            sim.simulate(read_plaintexts=0, print_all=0, random_plaintexts=0,
                         affect_with_noise=False, hw_leakage_model=False,
                         real_values=True)
            leakage_dict = sim.get_leakage_dictionary()

            # For each node in the graph: get its time point, take the
            # surrounding window of power values from the extra trace data,
            # feed it to the trained LDA classifier to get a probability
            # distribution, then check how highly the actual value ranks
            trace_average_rank_list = []
            for var_name, vlength in variable_dict.iteritems():
                time_points = np.load("{}{}.npy".format(
                    TIMEPOINTS_FOLDER, var_name))
                for var_number in range(vlength):
                    # var_number is 0-indexed here (unlike in
                    # matching_performance), so no - 1 offset is needed
                    time_point = time_points[var_number]
                    # Load the trained Linear Discriminant Analysis model
                    # ('rb', not 'ro': pickle needs a binary read mode)
                    lda = pickle.load(
                        open("{}{}_{}_{}.p".format(LDA_FOLDER, tprange,
                                                   var_name, var_number),
                             'rb'))
                    # Get trace data in a window around the time point
                    X = extra_traces[i, time_point - (tprange / 2):
                                     time_point + (tprange / 2)]
                    # Predict values
                    predicted_probabilities = lda.predict_proba([X])[0]
                    # Get actual value
                    actual_value = (
                        leakage_dict[var_name][0][var_number]).astype(np.uint8)
                    # Get rank: invert the descending argsort so that
                    # ranked_dist[v] is the rank of candidate value v
                    temp = predicted_probabilities.argsort()[::-1]
                    ranked_dist = np.empty_like(temp)
                    ranked_dist[temp] = np.arange(len(predicted_probabilities))
                    rank = ranked_dist[actual_value] + 1
                    # Add to Ranked List
                    rank_dict[var_name][var_number].append(rank)
                    all_ranks.append(rank)
                    trace_average_rank_list.append(rank)
            trace_average_rank_holder.append(
                get_average(trace_average_rank_list))
    except KeyboardInterrupt:
        pass
    finally:
        # Print Statistics
        for v, l in rank_dict.iteritems():
            for i, lst in enumerate(l):
                print "{}{}:\n".format(v, pad_string_zeros(i + 1))
                print_statistics(lst)
        # ALL
        print "* ALL NODES RANK STATISTICS *"
        print_statistics(all_ranks)
        print "* AVERAGE RANK PER TRACE STATISTICS *"
        print_statistics(trace_average_rank_holder)
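
# A standalone sketch of the rank computation used in both matching
# functions above: inverting a descending argsort gives, for each candidate
# value v, the rank of v under the predicted distribution (0 = most likely).
# rank_of_value is an illustrative helper, not part of the original pipeline.
def rank_of_value(probabilities, value):
    order = np.asarray(probabilities).argsort()[::-1]  # values, best first
    ranks = np.empty_like(order)
    ranks[order] = np.arange(len(order))               # invert the permutation
    return ranks[value]

# e.g. rank_of_value([0.1, 0.7, 0.2], 2) == 1: value 2 is second most likely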