def merge(self, to_merge, conf):
    if isinstance(to_merge, EMResult):
        to_merge = [to_merge]

    # Is it useful to merge?
    if len(to_merge) >= 1:
        result = EMResult(task_id=self.request.id)

        # If we are attacking, merge the correlations
        # TODO this can be cleaned up
        if conf_has_op(conf, 'attack') or conf_has_op(conf, 'memattack') or conf_has_op(conf, 'spattack'):
            # Get size of correlations
            shape = to_merge[0].correlations._n.shape  # TODO fixme: initialize the same way as in attack

            # Init result
            result.correlations = CorrelationList(shape)

            # Start merging
            for m in to_merge:
                result.correlations.merge(m.correlations)
        elif conf_has_op(conf, 'dattack'):  # TODO just check for presence of to_merge.distances instead of doing this
            shape = to_merge[0].distances._n.shape
            result.distances = DistanceList(shape)

            for m in to_merge:
                result.distances.merge(m.distances)
        elif conf_has_op(conf, 'pattack'):
            shape = to_merge[0].probabilities.shape
            result.probabilities = np.zeros(shape)

            for m in to_merge:
                result.probabilities += m.probabilities
        elif conf_has_op(conf, 'keyplot'):
            result.means = {}

            # Gather all mean traces per subkey value before averaging
            tmp = defaultdict(list)
            for m in to_merge:
                for key, mean_traces in m.means.items():
                    tmp[key].extend(mean_traces)

            for key, mean_traces in tmp.items():
                all_traces = np.array(mean_traces)
                print("Merging %d traces for subkey value %s" % (all_traces.shape[0], key))
                result.means[key] = np.mean(all_traces, axis=0)

        # Clean up tasks
        if conf.remote:
            for m in to_merge:
                logger.warning("Deleting %s" % m.task_id)
                app.AsyncResult(m.task_id).forget()

        return result
    else:
        return None
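# Usage sketch (hypothetical driver, not part of this module): merge() is
# the reduce step of a distributed attack. Each worker returns a partial
# EMResult over its share of the trace sets, and merge() folds the partials
# into one result. For 'pattack', for example, partials combine by
# elementwise addition:
#
#   partials = [worker_a_result, worker_b_result]  # partial EMResult objects
#   combined = task.merge(partials, conf)          # conf requests 'pattack'
#   # combined.probabilities now holds the sum of all partial probabilities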
def test_update(self):
    test_array = np.array([[1, 2], [3, 5], [4, 5], [4, 8]])
    x = test_array[:, 0]
    y = test_array[:, 1]

    clist1 = CorrelationList(1)
    clist1.update(0, x, y)
    clist2 = CorrelationList([1, 1])
    clist2.update((0, 0), x, y)

    # Checks
    self.assertAlmostEqual(clist1[0], np.corrcoef(x, y)[1, 0], places=13)
    self.assertAlmostEqual(clist2[0, 0], np.corrcoef(x, y)[1, 0], places=13)
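# The test above relies on CorrelationList computing Pearson correlation
# incrementally. Below is a minimal sketch of how such an accumulator can
# work, assuming it keeps running sums as sufficient statistics. The class
# is hypothetical and not part of the real CorrelationList API:

import numpy as np


class StreamingCorrelationSketch:
    """Online Pearson correlation built from running sums."""

    def __init__(self):
        self.n = 0
        self.sx = self.sy = self.sxx = self.syy = self.sxy = 0.0

    def update(self, x, y):
        x = np.asarray(x, dtype=float)
        y = np.asarray(y, dtype=float)
        self.n += len(x)
        self.sx += x.sum()
        self.sy += y.sum()
        self.sxx += (x * x).sum()
        self.syy += (y * y).sum()
        self.sxy += (x * y).sum()

    @property
    def value(self):
        # Pearson r from accumulated sums:
        # r = (n*Sxy - Sx*Sy) / sqrt((n*Sxx - Sx^2) * (n*Syy - Sy^2))
        num = self.n * self.sxy - self.sx * self.sy
        den = np.sqrt((self.n * self.sxx - self.sx ** 2) *
                      (self.n * self.syy - self.sy ** 2))
        return num / den

# Example: matches np.corrcoef on the data used in test_update.
#   c = StreamingCorrelationSketch()
#   c.update(np.array([1, 3, 4, 4]), np.array([2, 5, 5, 8]))
#   assert abs(c.value - np.corrcoef([1, 3, 4, 4], [2, 5, 5, 8])[1, 0]) < 1e-13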
def spattack_trace_set(trace_set, result, conf=None, params=None):
    logger.info("spattack %s" % (str(params) if params is not None else ""))

    num_keys = conf.key_high - conf.key_low
    num_outputs_per_key = LeakageModel.get_num_outputs(conf) // num_keys

    # Init if first time
    if result.correlations is None:
        result.correlations = CorrelationList([256, 1])  # We only have 1 output point (correlation)

    if not trace_set.windowed:
        logger.warning("Trace set not windowed. Skipping attack.")
        return

    if trace_set.num_traces <= 0:
        logger.warning("Skipping empty trace set.")
        return

    hypotheses = np.empty([256, trace_set.num_traces, num_outputs_per_key])

    # 1. Build hypotheses for all 256 possibilities of the key and all traces
    leakage_model = LeakageModel(conf)
    for subkey_guess in range(0, 256):
        for i in range(0, trace_set.num_traces):
            hypotheses[subkey_guess, i, :] = leakage_model.get_trace_leakages(
                trace=trace_set.traces[i],
                key_byte_index=conf.subkey,
                key_hypothesis=subkey_guess)

    # 2. Given point j of trace i, calculate the correlation between all hypotheses
    for i in range(0, trace_set.num_traces):
        k = conf.subkey - conf.key_low

        # Get measurements (columns) from all traces for this subkey
        measurements = trace_set.traces[i].signal[num_outputs_per_key * k:num_outputs_per_key * (k + 1)]

        # Correlate measurements with 256 hypotheses
        for subkey_guess in range(0, 256):
            # Update correlation
            result.correlations.update((subkey_guess, 0),
                                       hypotheses[subkey_guess, i, :],
                                       measurements)
def memattack_trace_set(trace_set, result, conf=None, params=None):
    logger.info("memattack %s" % (str(params) if params is not None else ""))

    if result.correlations is None:
        result.correlations = CorrelationList([16, 256, trace_set.window.size])

    for byte_idx in range(0, conf.key_high - conf.key_low):
        for j in range(0, trace_set.window.size):
            # Get measurements (columns) from all traces
            measurements = np.empty(trace_set.num_traces)
            for i in range(0, trace_set.num_traces):
                measurements[i] = trace_set.traces[i].signal[j]

            # Correlate measurements with 256 hypotheses
            for byte_guess in range(0, 256):
                # Update correlation (hw: Hamming-weight lookup table,
                # presumably defined elsewhere in this module)
                hypotheses = [hw[byte_guess]] * trace_set.num_traces
                result.correlations.update((byte_idx, byte_guess, j),
                                           hypotheses, measurements)
def attack_trace_set(trace_set, result, conf=None, params=None):
    """
    Perform CPA attack on a trace set. Assumes the traces in trace_set are real time domain signals.
    """
    logger.info("attack %s" % (str(params) if params is not None else ""))

    if not trace_set.windowed:
        logger.warning("Trace set not windowed. Skipping attack.")
        return

    if trace_set.num_traces <= 0:
        logger.warning("Skipping empty trace set.")
        return

    # Init if first time
    if result.correlations is None:
        result.correlations = CorrelationList([256, trace_set.window.size])

    hypotheses = np.empty([256, trace_set.num_traces])

    # 1. Build hypotheses for all 256 possibilities of the key and all traces
    leakage_model = LeakageModel(conf)
    for subkey_guess in range(0, 256):
        for i in range(0, trace_set.num_traces):
            hypotheses[subkey_guess, i] = leakage_model.get_trace_leakages(
                trace=trace_set.traces[i],
                key_byte_index=conf.subkey,
                key_hypothesis=subkey_guess)

    # 2. Given point j of trace i, calculate the correlation between all hypotheses
    for j in range(0, trace_set.window.size):
        # Get measurements (columns) from all traces
        measurements = np.empty(trace_set.num_traces)
        for i in range(0, trace_set.num_traces):
            measurements[i] = trace_set.traces[i].signal[j]

        # Correlate measurements with 256 hypotheses
        for subkey_guess in range(0, 256):
            # Update correlation
            result.correlations.update((subkey_guess, j),
                                       hypotheses[subkey_guess, :],
                                       measurements)
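# Usage sketch: once every trace set has been folded into result.correlations
# (shape [256, window_size]), the usual CPA key ranking takes, per subkey
# guess, the highest absolute correlation over all window points. Variable
# names below are illustrative, not part of the attack_trace_set API:
#
#   scores = np.array([np.max(np.abs(result.correlations[guess, :]))
#                      for guess in range(256)])
#   best_subkey_guess = int(np.argmax(scores))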
def test_max(self):
    test_array_1 = np.array([[1, 2], [3, 5], [4, 5], [4, 8]])
    test_array_2 = np.array([[4, 3], [5, 4], [6, 1], [8, 8]])
    test_array_3 = np.array([[-1, 1], [-2, 2], [-3, 3], [-4, 4]])
    x1 = test_array_1[:, 0]
    y1 = test_array_1[:, 1]
    x2 = test_array_2[:, 0]
    y2 = test_array_2[:, 1]
    x3 = test_array_3[:, 0]
    y3 = test_array_3[:, 1]

    clist = CorrelationList([1, 3])
    clist.update((0, 0), x1, y1)
    clist.update((0, 1), x2, y2)
    clist.update((0, 2), x3, y3)

    max_corr_over_points = np.max(np.abs(clist[0, :]))
    self.assertEqual(max_corr_over_points, 1.0)
def test_merge(self):
    test_array_1 = np.array([[1, 2], [3, 5], [4, 5], [4, 8]])
    test_array_2 = np.array([[4, 3], [5, 4], [6, 1], [8, 8]])
    test_array_check = np.array([[1, 2], [3, 5], [4, 5], [4, 8],
                                 [4, 3], [5, 4], [6, 1], [8, 8]])
    x1 = test_array_1[:, 0]
    y1 = test_array_1[:, 1]
    x2 = test_array_2[:, 0]
    y2 = test_array_2[:, 1]
    x_check = test_array_check[:, 0]
    y_check = test_array_check[:, 1]

    c1 = CorrelationList(1)
    c1.update(0, x1, y1)
    c2 = CorrelationList(1)
    c2.update(0, x2, y2)
    c3 = CorrelationList(1)
    c3.merge(c1)
    c3.merge(c2)

    self.assertAlmostEqual(c3[0], np.corrcoef(x_check, y_check)[1, 0], places=13)
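# Why merge() can be exact rather than approximate: assuming CorrelationList
# keeps running sums as sufficient statistics (see the sketch after
# test_update), sums over a concatenation equal the sums of the parts, so
# adding two accumulators reproduces the statistics of the combined data.
# A self-contained check on the x-columns used in test_merge:

import numpy as np

x1 = np.array([1., 3., 4., 4.])
x2 = np.array([4., 5., 6., 8.])
x_all = np.concatenate([x1, x2])
assert np.isclose(x1.sum() + x2.sum(), x_all.sum())
assert np.isclose((x1 ** 2).sum() + (x2 ** 2).sum(), (x_all ** 2).sum())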