def merge(self, to_merge, conf):
    if isinstance(to_merge, EMResult):
        to_merge = [to_merge]

    # Is it useful to merge?
    if len(to_merge) >= 1:
        result = EMResult(task_id=self.request.id)

        # If we are attacking, merge the correlations
        # TODO this can be cleaned up
        if conf_has_op(conf, 'attack') or conf_has_op(conf, 'memattack') or conf_has_op(conf, 'spattack'):
            # Get size of correlations
            shape = to_merge[0].correlations._n.shape  # TODO fixme: initialize the same way as in attack

            # Init result
            result.correlations = CorrelationList(shape)

            # Start merging
            for m in to_merge:
                result.correlations.merge(m.correlations)
        elif conf_has_op(conf, 'dattack'):  # TODO just check for presence of to_merge.distances instead of doing this
            shape = to_merge[0].distances._n.shape
            result.distances = DistanceList(shape)

            for m in to_merge:
                result.distances.merge(m.distances)
        elif conf_has_op(conf, 'pattack'):
            shape = to_merge[0].probabilities.shape
            result.probabilities = np.zeros(shape)

            for m in to_merge:
                result.probabilities += m.probabilities
        elif conf_has_op(conf, 'keyplot'):
            result.means = {}
            tmp = defaultdict(list)

            for m in to_merge:
                for key, mean_traces in m.means.items():
                    tmp[key].extend(mean_traces)

            for key, mean_traces in tmp.items():
                all_traces = np.array(mean_traces)
                logger.info("Merging %d traces for subkey value %s" % (all_traces.shape[0], key))
                result.means[key] = np.mean(all_traces, axis=0)

        # Clean up finished subtasks on the broker when running remotely
        if conf.remote:
            for m in to_merge:
                logger.warning("Deleting %s" % m.task_id)
                app.AsyncResult(m.task_id).forget()

        return result
    else:
        return None
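# For context, a minimal sketch of the two helpers merge() leans on. These are
# assumptions inferred from usage, not the project's real definitions: EMResult
# is treated as a plain container for per-operation payloads, and conf.operations
# is assumed to be an iterable of requested operation names.
class EMResult:
    def __init__(self, task_id=None):
        self.task_id = task_id
        self.correlations = None
        self.distances = None
        self.probabilities = None
        self.means = None


def conf_has_op(conf, op):
    # Hypothetical: report whether an operation such as 'dattack' was requested
    return op in conf.operations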
def test_update(self):
    test_array = np.array([[1, 2], [3, 5], [4, 5], [4, 8]])
    x = test_array[:, 0]
    y = test_array[:, 1]

    clist1 = DistanceList(1)
    clist1.update(0, x, y)
    clist2 = DistanceList([1, 1])
    clist2.update((0, 0), x, y)

    # Checks
    self.assertAlmostEqual(clist1[0], np.sum(np.abs(x - y)), places=13)
    self.assertAlmostEqual(clist2[0, 0], np.sum(np.abs(x - y)), places=13)
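# A minimal sketch of a DistanceList consistent with this test and with
# test_merge below: it accumulates the L1 distance (sum of absolute
# differences) per cell. This is an assumption inferred from the assertions,
# not the project's actual implementation.
import numpy as np


class DistanceList:
    def __init__(self, shape):
        self._n = np.zeros(shape, dtype=int)  # samples seen per cell
        self._d = np.zeros(shape)             # accumulated L1 distance per cell

    def update(self, index, x, y):
        # Add the L1 distance between x and y to one cell
        self._d[index] += np.sum(np.abs(np.asarray(x) - np.asarray(y)))
        self._n[index] += len(x)

    def merge(self, other):
        # L1 distances are plain sums, so merging is element-wise addition
        self._d += other._d
        self._n += other._n

    def __getitem__(self, index):
        return self._d[index]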
def dattack_trace_set(trace_set, result, conf=None, params=None):
    """
    Perform a distance-based attack (dattack) on a trace set. Assumes the
    traces in trace_set are real time domain signals.
    """
    logger.info("dattack %s" % (str(params) if params is not None else ""))

    # Init if first time
    if result.distances is None:
        result.distances = DistanceList([256, trace_set.window.size])

    if not trace_set.windowed:
        logger.warning("Trace set not windowed. Skipping attack.")
        return

    if trace_set.num_traces <= 0:
        logger.warning("Skipping empty trace set.")
        return

    hypotheses = np.empty([256, trace_set.num_traces])

    # 1. Build hypotheses for all 256 possibilities of the key and all traces
    leakage_model = LeakageModel(conf)
    for subkey_guess in range(256):
        for i in range(trace_set.num_traces):
            hypotheses[subkey_guess, i] = leakage_model.get_trace_leakages(
                trace=trace_set.traces[i],
                key_byte_index=conf.subkey,
                key_hypothesis=subkey_guess)

    # 2. Given point j of all traces, calculate the distance to all hypotheses
    for j in range(trace_set.window.size):
        # Get measurements (columns) from all traces
        measurements = np.empty(trace_set.num_traces)
        for i in range(trace_set.num_traces):
            measurements[i] = trace_set.traces[i].signal[j]

        # Update the accumulated distance for each of the 256 subkey hypotheses
        for subkey_guess in range(256):
            result.distances.update((subkey_guess, j),
                                    hypotheses[subkey_guess, :],
                                    measurements)
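# After the attack has processed all trace sets, the subkey guess whose
# hypotheses stay closest to the measurements should win. A hypothetical
# ranking helper; the scoring rule (smallest distance over the window) is an
# assumption, not part of the original code:
import numpy as np


def rank_subkey_guesses(distances, window_size):
    scores = np.empty(256)
    for guess in range(256):
        # Score each guess by its best (smallest) distance over the window
        scores[guess] = min(distances[guess, j] for j in range(window_size))
    return np.argsort(scores)  # most likely subkey guess first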
def test_merge(self):
    test_array_1 = np.array([[1, 2], [3, 5], [4, 5], [4, 8]])
    test_array_2 = np.array([[4, 3], [5, 4], [6, 1], [8, 8]])
    test_array_check = np.array([[1, 2], [3, 5], [4, 5], [4, 8],
                                 [4, 3], [5, 4], [6, 1], [8, 8]])
    x1 = test_array_1[:, 0]
    y1 = test_array_1[:, 1]
    x2 = test_array_2[:, 0]
    y2 = test_array_2[:, 1]
    x_check = test_array_check[:, 0]
    y_check = test_array_check[:, 1]

    c1 = DistanceList(1)
    c1.update(0, x1, y1)
    c2 = DistanceList(1)
    c2.update(0, x2, y2)
    c3 = DistanceList(1)
    c3.merge(c1)
    c3.merge(c2)

    self.assertAlmostEqual(c3[0], np.sum(np.abs(x_check - y_check)), places=13)
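# Why this test holds: the L1 distance is a plain sum over elements, so
# distances computed over separate chunks add up to the distance over the
# concatenated data. A standalone numpy check using the same values as above:
import numpy as np

x1, y1 = np.array([1, 3, 4, 4]), np.array([2, 5, 5, 8])
x2, y2 = np.array([4, 5, 6, 8]), np.array([3, 4, 1, 8])
part = np.sum(np.abs(x1 - y1)) + np.sum(np.abs(x2 - y2))
whole = np.sum(np.abs(np.concatenate([x1, x2]) - np.concatenate([y1, y2])))
assert part == whole  # 8 + 7 == 15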