def roll_trace_set(trace_set, result, conf, params=None):
    if params is None:  # If no parameters provided, window according to reference signal
        roll_window = Window(begin=0, end=len(conf.reference_signal))
    else:
        roll_window = Window(begin=int(params[0]), end=int(params[1]))

    for trace in trace_set.traces:
        trace.signal = np.roll(trace.signal, np.random.randint(roll_window.begin, roll_window.end))
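# Usage sketch (hypothetical bounds): rolling each signal by a random offset can
# serve as data augmentation against trace misalignment. np.roll shifts
# circularly, e.g.:
#
#   >>> np.roll(np.array([1, 2, 3, 4]), 1)
#   array([4, 1, 2, 3])
#
#   roll_trace_set(trace_set, None, conf, params=[0, 64])  # random roll in [0, 64)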
def corrtest_trace_set(trace_set, result, conf=None, params=None):
    if trace_set.windowed:
        # Get params
        if params is None:
            model_type = "aicorrnet"  # TODO model_type can be inferred from conf. Therefore change AI to only require conf.
        else:
            model_type = str(params[0])

        if result.ai is None:
            result.ai = models.AI(conf, model_type)
            result.ai.load()

        # Fetch inputs from trace_set
        x = AIInput(conf).get_trace_set_inputs(trace_set)

        # Get encodings of signals
        encodings = result.ai.predict(x)

        # Replace original signal with encoding
        assert encodings.shape[0] == len(trace_set.traces)
        for i in range(len(trace_set.traces)):
            trace_set.traces[i].signal = encodings[i]

        # Adjust window size
        trace_set.window = Window(begin=0, end=encodings.shape[1])
        trace_set.windowed = True
    else:
        logger.error("The trace set must be windowed before testing can take place because a fixed-size input tensor is required by Tensorflow.")
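# Usage sketch (assumes a trained model is loadable via models.AI for this conf;
# window bounds are hypothetical). The trace set must be windowed first so all
# inputs share one length, then the signals are replaced by their encodings:
#
#   window_trace_set(trace_set, result, conf, params=[0, 1024])
#   corrtest_trace_set(trace_set, result, conf)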
def window_trace_set(trace_set, result, conf, params=None):
    """
    Perform windowing on a specific trace set. See
    https://en.wikipedia.org/wiki/Window_function#Spectral_analysis for a good
    overview of the effects of the different windowing methods on the PSD of
    the signal.

    The trace is windowed according to conf.window.size, or according to the
    size of the reference trace if the window is not configured.

    Interesting excerpt: 'What cannot be seen from the graphs is that the
    rectangular window has the best noise bandwidth, which makes it a good
    candidate for detecting low-level sinusoids in an otherwise white noise
    environment. Interpolation techniques, such as zero-padding and
    frequency-shifting, are available to mitigate its potential scalloping
    loss.'

    Params: (window start, window end)
    """
    windowing_method = conf.windowing_method  # Default windowing

    if params is None:  # If no parameters provided, window according to reference signal
        window = Window(begin=0, end=len(conf.reference_signal))
    else:
        window = Window(begin=int(params[0]), end=int(params[1]))
        if len(params) > 2:  # Override windowing method
            windowing_method = params[2]

    for trace in trace_set.traces:
        length_diff = len(trace.signal[window.begin:]) - window.size
        # Pad or cut
        if length_diff < 0:
            trace.signal = np.lib.pad(trace.signal[window.begin:], (0, abs(length_diff)), 'constant', constant_values=0.0)
        else:
            trace.signal = trace.signal[window.begin:window.end]
        assert len(trace.signal) == window.size

        # Apply window
        if windowing_method == 'rectangular':
            continue  # Already cut rectangularly
        elif windowing_method == 'kaiser':
            trace.signal = trace.signal * np.kaiser(window.size, 14)
        elif windowing_method == 'blackman':
            trace.signal = trace.signal * np.blackman(window.size)
        else:
            logger.warning("Requested unknown windowing method '%s'. Skipping." % windowing_method)
            return

    trace_set.windowed = True
    trace_set.window = window
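# A minimal sketch of what the non-rectangular branches do: multiply the cut
# signal by a taper such as np.kaiser or np.blackman (sizes here are
# hypothetical). The taper attenuates the edges and keeps the centre:
#
#   >>> sig = np.ones(8)
#   >>> tapered = sig * np.kaiser(8, 14)  # beta=14, as used above
#   >>> bool(tapered[0] < tapered[3])
#   True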
def align_trace_set(trace_set, result, conf, params=None):
    """
    Align a set of traces based on a single reference trace using
    cross-correlation. If a trace is empty, it is discarded.
    """
    prefilter = False
    if params is None:  # If no parameters provided, assume percent% max offset
        percent = 0.30
        length = len(conf.reference_signal)
        end = int(length - length * percent)
        begin = int(0 + length * percent)
        window = Window(begin=begin, end=end)
    else:
        window = Window(begin=int(params[0]), end=int(params[1]))
        if len(params) > 2:
            prefilter = bool(params[2])

    logger.info("Aligning %d traces" % len(trace_set.traces))

    aligned_trace_set = []
    reference = conf.reference_signal[window.begin:window.end]
    discarded = 0
    for trace in trace_set.traces:
        aligned_trace = align(trace.signal, reference, cutoff=conf.butter_cutoff, order=conf.butter_order, prefilter=prefilter)
        if aligned_trace is not None:
            trace.signal = aligned_trace
            aligned_trace_set.append(trace)
        else:
            discarded += 1

    if discarded > 0:
        logger.warning("Discarded %d traces that could not be aligned." % discarded)

    trace_set.set_traces(np.array(aligned_trace_set))
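# The align() helper is defined elsewhere in the repo; below is a minimal
# sketch of the cross-correlation idea the docstring names (find the lag where
# the reference best matches, then undo that lag), without the optional
# Butterworth prefilter. _align_sketch is hypothetical, not the real helper:
#
#   def _align_sketch(signal, reference):
#       corr = np.correlate(signal, reference, mode='valid')
#       best_lag = int(np.argmax(corr))
#       return np.roll(signal, -best_lag)  # real align() may also reject poor matches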
def pca_trace_set(trace_set, result, conf=None, params=None):
    if result.pca is None:
        if params is None:
            params = ['manifest.emcap']

        with open(params[0], 'rb') as f:  # TODO fix path to make this more general (param?)
            manifest = pickle.load(f)
            result.pca = manifest['pca']

    for trace in trace_set.traces:
        trace.signal = result.pca.transform([trace.signal])[0]
        assert len(trace.signal) == result.pca.n_components

    trace_set.windowed = True
    trace_set.window = Window(begin=0, end=result.pca.n_components)
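# A minimal sketch of producing the expected manifest, assuming the pickled
# 'pca' object is a fitted sklearn PCA (the transform()/n_components usage
# above matches that interface); the path and component count are hypothetical:
#
#   from sklearn.decomposition import PCA
#
#   pca = PCA(n_components=16).fit(training_signals)  # training_signals: 2D array
#   with open('manifest.emcap', 'wb') as f:
#       pickle.dump({'pca': pca}, f)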
def on_epoch_end(self, epoch, logs=None):
    logs = logs or {}
    if epoch % self.metric_freq != 0 or epoch == 0:
        return
    if self.trace_set is not None:
        # Fetch inputs from trace_set
        x = AIInput(self.conf).get_trace_set_inputs(self.trace_set)

        if self.cnn:
            x = np.expand_dims(x, axis=-1)

        encodings = self.model.predict(x)  # Output: [?, 16]

        # Store encodings as fake traceset
        keys = np.array([trace.key for trace in self.trace_set.traces])
        plaintexts = np.array([trace.plaintext for trace in self.trace_set.traces])
        fake_ts = TraceSet(traces=encodings, plaintexts=plaintexts, keys=keys, name="fake_ts")
        fake_ts.window = Window(begin=0, end=encodings.shape[1])
        fake_ts.windowed = True

        for i in range(self.key_low, self.key_high):
            if len(set(keys[:, i])) > 1:
                print("Warning: nonidentical key bytes detected. Skipping rank calculation")
                print("Subkey %d:" % i)
                print(keys[:, i])
                break
            rank, confidence = calculate_traceset_rank(fake_ts, i, keys[0][i], self.conf)  # TODO: It is assumed here that all true keys of the test set are the same
            self._save_best_rank_model(rank, confidence)
            logs['rank %d' % i] = rank
            logs['confidence %d' % i] = confidence
        # self._save_best_rank_model(np.mean(ranks))
    else:
        print("Warning: no trace_set supplied to RankCallback")
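# Usage sketch (assuming this method lives on the RankCallback referenced in
# the warning above and follows the standard Keras callback interface; the
# constructor arguments are hypothetical):
#
#   callback = RankCallback(conf, trace_set=validation_ts)
#   model.fit(x_train, y_train, epochs=100, callbacks=[callback])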
def get_ascad_trace_set(uri):
    """
    Given a URI, convert ASCAD data to a TraceSet object.
    """
    trace_set = None

    # Process URI
    path, _, group_subset = uri.partition("#")
    group, _, index = group_subset.partition("[")
    index = index.rstrip("]")
    min_index, _, max_index = index.partition(":")
    min_index = int(min_index)
    max_index = int(max_index)

    with h5py.File(path, "r") as h5file:
        h5group = h5file[group]
        traces = h5group["traces"][min_index:max_index]
        plaintexts = h5group["metadata"][min_index:max_index]["plaintext"]
        keys = h5group["metadata"][min_index:max_index]["key"]
        masks = h5group["metadata"][min_index:max_index]["masks"]

        traces = np.array(traces)
        plaintexts = np.array(plaintexts)
        keys = np.array(keys)
        masks = np.array(masks)

        trace_set = TraceSet(name=uri, traces=traces, plaintexts=plaintexts, ciphertexts=None, keys=keys, masks=masks)
        trace_set.window = Window(begin=0, end=len(trace_set.traces[0].signal))
        trace_set.windowed = True

    return trace_set
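# Usage sketch: the URI format follows directly from the parsing above, i.e.
# <hdf5 path>#<group>[<min>:<max>]. The file name is hypothetical; the group
# name matches the public ASCAD database layout:
#
#   ts = get_ascad_trace_set("ASCAD.h5#Profiling_traces[0:1000]")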
def basetest(self, trace_set_paths, conf, rank_trace_step=1000, t=10):
    resolve_paths(trace_set_paths)  # Get absolute paths

    if type(trace_set_paths) is list:
        result = EMResult(task_id=self.request.id)  # Keep state and results

        # Process trace set paths
        process_trace_set_paths(result, trace_set_paths, conf, request_id=self.request.id, keep_trace_sets=True)

        all_traces_list = []
        for trace_set in result.trace_sets:
            all_traces_list.extend(trace_set.traces)
        del result

        all_traces = traceset.TraceSet(name="all_traces")
        all_traces.set_traces(all_traces_list)

        num_validation_traces = 60000

        # Perform t-fold base test (one row per fold)
        ranks = np.zeros(shape=(t, int(num_validation_traces / rank_trace_step))) + 256
        confidences = np.zeros(shape=(t, int(num_validation_traces / rank_trace_step)))
        for i in range(t):
            print("Fold %d" % i)

            # Randomize trace_sets
            random_indices = np.arange(len(all_traces.traces))
            np.random.shuffle(random_indices)
            validation_traces = np.take(all_traces.traces, random_indices, axis=0)[0:num_validation_traces]

            # Now, evaluate the rank for an increasing number of traces from the validation set (in steps of rank_trace_step)
            for j in range(int(num_validation_traces / rank_trace_step)):
                subset = traceset.TraceSet(name="all_traces")
                subset.set_traces(validation_traces[0:(j + 1) * rank_trace_step])
                subset.window = Window(begin=0, end=len(subset.traces[0].signal))
                subset.windowed = True
                r, c = rankcallbacks.calculate_traceset_rank(subset, 2, subset.traces[0].key[2], conf)
                ranks[i][j] = r
                confidences[i][j] = c
                print("Rank is %d with confidence %f (%d traces)" % (r, c, (j + 1) * rank_trace_step))

        print(ranks)
        print(confidences)

        data_to_save = {
            'ranks': ranks,
            'confidences': confidences,
            'rank_trace_step': rank_trace_step,
            'folds': t,
            'num_validation_traces': num_validation_traces,
            'conf': conf,
        }
        directory = "./models/%s" % conf_to_id(conf)
        os.makedirs(directory, exist_ok=True)
        pickle.dump(data_to_save, open("%s/basetest-t-ranks.p" % directory, "wb"))
    else:
        logger.error("Must provide a list of trace set paths to worker!")
        return None
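# Usage sketch (assumption: basetest is registered as a bound Celery task,
# which the use of self.request.id above suggests; the paths are hypothetical):
#
#   basetest.delay(["/traces/set1.npz", "/traces/set2.npz"], conf, rank_trace_step=1000, t=10)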
def sum_trace_set(trace_set, result, conf=None, params=None):
    for trace in trace_set.traces:
        trace.signal = np.array([np.sum(trace.signal)])

    trace_set.windowed = True
    trace_set.window = Window(begin=0, end=1)
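# After this operation each trace carries a single sample, hence the window of
# size 1. For example, a signal of [1, 2, 3] becomes array([6]):
#
#   >>> np.array([np.sum(np.array([1, 2, 3]))])
#   array([6])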