Example #1
    def fetch_features(self, trace_set_path):
        '''
        Fetches the features (raw trace and y-values) for a single trace set path.
        '''
        # Memoize
        if trace_set_path in self.cache:
            return self.cache[trace_set_path]

        # Apply actions from work()
        result = EMResult(
            task_id=self.request_id)  # Make new collection of results
        ops.process_trace_set_paths(
            result, [trace_set_path],
            self.conf,
            keep_trace_sets=True,
            request_id=self.request_id)  # Store processed trace path in result

        if len(result.trace_sets) > 0:
            signals, values = self._preprocess_trace_set(
                result.trace_sets[0]
            )  # Since we iterate per path, there will be only 1 result in trace_sets

            # Cache
            if self.max_cache is None or len(self.cache) < self.max_cache:
                self.cache[trace_set_path] = (signals, values)

            return signals, values
        else:
            return None
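A minimal usage sketch (not from the source): fetch_features reads like a method on a dataset/loader class constructed with a conf, a request_id, a cache dict, and an optional max_cache limit; the loader instance and the path below are hypothetical.

# Hypothetical caller; `loader` is an instance of the (unshown) class that
# defines fetch_features(), and the path is a placeholder.
features = loader.fetch_features('/datasets/example/trace_set.npy')
if features is not None:
    signals, values = features  # repeated calls for the same path hit the cache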
Example #2
File: rank.py Project: github16cp/emma
def calculate_traceset_rank(trace_set, key_index, true_key, orig_conf):
    conf = Namespace(subkey=key_index,
                     leakage_model=orig_conf.leakage_model,
                     key_low=orig_conf.key_low,
                     key_high=orig_conf.key_high)
    result = EMResult(task_id=None)

    if orig_conf.loss_type == 'categorical_crossentropy':
        ops.pattack_trace_set(trace_set, result, conf, params=None)
        scores = result.probabilities

        key_scores = np.zeros(256)
        for key_guess in range(0, 256):
            key_scores[key_guess] = np.max(scores[key_guess, :])
    else:
        multi_bit_models = (LeakageModelType.AES_MULTI,
                            LeakageModelType.AES_MULTI_TEST,
                            LeakageModelType.HAMMING_WEIGHT_SBOX_OH,
                            LeakageModelType.KEY_BITS,
                            LeakageModelType.KEY_OH,
                            LeakageModelType.AES_BITS_EX,
                            LeakageModelType.HMAC_BITS,
                            LeakageModelType.HMAC,
                            LeakageModelType.HMAC_HAMMING_WEIGHT)
        if conf.leakage_model in multi_bit_models:
            ops.spattack_trace_set(trace_set, result, conf, params=None)
        else:
            ops.attack_trace_set(trace_set, result, conf, params=None)
        scores = result.correlations
        print("Num entries: %d" % scores._n[0][0])

        # Get maximum correlations over all points and interpret as score
        key_scores = np.zeros(256)
        for key_guess in range(0, 256):
            key_scores[key_guess] = np.max(np.abs(scores[key_guess, :]))

    ranks = calculate_ranks(key_scores)
    rank, confidence = get_rank_and_confidence(ranks, key_scores, true_key)

    return rank, confidence
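Example #9 below shows this function's real call site. As a standalone sketch, assuming a windowed trace_set and a parsed EMMA conf are already in scope:

# Rank subkey byte 2 against the true key byte carried by the traces
# (mirrors the call in Example #9).
true_key_byte = trace_set.traces[0].key[2]
rank, confidence = calculate_traceset_rank(trace_set, 2, true_key_byte, conf)
print("Rank %d with confidence %f" % (rank, confidence))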
Example #3
def work(self,
         trace_set_paths,
         conf,
         keep_trace_sets=False,
         keep_scores=True,
         keep_ai=False):
    """
    Actions to be performed by workers on the trace sets given in trace_set_paths.
    """
    resolve_paths(trace_set_paths)  # Get absolute paths

    if isinstance(trace_set_paths, list):
        result = EMResult(task_id=self.request.id)  # Keep state and results

        # Process trace set paths and fill in results of analysis
        process_trace_set_paths(result,
                                trace_set_paths,
                                conf,
                                request_id=self.request.id,
                                keep_trace_sets=keep_trace_sets)

        if not keep_trace_sets:  # Do not return processed traces
            result.trace_sets = None
            result.reference_signal = None
        if not keep_scores:  # Do not return attack scores
            result.correlations = None
            result.distances = None
        if not keep_ai:
            result.ai = None  # Do not return AI object

        return result
    else:
        logger.error("Must provide a list of trace set paths to worker!")
        return None
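The self.request.id access suggests work is a bound Celery task. A hedged dispatch sketch, assuming it is registered with a Celery app (the registration and the path are assumptions, not shown on this page):

# Hypothetical dispatch of the worker task; .delay() is standard Celery.
async_result = work.delay(['/abs/path/to/trace_set.npy'], conf,
                          keep_trace_sets=False, keep_scores=True)
em_result = async_result.get()  # EMResult with trace sets stripped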
Example #4
def merge(self, to_merge, conf):
    if isinstance(to_merge, EMResult):
        to_merge = [to_merge]

    # Is it useful to merge?
    if len(to_merge) >= 1:
        result = EMResult(task_id=self.request.id)

        # If we are attacking, merge the correlations
        # TODO this can be cleaned up
        if conf_has_op(conf, 'attack') or conf_has_op(
                conf, 'memattack') or conf_has_op(conf, 'spattack'):
            # Get size of correlations
            # TODO fixme: initialize the same way as in attack
            shape = to_merge[0].correlations._n.shape

            # Init result
            result.correlations = CorrelationList(shape)

            # Start merging
            for m in to_merge:
                result.correlations.merge(m.correlations)
        # TODO just check for presence of to_merge.distances instead of doing this
        elif conf_has_op(conf, 'dattack'):
            shape = to_merge[0].distances._n.shape
            result.distances = DistanceList(shape)

            for m in to_merge:
                result.distances.merge(m.distances)
        elif conf_has_op(conf, 'pattack'):
            shape = to_merge[0].probabilities.shape
            result.probabilities = np.zeros(shape)

            for m in to_merge:
                result.probabilities += m.probabilities
        elif conf_has_op(conf, 'keyplot'):
            result.means = {}

            tmp = defaultdict(list)
            for m in to_merge:
                for key, mean_traces in m.means.items():
                    tmp[key].extend(mean_traces)

            for key, mean_traces in tmp.items():
                all_traces = np.array(mean_traces)
                print("Merging %d traces for subkey value %s" %
                      (all_traces.shape[0], key))
                result.means[key] = np.mean(all_traces, axis=0)

        # Clean up tasks
        if conf.remote:
            for m in to_merge:
                logger.warning("Deleting %s" % m.task_id)
                app.AsyncResult(m.task_id).forget()

        return result
    else:
        return None
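A sketch of the reduce step this implements, assuming merger is the task instance that owns merge() and the partial EMResult objects come from 'attack' workers (all names hypothetical):

# Hypothetical reduce: fold per-worker partial results into one EMResult.
merged = merger.merge([em_result_a, em_result_b], conf)
combined_scores = merged.correlations  # merged CorrelationList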
Example #5
def optimize_capture(self, trace_set_paths, conf):
    """
    Apply PCA in order to obtain transformation that lowers the dimensionality of the input data.

    :param trace_set_paths: List of trace set paths to be used in the PCA fit
    :param conf: EMMA configuration blob
    :return: None; the fitted PCA model is written to an emcap manifest
    """

    logger.info("Resolving traces")
    resolve_paths(trace_set_paths)

    logger.info("Performing actions")
    result = EMResult()
    process_trace_set_paths(result,
                            trace_set_paths,
                            conf,
                            request_id=None,
                            keep_trace_sets=True)

    logger.info("Extracting signals")
    signals_to_fit = []
    for trace_set in result.trace_sets:
        if not trace_set.windowed:
            logger.warning("Skipping trace_set because not windowed")
            continue

        signals_to_fit.extend([trace.signal for trace in trace_set.traces])
    del result
    signals_to_fit = np.array(signals_to_fit)
    print(signals_to_fit.shape)

    logger.info("Performing PCA")
    pca = PCA(n_components=256)
    pca.fit(signals_to_fit)
    print(pca.explained_variance_ratio_)

    import visualizations
    dummy = traceset.TraceSet(name="test")
    traces = [
        traceset.Trace(signal=x,
                       key=None,
                       plaintext=None,
                       ciphertext=None,
                       mask=None) for x in pca.components_
    ]
    dummy.set_traces(traces)
    visualizations.plot_trace_sets(np.array([0]), [dummy])
    print(pca.singular_values_)

    logger.info("Writing manifest")
    emio.write_emcap_manifest(conf, pca)
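Example #8 below consumes the manifest this task writes. A hedged dispatch sketch, again assuming a bound Celery task (registration and path are assumptions):

# Hypothetical invocation: fit a 256-component PCA on all windowed traces
# and write manifest.emcap for later compression (see Example #8).
optimize_capture.delay(['/abs/path/to/trace_set.npy'], conf)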
Example #6
    def get_all_as_trace_set(self, limit=None):
        if limit is None:
            traces_to_get = self.trace_set_paths
        else:
            traces_to_get = self.trace_set_paths[0:limit]

        result = EMResult(
            task_id=self.request_id)  # Make new collection of results
        ops.process_trace_set_paths(
            result,
            traces_to_get,
            self.conf,
            keep_trace_sets=True,
            request_id=self.request_id)  # Store processed trace path in result

        all_traces = []
        for trace_set in result.trace_sets:
            all_traces.extend(trace_set.traces)

        result = TraceSet(name="all_traces")
        result.set_traces(all_traces)

        return result
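A minimal usage sketch; loader is again a hypothetical instance of the class defining this method:

# Flatten the first five trace set files into a single TraceSet.
merged_set = loader.get_all_as_trace_set(limit=5)
print(len(merged_set.traces))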
Example #7
    def fetch_features_online(self):
        logger.debug("Stream: waiting for packet in queue")
        # Get from blocking queue
        trace_set = self.stream_server.queue.get()

        # Apply work()
        logger.debug("Stream: processing trace set")
        result = EMResult(task_id=self.request_id)
        ops.process_trace_set(result,
                              trace_set,
                              self.conf,
                              keep_trace_sets=False,
                              request_id=self.request_id)

        # Get signals and values
        signals, values = self._preprocess_trace_set(trace_set)

        return signals, values
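A hedged sketch of the streaming consumer this enables; loader and model (a Keras-style model with train_on_batch) are assumptions, not from the source:

# Hypothetical online-training loop: block on the stream queue, then train.
while True:
    signals, values = loader.fetch_features_online()
    model.train_on_batch(signals, values)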
Example #8
def compress_trace_set(trace_set_path):
    if trace_set_path.endswith('.npy'):
        parent_dataset_path = os.path.dirname(trace_set_path)
        manifest_path = os.path.join(parent_dataset_path, 'manifest.emcap')

        if os.path.exists(manifest_path):
            # Open manifest
            with open(manifest_path, 'rb') as manifest_file:
                manifest = pickle.load(manifest_file)
                conf = manifest['conf']

            # Load trace set
            trace_set = emio.get_trace_set(trace_set_path, 'cw', remote=False)
            # Make sure there is no optimize_capture action anymore
            conf_delete_action(conf, 'optimize_capture')

            # Add appropriate actions
            if 'pca' in manifest:
                conf.actions.append(Action('pca[%s]' % manifest_path))
            elif 'autoenc' in manifest:
                conf.actions.append(Action('corrtest[autoenc]'))

            # Perform compression
            result = EMResult()
            ops.process_trace_set(result,
                                  trace_set,
                                  conf,
                                  keep_trace_sets=True)
            processed_trace_set = result.trace_sets[0]

            # Save compressed trace set
            processed_trace_set.save(os.path.abspath(parent_dataset_path),
                                     dry=False)
        else:
            raise EMMAException(
                "No manifest.emcap in %s, so don't know how to compress." %
                parent_dataset_path)
    else:
        raise EMMAException("Not a valid traceset_path in numpy format")
Example #9
def basetest(self, trace_set_paths, conf, rank_trace_step=1000, t=10):
    resolve_paths(trace_set_paths)  # Get absolute paths

    if isinstance(trace_set_paths, list):
        result = EMResult(task_id=self.request.id)  # Keep state and results

        # Process trace set paths
        process_trace_set_paths(result,
                                trace_set_paths,
                                conf,
                                request_id=self.request.id,
                                keep_trace_sets=True)

        all_traces_list = []
        for trace_set in result.trace_sets:
            all_traces_list.extend(trace_set.traces)
        del result

        all_traces = traceset.TraceSet(name="all_traces")
        all_traces.set_traces(all_traces_list)

        num_validation_traces = 60000

        # Perform t-fold base test
        num_steps = int(num_validation_traces / rank_trace_step)
        ranks = np.zeros(shape=(t, num_steps)) + 256
        confidences = np.zeros(shape=(t, num_steps))
        for i in range(0, t):
            print("Fold %d" % i)
            # Randomize trace_sets
            random_indices = np.arange(len(all_traces.traces))
            np.random.shuffle(random_indices)
            validation_traces = np.take(all_traces.traces,
                                        random_indices,
                                        axis=0)[0:num_validation_traces]

            # Now, evaluate the rank for an increasing number of traces from the validation set (in steps of rank_trace_step)
            for j in range(0, num_steps):
                subset = traceset.TraceSet(name="all_traces")
                subset.set_traces(validation_traces[0:(j + 1) *
                                                    rank_trace_step])
                subset.window = Window(begin=0,
                                       end=len(subset.traces[0].signal))
                subset.windowed = True
                r, c = rank.calculate_traceset_rank(subset, 2,
                                                    subset.traces[0].key[2],
                                                    conf)
                ranks[i][j] = r
                confidences[i][j] = c
                print("Rank is %d with confidence %f (%d traces)" %
                      (r, c, (j + 1) * rank_trace_step))

        print(ranks)
        print(confidences)
        data_to_save = {
            'ranks': ranks,
            'confidences': confidences,
            'rank_trace_step': rank_trace_step,
            'folds': t,
            'num_validation_traces': num_validation_traces,
            'conf': conf,
        }
        directory = "./models/%s" % conf_to_id(conf)
        os.makedirs(directory, exist_ok=True)
        pickle.dump(data_to_save,
                    open("%s/basetest-t-ranks.p" % directory, "wb"))
    else:
        logger.error("Must provide a list of trace set paths to worker!")
        return None
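A hedged post-processing sketch: load the pickle that basetest() writes and average the per-fold rank curves. The model directory is a placeholder for whatever conf_to_id(conf) produces.

import pickle
import numpy as np

# Hypothetical analysis of the saved t-fold results.
model_dir = './models/<conf_id>'  # placeholder for conf_to_id(conf)
with open('%s/basetest-t-ranks.p' % model_dir, 'rb') as f:
    data = pickle.load(f)
mean_rank_curve = np.mean(data['ranks'], axis=0)  # rank vs. number of traces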