Code example #1
def corrtest_trace_set(trace_set, result, conf=None, params=None):
    logger.info("corrtest %s" % (str(params) if params is not None else ""))
    if trace_set.windowed:
        # Get params
        if params is None:
            model_type = "aicorrnet"  # TODO model_type can be inferred from conf. Therefore change AI to only require conf.
        else:
            model_type = str(params[0])

        if result.ai is None:
            result.ai = ai.AI(conf, model_type)
            result.ai.load()

        # Fetch inputs from trace_set
        x = AIInput(conf).get_trace_set_inputs(trace_set)

        # Get encodings of signals
        encodings = result.ai.predict(x)

        # Replace original signal with encoding
        assert encodings.shape[0] == len(trace_set.traces)
        for i, trace in enumerate(trace_set.traces):
            trace.signal = encodings[i]

        # Adjust window size
        trace_set.window = Window(begin=0, end=encodings.shape[1])
        trace_set.windowed = True
    else:
        logger.error(
            "The trace set must be windowed before testing can take place because TensorFlow requires a fixed-size input tensor."
        )
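A minimal invocation sketch for the function above. Everything here except corrtest_trace_set itself is an assumption: in the emma project the result object, conf and the windowed trace_set come from the worker pipeline, not from this snippet.

from types import SimpleNamespace

result = SimpleNamespace(ai=None)  # hypothetical stand-in for the worker's result object
# conf and trace_set are assumed to be provided by the surrounding pipeline;
# trace_set must already be windowed so the model receives a fixed-size input.
corrtest_trace_set(trace_set, result, conf=conf, params=["aicorrnet"])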
Code example #2
File: aiiterators.py Project: zhihuishuwp/emma
    def _preprocess_trace_set(self, trace_set):
        """
        Preprocess trace_set specifically for AutoEncoder
        """

        # Get model inputs (usually the trace signal)
        signals = AIInput(self.conf).get_trace_set_inputs(trace_set)

        return signals, signals  # Model output is same as model input
Code example #3
File: aiiterators.py Project: zhihuishuwp/emma
    def _preprocess_trace_set(self, trace_set):
        """
        Preprocess trace_set specifically for AICorrNet
        """

        # Get model inputs (usually the trace signal)
        signals = AIInput(self.conf).get_trace_set_inputs(trace_set)

        # Get model labels (key byte leakage values to correlate / analyze)
        values = LeakageModel(self.conf).get_trace_set_leakages(trace_set)

        return signals, values
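Examples #2 and #3 differ only in their labels: the autoencoder variant trains the model to reproduce its own input (signals, signals), while the AICorrNet variant pairs the signals with leakage-model values. Below is a minimal sketch of how the returned (inputs, labels) pair maps onto Keras training; the iterator, trace_set and compiled model variables are assumptions, not part of the project code.

x, y = iterator._preprocess_trace_set(trace_set)  # iterator and trace_set assumed to exist
model.fit(x, y, epochs=1, batch_size=32)          # model assumed to be a compiled Keras model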
Code example #4
File: rank.py Project: github16cp/emma
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}

        if epoch % self.metric_freq != 0 or epoch == 0:
            return
        if self.trace_set is not None:
            # Fetch inputs from trace_set
            x = AIInput(self.conf).get_trace_set_inputs(self.trace_set)

            if self.cnn:
                x = np.expand_dims(x, axis=-1)

            encodings = self.model.predict(x)  # Output: [?, 16]

            # Store encodings as fake traceset
            keys = np.array([trace.key for trace in self.trace_set.traces])
            plaintexts = np.array(
                [trace.plaintext for trace in self.trace_set.traces])
            fake_ts = TraceSet(traces=encodings,
                               plaintexts=plaintexts,
                               keys=keys,
                               name="fake_ts")
            fake_ts.window = Window(begin=0, end=encodings.shape[1])
            fake_ts.windowed = True

            for i in range(self.key_low, self.key_high):
                if len(set(keys[:, i])) > 1:
                    print(
                        "Warning: nonidentical key bytes detected. Skipping rank calculation"
                    )
                    print("Subkey %d:" % i)
                    print(keys[:, i])
                    break
                rank, confidence = calculate_traceset_rank(
                    fake_ts, i, keys[0][i], self.conf
                )  # TODO: It is assumed here that all true keys of the test set are the same
                self._save_best_rank_model(rank, confidence)
                logs['rank %d' % i] = rank
                logs['confidence %d' % i] = confidence
            #self._save_best_rank_model(np.mean(ranks))
        else:
            print("Warning: no trace_set supplied to RankCallback")
Code example #5
def salvis(self, trace_set_paths, model_type, vis_type, conf):
    """
    Visualize the salience of an AI.
    :param self:
    :param trace_set_paths: List of trace set paths to be used as possible examples for the saliency visualization.
    :param model_type: Type of model to load for this configuration.
    :param vis_type: Type of saliency visualization to perform (e.g. 'kerasvis').
    :param conf: Configuration of the model (required preprocessing actions, architecture, etc.).
    :return:
    """
    logger.info("Loading model")
    model = ai.AI(conf, model_type)
    model.load()

    logger.info("Resolving traces")
    resolve_paths(trace_set_paths)
    examples_iterator, _ = aiiterators.get_iterators_for_model(
        model_type,
        trace_set_paths, [],
        conf,
        hamming=conf.hamming,
        subtype=None,
        request_id=self.request.id)

    logger.info("Retrieving batch of examples")
    trace_set = examples_iterator.get_all_as_trace_set(
        limit=int(conf.saliency_num_traces / 256))
    examples_batch = AIInput(conf).get_trace_set_inputs(trace_set)
    examples_batch = examples_batch[0:conf.saliency_num_traces, :]
    if len(examples_batch.shape) != 2:
        raise ValueError(
            "Expected 2D examples batch for saliency visualization.")

    if conf.saliency_remove_bias:
        examples_batch = examples_batch[:, 1:]
    kerasvis = (vis_type == 'kerasvis')

    return SalvisResult(examples_batch=examples_batch,
                        gradients=saliency.get_gradients(conf,
                                                         model,
                                                         examples_batch,
                                                         kerasvis=kerasvis))
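A brief follow-up sketch of consuming the returned SalvisResult, assuming it simply exposes the two fields passed to its constructor above and that the gradients come back as a NumPy array; the call arguments below are placeholders.

res = salvis(task, trace_set_paths, "aicorrnet", "kerasvis", conf)  # placeholder arguments
print(res.examples_batch.shape)  # examples fed to the saliency computation
print(res.gradients.shape)       # gradients returned by saliency.get_gradients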