Example #1
    def _resolve_conflicts(self, conf):
        # Overrides
        if conf_has_op(conf, 'rwindow'):
            conf.max_cache = 0  # rwindow randomly shifts traces, so we cannot cache these traces during training

        # Sanity checks
        if conf.refset and not conf_has_op(conf, 'align'):
            raise EMMAConfException("Refset specified, but no align action")
        if conf.key_low >= conf.key_high:
            raise EMMAConfException("key_low should be < key_high")
        if self.dataset_val is not None and str(self.dataset.id) == str(self.dataset_val.id):
            raise EMMAConfException("Validation set should never be the same as the training set.")
        if conf_has_op(conf, 'keyplot'):
            # Transparently add a groupkeys op, which is required for keyplot
            if not conf_has_op(conf, 'groupkeys'):
                conf.actions = conf.actions[0:-1] + [Action('groupkeys')] + conf.actions[-1:]
        if conf_has_op(conf, 'corrtrain'):
            if conf.loss_type == 'softmax_crossentropy' and conf.key_high - conf.key_low != 1:
                raise EMMAConfException("Softmax crossentropy with multiple key bytes is currently not supported")
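All of the examples gate behaviour on conf_has_op, a helper that is not shown here. A minimal sketch of what such a check could look like, assuming each entry in conf.actions carries the op name in an op attribute (the real EMMA helper may differ):

def conf_has_op(conf, op_name):
    # Hypothetical sketch only: report whether any configured action uses the given op
    return any(action.op == op_name for action in conf.actions)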
Example #2
def __perform_plot(emma, *params):
    trace_sets_to_get = max(int(emma.conf.plot_num_traces / emma.dataset.traces_per_set), 1)
    em_result = submit_task(
        ops.work,  # Op
        emma.dataset.trace_set_paths[0:trace_sets_to_get],
        emma.conf,
        keep_trace_sets=True,
        keep_scores=False,  # Op parameters
        remote=emma.conf.remote,
        message="Performing actions")

    visualizations.plot_trace_sets(
        em_result.reference_signal,
        em_result.trace_sets,
        params=params,
        no_reference_plot=emma.conf.plot_no_reference,
        num_traces=emma.conf.plot_num_traces,
        title=emma.conf.plot_title,
        xlabel=emma.conf.plot_xlabel,
        ylabel=emma.conf.plot_ylabel,
        colorbar_label=emma.conf.plot_colorbar_label,
        time_domain=(not (conf_has_op(emma.conf, 'spec') or conf_has_op(emma.conf, 'fft')))
                    or emma.conf.plot_force_timedomain,
        sample_rate=1.0)
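The snippet derives how many trace set paths to submit from the requested number of plotted traces and the number of traces per set. A quick worked illustration with made-up values:

plot_num_traces, traces_per_set = 1000, 256  # illustrative values, not taken from any EMMA config
trace_sets_to_get = max(int(plot_num_traces / traces_per_set), 1)
print(trace_sets_to_get)  # -> 3; max() guarantees at least one trace set path even for tiny requests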
Example #3
def __perform_keyplot(emma, message="Grouping keys..."):
    if emma.conf.remote:
        async_result = parallel_work(emma.dataset.trace_set_paths, emma.conf)
        em_result = wait_until_completion(async_result, message=message)
    else:
        em_result = ops.work(emma.dataset.trace_set_paths, emma.conf)
        em_result = ops.merge(em_result, emma.conf)

    visualizations.plot_keyplot(
        em_result.means,
        time_domain=(not (conf_has_op(emma.conf, 'spec') or conf_has_op(emma.conf, 'fft')))
                    or emma.conf.plot_force_timedomain,
        sample_rate=1.0,
        show=True)
Example #4
def __perform_keyplot(emma, message="Grouping keys..."):
    for subkey in range(emma.conf.key_low, emma.conf.key_high):
        emma.conf.subkey = subkey  # Set in conf, so the workers know which subkey to attack

        if emma.conf.remote:
            async_result = parallel_work(emma.dataset.trace_set_paths, emma.conf)
            em_result = wait_until_completion(async_result, message=message)
        else:
            em_result = ops.work(emma.dataset.trace_set_paths, emma.conf)
            em_result = ops.merge(em_result, emma.conf)

        visualizations.plot_keyplot(
            em_result.means,
            time_domain=(not (conf_has_op(emma.conf, 'spec') or conf_has_op(emma.conf, 'fft')))
                        or emma.conf.plot_force_timedomain,
            sample_rate=1.0,
            show=True)
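This variant produces one keyplot per key byte: conf.subkey is updated inside the loop so the workers know which byte to group on. A runnable illustration of the iteration bounds (values are illustrative, e.g. 16 bytes for an AES-128 key):

key_low, key_high = 0, 16  # illustrative bounds: all 16 key bytes of AES-128
for subkey in range(key_low, key_high):
    print("producing keyplot for subkey %d" % subkey)  # one plot per key byte, indices 0..15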
Example #5
def merge(self, to_merge, conf):
    if type(to_merge) is EMResult:
        to_merge = [to_merge]

    # Is it useful to merge?
    if len(to_merge) >= 1:
        result = EMResult(task_id=self.request.id)

        # If we are attacking, merge the correlations
        # TODO this can be cleaned up
        if conf_has_op(conf, 'attack') or conf_has_op(conf, 'memattack') or conf_has_op(conf, 'spattack'):
            # Get size of correlations
            shape = to_merge[0].correlations._n.shape  # TODO fixme: initialize the same way as in attack

            # Init result
            result.correlations = CorrelationList(shape)

            # Start merging
            for m in to_merge:
                result.correlations.merge(m.correlations)
        # TODO just check for presence of to_merge.distances instead of doing this
        elif conf_has_op(conf, 'dattack'):
            shape = to_merge[0].distances._n.shape
            result.distances = DistanceList(shape)

            for m in to_merge:
                result.distances.merge(m.distances)
        elif conf_has_op(conf, 'pattack'):
            shape = to_merge[0].probabilities.shape
            result.probabilities = np.zeros(shape)

            for m in to_merge:
                result.probabilities += m.probabilities
        elif conf_has_op(conf, 'keyplot'):
            result.means = {}

            tmp = defaultdict(lambda: [])
            for m in to_merge:
                for key, mean_traces in m.means.items():
                    tmp[key].extend(mean_traces)

            for key, mean_traces in tmp.items():
                all_traces = np.array(mean_traces)
                print("Merging %d traces for subkey value %s" %
                      (all_traces.shape[0], key))
                result.means[key] = np.mean(all_traces, axis=0)

        # Clean up tasks
        if conf.remote:
            for m in to_merge:
                logger.warning("Deleting %s" % m.task_id)
                app.AsyncResult(m.task_id).forget()

        return result
    else:
        return None
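In the 'keyplot' branch, merge gathers the per-key mean traces from every partial result and averages them along the trace axis. A self-contained illustration of that branch with made-up values:

import numpy as np
from collections import defaultdict

# Two hypothetical partial results, each mapping a key byte value to a list of mean traces
partial_means = [{0x3f: [np.array([1.0, 2.0, 3.0])]},
                 {0x3f: [np.array([3.0, 4.0, 5.0])]}]

tmp = defaultdict(list)
for m in partial_means:
    for key, mean_traces in m.items():
        tmp[key].extend(mean_traces)

merged_means = {key: np.mean(np.array(traces), axis=0) for key, traces in tmp.items()}
print(merged_means[0x3f])  # -> [2. 3. 4.]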
Example #6
    def _setup(self, emma_conf):
        """
        Get a list of relative trace set paths for the dataset identifier and retrieve
        a reference signal for the entire dataset.

        Example trace set paths:
        em-arduino/trace1.npy
        em-arduino/trace2.npy
        ...
        em-arduino/tracen.npy

        Where trace1.npy is loaded as the reference signal.

        At a later time, the relative paths need to be resolved to absolute paths
        on the workers.
        """
        settings = configparser.RawConfigParser()
        settings.read('settings.conf')
        self.root = settings.get("Datasets", "datasets_path")

        # Assign trace set paths
        if self.format == "cw":  # .npy
            path = join(self.root, self.id)
            self.trace_set_paths = sorted([
                join(self.id, f) for f in listdir(path)
                if isfile(join(path, f)) and '_traces.npy' in f
            ])
        elif self.format == "sigmf":  # .meta
            self.trace_set_paths = None
            raise NotImplementedError
        elif self.format == "gnuradio":  # .cfile
            self.trace_set_paths = None
            raise NotImplementedError
        elif self.format == "ascad":  # ASCAD .h5
            # Hack to force split between validation and training set in ASCAD
            validation_set = join(
                self.root,
                'ASCAD/ASCAD_data/ASCAD_databases/%s.h5-val' % self.id)
            training_set = join(
                self.root,
                'ASCAD/ASCAD_data/ASCAD_databases/%s.h5-train' % self.id)

            # Make sure we never use training set when attacking or classifying
            if emma_conf is not None and (
                    conf_has_op(emma_conf, 'attack')
                    or conf_has_op(emma_conf, 'classify')
                    or conf_has_op(emma_conf, 'dattack')
                    or conf_has_op(emma_conf, 'spattack')
                    or conf_has_op(emma_conf, 'pattack')):
                self.trace_set_paths = [validation_set]
            else:
                self.trace_set_paths = [validation_set, training_set]
        else:
            raise Exception("Unknown input format '%s'" % self.format)

        assert (len(self.trace_set_paths) > 0)

        # Assign reference signal
        reference_trace_set = emio.get_trace_set(
            join(self.root, self.trace_set_paths[0]),
            self.format,
            ignore_malformed=False,
            remote=False)

        self.traces_per_set = len(reference_trace_set.traces)
        self.reference_signal = reference_trace_set.traces[self.reference_index].signal
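_setup reads the dataset root from a settings.conf file in the working directory via RawConfigParser. A minimal sketch covering only the section and key the snippet actually reads (the path is a placeholder):

[Datasets]
datasets_path = /path/to/datasets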