Example #1
def groupkeys_trace_set(trace_set, result, conf=None, params=None):
    """
    Group traces by key byte and return the mean trace of each key byte value. Then plot the result.
    :param trace_set: 
    :param result: 
    :param conf: 
    :param params: 
    :return: 
    """
    logger.info("groupkeys %s" % (str(params) if not params is None else ""))

    if not trace_set.windowed:
        logger.warning("Trace set not windowed. Skipping groupkeys.")
        return

    if result.means is None:
        result.means = defaultdict(list)

    leakage_model = LeakageModel(conf)
    tmp = defaultdict(list)
    for trace in trace_set.traces:
        leakage = leakage_model.get_trace_leakages(trace, conf.subkey)
        if isinstance(leakage, list):
            for leakage_index, leakage_value in enumerate(leakage):
                key = "(%d,%02x)" % (leakage_index, leakage_value)
                tmp[key].append(trace.signal)
        else:
            tmp["%02x" % leakage].append(trace.signal)

    for key, traces in tmp.items():
        all_traces = np.array(traces)
        print("Mean of %d traces for key %s (subkey %d)" %
              (all_traces.shape[0], key, conf.subkey))
        result.means[key].append(np.mean(all_traces, axis=0))
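
A minimal, self-contained sketch of the same grouping step in plain NumPy; the toy arrays below are illustrative and not part of the framework:

import numpy as np
from collections import defaultdict

# Toy data: 4 windowed traces of 3 samples each, one leakage byte per trace.
signals = np.array([[1.0, 2.0, 3.0],
                    [2.0, 3.0, 4.0],
                    [5.0, 5.0, 5.0],
                    [7.0, 7.0, 7.0]])
leakages = [0x2a, 0x2a, 0x3f, 0x3f]

groups = defaultdict(list)
for signal, leakage in zip(signals, leakages):
    groups["%02x" % leakage].append(signal)

# Mean trace per leakage value, as accumulated in result.means above.
means = {key: np.mean(np.array(ts), axis=0) for key, ts in groups.items()}
print(means)  # {'2a': array([1.5, 2.5, 3.5]), '3f': array([6., 6., 6.])}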
Example #2
def pattack_trace_set(trace_set, result, conf=None, params=None):
    """
    Probability attack on a trace set: accumulate, for each of the 256 subkey
    guesses, the log-likelihood of the observed class probabilities in
    result.probabilities.
    """
    logger.info("pattack %s" % (str(params) if params is not None else ""))

    num_keys = conf.key_high - conf.key_low
    num_outputs_per_key = LeakageModel.get_num_outputs(conf) // num_keys

    # Init if first time
    if result.probabilities is None:
        result.probabilities = np.zeros(
            [256, 1])  # We have 256 probabilities for values for 1 subkey

    if not trace_set.windowed:
        logger.warning("Trace set not windowed. Skipping attack.")
        return

    if trace_set.num_traces <= 0:
        logger.warning("Skipping empty trace set.")
        return

    hypotheses = np.empty([256, trace_set.num_traces, num_outputs_per_key])

    # 1. Build hypotheses for all 256 possibilities of the key and all traces
    leakage_model = LeakageModel(conf)
    for subkey_guess in range(0, 256):
        for i in range(0, trace_set.num_traces):
            hypotheses[subkey_guess, i, :] = leakage_model.get_trace_leakages(
                trace=trace_set.traces[i],
                key_byte_index=conf.subkey,
                key_hypothesis=subkey_guess)

    # 2. For each trace, update the log-likelihood of every subkey guess
    for i in range(0, trace_set.num_traces):
        k = conf.subkey - conf.key_low

        # Slice this trace's measurements for the target subkey
        measurements = trace_set.traces[i].signal[num_outputs_per_key *
                                                  k:num_outputs_per_key *
                                                  (k + 1)]

        # Correlate measurements with 256 hypotheses
        for subkey_guess in range(0, 256):
            # Get sbox[p ^ guess]
            hypo = np.argmax(hypotheses[subkey_guess, i])

            # Get probability of this hypothesis
            proba = measurements[hypo]

            # Update probabilities
            result.probabilities[subkey_guess, 0] += np.log(proba + 1e-6)  # epsilon avoids log(0)
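
The step-2 update can be checked in isolation. A hedged sketch: the toy probas array stands in for the per-class probability vectors that windowed traces carry after the preceding profiling step, and hypothetical_hw stands in for the leakage-model hypotheses:

import numpy as np

num_classes = 9  # e.g. Hamming weights 0..8
rng = np.random.default_rng(0)
probas = rng.dirichlet(np.ones(num_classes), size=5)           # 5 traces, one distribution each
hypothetical_hw = rng.integers(0, num_classes, size=(256, 5))  # stand-in hypotheses

log_likelihood = np.zeros(256)
for guess in range(256):
    for i in range(5):
        # Same update as above: add the log-probability of the hypothesized class.
        log_likelihood[guess] += np.log(probas[i, hypothetical_hw[guess, i]] + 1e-6)
best_guess = np.argmax(log_likelihood)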
Example #3
def _get_correlation_loss(conf):
    num_outputs = LeakageModel.get_num_outputs(conf)
    num_keys = conf.key_high - conf.key_low

    def correlation_loss(y_true_raw, y_pred_raw):
        """
        Custom loss function that calculates the Pearson correlation of the prediction with
        the true values over a number of batches.
        """
        if num_outputs > num_keys:
            y_true_raw = K.reshape(y_true_raw, (-1, num_keys))
            y_pred_raw = K.reshape(y_pred_raw, (-1, num_keys))

        # y_true_raw = K.print_tensor(y_true_raw, message='y_true_raw = ')  # Note: print_tensor truncates its output incorrectly
        # y_pred_raw = K.print_tensor(y_pred_raw, message='y_pred_raw = ')
        y_true = (
            y_true_raw - K.mean(y_true_raw, axis=0, keepdims=True)
        )  # We are taking correlation over columns, so normalize columns
        y_pred = (y_pred_raw - K.mean(y_pred_raw, axis=0, keepdims=True))

        loss = K.variable(0.0)
        for key_col in range(num_keys):
            y_key = K.expand_dims(y_true[:, key_col],
                                  axis=1)  # [?, 16] -> [?, 1]
            y_keypred = K.expand_dims(y_pred[:, key_col],
                                      axis=1)  # [?, 16] -> [?, 1]
            denom = K.sqrt(K.dot(K.transpose(y_keypred), y_keypred)) * K.sqrt(
                K.dot(K.transpose(y_key), y_key))
            denom = K.maximum(denom, K.epsilon())
            correlation = K.dot(K.transpose(y_key), y_keypred) / denom
            loss += 1.0 - correlation

        return loss

    return correlation_loss
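
The same loss can be computed with NumPy, which is how Example #10 below verifies the Keras version; a condensed equivalent, assuming y_true and y_pred are already shaped [batch, num_keys]:

import numpy as np

def correlation_loss_np(y_true, y_pred, eps=1e-7):
    # Column-wise Pearson correlation; the loss sums (1 - corr) over key columns.
    y_true = y_true - y_true.mean(axis=0, keepdims=True)
    y_pred = y_pred - y_pred.mean(axis=0, keepdims=True)
    loss = 0.0
    for col in range(y_true.shape[1]):
        denom = np.sqrt(y_pred[:, col] @ y_pred[:, col]) * np.sqrt(y_true[:, col] @ y_true[:, col])
        loss += 1.0 - (y_true[:, col] @ y_pred[:, col]) / max(denom, eps)
    return loss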
Example #4
def spattack_trace_set(trace_set, result, conf=None, params=None):
    """
    Single-point correlation attack on a trace set: for each of the 256 subkey
    guesses, update the running correlation between the guess's hypothesized
    leakages and the measured values in the subkey's window.
    """
    logger.info("spattack %s" % (str(params) if params is not None else ""))

    num_keys = conf.key_high - conf.key_low
    num_outputs_per_key = LeakageModel.get_num_outputs(conf) // num_keys

    # Init if first time
    if result.correlations is None:
        result.correlations = CorrelationList(
            [256, 1])  # We only have 1 output point (correlation)

    if not trace_set.windowed:
        logger.warning("Trace set not windowed. Skipping attack.")
        return

    if trace_set.num_traces <= 0:
        logger.warning("Skipping empty trace set.")
        return

    hypotheses = np.empty([256, trace_set.num_traces, num_outputs_per_key])

    # 1. Build hypotheses for all 256 possibilities of the key and all traces
    leakage_model = LeakageModel(conf)
    for subkey_guess in range(0, 256):
        for i in range(0, trace_set.num_traces):
            hypotheses[subkey_guess, i, :] = leakage_model.get_trace_leakages(
                trace=trace_set.traces[i],
                key_byte_index=conf.subkey,
                key_hypothesis=subkey_guess)

    # 2. For each trace, correlate its measurements with all 256 hypotheses
    for i in range(0, trace_set.num_traces):
        k = conf.subkey - conf.key_low

        # Slice this trace's measurements for the target subkey
        measurements = trace_set.traces[i].signal[num_outputs_per_key *
                                                  k:num_outputs_per_key *
                                                  (k + 1)]

        # Correlate measurements with 256 hypotheses
        for subkey_guess in range(0, 256):
            # Update correlation
            result.correlations.update((subkey_guess, 0),
                                       hypotheses[subkey_guess,
                                                  i, :], measurements)
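
CorrelationList's internals are not shown in this listing; as a loud assumption, its update((guess, 0), x, y) most plausibly maintains the running sums of a streaming Pearson correlation, along these lines:

import numpy as np

class RunningCorrelation:
    # Streaming Pearson correlation via accumulated sums (assumed behaviour of
    # CorrelationList; only the update/read-out pattern is taken from above).
    def __init__(self):
        self.n = 0
        self.sx = self.sy = self.sxx = self.syy = self.sxy = 0.0

    def update(self, x, y):
        x, y = np.asarray(x, dtype=float), np.asarray(y, dtype=float)
        self.n += x.size
        self.sx += x.sum()
        self.sy += y.sum()
        self.sxx += (x * x).sum()
        self.syy += (y * y).sum()
        self.sxy += (x * y).sum()

    def value(self):
        num = self.n * self.sxy - self.sx * self.sy
        den = np.sqrt(self.n * self.sxx - self.sx ** 2) * np.sqrt(self.n * self.syy - self.sy ** 2)
        return num / den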
Example #5
def classify_trace_set(trace_set, result, conf=None, params=None):
    logger.info("classify %s" % (str(params) if not params is None else ""))

    if trace_set.windowed:
        leakage_model = LeakageModel(conf)
        for trace in trace_set.traces:
            true_value = np.argmax(
                leakage_model.get_trace_leakages(
                    trace, conf.subkey))  # Get argmax of one-hot true label
            predicted_value = np.argmax(
                trace.signal
            )  # Get argmax of prediction from corrtest (previous step)
            result.labels.append(true_value)
            result.predictions.append(predicted_value)
            logprobs = ai.softmax_np(np.array(trace.signal))  # note: softmax_np yields probabilities, stored under the logprobs name
            result.logprobs.append(list(logprobs))
    else:
        logger.error(
            "The trace set must be windowed before classification because TensorFlow requires a fixed-size input tensor."
        )
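
ai.softmax_np is not defined in this listing; a standard numerically stable softmax is assumed below, together with the accuracy that the collected labels and predictions support:

import numpy as np

def softmax_np(x):
    e = np.exp(x - np.max(x))  # subtract the max for numerical stability
    return e / e.sum()

labels = np.array([3, 7, 3])
predictions = np.array([3, 7, 5])
accuracy = np.mean(labels == predictions)  # 2/3 for this toy data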
Example #6
def dattack_trace_set(trace_set, result, conf=None, params=None):
    """
    Perform CPA attack on a trace set. Assumes the traces in trace_set are real time domain signals.
    """
    logger.info("dattack %s" % (str(params) if not params is None else ""))

    # Init if first time
    if result.distances is None:
        result.distances = DistanceList([256, trace_set.window.size])

    if not trace_set.windowed:
        logger.warning("Trace set not windowed. Skipping attack.")
        return

    if trace_set.num_traces <= 0:
        logger.warning("Skipping empty trace set.")
        return

    hypotheses = np.empty([256, trace_set.num_traces])

    # 1. Build hypotheses for all 256 possibilities of the key and all traces
    leakage_model = LeakageModel(conf)
    for subkey_guess in range(0, 256):
        for i in range(0, trace_set.num_traces):
            hypotheses[subkey_guess, i] = leakage_model.get_trace_leakages(
                trace=trace_set.traces[i],
                key_byte_index=conf.subkey,
                key_hypothesis=subkey_guess)

    # 2. Given point j of trace i, calculate the distance between all hypotheses
    for j in range(0, trace_set.window.size):
        # Get measurements (columns) from all traces
        measurements = np.empty(trace_set.num_traces)
        for i in range(0, trace_set.num_traces):
            measurements[i] = trace_set.traces[i].signal[j]

        # Compare measurements against all 256 hypotheses
        for subkey_guess in range(0, 256):
            # Update distances
            result.distances.update((subkey_guess, j),
                                    hypotheses[subkey_guess, :], measurements)
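
Once all trace sets are processed, each (guess, point) cell of result.distances holds an accumulated score; the usual final step of such an attack is to rank guesses by their peak score over the window. A sketch, assuming the scores can be exported as a [256, window] array:

import numpy as np

scores = np.abs(np.random.randn(256, 100))   # stand-in for the accumulated distances
peak_per_guess = scores.max(axis=1)          # best point for each subkey guess
ranking = np.argsort(peak_per_guess)[::-1]   # most likely guess first
best_guess = ranking[0]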
Example #7
    def _preprocess_trace_set(self, trace_set):
        """
        Preprocess trace_set specifically for AICorrNet
        """

        # Get model inputs (usually the trace signal)
        signals = AIInput(self.conf).get_trace_set_inputs(trace_set)

        # Get model labels (key byte leakage values to correlate / analyze)
        values = LeakageModel(self.conf).get_trace_set_leakages(trace_set)

        return signals, values
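
This (signals, values) pair is the (x, y) contract the trainer consumes; Example #10 below exercises it directly, essentially:

x, y = it_dummy._preprocess_trace_set(trace_set)   # signals and leakage labels
model.train_set(x, y, save=False, epochs=conf.epochs)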
Example #8
    def __init__(self, conf, input_dim, name="aicorrnet"):
        super(AICorrNet, self).__init__(conf, name)

        #optimizer = keras.optimizers.SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
        #optimizer = keras.optimizers.Adam(lr=0.00001, beta_1=0.9, beta_2=0.999, decay=0.0)
        if self.cnn:
            optimizer = keras.optimizers.Nadam(lr=conf.lr / 10.0)
        else:
            optimizer = keras.optimizers.Nadam(lr=conf.lr)
        #optimizer = keras.optimizers.Adadelta()

        if not self.cnn:
            self.model = Sequential()
            #initializer = keras.initializers.Constant(value=1.0/input_dim)
            #initializer = keras.initializers.Constant(value=0.5)
            #initializer = keras.initializers.Constant(value=1.0)
            #initializer = keras.initializers.RandomUniform(minval=0, maxval=1.0, seed=None)
            #initializer = keras.initializers.RandomUniform(minval=0, maxval=0.001, seed=None)
            initializer = 'glorot_uniform'
            #constraint = Clip()
            constraint = None

            # Hidden layers
            for _ in range(self.n_hidden_layers):
                hidden_nodes = conf.n_hidden_nodes
                self.model.add(Dense(hidden_nodes,
                                     input_dim=input_dim,
                                     use_bias=self.use_bias,
                                     activation=None,
                                     kernel_initializer=initializer,
                                     kernel_regularizer=str_to_reg(self.reg, self.reg_lambda)))
                input_dim = hidden_nodes  # the next layer's input is this layer's width
                if self.batch_norm:
                    self.model.add(BatchNormalization(momentum=self.momentum))
                self.model.add(str_to_activation(self.activation))

            # Output layer
            extra_outputs = 1 if conf.loss_type == 'correlation_special' else 0
            self.model.add(Dense(LeakageModel.get_num_outputs(conf) + extra_outputs,
                                 input_dim=input_dim,
                                 use_bias=self.use_bias,
                                 activation=None,
                                 kernel_initializer=initializer,
                                 kernel_constraint=constraint,
                                 kernel_regularizer=str_to_reg(self.regfinal, self.reg_lambda)))
            if self.batch_norm:
                self.model.add(BatchNormalization(momentum=0.1))
            self.model.add(str_to_activation(self.activation))
        else:
            from ASCAD_train_models import cnn_best_nosoftmax
            self.model = cnn_best_nosoftmax(input_shape=(input_dim, 1), classes=conf.key_high - conf.key_low)

        # Compile model
        self.model.compile(optimizer=optimizer, loss=self.loss, metrics=[])

        # Custom callbacks
        self.callbacks['tensorboard'] = CustomTensorboard(log_dir='/tmp/keras/' + self.name + '-' + self.id, freq=self.metric_freq)

        if not conf.norank:
            self.callbacks['rank'] = rank.CorrRankCallback(conf, '/tmp/keras/' + self.name + '-' + self.id + '/rank/', save_best=True, save_path=self.model_path)
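
str_to_activation and str_to_reg are helpers from the surrounding codebase that this listing does not include; the minimal versions below are assumptions, given only so the constructor reads in isolation:

import keras

def str_to_activation(name):
    # Assumed mapping from a config string to a Keras activation layer.
    if name == 'leakyrelu':
        return keras.layers.LeakyReLU()
    return keras.layers.Activation(name)

def str_to_reg(name, reg_lambda):
    # Assumed mapping from a config string to a Keras weight regularizer.
    if name is None:
        return None
    return {'l1': keras.regularizers.l1, 'l2': keras.regularizers.l2}[name](reg_lambda)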
Example #9
class SignalLeakageAIInput(AIInput):
    input_type = AIInputType.SIGNAL_LEAKAGE

    def __init__(self, conf):
        super().__init__(conf)
        self.leakage_model = LeakageModel(conf)

    def get_trace_inputs(self, trace):
        leakages = []

        for k in range(16):
            leakage = self.leakage_model.get_trace_leakages(trace, k)
            if isinstance(leakage, (list, np.ndarray)):
                leakages.extend(list(leakage))
            else:
                leakages.append(leakage)
        leakages = np.array(leakages)

        return np.concatenate((trace.signal, leakages))
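
The resulting model input is just the raw signal with the 16 per-key-byte leakage blocks appended; a quick shape check with toy numbers:

import numpy as np

signal = np.zeros(100)    # windowed trace of 100 samples
leakages = np.zeros(16)   # one leakage value per key byte in this case
model_input = np.concatenate((signal, leakages))
assert model_input.shape == (116,)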
Example #10
    def test_corrtrain_correlation_multi(self):
        """
        Artificial example to test AICorrNet and trace processing with multiple leakage values and multiple subkeys.
        """
        from leakagemodels import LeakageModel

        # ------------------------------
        # Generate data
        # ------------------------------
        traces = [  # Contains abs(trace). Shape = [trace, point]
            [1, 1, 1, -15],
            [-4, 2, 2, -12],
            [10, 3, 3, 8],
            [8, 1, 1, -14],
            [9, 0, -3, 8],
        ]

        plaintexts = [
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 13, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 15, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        keys = [
            [0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        # Convert to numpy
        traces = np.array(traces)
        plaintexts = np.array(plaintexts)
        keys = np.array(keys)

        trace_set = TraceSet(name='test',
                             traces=traces,
                             plaintexts=plaintexts,
                             keys=keys)

        # ------------------------------
        # Preprocess data
        # ------------------------------
        conf = Namespace(
            max_cache=0,
            augment_roll=False,
            augment_noise=False,
            normalize=False,
            traces_per_set=4,
            online=False,
            dataset_id='qa',
            cnn=False,
            leakage_model=LeakageModelType.AES_MULTI,
            input_type=AIInputType.SIGNAL,
            augment_shuffle=True,
            n_hidden_layers=1,
            n_hidden_nodes=256,
            activation='leakyrelu',
            metric_freq=100,
            regularizer=None,
            reglambda=0.001,
            model_suffix=None,
            use_bias=True,
            batch_norm=True,
            hamming=False,
            key_low=1,
            key_high=3,
            loss_type='correlation',
            lr=0.001,
            epochs=5000,
            batch_size=512,
            norank=False,
        )
        it_dummy = AICorrSignalIterator([],
                                        conf,
                                        batch_size=10000,
                                        request_id=None,
                                        stream_server=None)
        x, y = it_dummy._preprocess_trace_set(trace_set)

        # ------------------------------
        # Train and obtain encodings
        # ------------------------------
        model = ai.AICorrNet(conf, input_dim=4, name="test")
        print(model.info())
        rank_cb = rank.CorrRankCallback(conf,
                                        '/tmp/deleteme/',
                                        save_best=False,
                                        save_path=None)
        rank_cb.set_trace_set(trace_set)

        if model.using_regularization:
            print(
                "Warning: can't do the correlation loss test because the regularizer will influence the loss function"
            )
            return

        # Find optimal weights
        print("The x (EM samples) and y (leakage model values) are:")
        print(x)
        print(y)
        print(
            "When feeding x through the model without training, the encodings become:"
        )
        print(model.predict(x))
        print("Training now")
        model.train_set(x,
                        y,
                        save=False,
                        epochs=conf.epochs,
                        extra_callbacks=[rank_cb])
        print("Done training")

        # Get the encodings of the input data using the same approach used in ops.py corrtest (iterate over rows)
        result = []
        for i in range(0, x.shape[0]):
            result.append(
                model.predict(np.array([x[i, :]], dtype=float))[0]
            )  # Result contains sum of points such that corr with y[key_index] is maximal for all key indices. Shape = [trace, 16]
        result = np.array(result)
        print(
            "When feeding x through the model after training, the encodings for key bytes %d to %d become:\n %s"
            % (conf.key_low, conf.key_high, str(result)))

        # ------------------------------
        # Check loss function
        # ------------------------------
        # Evaluate the model to get the loss for the encodings
        predicted_loss = model.model.evaluate(x, y, verbose=0)

        # Manually calculate the loss using numpy to verify that we are learning a correct correlation
        calculated_loss = 0
        num_keys = (conf.key_high - conf.key_low)
        num_outputs = LeakageModel.get_num_outputs(conf) // num_keys
        for i in range(0, num_keys):
            subkey_hws = y[:, i * num_outputs:(i + 1) * num_outputs]
            subkey_encodings = result[:, i * num_outputs:(i + 1) * num_outputs]
            print("Subkey %d HWs   : %s" % (i + conf.key_low, str(subkey_hws)))
            print("Subkey %d encodings: %s" %
                  (i + conf.key_low, str(subkey_encodings)))
            y_key = subkey_hws.reshape([-1, 1])
            y_pred = subkey_encodings.reshape([-1, 1])
            print("Flattened subkey %d HWs   : %s" %
                  (i + conf.key_low, str(y_key)))
            print("Flattened subkey %d encodings: %s" %
                  (i + conf.key_low, str(y_pred)))

            # Calculate correlation (numpy approach)
            corr_key_i = np.corrcoef(y_pred[:, 0], y_key[:, 0],
                                     rowvar=False)[1, 0]
            print("corr_num: %s" % corr_key_i)

            calculated_loss += 1.0 - corr_key_i

        print("These values should be close:")
        print("Predicted loss: %s" % str(predicted_loss))
        print("Calculated loss: %s" % str(calculated_loss))
        self.assertAlmostEqual(predicted_loss, calculated_loss, places=2)