def test_run_too_much_traces(self):
        """Verifies that asking for more traces than exist aborts with exit code 1."""

        plain = np.array([[1], [2], [1], [2]])
        keys = plain
        traces = np.array([[1, 2, 3], [4, 5, 6], [7, 0.4, 9], [2, 3, 12]])

        # Requesting 5 profiling traces while only 4 are available must abort.
        with self.assertRaises(SystemExit) as context:
            DataPartitioner.get_traces(traces, keys, plain, 5, 1, 0, 1, 2, 1, 0, True)

        # The partitioner is expected to terminate the program with exit code 1.
        self.assertEqual(context.exception.code, 1)
def cpa(traces_file: str, keys_file: str, plain_file: str, subkey: int, leakage_model: bool,
        num_traces: int, num_attack_traces: int, num_features: int, feature_select: int, round_: int,
        operation: int, debug_mode: bool, version: int):
    """
    Performs different CPA attacks.

    :param traces_file: the traces file to use
    :param keys_file: the keys file to use
    :param plain_file: the plaintexts file to use
    :param subkey: the subkey index to analyze. Must be in the range [0-15]
    :param leakage_model: the leakage model to use.
    :param num_traces: the amount of traces to analyze starting from trace 0.
    :param num_attack_traces: the amount of attack traces to analyze starting from trace 0.
    :param num_features: the number of features
    :param feature_select: which feature select to use
    :param round_: the AES round to attack.
    :param operation: the AES operation to 'attack', represented as an integer from 0 to 3.
    :param debug_mode: whether to enable debug mode
    :param version: pick which version of CPA you want, the default is offline CPA.
    """

    # version 0 -> offline CPA (default); 1 -> online; 2 -> conditional averaging.
    online = version == 1
    conditional_averaging = version == 2

    traces, keys, plain = FileLoader.main(traces_file, keys_file, plain_file)

    (profiling_traces, profiling_keys, profiling_plaintext,
     attack_traces, attack_keys, attack_plaintext) = DataPartitioner.get_traces(
        traces, keys, plain, num_traces, subkey,
        0, num_attack_traces, num_features, round_, operation, leakage_model)

    CPA.run(profiling_traces, profiling_keys, profiling_plaintext, attack_traces, attack_keys, attack_plaintext,
            round_, operation, subkey, feature_select, num_features, leakage_model,
            debug_mode_enabled=debug_mode, online=online, conditional_averaging=conditional_averaging)
def ta(traces_file: str, keys_file: str, plain_file: str, points_of_interest: int, pooled: bool, spacing: int,
       subkey: int, num_traces: int, gpu: bool, leakage_model: bool, debug_mode: bool, num_attack_traces: int,
       feature_select: int):
    """
    Performs (pooled) template attack.

    :param traces_file: the traces file to use.
    :param keys_file: the keys file to use.
    :param plain_file: the plaintexts file to use.
    :param points_of_interest: the number of points of interest to extract.
    :param pooled: whether to use pooled attack.
    :param spacing: spacing between the points of interest.
    :param subkey: the subkey index to analyze. Must be in the range [0-16], 16 signaling the whole key to be found out.
    :param num_traces: the amount of traces to analyze starting from trace 0
    :param gpu: whether or not to use gpu for this attack
    :param leakage_model: the leakage model to use
    :param debug_mode: whether to enable debug mode
    :param num_attack_traces: the number of attack traces.
    :param feature_select: which feature select to use, default is none.
    """
    traces, keys, plain = FileLoader.main(traces_file, keys_file, plain_file)

    profiling_traces, profiling_keys, profiling_plaintext, attack_traces, attack_keys, attack_plaintext = \
        DataPartitioner.get_traces(traces, keys, plain, num_traces, subkey,
                                   0, num_attack_traces, 0, 1, 0, leakage_model)

    # The non-pooled variant needs at least as many profiling traces as features,
    # otherwise the per-class covariance matrices are singular.
    if not pooled and profiling_traces.shape[0] < profiling_traces.shape[1]:
        print("ERROR: profiling traces are smaller than features, please run Pooled Template Attack instead of"
              " normal Template attack for a more accurate result.")
        # Use SystemExit directly: the builtin exit() is injected by the `site`
        # module for interactive use and is not guaranteed to exist in programs.
        raise SystemExit(1)

    TA.run(profiling_traces, profiling_keys, profiling_plaintext, attack_traces, attack_keys, attack_plaintext,
           pooled, points_of_interest, spacing, subkey, gpu, leakage_model, debug_mode, feature_select)
def sa(traces_file: str, keys_file: str, plain_file: str, round_: int, operation: int, num_traces: int,
       num_attack_traces: int, subkey: int, gpu: bool, leakage_model: bool, num_features: int, feature_select: int,
       debug_mode: bool):
    """
    Performs stochastic attack.

    :param traces_file: the traces file to use.
    :param keys_file: the keys file to use.
    :param plain_file: the plaintexts file to use.
    :param round_: the AES round to attack.
    :param operation: the AES operation to 'attack', represented as an integer from 0 to 3.
    :param num_traces: the amount of traces to analyze starting from trace 0.
    :param num_attack_traces: the amount of attack traces to analyze starting from trace 0.
    :param subkey: the subkey index to analyze. Must be in the range [0-16], 16 signaling the whole key to be found out.
    :param gpu: enables gpu acceleration if set to true.
    :param leakage_model: the leakage model to use.
    :param num_features: number of features to select for.
    :param feature_select: which feature select to use, default is pearson.
    :param debug_mode: whether to enable debug mode
    """

    traces, keys, plain = FileLoader.main(traces_file, keys_file, plain_file)

    (profiling_traces, profiling_keys, profiling_plaintext,
     attack_traces, attack_keys, attack_plaintext) = DataPartitioner.get_traces(
        traces, keys, plain, num_traces, subkey,
        0, num_attack_traces, num_features, round_, operation, leakage_model)

    # NOTE: the actual profiling-set length is forwarded (len(profiling_traces)),
    # not the requested num_traces — the partitioner may return fewer traces.
    SA.run(profiling_traces, profiling_keys, profiling_plaintext, attack_traces, attack_keys, attack_plaintext,
           round_, operation, len(profiling_traces), num_attack_traces, subkey, feature_select, num_features, gpu,
           leakage_model, debug_mode_enabled=debug_mode)
def mia(traces_file: str, keys_file: str, plain_file: str, subkey: int, leakage_model: bool,
        num_traces: int, num_attack_traces: int, num_features: int, feature_select: int, round_: int,
        operation: int, debug_mode: bool):
    """Performs MIA (Mutual Information Analysis) attack.

    :param traces_file: the traces file to use
    :param keys_file: the keys file to use
    :param plain_file: the plaintexts file to use
    :param subkey: the subkey index to analyze. Must be in the range [0-15]
    :param leakage_model: the leakage model to use.
    :param num_traces: the amount of traces to analyze starting from trace 0.
    :param num_attack_traces: the amount of attack traces to analyze starting from trace 0.
    :param num_features: the number of features
    :param feature_select: which feature select to use
    :param round_: the AES round to attack.
    :param operation: the AES operation to 'attack', represented as an integer from 0 to 3.
    :param debug_mode: whether to enable debug mode
    """

    traces, keys, plain = FileLoader.main(traces_file, keys_file, plain_file)

    (profiling_traces, profiling_keys, profiling_plaintext,
     attack_traces, attack_keys, attack_plaintext) = DataPartitioner.get_traces(
        traces, keys, plain, num_traces, subkey,
        0, num_attack_traces, num_features, round_, operation, leakage_model)

    Mia.run(profiling_traces, profiling_keys, profiling_plaintext, attack_traces, attack_keys, attack_plaintext,
            round_, operation, subkey, feature_select,
            num_features, leakage_model, debug_mode_enabled=debug_mode)
    def test_mia_run(self):
        """This tests the run method of MIA with a reduced data set."""
        traces, keys, plain = FileLoader.main(CONST_DEFAULT_TRACES_FILE,
                                              CONST_DEFAULT_KEYS_FILE,
                                              CONST_DEFAULT_PLAIN_FILE)

        # Partition into 1000 profiling and 1000 attack traces with 10 features
        # (subkey 0, round 1, operation 0, no hamming-weight leakage model —
        # argument order per DataPartitioner.get_traces used elsewhere in file).
        profiling_traces, profiling_keys, profiling_plaintext, attack_traces, attack_keys, attack_plaintext = \
            DataPartitioner.get_traces(traces, keys, plain, 1000, 0,
                                       0, 1000, 10, 1, 0, False)

        # Only the first entry is expected to be recovered; the rest stay 0.
        expected = np.array([43, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        self.assertTrue(np.array_equal(Mia.run(profiling_traces, profiling_keys, profiling_plaintext, attack_traces,
                        attack_keys, attack_plaintext, 1, 0, 0, 1, 10, True), expected))
    def test_basic_run(self):
        """ Tests a run with normal amount of traces to select."""

        plain = np.array([[1], [2], [1], [2]])
        keys = plain
        traces = np.array([[1, 2, 3], [4, 5, 6], [7, 0.4, 9], [2, 3, 12]])

        # Request 2 profiling traces (and 1 attack trace) out of the 4 available.
        partition = DataPartitioner.get_traces(traces, keys, plain, 2, 1, 0, 1, 2, 1, 0, True)
        profiling_traces = partition[0]

        # The profiling set must contain 2 traces of 3 sample points each.
        self.assertEqual(len(profiling_traces), 2)
        self.assertEqual(len(profiling_traces[0]), 3)
    def run(traces: np.ndarray, keys: np.ndarray, plain: np.ndarray,
            subkey: int, num_features: int, feature_select: int):
        """
        Runs Linear Regression Analysis.

        :param traces: the traces to use
        :param keys: the keys to use
        :param plain: the plaintexts corresponding to the traces
        :param subkey: the specific subkey index to calculate.
                       16 is used to indicate that the whole key should be returned
        :param num_features: the number of features to select with feature selection
        :param feature_select: the type of feature selection to use.
        :return: the (sub)key most likely to be the real (sub)key
        """

        print('This performs Linear Regression Analysis')

        lra = LRA(traces, plain)

        lra.bar = progressbar.ProgressBar(
            max_value=2 * lra.KEY_SIZE,
            widgets=progress_bar_util.get_widgets(False))

        # subkey == 16 means "recover the whole key": attack all 16 positions
        # and scale the progress bar accordingly.
        if subkey == 16:
            targets = list(range(16))
            lra.bar.max_value *= 16
        else:
            targets = [subkey]

        result = [0] * 16
        for index in targets:
            if feature_select != 0:
                selected = DataPartitioner.select_features(
                    traces, keys, plain, index, feature_select, num_features, 1, 0,
                    True)
                lra.traces = traces[:, selected]
                lra.dimension_leakage_points = len(lra.traces[0])

            result[index] = lra.solve_subkey(index)

        print('The final key is: ', result)

        return result
def pia(traces_file: str, keys_file: str, plain_file: str, subkey: int, num_traces: int, num_attack_traces: int,
        round_: int,
        debug_mode: bool):
    """Performs PIA (Perceived Information Analysis).

    :param traces_file: the traces file to use
    :param keys_file: the keys file to use
    :param plain_file: the plaintexts file to use
    :param subkey: the subkey index to analyze. Must be in the range [0-15]
    :param num_traces: the amount of traces to analyze starting from trace 0.
    :param num_attack_traces: the amount of attack traces to analyze starting from trace 0.
    :param round_: the AES round to attack.
    :param debug_mode: whether to enable debug mode
    """
    traces, keys, plain = FileLoader.main(traces_file, keys_file, plain_file)
    # NOTE(review): unlike the other attack entry points, the number of features
    # is hard-coded to 5000 and the leakage model fixed to False here — confirm
    # this is intentional for PIA.
    profiling_traces, profiling_keys, profiling_plaintext, attack_traces, attack_keys, attack_plaintext = \
        DataPartitioner.get_traces(traces, keys, plain, num_traces, subkey,
                                   0, num_attack_traces, 5000, round_, 0, False)

    Pia.run(profiling_traces, profiling_plaintext, profiling_keys, attack_traces, subkey, debug_mode_enabled=debug_mode)
def dpa(traces_file: str, keys_file: str, plain_file: str, subkey: int, num_attack_traces: int,
        round_: int, operation: int, debug_mode: bool, bit: int, offset: int, single_order: bool):
    """Performs different DPA attacks.

    :param traces_file: the traces file to use
    :param keys_file: the keys file to use
    :param plain_file: the plaintexts file to use
    :param subkey: the subkey index to analyze. Must be in the range [0-15]
    :param num_attack_traces: the amount of attack traces to analyze starting from trace 0.
    :param round_: the AES round to attack.
    :param operation: the AES operation to 'attack', represented as an integer from 0 to 3.
    :param debug_mode: whether to enable debug mode
    :param bit: which bit to attack
    :param offset: the offset to use
    :param single_order: enables single order DPA
    """

    traces, keys, plain = FileLoader.main(traces_file, keys_file, plain_file)

    # DPA has no profiling phase, so zero profiling traces are requested and the
    # profiling part of the partition is discarded.
    _, _, _, attack_traces, attack_keys, attack_plaintext = \
        DataPartitioner.get_traces(traces, keys, plain, 0, subkey,
                                   0, num_attack_traces, 0, round_, operation, False)

    DPA.run(attack_traces, attack_keys, attack_plaintext,
            round_, operation, subkey, offset=offset, single_order=single_order,
            debug_mode_enabled=debug_mode, bit=bit)
    def run(template_traces: np.ndarray,
            template_keys: np.ndarray,
            template_plaintext: np.ndarray,
            attack_traces: np.ndarray,
            attack_keys: np.ndarray,
            attack_plain: np.ndarray,
            pooled: bool,
            num_points_of_interest: int,
            spacing_points_of_interest: int,
            subkey: int,
            gpu: bool = False,
            leakage_model: bool = True,
            debug_mode_enabled: bool = False,
            feature_select: int = 0) -> np.array:
        """ Method used to select correct version of Template Attack.

        :param template_traces: the traces to use
        :param template_keys: the keys to use
        :param template_plaintext: the plaintexts to use
        :param attack_traces: the traces to use for attacking
        :param attack_keys: the keys to use for attacking
        :param attack_plain: the plaintexts to use for attacking
        :param pooled: whether to do a pooled attack
        :param num_points_of_interest: number of points of interest to use
        :param spacing_points_of_interest: spacing between the points of interest
        :param subkey: the subkey index to analyze. Must be in the range [0-16]. 16 signals the full key.
        :param gpu: whether or not to use gpu for this attack
        :param leakage_model: the leakage model to use
        :param debug_mode_enabled: whether to enable debug mode
        :param feature_select: which feature selection method to use, see main for which number is which.
        :return: array containing the calculated key
        """

        # Attack takes roughly equal time per subkey, so a full-key attack
        # (subkey == 16) is budgeted as 16 single-subkey attacks.
        num_subkeys = 16 if subkey == 16 else 1
        bar = progressbar.ProgressBar(
            max_value=len(attack_traces) * 256 * num_subkeys,
            widgets=progress_bar_util.get_widgets(debug_mode_enabled))

        ta = TA(template_traces, template_keys, template_plaintext,
                attack_traces, attack_keys, attack_plain)

        if feature_select > 0:
            print("Feature selection is being calculated...")

            indices = []
            if num_subkeys == 16:
                # Gather the selected feature indices of every subkey position.
                for position in range(16):
                    indices.extend(DataPartitioner.select_features(
                        template_traces, template_keys, template_plaintext,
                        position, feature_select, num_points_of_interest, 1, 0,
                        leakage_model))
            else:
                # Select at least 10 features for a single-subkey attack.
                indices = DataPartitioner.select_features(
                    template_traces, template_keys, template_plaintext, subkey,
                    feature_select, max(num_points_of_interest, 10), 1, 0,
                    leakage_model)

            # Restrict both trace sets to the selected feature columns.
            ta.template_traces = template_traces[:, indices]
            ta.attack_traces = attack_traces[:, indices]

        # Both variants share the same argument list; pick the implementation.
        attack = ta.run_pooled if pooled else ta.run_normal
        result = attack(num_points_of_interest, spacing_points_of_interest,
                        subkey, bar, gpu, leakage_model, debug_mode_enabled)

        bar.finish()

        print('The final key is: ', result)
        return result
    def solve_subkey(self,
                     subkey: int,
                     use_gpu: bool,
                     bar: progressbar.ProgressBar,
                     feature_select: int,
                     num_features: int,
                     aes_round: int = 1,
                     aes_operation: int = 0,
                     num_traces: int = 5000,
                     num_attack_traces: int = 30,
                     hamming_weight: bool = False):
        """
        This is the over-arching method for finding the key of an AES-encrypted cypher text.
        For detailed explanation of the algorithm see: https://link.springer.com/chapter/10.1007/11545262_3

        :param subkey: the subkey index to analyze. Must be in the range [0-15]
        :param use_gpu: whether to use gpu acceleration
        :param bar: the progressbar to update
        :param feature_select: which feature select method to use. 0 disables feature selection
        :param num_features: the number of features to select
        :param aes_round: the AES round to attack
        :param aes_operation: the AES operation to attack, represented as an integer from 0 to 3
        :param num_traces: number of data points to process
        :param num_attack_traces: number of data points to attack
        :param hamming_weight: whether to use the hamming_weight leakage model
        :return: the calculated subkey corresponding to the subkey index specified
        :raises ValueError: if an input array is empty, input sizes disagree,
                            or the subkey index is outside [0, 15]
        """

        # Input sanitization
        # pt. 1 : our inputs must not be empty
        if self.profiling_traces.size <= 0 or self.profiling_keys.size <= 0 or self.profiling_plain.size <= 0:
            raise ValueError(
                "One or more of the following inputs was or were empty:\n-traces\n-keys\n-plain"
            )
        # pt. 2 : our inputs must be of equal sizes
        # NOTE(review): the `- 1` offset and the chained `!=` (which never
        # compares traces against plain directly) look suspicious — confirm the
        # intended size relationship between traces, keys and plain.
        if self.profiling_traces.size - 1 != self.profiling_keys.size != self.profiling_plain.size:
            raise ValueError(
                "One or more of the following inputs were of unequal size:\n-traces\n-keys\n-plain"
            )
        # pt. 3 : our key consists of 16 subkeys. thus indices range from 0 to 15
        if 0 > subkey or subkey > 15:
            raise ValueError(
                "Subkey index out of range, should be between 0 and 15")

        # Feature selection. `indices` must be defined on both paths because the
        # debug message below always reports it; previously it was unbound
        # (NameError) whenever feature_select == 0.
        if feature_select > 0:
            indices = DataPartitioner.select_features(
                self.profiling_traces, self.profiling_keys,
                self.profiling_plain, subkey, feature_select, num_features,
                aes_round, aes_operation, hamming_weight)
            traces = self.profiling_traces[:, indices]
            attack_traces = self.attack_traces[:, indices]
        else:
            indices = []
            traces = self.profiling_traces
            attack_traces = self.attack_traces

        # Actual method calls
        bit_matrix = self.poa_output(subkey, num_traces, hamming_weight,
                                     aes_round, aes_operation)
        estimates = SA.parameter_estimation(traces,
                                            bit_matrix,
                                            use_gpu,
                                            num_traces=num_traces,
                                            hamming_weight=hamming_weight)

        best_key = self.key_extraction(estimates,
                                       attack_traces,
                                       subkey,
                                       bar,
                                       num_attack_traces=num_attack_traces,
                                       hamming_weight=hamming_weight,
                                       aes_round=aes_round,
                                       aes_operation=aes_operation)

        # Print the necessary stuff while debugging
        debug_string = \
            "Subkey " + str(subkey) + " is: " + str(best_key) + " | first 10 POI indices: " + str(indices[:10])
        self.log_handler.log_to_debug_file(self.log_handler.CONST_SEPARATOR)
        self.log_handler.log_to_debug_progressbar(bar, debug_string)

        return best_key