Beispiel #1
0
def threaded_run_test(args, prediction, folder, run, network_name,
                      model_params, real_key):
    """Run the key-guess test for one model run and save the mean GE curve.

    For each experiment the predictions and key guesses are shuffled with the
    same stored permutation, the guessing-entropy test is run, and the mean
    over all experiments is written to an ``.exp`` file.
    """
    per_exp_ge = []
    for exp_index in range(args.num_exps):
        # Use the pre-generated permutation for this experiment so every
        # model sees the same shuffles.
        perm = args.permutations[exp_index]

        shuffled_predictions = shuffle_permutation(perm, np.array(prediction))
        shuffled_key_guesses = shuffle_permutation(perm, key_guesses)

        # Evaluate this experiment's shuffled data
        _, ge_exp = test_with_key_guess_p(shuffled_key_guesses,
                                          shuffled_predictions,
                                          attack_size=args.attack_size,
                                          real_key=real_key,
                                          use_hw=args.use_hw)
        per_exp_ge.append(ge_exp)

    # Mean guessing entropy over the experiments
    mean_ge = np.mean(per_exp_ge, axis=0)

    base_name = get_save_name(network_name, model_params)
    if args.use_noise_data:
        save_path = '{}model_r{}_{}_noise{}.exp'.format(
            folder, run, base_name, args.noise_level)
    else:
        save_path = '{}model_r{}_{}.exp'.format(folder, run, base_name)
    print("Save path {}".format(save_path))
    util.save_np(save_path, mean_ge, f="%f")
Beispiel #2
0
        def retrieve_ge(net_setting):
            """Fetch GE and loss/accuracy data for this setting and append it
            to the surrounding bookkeeping containers."""
            print(model_params)
            ge_x, ge_y, loss_acc = get_ge(network_name, model_params,
                                          net_setting)
            mean_curve = np.mean(ge_y, axis=0)

            ranks_x.append(ge_x)
            ranks_y.append(ge_y)
            rank_mean_y.append(mean_curve)
            name_models.append(
                util_classes.get_save_name(network_name, model_params))

            # loss_acc is (loss_vali, loss_train, acc_train, acc_vali)
            lvl, ltl, lta, lva = loss_acc

            net_setting['ge_x'].append(ge_x[0])
            net_setting['ge_y'].append(mean_curve)

            net_setting['ta'].append(np.mean(lta, axis=0))
            net_setting['va'].append(np.mean(lva, axis=0))
            net_setting['tl'].append(np.mean(ltl, axis=0))
            net_setting['vl'].append(np.mean(lvl, axis=0))
            net_setting['line_title'].append(
                util_classes.get_save_name(network_name, model_params))

            all_loss_acc.append(loss_acc)
Beispiel #3
0
def get_ge(net_name, model_parameters, load_parameters):
    """Load guessing-entropy curves (and optional loss/accuracy traces).

    ``load_parameters`` is converted into attribute-style args to build the
    run folder; for each run (module-level ``runs``) the saved ``.exp`` curve
    is loaded, plus loss/accuracy files when ``show_losses``/``show_acc`` is
    set at module level.

    Returns (ge_x, ge_y, (lta, lva, ltl, lvl)).
    """
    args = util.EmptySpace()
    for key, value in load_parameters.items():
        setattr(args, key, value)
    folder = "/media/rico/Data/TU/thesis/runs{}/{}".format(
        args.experiment, util.generate_folder_name(args))

    ge_x, ge_y = [], []
    lta, lva, ltl, lvl = [], [], [], []
    for run in runs:
        filename = '{}/model_r{}_{}'.format(
            folder, run, get_save_name(net_name, model_parameters))
        ge_path = '{}.exp'.format(filename)

        # np.float was deprecated in NumPy 1.20 and removed in 1.24; the
        # builtin float is the documented replacement.
        y_r = util.load_csv(ge_path, delimiter=' ', dtype=float)
        x_r = range(len(y_r))
        ge_x.append(x_r)
        ge_y.append(y_r)

        if show_losses or show_acc:
            ta, va, tl, vl = util.load_loss_acc(filename)
            lta.append(ta)
            lva.append(va)
            ltl.append(tl)
            lvl.append(vl)

    return ge_x, ge_y, (lta, lva, ltl, lvl)
Beispiel #4
0
def get_ranks(args):
    """Compute mean attack accuracy per hyper-parameter combination and write
    the results as a JSON map.

    Iterates over all channel/layer/kernel combinations, loads each trained
    model for every run, measures accuracy on the attack set, and stores the
    mean (in percent) keyed by "c_{channels}_l{layers}_k{kernel}". A
    combination is skipped entirely when any run's model file is missing.
    """
    # Load the data and make it global (used by helper/worker code)
    global x_attack, y_attack, dk_plain, key_guesses
    x_attack, y_attack, key_guesses, real_key, dk_plain = load_data(args)

    model_params = {}
    map_accuracy = {}

    folder = "{}/{}/".format(args.models_path, generate_folder_name(args))
    model_params.update({"max_pool": args.max_pool})
    for channel_size in args.channels:
        model_params.update({"channel_size": channel_size})
        for layers in args.layers:
            model_params.update({"num_layers":  layers})
            for kernel in args.kernels:
                model_params.update({"kernel_size": kernel})

                # Calculate the accuracy averaged over all runs
                mean_acc = 0.0
                no_data = False
                for run in range(args.runs):
                    model_path = '{}/model_r{}_{}.pt'.format(
                        folder,
                        run,
                        get_save_name(args.network_name, model_params))
                    if not os.path.exists(model_path):
                        # Fixed message grammar ("exists" -> "exist")
                        print(util.BColors.WARNING + f"Path {model_path} does not exist" + util.BColors.ENDC)
                        no_data = True
                        break
                    print('path={}'.format(model_path))

                    model = load_model(args.network_name, model_path)
                    model.eval()
                    print("Using {}".format(model))
                    model.to(args.device)

                    # Calculate predictions; only domain-knowledge networks
                    # receive the plaintext input
                    if require_domain_knowledge(args.network_name):
                        _, acc = accuracy2(model, x_attack, y_attack, dk_plain)
                    else:
                        _, acc = accuracy2(model, x_attack, y_attack, None)
                    print('Accuracy: {} - {}%'.format(acc, acc * 100))
                    acc = acc * 100
                    mean_acc = mean_acc + acc
                if not no_data:
                    mean_acc = mean_acc / float(args.runs)
                    map_accuracy.update({f"c_{channel_size}_l{layers}_k{kernel}": mean_acc})
                    print(util.BColors.WARNING + f"Mean accuracy {mean_acc}" + util.BColors.ENDC)

    if args.noise_level >= 0:
        acc_filename = f"{folder}/acc_{args.network_name}_noise{args.noise_level}.json"
    else:
        acc_filename = f"{folder}/acc_{args.network_name}.json"
    print(acc_filename)
    with open(acc_filename, "w") as acc_file:
        acc_file.write(json.dumps(map_accuracy))
Beispiel #5
0
    def retrieve_ge():
        """Collect GE results for the current model parameters into the
        surrounding containers."""
        print(model_params)
        ge_x, ge_y, loss_acc = get_ge(network_name, model_params)
        ranks_x.append(ge_x)
        ranks_y.append(ge_y)
        mean_curve = np.mean(ge_y, axis=0)
        rank_mean_y.append(mean_curve)
        name_models.append(get_save_name(network_name, model_params))
        all_loss_acc.append(loss_acc)
Beispiel #6
0
def get_ranks(args, network_name, model_params):
    """Run the guessing-entropy test for every trained run of a network.

    Loads the attack data (stored in module globals so worker processes can
    reach it), computes each run's predictions, then evaluates them either
    directly (single run) or in one Process per run.
    """
    # Load the data and make it global
    global x_attack, y_attack, dk_plain, key_guesses
    x_attack, y_attack, key_guesses, real_key, dk_plain = load_data(
        args, network_name)

    folder = "{}/{}/".format(args.models_path, generate_folder_name(args))

    # Calculate the predictions before hand
    predictions = []
    for run in args.runs:
        model_path = '{}/model_r{}_{}.pt'.format(
            folder, run, get_save_name(network_name, model_params))
        print('path={}'.format(model_path))

        model = load_model(network_name, model_path)
        model.eval()
        print("Using {}".format(model))
        model.to(args.device)

        # Calculate predictions; only networks requiring domain knowledge get
        # the plaintext input (the two previous branches were otherwise
        # identical).
        dk = dk_plain if require_domain_knowledge(network_name) else None
        prediction = accuracy(model, x_attack, y_attack, dk)
        predictions.append(prediction.cpu().numpy())

    # Check if it is only one run, if so don't do multi threading
    if len(args.runs) == 1:
        threaded_run_test(args, predictions[0], folder, args.runs[0],
                          network_name, model_params, real_key)
    else:
        # Start a process for each run
        processes = []
        for i, run in enumerate(args.runs):
            p = Process(target=threaded_run_test,
                        args=(args, predictions[i], folder, run, network_name,
                              model_params, real_key))
            processes.append(p)
            p.start()
        # Wait for them to finish
        for p in processes:
            p.join()
            print('Joined process')
Beispiel #7
0
def get_ge(net_name, model_parameters):
    """Load guessing-entropy curves (and optional loss/accuracy traces) for
    every run, reading from a path built from module-level settings.

    Returns (ge_x, ge_y, (lta, lva, ltl, lvl)).
    """
    folder = '/media/rico/Data/TU/thesis/runs{}/{}/subkey_{}/{}{}{}_SF{}_' \
             'E{}_BZ{}_LR{}{}{}/train{}/'.format(
                 '3' if not experiment else '',
                 str(data_set),
                 sub_key_index,
                 '' if unmask else 'masked/',
                 # "desync is 0" compared object identity (and warns on
                 # CPython 3.8+); == compares the value.
                 '' if desync == 0 else 'desync{}/'.format(desync),
                 type_network,
                 spread_factor,
                 epochs,
                 batch_size,
                 '%.2E' % Decimal(lr),
                 # ceil(x) <= 0 iff x <= 0, so the removed np.math alias
                 # is not needed.
                 '' if l2_penalty <= 0 else '_L2_{}'.format(l2_penalty),
                 init,
                 train_size)

    ge_x, ge_y = [], []
    lta, lva, ltl, lvl = [], [], [], []
    for run in runs:
        filename = '{}/model_r{}_{}'.format(
            folder, run, get_save_name(net_name, model_parameters))
        ge_path = '{}.exp'.format(filename)

        # np.float was removed in NumPy 1.24; the builtin float is equivalent.
        y_r = util.load_csv(ge_path, delimiter=' ', dtype=float)
        x_r = range(len(y_r))
        ge_x.append(x_r)
        ge_y.append(y_r)

        if show_losses or show_acc:
            ta, va, tl, vl = util.load_loss_acc(filename)
            lta.append(ta)
            lva.append(va)
            ltl.append(tl)
            lvl.append(vl)

    return ge_x, ge_y, (lta, lva, ltl, lvl)
Beispiel #8
0
def get_ge(net_name, model_parameters, load_parameters):
    """Load guessing-entropy curves (and optional loss/accuracy traces).

    The run folder is built from the ``load_parameters`` dict; for each run
    (module-level ``runs``) the saved ``.exp`` curve is loaded, plus
    loss/accuracy files when ``show_losses``/``show_acc`` is set.

    Returns (ge_x, ge_y, (lta, lva, ltl, lvl)).
    """
    folder = '/media/rico/Data/TU/thesis/runs{}/{}/subkey_{}/{}{}{}_SF{}_' \
             'E{}_BZ{}_LR{}{}/train{}/'.format(
                 load_parameters["experiment"],
                 load_parameters["data_set"],
                 load_parameters["subkey"],
                 load_parameters["masked"],
                 load_parameters["desync"],
                 load_parameters["hw"],
                 load_parameters["spread"],
                 load_parameters["epochs"],
                 load_parameters["batch_size"],
                 load_parameters["lr"],
                 load_parameters["l2"],
                 load_parameters["train_size"])

    ge_x, ge_y = [], []
    lta, lva, ltl, lvl = [], [], [], []
    for run in runs:
        filename = '{}/model_r{}_{}'.format(
            folder,
            run,
            get_save_name(net_name, model_parameters))
        ge_path = '{}.exp'.format(filename)

        # np.float was removed in NumPy 1.24; the builtin float is equivalent.
        y_r = util.load_csv(ge_path, delimiter=' ', dtype=float)
        x_r = range(len(y_r))
        ge_x.append(x_r)
        ge_y.append(y_r)

        if show_losses or show_acc:
            ta, va, tl, vl = util.load_loss_acc(filename)
            lta.append(ta)
            lva.append(va)
            ltl.append(tl)
            lvl.append(vl)

    return ge_x, ge_y, (lta, lva, ltl, lvl)
Beispiel #9
0
    def retrieve_ge(net_setting):
        """Fetch GE and loss/accuracy data for this setting and record it in
        the shared plotting containers and in ``net_setting`` itself."""
        print(model_params)
        ge_x, ge_y, loss_acc = get_ge(network_name, model_params, net_setting)
        mean_curve = np.mean(ge_y, axis=0)

        ranks_x.append(ge_x)
        ranks_y.append(ge_y)
        rank_mean_y.append(mean_curve)
        name_models.append(get_save_name(network_name, model_params))
        n_settings.append(net_setting)

        net_setting['ge_x'].append(ge_x[0])
        net_setting['ge_y'].append(mean_curve)
        # loss_acc is ordered (lta, lva, ltl, lvl); store each mean curve
        for stat_key, stat_runs in zip(('ta', 'va', 'tl', 'vl'), loss_acc):
            net_setting[stat_key].append(np.mean(stat_runs, axis=0))
        net_setting['line_title'].append(
            f"{get_save_name(network_name, model_params)}, l2{model_params['l2_penalty']}"
        )

        all_loss_acc.append(loss_acc)
Beispiel #10
0
def get_ge(net_name, model_parameters):
    """Load guessing-entropy curves (and optional loss/accuracy traces) for
    every run.

    NOTE(review): the folder is derived from the module-level ``args``, not
    from the function parameters — confirm that is intentional.

    Returns (ge_x, ge_y, (lta, lva, ltl, lvl)).
    """
    folder = "{}/{}".format('/media/rico/Data/TU/thesis/runs/',
                            util.generate_folder_name(args))

    ge_x, ge_y = [], []
    lta, lva, ltl, lvl = [], [], [], []
    for run in runs:
        filename = '{}/model_r{}_{}'.format(
            folder, run, get_save_name(net_name, model_parameters))
        ge_path = '{}.exp'.format(filename)

        # np.float was removed in NumPy 1.24; the builtin float is equivalent.
        y_r = util.load_csv(ge_path, delimiter=' ', dtype=float)
        x_r = range(len(y_r))
        ge_x.append(x_r)
        ge_y.append(y_r)

        if show_losses or show_acc:
            ta, va, tl, vl = util.load_loss_acc(filename)
            lta.append(ta)
            lva.append(va)
            ltl.append(tl)
            lvl.append(vl)

    return ge_x, ge_y, (lta, lva, ltl, lvl)
Beispiel #11
0
def get_ranks(args, network_name, model_params, edit_model=disable_filter):
    """Evaluate an (optionally edited) model's prediction sets with the
    guessing-entropy test, one worker process per prediction set.

    ``edit_model``'s predictions are cached on disk next to the model file;
    when the cache exists it is loaded instead, together with the real key
    and key guesses needed by the test workers.
    """
    folder = "{}/{}/".format(args.models_path, generate_folder_name(args))

    # Calculate the predictions before hand
    # TODO: for multiple runs
    model_path = '{}/model_r{}_{}.pt'.format(
        folder, args.run, get_save_name(network_name, model_params))
    print('path={}'.format(model_path))

    if not os.path.exists(f"{model_path}.predictions1.npy"):

        # Load the data and make it global
        global x_attack, y_attack, dk_plain, key_guesses
        x_attack, y_attack, key_guesses, real_key, dk_plain = load_data(
            args, network_name)
        model = load_model(network_name, model_path)
        model.eval()
        model.to(args.device)

        predictions, correct_indices, sum_indices = edit_model(model)

        # Cache the (expensive) results on disk next to the model
        np_predictions = np.array(predictions)
        np_correct_indices = np.array(correct_indices)
        np_sum_indices = np.array(sum_indices)
        np.save(f"{model_path}.predictions1", np_predictions)
        np.save(f"{model_path}.correct_indices", np_correct_indices)
        np.save(f"{model_path}.sum_indices", np_sum_indices)
        print(sum_indices)
    else:
        predictions = np.load(f"{model_path}.predictions1.npy")
        # np.int was removed in NumPy 1.24; the builtin int is equivalent.
        real_key = util.load_csv('{}/{}/secret_key.csv'.format(
            args.traces_path, str(load_args.data_set)),
                                 dtype=int)
        key_guesses = util.load_csv(
            '{}/{}/Value/key_guesses_ALL_transposed.csv'.format(
                args.traces_path, str(load_args.data_set)),
            delimiter=' ',
            dtype=int,
            start=load_args.train_size + load_args.validation_size,
            size=load_args.attack_size)

    # Start a process for each prediction, at most `groups_of` at a time
    groups_of = 7
    for k in range(math.ceil(len(predictions) / float(groups_of))):

        # Start groups of processes
        processes = []
        for i in range(k * groups_of, (k + 1) * groups_of, 1):
            if i >= len(predictions):
                break
            print(f"i: {i}")

            p = Process(target=threaded_run_test,
                        args=(args, predictions[i], folder, args.run,
                              network_name, model_params, real_key, i))
            processes.append(p)
            p.start()

        # Wait for the processes to finish
        for p in processes:
            p.join()
            print('Joined process')
Beispiel #12
0
            "l2_penalty": 0,
            "train_size": 1000,
            "kernel_size": 20,
            "num_layers": 2,
            "channel_size": 16,
            "network_name": "SpreadV3", #""DenseNorm",
            "init_weights": "",
            "run": 0
}

# Expose the settings dict as attribute-style args for the helper functions.
args = util.EmptySpace()
for key, value in settings.items():
    setattr(args, key, value)

# Locate and load the trained model for this run.
folder = "/media/rico/Data/TU/thesis/runs{}/{}".format(args.experiment, util.generate_folder_name(args))
filename = folder + f"/model_r{args.run}_" + util_classes.get_save_name(args.network_name, settings) + ".pt"
model = load_model(args.network_name, filename)

print(model)

# Load the ASCAD attack traces and move them to the compute device.
x_test, _, _, _, _ = util.load_ascad_test_traces({
    "sub_key_index": 2,
    "desync": 0,
    "traces_path": "/media/rico/Data/TU/thesis/data",
    "unmask": args.unmask,
    "use_hw": args.use_hw
})
print(f"Shape x_test {np.shape(x_test)}")
x_test = torch.from_numpy(x_test.astype(np.float32)).to(util.device)
Beispiel #13
0
            str(args['data_set']),
            args['subkey_index'],
            '' if args['unmask'] else 'masked/',
            '' if args['desync'] is 0 else 'desync{}/'.format(args['desync']),
            args['type_network'],
            args['spread_factor'],
            args['epochs'],
            args['batch_size'],
            '%.2E' % Decimal(args['lr']),
            args['train_size'])

# Calculate the predictions before hand
predictions = []
for run in args['runs']:
    model_path = '{}/model_r{}_{}.pt'.format(
        folder, run, get_save_name(network_name, model_params))
    print('path={}'.format(model_path))

    model = load_model(network_name, model_path)

    variables = ["conv1", "conv2", "conv3"]
    for var in variables:
        weights = model.__getattr__(var).weight.data.cpu().numpy()
        ones = np.ones(np.shape(weights))
        zeros = np.zeros(np.shape(weights))

        plus = np.where(weights < THRESHOLD, ones, zeros)
        minus = np.where(-THRESHOLD < weights, ones, zeros)
        z = plus + minus
        res = np.where(z == 2, ones, zeros)
        # print(res)