Code Example #1
def get_ranks(args):
    # Load the data and make it global
    global x_attack, y_attack, dk_plain, key_guesses
    x_attack, y_attack, key_guesses, real_key, dk_plain = load_data(args)

    model_params = {}
    map_accuracy = {}

    folder = "{}/{}/".format(args.models_path, generate_folder_name(args))
    model_params.update({"max_pool": args.max_pool})
    for channel_size in args.channels:
        model_params.update({"channel_size": channel_size})
        for layers in args.layers:
            model_params.update({"num_layers":  layers})
            for kernel in args.kernels:
                model_params.update({"kernel_size": kernel})

                # Calculate the accuracy
                mean_acc = 0.0
                no_data = False
                for run in range(args.runs):
                    model_path = '{}/model_r{}_{}.pt'.format(
                        folder,
                        run,
                        get_save_name(args.network_name, model_params))
                    if not os.path.exists(model_path):
                        print(util.BColors.WARNING + f"Path {model_path} does not exist" + util.BColors.ENDC)
                        no_data = True
                        break
                    print('path={}'.format(model_path))

                    model = load_model(args.network_name, model_path)
                    model.eval()
                    print("Using {}".format(model))
                    model.to(args.device)

                    # Calculate predictions
                    if require_domain_knowledge(args.network_name):
                        _, acc = accuracy2(model, x_attack, y_attack, dk_plain)
                    else:
                        _, acc = accuracy2(model, x_attack, y_attack, None)
                    print('Accuracy: {} - {}%'.format(acc, acc * 100))
                    acc = acc * 100
                    mean_acc = mean_acc + acc
                if not no_data:
                    mean_acc = mean_acc / float(args.runs)
                    map_accuracy.update({f"c_{channel_size}_l{layers}_k{kernel}": mean_acc})
                    print(util.BColors.WARNING + f"Mean accuracy {mean_acc}" + util.BColors.ENDC)

    if args.noise_level >= 0:
        acc_filename = f"{folder}/acc_{args.network_name}_noise{args.noise_level}.json"
    else:
        acc_filename = f"{folder}/acc_{args.network_name}.json"
    print(acc_filename)
    with open(acc_filename, "w") as acc_file:
        acc_file.write(json.dumps(map_accuracy))
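
The JSON written at the end of this example maps keys of the form c_{channel}_l{layers}_k{kernel} to a mean accuracy per hyper-parameter combination. As a minimal sketch (not part of the original code, with a hypothetical file name standing in for the generated one), the map can be read back and the best configuration selected like this:

# Minimal sketch: load the accuracy map written by get_ranks() above and report
# the configuration with the highest mean accuracy. The file name is hypothetical.
import json

with open("acc_VGGNumLayers.json") as acc_file:  # hypothetical file name
    map_accuracy = json.load(acc_file)

best_config = max(map_accuracy, key=map_accuracy.get)
print(f"Best configuration: {best_config} ({map_accuracy[best_config]:.2f}%)")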
Code Example #2
def get_ranks(args, network_name, model_params):
    # Load the data and make it global
    global x_attack, y_attack, dk_plain, key_guesses
    x_attack, y_attack, key_guesses, real_key, dk_plain = load_data(
        args, network_name)

    folder = "{}/{}/".format(args.models_path, generate_folder_name(args))

    # Calculate the predictions beforehand
    predictions = []
    for run in args.runs:
        model_path = '{}/model_r{}_{}.pt'.format(
            folder, run, get_save_name(network_name, model_params))
        print('path={}'.format(model_path))

        model = load_model(network_name, model_path)
        model.eval()
        print("Using {}".format(model))
        model.to(args.device)

        # Calculate predictions
        if require_domain_knowledge(network_name):
            prediction = accuracy(model, x_attack, y_attack, dk_plain)
            predictions.append(prediction.cpu().numpy())
        else:
            prediction = accuracy(model, x_attack, y_attack, None)
            predictions.append(prediction.cpu().numpy())

    # Check if it is only one run, if so don't do multi threading
    if len(args.runs) == 1:
        threaded_run_test(args, predictions[0], folder, args.runs[0],
                          network_name, model_params, real_key)
    else:
        # Start a thread for each run
        processes = []
        for i, run in enumerate(args.runs):
            p = Process(target=threaded_run_test,
                        args=(args, predictions[i], folder, run, network_name,
                              model_params, real_key))
            processes.append(p)
            p.start()
        # Wait for them to finish
        for p in processes:
            p.join()
            print('Joined process')
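
This example (and Example #4 below) fans the rank computation out over multiprocessing.Process workers and joins them afterwards. The following self-contained sketch shows only that dispatch pattern; the worker and its arguments are hypothetical placeholders, not the real threaded_run_test:

# Minimal, self-contained sketch of the Process/join pattern used above; the
# worker body and its arguments are hypothetical.
from multiprocessing import Process

def rank_worker(run, prediction):
    print(f"ranking run {run} on {len(prediction)} predictions")

if __name__ == '__main__':
    predictions = [[0.1, 0.9], [0.8, 0.2], [0.5, 0.5]]
    processes = [Process(target=rank_worker, args=(run, p))
                 for run, p in enumerate(predictions)]
    for p in processes:
        p.start()
    for p in processes:
        p.join()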
Code Example #3
def get_ranks(x_attack, y_attack, key_guesses, real_key, runs, train_size,
              epochs, lr, sub_key_index, attack_size, network_name, batch_size,
              spread_factor, use_hw, data_set_name):
    ranks_x = []
    ranks_y = []

    for run in runs:
        model_path = '/media/rico/Data/TU/thesis/runs2/' \
                     '{}/subkey_{}/{}_SF{}_E{}_BZ{}_LR{}/train{}/model_r{}_{}.pt'.format(
                        data_set_name,
                        sub_key_index,
                        'HW' if use_hw else 'ID',
                        spread_factor,
                        epochs,
                        batch_size,
                        '%.2E' % Decimal(lr),
                        train_size,
                        run,
                        network_name
                        )
        print('path={}'.format(model_path))

        # Load the model
        model = load_model(network_name=network_name, model_path=model_path)
        print("Using {}".format(model))
        model.to(device)

        # permutation = np.random.permutation(x_attack.shape[0])
        # x_attack = shuffle_permutation(permutation, np.array(x_attack))
        # y_attack = shuffle_permutation(permutation, np.array(y_attack))

        x, y = test_with_key_guess(x_attack,
                                   y_attack,
                                   key_guesses,
                                   model,
                                   attack_size=attack_size,
                                   real_key=real_key,
                                   use_hw=use_hw)
        # Add the ranks
        ranks_x.append(x)
        ranks_y.append(y)
    return ranks_x, ranks_y
Code Example #4
def get_ranks(args, network_name, model_params, edit_model=disable_filter):
    folder = "{}/{}/".format(args.models_path, generate_folder_name(args))

    # Calculate the predictions beforehand
    # TODO: for multiple runs
    model_path = '{}/model_r{}_{}.pt'.format(
        folder, args.run, get_save_name(network_name, model_params))
    print('path={}'.format(model_path))

    if not os.path.exists(f"{model_path}.predictions1.npy"):

        # Load the data and make it global
        global x_attack, y_attack, dk_plain, key_guesses
        x_attack, y_attack, key_guesses, real_key, dk_plain = load_data(
            args, network_name)
        model = load_model(network_name, model_path)
        model.eval()
        model.to(args.device)

        predictions, correct_indices, sum_indices = edit_model(model)

        np_predictions = np.array(predictions)
        np_correct_indices = np.array(correct_indices)
        np_sum_indices = np.array(sum_indices)
        np.save(f"{model_path}.predictions1", np_predictions)
        np.save(f"{model_path}.correct_indices", np_correct_indices)
        np.save(f"{model_path}.sum_indices", np_sum_indices)
        print(sum_indices)
    else:
        predictions = np.load(f"{model_path}.predictions1.npy")
        real_key = util.load_csv('{}/{}/secret_key.csv'.format(
            args.traces_path, str(load_args.data_set)),
                                 dtype=np.int)
        key_guesses = util.load_csv(
            '{}/{}/Value/key_guesses_ALL_transposed.csv'.format(
                args.traces_path, str(load_args.data_set)),
            delimiter=' ',
            dtype=np.int,
            start=load_args.train_size + load_args.validation_size,
            size=load_args.attack_size)

    # Start a thread for each prediction
    groups_of = 7
    for k in range(math.ceil(len(predictions) / float(groups_of))):

        # Start groups of processes
        processes = []
        for i in range(k * groups_of, (k + 1) * groups_of, 1):
            if i >= len(predictions):
                break
            print(f"i: {i}")

            p = Process(target=threaded_run_test,
                        args=(args, predictions[i], folder, args.run,
                              network_name, model_params, real_key, i))
            processes.append(p)
            p.start()

        # Wait for the processes to finish
        for p in processes:
            p.join()
            print('Joined process')
Code Example #5
def model_predict(data_loader, data_set, cls_number=17, model_times=0):

    model, _ = load_model(model_name=opt.model, resume=opt.resume,
                          start_epoch=opt.start_epoch, cn=opt.cn,
                          save_dir=opt.save_dir, width=opt.width,
                          start=opt.start, cls_number=cls_number,
                          avg_number=opt.avg_number, gpus=opt.gpus,
                          model_times=model_times, kfold=opt.kfold,
                          train=False)

    model.eval()

    criterion = CrossEntropyLoss()

    test_size = ceil(len(data_set['test']) / data_loader['test'].batch_size)
    test_preds = np.zeros((len(data_set['test'])), dtype=np.int8)
    raw_results = []
    true_label = np.zeros((len(data_set['test'])), dtype=np.int)
    idx = 0
    test_loss = 0
    test_corrects = 0
    for batch_cnt_test, data_test in enumerate(data_loader['test']):
        # print data
        if batch_cnt_test % 100 == 2:
            print("{0}/{1}".format(batch_cnt_test, int(test_size)))
        inputs, labels = data_test
        #print (inputs.size())
        inputs = Variable(inputs.cuda())
        labels = Variable(torch.from_numpy(np.array(labels)).long().cuda())

        # forward
        if opt.crop_five > 1 and opt.cn == 3:
            #print inputs.size()
            bs, ncrops, c, h, w = inputs.size()
            result = model(inputs.view(-1, c, h, w))
            outputs = result.view(bs, ncrops, -1).mean(1)
        elif opt.crop_five > 1 and opt.cn > 3:
            bs, CC, h, w = inputs.size()
            ncrops = CC // opt.cn
            result = model(inputs.view(-1, opt.cn, h, w))
            outputs = result.view(bs, ncrops, -1).mean(1)
        elif opt.crop_five == 1:
            outputs = model(inputs)

        #print(outputs.size())
        #continue

        # statistics
        if isinstance(outputs, list):
            loss = criterion(outputs[0], labels)
            loss += criterion(outputs[1], labels)
            outputs = (outputs[0] + outputs[1]) / 2
        else:
            loss = criterion(outputs, labels)
        _, preds = torch.max(outputs, 1)

        test_loss += loss.item()
        #test_loss += loss.data[0]
        batch_corrects = torch.sum((preds == labels)).item()
        #batch_corrects = torch.sum((preds == labels)).data[0]
        test_corrects += batch_corrects
        raw_result = [prob.tolist() for prob in outputs.data]
        raw_results += raw_result
        test_preds[idx:(idx + labels.size(0))] = preds.cpu().numpy()
        true_label[idx:(idx + labels.size(0))] = labels.data.cpu().numpy()
        # statistics
        idx += labels.size(0)
    test_loss = test_loss / test_size
    test_acc = 1.0 * test_corrects / len(data_set['test'])
    test_probs = np.array(raw_results)
    #print test_probs.shape
    np.save(
        'analysis_result/test_probs_' + str(opt.model) + '_' + str(opt.width) +
        '_' + str(opt.batchsize), test_probs)
    print('test-loss: %.4f ||test-acc@1: %.4f' % (test_loss, test_acc))
    return test_preds, test_probs
Code Example #6
def run_prediction(filename, output = None, modelTag = 993, viterbi = False, outFormat = 'csv', FULLCONV = True,
                   verbose = True, plot = False):
    """
    Collect the sound files to process and run the prediction on each file
    Parameters
    ----------
    filename : list
        List containing paths to sound files (wav or aiff) or folders containing sound files to
        be analyzed.
    output : str or None
        Path to directory for saving output files. If None, output files will
        be saved to the directory containing the input file.
    modelTag : int or str
        Identifier of the pre-trained model whose weights should be loaded;
        the special value 'CREPE' loads the CREPE model from its json
        definition instead.
    viterbi : bool
        Apply viterbi smoothing to the estimated pitch curve. False by default.
    outFormat : str
        Format of the output prediction files ('csv' by default).
    FULLCONV : bool
        Use the fully convolutional version of the model. True by default.
    verbose : bool
        Print status messages and keras progress (default=True).
    plot : bool
        Plot the prediction output. False by default.
    """

    # load model:
    if modelTag == 'CREPE':
        model = load_model(modelTag, from_json=True)
    else:
        model = load_model(modelTag, FULLCONV=FULLCONV)

    files = []
    for path in filename:
        if os.path.isdir(path):
            found = ([file for file in os.listdir(path) if
                      (file.lower().endswith('.wav') or file.lower().endswith('.aiff') or file.lower().endswith('.aif'))])
            if len(found) == 0:
                print('FCN-f0: No sound files (only wav or aiff supported) found in directory {}'.format(path),
                      file=sys.stderr)
            files += [os.path.join(path, file) for file in found]
        elif os.path.isfile(path):
            if not (path.lower().endswith('.wav') or path.lower().endswith('.aiff') or path.lower().endswith('.aif')):
                print('FCN-f0: Expecting sound file(s) (only wav or aiff supported) but got {}'.format(path),
                      file=sys.stderr)
            else:
                files.append(path)
        else:
            print('FCN-f0: File or directory not found: {}'.format(path),
                  file=sys.stderr)

    if len(files) == 0:
        print('FCN-f0: No sound files found in {} (only wav or aiff supported), aborting.'.format(filename))
        sys.exit(-1)

    for i, file in enumerate(files):
        if verbose:
            print('FCN-f0: Processing {} ... ({}/{})'.format(
                file, i+1, len(files)), file=sys.stderr)
        run_prediction_on_file(file, output=output, model=model, modelTag=modelTag, viterbi=viterbi,
                               outFormat=outFormat, FULLCONV=FULLCONV, plot=plot, verbose=verbose)
    return
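
A hedged usage sketch of run_prediction() above; the directories and option values are hypothetical:

# Hypothetical invocation (not part of the original code): analyze every wav/aiff
# file in a directory and write CSV pitch estimates to a separate output folder.
run_prediction(['./audio_samples'], output='./f0_estimates',
               modelTag=993, viterbi=True, outFormat='csv', verbose=True)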
Code Example #7
dataloader['train'] = torch.utils.data.DataLoader(train_dataset,
                                                  batch_size=batch_size,
                                                  shuffle=True,
                                                  num_workers=8)
dataloader['val'] = torch.utils.data.DataLoader(valid_dataset,
                                                batch_size=batch_size,
                                                shuffle=False,
                                                num_workers=8)
####print the mean and std of dataset
#print (get_mean_and_std(train_dataset,cn=opt.cn))
#print (get_mean_and_std(valid_dataset,cn=opt.cn))

model, start_epoch = load_model(model_name=opt.model,
                                resume=opt.resume,
                                start_epoch=opt.start_epoch,
                                cn=opt.cn,
                                save_dir=opt.save_dir,
                                width=opt.width,
                                start=opt.start,
                                cls_number=cls_number,
                                avg_number=opt.avg_number)
base_lr = 0.001
weight_decay = 1e-4

load_model_flag = False
if load_model_flag:
    conv1_params = list(map(id, model.conv1.parameters()))
    fc_params = list(map(id, model.fc.parameters()))
    base_params = filter(lambda p: id(p) not in conv1_params + fc_params,
                         model.parameters())
    optimizer = optim.Adam([{'params': base_params},
                            {'params': model.conv1.parameters(), 'lr': base_lr * 10},
                            {'params': model.fc.parameters(), 'lr': base_lr * 10}],
                           lr=base_lr, weight_decay=weight_decay, amsgrad=True)
Code Example #8
from utils import pose_utils as util
import torch
import numpy as np
from tqdm import tqdm
from imageio import get_writer
from skimage.io import imsave

opt = Options().parse(save=False)
opt.nThreads = 1  # test code only supports nThreads = 1
opt.batchSize = 1  # test code only supports batchSize = 1
opt.serial_batches = True  # no shuffle
opt.no_flip = True  # no flip
opt.use_first_frame = False  #??

dataset = load_dataset(opt)  #CreateDataset(opt)
model = load_model(opt)  #create_model(opt)
data = dataset[0]

prev_frame = torch.zeros_like(data['image'])
start_from = 0
generated = []

for i in tqdm(range(start_from, dataset.clip_length)):
    label = data['label'][i:i + 1]
    inst = None if opt.no_instance else data['inst'][i:i + 1]

    cur_frame = model.inference(label, inst, torch.unsqueeze(prev_frame,
                                                             dim=0))
    prev_frame = cur_frame.data[0]

    imsave('./datasets/cardio_dance_test/test_sync/{:05d}.png'.format(i),
Code Example #9
def train_main(x_train,x_test,y_train,y_test,model_times=0):
    if opt.process:
        #x_train = x_train[:1000]
        #x_test = x_test[:100]
        print(np.mean(x_train), np.mean(x_test), np.min(x_train),
              np.min(x_test), np.max(x_train), np.max(x_test))
        x_train, x_test = process_data(x_train, x_test)
        print(np.mean(x_train), np.mean(x_test), np.min(x_train),
              np.min(x_test), np.max(x_train), np.max(x_test))

    if opt.cn == 3:
      data_transforms = {
       'train' : transforms.Compose([
                 transforms.ToPILImage(),
                 transforms.RandomRotation(degrees=45,resample=Image.BICUBIC),
                 #transforms.RandomRotation(degrees=30,resample=Image.BICUBIC),
                 transforms.RandomHorizontalFlip(),
                 transforms.RandomVerticalFlip(),
                 #transforms.ColorJitter(brightness=0.2,contrast=0.2,saturation=0.2, hue=0.2),
                 transforms.RandomResizedCrop(target_size,scale=(0.64,1.0)),
                 #transforms.RandomResizedCrop(target_size,scale=(0.36,1.0)),
                 transforms.ToTensor(),
                 #transforms.Normalize(mean = (0.485, 0.456, 0.406), std = (0.229, 0.224, 0.225))
       ]),
       'val': transforms.Compose([
              transforms.ToPILImage(),
              #transforms.Resize(org_size),
              #transforms.CenterCrop(target_size),
              transforms.ToTensor(),
              #transforms.Normalize(mean = (0.485, 0.456, 0.406), std = (0.229, 0.224, 0.225))
        ])
      }
    else:
      data_transforms = {
        'train' : Compose([
             #RandomRotate((0,45),bound=True),
             RandomRotate((0,45)),
             RandomHVShift(),
             RandomHflip(),
             RandomVflip(),
             RandomErasing(),
             #Resize((target_size,target_size)),
             RandomResizedCrop((target_size,target_size)),
             #Normalize()
         ]),
         'val': Compose([
             Resize((target_size,target_size)),
             #Normalize()
             #CenterCrop((target_size,target_size)),
         ])
      }


    #traindir = r'/media/disk1/fordata/web_server/multiGPU/cccccc/cloud/train/' ##train_dir
    #train_dataset = datasets.ImageFolder(traindir,data_transforms['train'])
    #test_dataset = datasets.ImageFolder(traindir,data_transforms['val'])

    train_x = torch.stack([torch.Tensor(i) for i in x_train])
    train_y = torch.Tensor(y_train)
    #train_y = torch.stack([torch.Tensor(i) for i in y_train])

    val_x = torch.stack([torch.Tensor(i) for i in x_test])
    val_y = torch.Tensor(y_test)
    #val_y = torch.stack([torch.Tensor(i) for i in y_test])


    #train_dataset = torch.utils.data.TensorDataset(train_x,train_y)
    #valid_dataset = torch.utils.data.TensorDataset(val_x,val_y)
    train_dataset = myTensorDataset(train_x,train_y,data_transforms['train'])
    valid_dataset = myTensorDataset(val_x,val_y,data_transforms['val'])

    data_set = {}
    data_set['train'] = train_dataset
    data_set['val'] = valid_dataset

    dataloader = {}
    dataloader['train'] = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size,
                                                   shuffle=True, num_workers=16)
    dataloader['val'] = torch.utils.data.DataLoader(valid_dataset, batch_size=batch_size,
                                                   shuffle=False, num_workers=16)
    ####print the mean and std of dataset
    #print (get_mean_and_std(train_dataset,cn=opt.cn))
    #print (get_mean_and_std(valid_dataset,cn=opt.cn))

    model, start_epoch = load_model(model_name=opt.model, resume=opt.resume,
                                    start_epoch=opt.start_epoch, cn=opt.cn,
                                    save_dir=opt.save_dir, width=opt.width,
                                    start=opt.start, cls_number=cls_number,
                                    avg_number=opt.avg_number, gpus=opt.gpus,
                                    model_times=model_times, kfold=opt.kfold)

    base_lr = opt.baselr
    weight_decay = opt.wd

    load_model_flag = False
    if load_model_flag:
        conv1_params = list(map(id, model.conv1.parameters()))
        fc_params = list(map(id, model.fc.parameters()))
        base_params = filter(lambda p: id(p) not in conv1_params + fc_params,
                             model.parameters())
        optimizer = optim.Adam([{'params': base_params},
                                {'params': model.conv1.parameters(), 'lr': base_lr * 10},
                                {'params': model.fc.parameters(), 'lr': base_lr * 10}],
                               lr=base_lr, weight_decay=weight_decay, amsgrad=True)
    else:
        #optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=weight_decay, amsgrad=True)
        if opt.optimizer == 'Adam':
            optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=weight_decay, amsgrad=True)
        elif opt.optimizer == 'SGD':
            optimizer = optim.SGD(model.parameters(), lr=base_lr, weight_decay=weight_decay,momentum=0.9)

    criterion = CrossEntropyLoss()
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.33)
    #exp_lr_scheduler = lr_scheduler.ReduceLROnPlateau(optimizer,mode='min',factor=0.5,patience=4)
    #exp_lr_scheduler = lr_scheduler.MultiStepLR(optimizer, milestones = [10,20,30,40], gamma=0.1)
    #exp_lr_scheduler = lr_scheduler.CosineAnnealingLR(optimizer,T_max=5,eta_min=4e-08)

    iter_per_epoch = len(data_set['train']) // opt.batchsize
    #print (iter_per_epoch)
    model_name = opt.model + '_' + str(opt.width) + '_' + str(opt.start) + '_' + str(opt.cn)
    if opt.kfold > 1:
        model_name = str(model_times) + '_' + model_name
    
    train(model,
          model_name=model_name,
          end_epoch=opt.end_epoch,
          start_epoch=start_epoch,
          optimizer=optimizer,
          criterion=criterion,
          exp_lr_scheduler=exp_lr_scheduler,
          data_set=data_set,
          data_loader=dataloader,
          save_dir=opt.save_dir,
          cls_number=cls_number,
          print_inter=iter_per_epoch // 4,
          val_inter=iter_per_epoch,
          mixup=opt.mixup,
          label_smoothing=opt.label_smoothing,
          focal_loss=opt.focal_loss
          )
    torch.cuda.empty_cache()
Code Example #10
            "train_size": 1000,
            "kernel_size": 20,
            "num_layers": 2,
            "channel_size": 16,
            "network_name": "SpreadV3", #""DenseNorm",
            "init_weights": "",
            "run": 0
}

args = util.EmptySpace()
for key, value in settings.items():
    setattr(args, key, value)

folder = "/media/rico/Data/TU/thesis/runs{}/{}".format(args.experiment, util.generate_folder_name(args))
filename = folder + f"/model_r{args.run}_" + util_classes.get_save_name(args.network_name, settings) + ".pt"
model = load_model(args.network_name, filename)

print(model)

x_test, _, _, _, _ = util.load_ascad_test_traces({
    "sub_key_index": 2,
    "desync": 0,
    "traces_path": "/media/rico/Data/TU/thesis/data",
    "unmask": args.unmask,
    "use_hw": args.use_hw
})
x_test = x_test
print(f"Shape x_test {np.shape(x_test)}")
x_test = torch.from_numpy(x_test.astype(np.float32)).to(util.device)
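
What typically follows such a setup is a forward pass over the prepared traces. A hedged continuation (assuming the loaded network takes the trace tensor directly, as in the other examples):

# Hedged continuation (assumption, not in the original snippet): run the loaded
# model on the prepared attack traces and inspect the shape of the class scores.
model.to(util.device)
model.eval()
with torch.no_grad():
    predictions = model(x_test)
print(f"Shape predictions {np.shape(predictions.cpu().numpy())}")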

Code Example #11
def get_ranks(use_hw,
              runs,
              train_size,
              epochs,
              lr,
              sub_key_index,
              attack_size,
              rank_step,
              unmask,
              network_name,
              kernel_size_string=""):
    ranks_x = []
    ranks_y = []
    (_, _), (x_attack,
             y_attack), (metadata_profiling,
                         metadata_attack) = load_ascad(trace_file,
                                                       load_metadata=True)
    key_guesses = util.load_csv(
        '/media/rico/Data/TU/thesis/data/ASCAD/key_guesses.csv',
        delimiter=' ',
        dtype=np.int,
        start=0,
        size=attack_size)
    x_attack = x_attack[:attack_size]
    y_attack = y_attack[:attack_size]
    if unmask:
        if use_hw:
            y_attack = np.array([
                y_attack[i] ^ metadata_attack[i]['masks'][0]
                for i in range(len(y_attack))
            ])
        else:
            y_attack = np.array([
                util.HW[y_attack[i] ^ metadata_attack[i]['masks'][0]]
                for i in range(len(y_attack))
            ])
    real_key = metadata_attack[0]['key'][sub_key_index]

    for run in runs:
        folder = '/media/rico/Data/TU/thesis/runs2/{}/subkey_{}/{}{}{}_SF{}_' \
                     'E{}_BZ{}_LR{}/train{}/'.format(
                        str(data_set),
                        sub_key_index,
                        '' if unmask else 'masked/',
                        '' if desync == 0 else 'desync{}/'.format(desync),
                        type_network,
                        spread_factor,
                        epochs,
                        batch_size,
                        '%.2E' % Decimal(lr),
                        train_size)
        model_path = '{}/model_r{}_{}{}.pt'.format(folder, run, network_name,
                                                   kernel_size_string)
        print('path={}'.format(model_path))

        model = load_model(network_name, model_path)
        model.eval()
        print("Using {}".format(model))
        model.to(device)

        # Load additional plaintexts
        dk_plain = None
        if network_name in req_dk:
            dk_plain = metadata_attack[:]['plaintext'][:, sub_key_index]
            dk_plain = hot_encode(dk_plain,
                                  9 if use_hw else 256,
                                  dtype=np.float)

        # Calculate predictions
        predictions = accuracy(model, x_attack, y_attack, dk_plain)
        predictions = predictions.cpu().numpy()

        x, y = [], []
        for exp_i in range(num_exps):
            permutation = permutations[exp_i]

            # Shuffle data
            predictions_shuffled = shuffle_permutation(permutation,
                                                       np.array(predictions))
            key_guesses_shuffled = shuffle_permutation(permutation,
                                                       key_guesses)

            # Test the data
            x_exp, y_exp = test_with_key_guess_p(key_guesses_shuffled,
                                                 predictions_shuffled,
                                                 attack_size=attack_size,
                                                 real_key=real_key,
                                                 use_hw=use_hw)
            x = x_exp
            y.append(y_exp)

        # Calculate the mean over the experiments
        y = np.mean(y, axis=0)
        util.save_np('{}/model_r{}_{}{}.exp'.format(folder, run, network_name,
                                                    kernel_size_string),
                     y,
                     f="%f")

        if isinstance(model, SpreadNetIn):
            # Get the intermediate values right after the first fully connected layer
            z = np.transpose(model.intermediate_values2[0])

            # Calculate the mse for the maximum and minimum from these traces and the learned min and max
            min_z = np.min(z, axis=1)
            max_z = np.max(z, axis=1)
            msq_min = np.mean(np.square(min_z - model.tensor_min), axis=None)
            msq_max = np.mean(np.square(max_z - model.tensor_max), axis=None)
            print('msq min: {}'.format(msq_min))
            print('msq max: {}'.format(msq_max))

            # Plot the distribution of each neuron right after the first fully connected layer
            for k in [50]:
                plt.grid(True)
                plt.axvline(x=model.tensor_min[k], color='green')
                plt.axvline(x=model.tensor_max[k], color='green')
                plt.hist(z[:][k], bins=40)

                plt.show()
            exit()

            # Retrieve the intermediate values right after the spread layer,
            # and order them such that each group of spread_factor consecutive
            # values belongs to one neuron of the previous layer
            v = model.intermediate_values
            order = [
                int((x % spread_factor) * 100 + math.floor(x / spread_factor))
                for x in range(spread_factor * 100)
            ]
            inter = []
            for x in range(len(v[0])):
                inter.append([v[0][x][j] for j in order])

            # Calculate the standard deviation of each neuron in the spread layer
            std = np.std(inter, axis=0)
            threshold = 1.0 / attack_size * 10
            print("divby: {}".format(threshold))
            res = np.where(std < threshold, 1, 0)

            # Calculate the mean of each neuron in the spread layer
            mean_res = np.mean(inter, axis=0)
            # mean_res2 = np.where(mean_res < threshold, 1, 0)
            mean_res2 = np.where(mean_res == 0.0, 1, 0)
            print('Sum  std results {}'.format(np.sum(res)))
            print('Sum mean results {}'.format(np.sum(mean_res2)))

            # Check which neurons have a std and mean where it is smaller than threshold
            total_same = 0
            for j in range(len(mean_res2)):
                if mean_res2[j] == 1 and res[j] == 1:
                    total_same += 1
            print('Total same: {}'.format(total_same))

            # Plot the standard deviations
            plt.title('Comparison of networks')
            plt.xlabel('#neuron')
            plt.ylabel('std')
            xcoords = [j * spread_factor for j in range(100)]
            for xc in xcoords:
                plt.axvline(x=xc, color='green')
            plt.grid(True)
            plt.plot(std, label='std')
            plt.figure()

            # Plot the means
            plt.title('Performance of networks')
            plt.xlabel('#neuron')
            plt.ylabel('mean')
            for xc in xcoords:
                plt.axvline(x=xc, color='green')
            plt.grid(True)
            plt.plot(mean_res, label='mean')
            plt.legend()
            plt.show()

        ranks_x.append(x)
        ranks_y.append(y)
    return ranks_x, ranks_y
Code Example #12
            '' if args['desync'] == 0 else 'desync{}/'.format(args['desync']),
            args['type_network'],
            args['spread_factor'],
            args['epochs'],
            args['batch_size'],
            '%.2E' % Decimal(args['lr']),
            args['train_size'])

# Calculate the predictions before hand
predictions = []
for run in args['runs']:
    model_path = '{}/model_r{}_{}.pt'.format(
        folder, run, get_save_name(network_name, model_params))
    print('path={}'.format(model_path))

    model = load_model(network_name, model_path)

    variables = ["conv1", "conv2", "conv3"]
    for var in variables:
        weights = model.__getattr__(var).weight.data.cpu().numpy()
        ones = np.ones(np.shape(weights))
        zeros = np.zeros(np.shape(weights))

        # Mark weights that lie inside (-THRESHOLD, THRESHOLD)
        plus = np.where(weights < THRESHOLD, ones, zeros)
        minus = np.where(-THRESHOLD < weights, ones, zeros)
        z = plus + minus
        res = np.where(z == 2, ones, zeros)
        # print(res)
        # Count the near-zero weights along the kernel dimension
        count = np.sum(res, axis=2)

        # ones = np.ones(np.shape(res))
Code Example #13
def get_ranks(x_attack,
              y_attack,
              key_guesses,
              runs,
              train_size,
              epochs,
              lr,
              sub_key_index,
              attack_size,
              rank_step,
              unmask,
              network_name,
              kernel_size_string=""):
    ranks_x = []
    ranks_y = []

    for run in runs:
        model_path = '/media/rico/Data/TU/thesis/runs2/' \
                     '{}/subkey_{}/{}_SF{}_E{}_BZ{}_LR{}/train{}/model_r{}_{}{}.pt'.format(
                        data_set_name,
                        sub_key_index,
                        type_network,
                        spread_factor,
                        epochs,
                        batch_size,
                        '%.2E' % Decimal(lr),
                        train_size,
                        run,
                        network_name,
                        kernel_size_string)
        print('path={}'.format(model_path))

        # Load the model
        model = load_model(network_name=network_name, model_path=model_path)
        model.eval()
        print("Using {}".format(model))
        model.to(device)

        # Number of times we test a single model + shuffle the test traces
        num_exps = 100
        x, y = [], []
        for exp_i in range(num_exps):
            permutation = np.random.permutation(x_attack.shape[0])
            # permutation = np.arange(0, x_attack.shape[0])

            x_attack_shuffled = util.shuffle_permutation(
                permutation, np.array(x_attack))
            y_attack_shuffled = util.shuffle_permutation(
                permutation, np.array(y_attack))
            key_guesses_shuffled = util.shuffle_permutation(
                permutation, key_guesses)

            # Check if we need domain knowledge
            dk_plain = None
            if network_name in util.req_dk:
                dk_plain = plain
                dk_plain = util.shuffle_permutation(permutation, dk_plain)

            x_exp, y_exp = test_with_key_guess(x_attack_shuffled,
                                               y_attack_shuffled,
                                               key_guesses_shuffled,
                                               model,
                                               attack_size=attack_size,
                                               real_key=real_key,
                                               use_hw=use_hw,
                                               plain=dk_plain)
            x = x_exp
            y.append(y_exp)

        # Take the mean of the different experiments
        y = np.mean(y, axis=0)
        # Add the ranks
        ranks_x.append(x)
        ranks_y.append(y)
    return ranks_x, ranks_y
Code Example #14
from data.load_data import load_dataset
from models.load_model import load_model
import util.util as util
import torch
from imageio import get_writer
import numpy as np
from tqdm import tqdm

opt = DemoTestOptions().parse(save=False)
opt.nThreads = 1  # test code only supports nThreads = 1
opt.batchSize = 1  # test code only supports batchSize = 1
opt.serial_batches = True  # no shuffle
opt.no_flip = True  # no flip

dataset = load_dataset(opt)
model = load_model(opt)
#if opt.verbose:                                                  # demo changed
#    print(model)

# test whole video sequence
# 20181009: do we use first frame as input?

data = dataset[0]
#if opt.use_first_frame:                                          # demo changed
#    prev_frame = data['image']
#    start_from = 1
#    from skimage.io import imsave
#    imsave('results/ref.png', util.tensor2im(prev_frame))
#    generated = [util.tensor2im(prev_frame)]
if 1:  #else:
    prev_frame = torch.zeros_like(data['image'])
Code Example #15
def get_ranks(use_hw,
              runs,
              train_size,
              epochs,
              lr,
              sub_key_index,
              attack_size,
              rank_step,
              unmask,
              network_name,
              kernel_size_string=""):
    ranks_x = []
    ranks_y = []
    (_, _), (x_attack,
             y_attack), (metadata_profiling,
                         metadata_attack) = load_ascad(trace_file,
                                                       load_metadata=True)
    key_guesses = util.load_csv('{}/ASCAD/key_guesses.csv'.format(traces_path),
                                delimiter=' ',
                                dtype=np.int,
                                start=0,
                                size=attack_size)
    x_attack = x_attack[:attack_size]
    y_attack = y_attack[:attack_size]
    if unmask:
        if use_hw:
            y_attack = np.array([
                y_attack[i] ^ metadata_attack[i]['masks'][0]
                for i in range(len(y_attack))
            ])
        else:
            y_attack = np.array([
                util.HW[y_attack[i] ^ metadata_attack[i]['masks'][0]]
                for i in range(len(y_attack))
            ])
    real_key = metadata_attack[0]['key'][sub_key_index]

    for run in runs:
        folder = '{}/{}/subkey_{}/{}{}{}_SF{}_' \
                 'E{}_BZ{}_LR{}/train{}/'.format(
                    models_path,
                    str(data_set),
                    sub_key_index,
                    '' if unmask else 'masked/',
                    '' if desync == 0 else 'desync{}/'.format(desync),
                    type_network,
                    spread_factor,
                    epochs,
                    batch_size,
                    '%.2E' % Decimal(lr),
                    train_size)
        model_path = '{}/model_r{}_{}{}.pt'.format(folder, run, network_name,
                                                   kernel_size_string)
        print('path={}'.format(model_path))

        model = load_model(network_name, model_path)
        model.eval()
        print("Using {}".format(model))
        model.to(device)

        # Load additional plaintexts
        dk_plain = None
        if network_name in req_dk:
            dk_plain = metadata_attack[:]['plaintext'][:, sub_key_index]
            dk_plain = hot_encode(dk_plain,
                                  9 if use_hw else 256,
                                  dtype=np.float)

        # Calculate predictions
        predictions = accuracy(model, x_attack, y_attack, dk_plain)
        predictions = predictions.cpu().numpy()

        # Shuffle the data using the same permutations across experiments and calculate the mean GE of the model
        x, y = [], []
        for exp_i in range(num_exps):
            permutation = permutations[exp_i]

            # Shuffle data
            predictions_shuffled = shuffle_permutation(permutation,
                                                       np.array(predictions))
            key_guesses_shuffled = shuffle_permutation(permutation,
                                                       key_guesses)

            # Test the data
            x_exp, y_exp = test_with_key_guess_p(key_guesses_shuffled,
                                                 predictions_shuffled,
                                                 attack_size=attack_size,
                                                 real_key=real_key,
                                                 use_hw=use_hw)
            x = x_exp
            y.append(y_exp)

        # Calculate the mean over the experiments
        y = np.mean(y, axis=0)
        util.save_np('{}/model_r{}_{}{}.exp'.format(folder, run, network_name,
                                                    kernel_size_string),
                     y,
                     f="%f")

        ranks_x.append(x)
        ranks_y.append(y)
    return ranks_x, ranks_y
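
The rank curves returned by the get_ranks variants in these examples are typically plotted as guessing-entropy curves. A minimal, self-contained matplotlib sketch (the ranks below are hypothetical placeholders for real output):

# Minimal plotting sketch (not part of the original code): draw one
# guessing-entropy curve per run; ranks_x / ranks_y are hypothetical data.
import numpy as np
import matplotlib.pyplot as plt

ranks_x = [np.arange(1, 1001) for _ in range(2)]           # hypothetical trace counts
ranks_y = [np.linspace(128, 0, 1000) for _ in range(2)]    # hypothetical mean ranks

for run, (x, y) in enumerate(zip(ranks_x, ranks_y)):
    plt.plot(x, y, label=f'run {run}')
plt.xlabel('Number of attack traces')
plt.ylabel('Mean rank of the real key')
plt.grid(True)
plt.legend()
plt.show()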