# Empirical frequency of each distinct input pattern, normalised by the
# number of positive training labels (sum of the binary label vector).
num_samples = np.sum(nn_model_ref.Y_train_nn_binaire)
print(X_train_f.shape)
# NOTE(review): np.unique(..., axis=1) returns the unique *columns* of
# X_train, so `vals` is 2-D and `counts` has one entry per unique column.
vals, counts = np.unique(X_train, axis=1, return_counts=True)
print(vals.shape, counts.shape)
# BUG FIX: the original `dict(zip(vals, counts / num_samples))` zipped the
# *rows* of `vals` against per-column counts and then raised TypeError,
# because ndarrays are unhashable dict keys.  Key each unique column, as a
# hashable tuple, to its normalised frequency instead.
sv = dict(zip(map(tuple, vals.T), counts / num_samples))
print(sv)
"""

Y_eval_proba = nn_model_ref.Y_val_nn_binaire
Y_train_proba = nn_model_ref.Y_train_nn_binaire

#net = AE_binarize(args, X_train_f.shape[1], h1= 250).to(device)

net = NN_linear(args, X_train_f.shape[1]).to(device)

nn_model_ref.net = net
nn_model_ref.X_train_nn_binaire = X_train_f
nn_model_ref.X_val_nn_binaire = X_val_f
#nn_model_ref.Y_train_nn_binaire = X_train_f
#nn_model_ref.Y_val_nn_binaire = X_val_f
"""
args.load_nn_path = "./results/create_synth_masks_v2/speck/5/ctdata0l^ctdata1l_ctdata0r^ctdata1r^ctdata0l^ctdata1l_ctdata0l^ctdata0r_ctdata1l^ctdata1r/2020_07_21_17_26_59_603174/0.9966710913033485_bestacc.pth"
nn_model_ref.net.load_state_dict(torch.load(args.load_nn_path,
                map_location=device)['state_dict'], strict=False)
nn_model_ref.net.to(device)
nn_model_ref.net.eval()
"""
# Train the model from scratch; "AE" selects the model variant (the method
# name typo "scractch" is part of the project API — do not "fix" the call).
nn_model_ref.train_from_scractch("AE")

# NOTE(review): this loop only prints the sparsity levels; the pruning code
# that uses `global_sparsity` (see the checkpoint name further down) looks
# de-indented out of this loop in this excerpt — confirm against the
# original file.
for global_sparsity in [0, 0.2, 0.4]:
    print(global_sparsity)
# Drop every cached dataset when both regeneration flags are set, so new
# data for the ToT and the classifier can be rebuilt without doubling peak
# memory.
if args.create_new_data_for_ToT and args.create_new_data_for_classifier:
    del nn_model_ref.X_train_nn_binaire, nn_model_ref.X_val_nn_binaire, nn_model_ref.Y_train_nn_binaire, nn_model_ref.Y_val_nn_binaire
    del nn_model_ref.c0l_train_nn, nn_model_ref.c0l_val_nn, nn_model_ref.c0r_train_nn, nn_model_ref.c0r_val_nn
    del nn_model_ref.c1l_train_nn, nn_model_ref.c1l_val_nn, nn_model_ref.c1r_train_nn, nn_model_ref.c1r_val_nn

# Fine-tune on 5-round data: freeze the pretrained backbone tensors, swap in
# a fresh single-output head, and retrain only that new layer.
args.nombre_round_eval = 5
nn_model_ref2 = NN_Model_Ref(args, writer, device, rng, path_save_model,
                             cipher, creator_data_binary,
                             path_save_model_train)

backbone = nn_model_ref.net

# Gather every pretrained tensor that must stay fixed during fine-tuning.
frozen = [backbone.conv0.weight, backbone.BN0.bias]
for layer_idx in range(backbone.numLayers - 1):
    frozen.append(backbone.layers_conv[layer_idx].weight)
    frozen.append(backbone.layers_batch[layer_idx].weight)
frozen += [backbone.fc1.weight, backbone.BN5.weight,
           backbone.fc2.weight, backbone.BN6.weight]
for tensor in frozen:
    tensor.requires_grad = False

# The replaced head is the only trainable part of the network.
backbone.fc3 = nn.Linear(args.hidden1, 1)
nn_model_ref2.net = backbone
nn_model_ref2.train_general(name_input)

print("STEP 1 : DONE")
print("---" * 100)
if args.end_after_training:
    sys.exit(1)
nn_model_ref.eval_all(["train", "val"])


# Build a standalone linear head and graft the classifier weights from the
# trained full model onto it, so the head alone can be evaluated/retrained
# on the captured intermediate activations below.
# NOTE(review): NN_linear is called with a single argument here but with
# (args, input_size) earlier in the file — confirm the constructor signature.
net_linear = NN_linear(args)

# NOTE(review): `.to(device)` returns a plain Tensor, so these assignments
# replace nn.Parameter attributes with Tensors, and neither biases nor
# BatchNorm running statistics are copied — confirm this is intentional.
net_linear.fc1.weight = nn_model_ref.net.fc1.weight.to(device)
net_linear.fc2.weight = nn_model_ref.net.fc2.weight.to(device)
net_linear.fc3.weight = nn_model_ref.net.fc3.weight.to(device)
net_linear.BN5.weight = nn_model_ref.net.BN5.weight.to(device)
net_linear.BN6.weight = nn_model_ref.net.BN6.weight.to(device)

# Keep a handle on the full network, then point the wrapper at the linear
# head and at the intermediate activations as its train/val inputs.
net_all = nn_model_ref.net
del nn_model_ref.net
#change model
nn_model_ref.net = net_linear.to(device)
#change data
nn_model_ref.X_train_nn_binaire = nn_model_ref.all_intermediaire
nn_model_ref.X_val_nn_binaire = nn_model_ref.all_intermediaire_val


# Rebuild the mask generator against the (now linear) model and feed it the
# captured intermediate activations instead of freshly generated data.
del get_masks_gen

args.research_new_masks = True

get_masks_gen = Get_masks_v2(args, nn_model_ref.net, path_save_model, rng, creator_data_binary, device)
if args.research_new_masks:
    # create_data() is deliberately skipped: activations are injected directly.
    for attr_name, source in (
            ("X_deltaout_train", nn_model_ref.all_intermediaire),
            ("X_eval", nn_model_ref.all_intermediaire_val),
            ("Y_tf", nn_model_ref.Y_train_nn_binaire),
    ):
        setattr(get_masks_gen, attr_name, source)
# --- Example #4 (scraped snippet-site marker "示例#4"; site vote count: 0) ---
# Ensemble evaluation: for each phase, run every saved model over each batch
# and combine member outputs as a coefficient-weighted sum.
for phase in val_phase:
    nn_model_ref.net.eval()
    if nn_model_ref.args.curriculum_learning:
        nn_model_ref.dataloaders[phase].catgeorie = pourcentage
    running_loss = 0.0
    nbre_sample = 0
    # Confusion-matrix accumulators (not updated in this excerpt —
    # presumably filled further below in the original file).
    TP, TN, FN, FP = torch.zeros(1).long(), torch.zeros(1).long(), torch.zeros(1).long(), torch.zeros(
        1).long()
    tk0 = tqdm(nn_model_ref.dataloaders[phase], total=int(len(nn_model_ref.dataloaders[phase])))
    for i, data in enumerate(tk0):
        inputs, labels = data
        coefall = 0
        for iter_filenames, filenames in enumerate(glob.glob(mypath)):
            coef = all_models_trained["coef"][iter_filenames]
            nn_model_ref.net = all_models_trained[filenames].eval()
            outputs = nn_model_ref.net(inputs.to(nn_model_ref.device))
            if methode1:
                # Weighted running sum over the ensemble members; coefall
                # accumulates the normalisation constant.
                if iter_filenames == 0:
                    outputs_f = coef * outputs
                else:
                    outputs_f += coef * outputs
                coefall += coef


            #if phase == "train":
            #    data_train1[i*nn_model_ref.batch_size:(i+1)*nn_model_ref.batch_size,iter_filenames] = outputs.squeeze(1).detach().cpu().numpy()
            #else:
            #    data_val1[i*nn_model_ref.batch_size:(i+1)*nn_model_ref.batch_size,iter_filenames] = outputs.squeeze(1).detach().cpu().numpy()

            # BUG FIX: two stray unmatched closing parentheses followed here
            # in the original (a SyntaxError); removed as residue of code
            # that was deleted upstream.
    # NOTE(review): this block sits at loop indentation; the enclosing loop
    # header (presumably over sparsity levels) is outside this excerpt.
    # Evaluate the pruned network: full evaluation pass the first time
    # (flag2), lighter eval afterwards, and record the resulting accuracy.
    if flag2:
        nn_model_ref.eval_all(["val"])
        flag2 = False
    else:
        nn_model_ref.eval(["val"])
    acc_retain.append(nn_model_ref.acc)

    # Optionally checkpoint the pruned weights, tagging the filename with
    # the model type, training-set size and current global sparsity level.
    if args.save_model_prune:
        torch.save({'epoch': 0 , 'acc': 0, 'state_dict': nn_model_ref.net.state_dict()},
                   os.path.join(path_save_model,
                                'Gohr_' + nn_model_ref.args.type_model + '_best_nbre_sampletrain_' + str(
                                    nn_model_ref.args.nbre_sample_train) + "_prunning_"+str(global_sparsity) + '.pth'))


    # Reload a fresh, unpruned model before the next iteration.
    del nn_model_ref.net
    nn_model_ref.net = nn_model_ref.choose_model()
    nn_model_ref.load_nn()

# Plot the accuracy retained at each pruning percentage and write the figure
# next to the model checkpoints, using the object-oriented matplotlib API.
fig = plt.figure(figsize=(20, 20))
ax = plt.axes()
ax.plot(args.values_prunning, acc_retain)
ax.set_title("model: " + args.model_to_prune)
ax.set_xlabel("Pourcentage prunning")
ax.set_ylabel("Accuracy")
out_name = "prunning" + args.model_to_prune.replace('/', "_") + ".png"
fig.savefig(path_save_model + out_name)