#--------------------------------------------------------------------------------------------------------------------------------------------------------------------------
#
print("---" * 100)
writer, device, rng, path_save_model, path_save_model_train, name_input = init_all_for_run(
    args)

print("LOAD CIPHER")
print()
cipher = init_cipher(args)
creator_data_binary = Create_data_binary(args, cipher, rng)

#--------------------------------------------------------------------------------------------------------------------------------------------------------------------------
print("---" * 100)
print("TABLE OF TRUTH")

nn_model_ref = NN_Model_Ref(args, writer, device, rng, path_save_model, cipher,
                            creator_data_binary, path_save_model_train)
nn_model_ref.load_nn()

flag2 = True
acc_retain = []
global_sparsity = 0.95
parameters_to_prune = []
for name, module in nn_model_ref.net.named_modules():
    if len(name):
        if name not in ["layers_batch", "layers_conv"]:
            flag = True
            for layer_forbidden in args.layers_NOT_to_prune:
                if layer_forbidden in name:
                    flag = False
            if flag:
                parameters_to_prune.append((module, 'weight'))
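
# A minimal sketch (not part of the original snippet) of how the collected
# (module, 'weight') pairs could actually be pruned to the global_sparsity set above.
# The choice of L1Unstructured as the pruning criterion is an assumption.
import torch.nn.utils.prune as prune

prune.global_unstructured(
    parameters_to_prune,                 # (module, parameter_name) pairs gathered in the loop above
    pruning_method=prune.L1Unstructured,
    amount=global_sparsity,              # fraction of weights to zero across all listed modules
)

# Optionally make the pruning permanent and report the achieved sparsity.
for module, _ in parameters_to_prune:
    prune.remove(module, 'weight')
zeros = sum(float((module.weight == 0).sum()) for module, _ in parameters_to_prune)
total = sum(module.weight.nelement() for module, _ in parameters_to_prune)
print("Global sparsity: {:.2f}%".format(100.0 * zeros / total))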
Code example #2
    creator_data_binary = Create_data_binary(args, cipher, rng)
    for repeat in range(5):
        cpt += 1
        creator_data_binary = Create_data_binary(args, cipher, rng)

        #--------------------------------------------------------------------------------------------------------------------------------------------------------------------------
        print("---" * 100)
        print("STEP 1 : LOAD/ TRAIN NN REF")
        print()
        print("COUNTINUOUS LEARNING: " + str(args.countinuous_learning) +
              " | CURRICULUM LEARNING: " + str(args.curriculum_learning) +
              " | MODEL: " + str(args.type_model))
        print()

        nn_model_ref = NN_Model_Ref(args, writer, device, rng, path_save_model,
                                    cipher, creator_data_binary,
                                    path_save_model_train)

        if args.retain_model_gohr_ref:
            nn_model_ref.train_general(name_input)
        else:
            #nn_model_ref.load_nn()
            try:
                if args.finetunning:
                    nn_model_ref.load_nn()
                    nn_model_ref.train_from_scractch(name_input + "fine-tune")
                #nn_model_ref.eval(["val"])
                else:
                    nn_model_ref.load_nn()
            except:
                print("ERROR")
Code example #3
print("STEP 1 : LOAD/ TRAIN NN REF")
print()
print("COUNTINUOUS LEARNING: "+ str(args.countinuous_learning) +  " | CURRICULUM LEARNING: " +  str(args.curriculum_learning) + " | MODEL: " + str(args.type_model))
print()


import glob
from torch.utils.data import DataLoader


args.load_special = True
mypath = "./results/Res_96/*pth"
all_models_trained = {}
for filenames in glob.glob(mypath):
    print(filenames)
    nn_model_ref = NN_Model_Ref(args, writer, device, rng, path_save_model, cipher, creator_data_binary, path_save_model_train)
    args.load_nn_path = filenames
    nn_model_ref.load_nn()
    all_models_trained[filenames] = nn_model_ref.net.eval()

    del nn_model_ref

all_models_trained["coef"] = [1, 1, 1, 1, 1, 1, 1, 1]
#[0.125,0.008, 0.06, 0.5, 0.03, 0.0125, 0.008, 0.25]
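
# A minimal sketch (assumption, not from the snippet) of how the "coef" entry above
# could be used: a weighted average of the loaded models' outputs on one input batch.
# It assumes the coefficients follow the same order as the glob() filenames used to
# fill all_models_trained.
import torch

def ensemble_predict(models_dict, batch_x):
    names = [k for k in models_dict if k != "coef"]
    coefs = models_dict["coef"]
    with torch.no_grad():
        outputs = [models_dict[name](batch_x) for name in names]
    # Weighted average of the individual predictions.
    return sum(c * o for c, o in zip(coefs, outputs)) / sum(coefs)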


nn_model_ref = NN_Model_Ref(args, writer, device, rng, path_save_model, cipher, creator_data_binary, path_save_model_train)


data_train = DataLoader_cipher_binary(nn_model_ref.X_train_nn_binaire, nn_model_ref.Y_train_nn_binaire, nn_model_ref.device)
dataloader_train = DataLoader(data_train, batch_size=nn_model_ref.batch_size,
Code example #4
            nn_model_ref.load_nn()
    except:
        print("ERROR")
        print("NO MODEL AVALAIBLE FOR THIS CONFIG")
        print("CHANGE ARGUMENT retain_model_gohr_ref")
        print()
        sys.exit(1)

if args.create_new_data_for_ToT and args.create_new_data_for_classifier:
    del nn_model_ref.X_train_nn_binaire, nn_model_ref.X_val_nn_binaire, nn_model_ref.Y_train_nn_binaire, nn_model_ref.Y_val_nn_binaire
    del nn_model_ref.c0l_train_nn, nn_model_ref.c0l_val_nn, nn_model_ref.c0r_train_nn, nn_model_ref.c0r_val_nn
    del nn_model_ref.c1l_train_nn, nn_model_ref.c1l_val_nn, nn_model_ref.c1r_train_nn, nn_model_ref.c1r_val_nn

args.nombre_round_eval = 5
nn_model_ref2 = NN_Model_Ref(args, writer, device, rng, path_save_model,
                             cipher, creator_data_binary,
                             path_save_model_train)
net_f = nn_model_ref.net
net_f.conv0.weight.requires_grad = False
net_f.BN0.bias.requires_grad = False
for i in range(net_f.numLayers - 1):
    net_f.layers_conv[i].weight.requires_grad = False
    net_f.layers_batch[i].weight.requires_grad = False
net_f.fc1.weight.requires_grad = False
net_f.BN5.weight.requires_grad = False
net_f.fc2.weight.requires_grad = False
net_f.BN6.weight.requires_grad = False
#net_f.fc2 = nn.Linear(args.hidden1, args.hidden1)
#net_f.BN6 = nn.BatchNorm1d(args.hidden1, eps=0.01, momentum=0.99)
net_f.fc3 = nn.Linear(args.hidden1, 1)
nn_model_ref2.net = net_f
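
# A minimal sketch (assumption) of training only the parameters left unfrozen above,
# i.e. the newly created fc3 head: frozen tensors are filtered out of the optimizer.
# The optimizer type and learning rate are assumptions, not taken from the snippet.
import torch.optim as optim

trainable_params = [p for p in net_f.parameters() if p.requires_grad]
optimizer = optim.Adam(trainable_params, lr=1e-3)
print("number of trainable tensors:", len(trainable_params))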
Code example #5
print()
cipher = init_cipher(args)

res_svm_liner = []
res_svm_rbf = []
res_rf_liner = []
res_mlp_rbf = []
res_gohr_rbf = []
res_lgbm_rbf = []

nbre_sample_train = args.nbre_sample_train
#print(args.nbre_sample_train)
args.nbre_sample_train = 10000000
#print(args.nbre_sample_train)
creator_data_binary = Create_data_binary(args, cipher, rng)
nn_model_ref = NN_Model_Ref(args, writer, device, rng, path_save_model, cipher,
                            creator_data_binary, path_save_model_train)
nn_model_ref.load_nn()
net = nn_model_ref.net
args.nbre_sample_train = nbre_sample_train
del nn_model_ref, creator_data_binary
cpt = 0
for seed in range(3):
    args.seed = seed
    creator_data_binary = Create_data_binary(args, cipher, rng)
    for repeat in range(5):
        cpt += 1
        nn_model_ref = NN_Model_Ref(args, writer, device, rng, path_save_model,
                                    cipher, creator_data_binary,
                                    path_save_model_train)
        nn_model_ref.net = net
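        # A minimal sketch (assumption) of what this inner loop could do with the reused
        # network: evaluate it on the data generated for this seed/repeat and record the
        # score. That nn_model_ref.eval(["val"]) returns a validation accuracy is an
        # assumption based only on the commented-out calls in the other snippets.
        acc_val = nn_model_ref.eval(["val"])
        res_gohr_rbf.append(acc_val)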
print()
"""nombre_round_eval = args.nombre_round_eval
args.nombre_round_eval = nombre_round_eval - 2
nn_model_ref2 = NN_Model_Ref(args, writer, device, rng, path_save_model, cipher, creator_data_binary, path_save_model_train)
nn_model_ref2.epochs = 10
nn_model_ref2.train_general(name_input)
args.nombre_round_eval = nombre_round_eval - 1
nn_model_ref3 = NN_Model_Ref(args, writer, device, rng, path_save_model, cipher, creator_data_binary, path_save_model_train)
nn_model_ref3.epochs = 10
nn_model_ref3.net = nn_model_ref2.net
nn_model_ref3.train_general(name_input)
args.nombre_round_eval = nombre_round_eval
nn_model_ref = NN_Model_Ref(args, writer, device, rng, path_save_model, cipher, creator_data_binary, path_save_model_train)
nn_model_ref.net = nn_model_ref3.net"""

nn_model_ref = NN_Model_Ref(args, writer, device, rng, path_save_model, cipher,
                            creator_data_binary, path_save_model_train)

nn_model_ref.load_nn()
"""try:
    if args.finetunning:
        nn_model_ref.load_nn()
        nn_model_ref.train_from_scractch(name_input + "fine-tune")
    #nn_model_ref.eval(["val"])
    else:
        nn_model_ref.load_nn()
except:
    print("ERROR")
    print("NO MODEL AVALAIBLE FOR THIS CONFIG")
    print("CHANGE ARGUMENT retain_model_gohr_ref")
    print()
    sys.exit(1)"""
Code example #7
nn_model_ref = NN_Model_Ref_8class(args, writer, device, rng, path_save_model,
                                   cipher, creator_data_binary,
                                   path_save_model_train)
nn_model_ref.load_nn()
net_f = nn_model_ref.net.eval()

if args.create_new_data_for_ToT and args.create_new_data_for_classifier:
    del nn_model_ref.X_train_nn_binaire, nn_model_ref.X_val_nn_binaire, nn_model_ref.Y_train_nn_binaire, nn_model_ref.Y_val_nn_binaire
    del nn_model_ref.c0l_train_nn, nn_model_ref.c0l_val_nn, nn_model_ref.c0r_train_nn, nn_model_ref.c0r_val_nn
    del nn_model_ref.c1l_train_nn, nn_model_ref.c1l_val_nn, nn_model_ref.c1r_train_nn, nn_model_ref.c1r_val_nn

del nn_model_ref

args.nombre_round_eval = 5
nn_model_ref2 = NN_Model_Ref(args, writer, device, rng, path_save_model,
                             cipher, creator_data_binary,
                             path_save_model_train)

import time
import torch

val_phase = ['train', 'val']
from torch.utils.data import DataLoader
import glob

args.load_special = True
mypath = "./results/Res_96/*pth"
all_models_trained = {}
"""for filenames in glob.glob(mypath):
    print(filenames)
    #if filenames =="./results/Res_96/0.948724_bestacc.pth":
Code example #8
"""nombre_round_eval = args.nombre_round_eval
args.nombre_round_eval = nombre_round_eval - 2
nn_model_ref2 = NN_Model_Ref(args, writer, device, rng, path_save_model, cipher, creator_data_binary, path_save_model_train)
nn_model_ref2.epochs = 10
nn_model_ref2.train_general(name_input)
args.nombre_round_eval = nombre_round_eval - 1
nn_model_ref3 = NN_Model_Ref(args, writer, device, rng, path_save_model, cipher, creator_data_binary, path_save_model_train)
nn_model_ref3.epochs = 10
nn_model_ref3.net = nn_model_ref2.net
nn_model_ref3.train_general(name_input)
args.nombre_round_eval = nombre_round_eval
nn_model_ref = NN_Model_Ref(args, writer, device, rng, path_save_model, cipher, creator_data_binary, path_save_model_train)
nn_model_ref.net = nn_model_ref3.net"""

nn_model_ref = NN_Model_Ref(args, writer, device, rng, path_save_model, cipher, creator_data_binary, path_save_model_train)



nn_model_ref.load_nn()
try:
    if args.finetunning:
        nn_model_ref.load_nn()
        nn_model_ref.train_from_scractch(name_input + "fine-tune")
    #nn_model_ref.eval(["val"])
    else:
        nn_model_ref.load_nn()
except:
    print("ERROR")
    print("NO MODEL AVALAIBLE FOR THIS CONFIG")
    print("CHANGE ARGUMENT retain_model_gohr_ref")
"""nombre_round_eval = args.nombre_round_eval
args.nombre_round_eval = nombre_round_eval - 2
nn_model_ref2 = NN_Model_Ref(args, writer, device, rng, path_save_model, cipher, creator_data_binary, path_save_model_train)
nn_model_ref2.epochs = 10
nn_model_ref2.train_general(name_input)
args.nombre_round_eval = nombre_round_eval - 1
nn_model_ref3 = NN_Model_Ref(args, writer, device, rng, path_save_model, cipher, creator_data_binary, path_save_model_train)
nn_model_ref3.epochs = 10
nn_model_ref3.net = nn_model_ref2.net
nn_model_ref3.train_general(name_input)
args.nombre_round_eval = nombre_round_eval
nn_model_ref = NN_Model_Ref(args, writer, device, rng, path_save_model, cipher, creator_data_binary, path_save_model_train)
nn_model_ref.net = nn_model_ref3.net"""

nn_model_ref = NN_Model_Ref(args, writer, device, rng, path_save_model, cipher, creator_data_binary, path_save_model_train)


if args.retain_model_gohr_ref:
    nn_model_ref.train_general(name_input)
else:
    #nn_model_ref.load_nn()
    try:
        if args.finetunning:
            nn_model_ref.load_nn()
            nn_model_ref.train_from_scractch(name_input + "fine-tune")
        #nn_model_ref.eval(["val"])
        else:
            nn_model_ref.load_nn()
    except:
        print("ERROR")
Code example #10
#--------------------------------------------------------------------------------------------------------------------------------------------------------------------------
#
print("---" * 100)
writer, device, rng, path_save_model, path_save_model_train, name_input = init_all_for_run(args)


print("LOAD CIPHER")
print()
cipher = init_cipher(args)
creator_data_binary = Create_data_binary(args, cipher, rng)

#--------------------------------------------------------------------------------------------------------------------------------------------------------------------------
print("---" * 100)
print("PRUNNING")

nn_model_ref = NN_Model_Ref(args, writer, device, rng, path_save_model, cipher, creator_data_binary, path_save_model_train)
nn_model_ref.load_nn()


flag2 = True
acc_retain=[]
for global_sparsity in args.values_prunning:
    parameters_to_prune = []
    for name, module in nn_model_ref.net.named_modules():
        if len(name):
            if name not in ["layers_batch", "layers_conv"]:
                flag = True
                for layer_forbidden in args.layers_NOT_to_prune:
                    if layer_forbidden in name:
                        flag = False
                if flag:
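                    # Likely body of this branch (the snippet is truncated here), mirroring
                    # the collection loop in the first example above.
                    parameters_to_prune.append((module, 'weight'))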
Code example #11
nn_model_ref = NN_Model_Ref_8class(args, writer, device, rng, path_save_model,
                                   cipher, creator_data_binary,
                                   path_save_model_train)
#nn_model_ref.load_nn()
#net_f = nn_model_ref.net.eval()

if args.create_new_data_for_ToT and args.create_new_data_for_classifier:
    del nn_model_ref.X_train_nn_binaire, nn_model_ref.X_val_nn_binaire, nn_model_ref.Y_train_nn_binaire, nn_model_ref.Y_val_nn_binaire
    del nn_model_ref.c0l_train_nn, nn_model_ref.c0l_val_nn, nn_model_ref.c0r_train_nn, nn_model_ref.c0r_val_nn
    del nn_model_ref.c1l_train_nn, nn_model_ref.c1l_val_nn, nn_model_ref.c1r_train_nn, nn_model_ref.c1r_val_nn

del nn_model_ref

args.nombre_round_eval = 5
nn_model_ref2 = NN_Model_Ref(args, writer, device, rng, path_save_model,
                             cipher, creator_data_binary,
                             path_save_model_train)

import time
import torch

val_phase = ['train', 'val']
from torch.utils.data import DataLoader
import glob

args.load_special = True
mypath = "./results/Res_96/*pth"
all_models_trained = {}
for filenames in glob.glob(mypath):
    print(filenames)
    #if filenames =="./results/Res_96/0.948724_bestacc.pth":