Example #1
        print("STEP 3 : DONE")
        print("---" * 100)
        #--------------------------------------------------------------------------------------------------------------------------------------------------------------------------
        print("STEP 4 : CREATE DATA PROBA AND CLASSIFY")
        print()
        print("NEW DATA: " + str(args.create_new_data_for_classifier))
        print()

        generator_data = Genrator_data_prob_classifier(
            args, nn_model_ref.net, path_save_model, rng, creator_data_binary,
            device, get_masks_gen.masks, nn_model_ref)
        generator_data.create_data_g(table_of_truth)
        #EVALUATE GOHR NN ON NEW DATASET
        nn_model_ref.X_train_nn_binaire = generator_data.X_bin_train
        nn_model_ref.X_val_nn_binaire = generator_data.X_bin_val
        nn_model_ref.Y_train_nn_binaire = generator_data.Y_create_proba_train
        nn_model_ref.Y_val_nn_binaire = generator_data.Y_create_proba_val

        if args.eval_nn_ref:
            nn_model_ref.eval_all(["train", "val"])
        all_clfs = All_classifier(args, path_save_model, generator_data,
                                  get_masks_gen, nn_model_ref, table_of_truth,
                                  cpt)
        #all_clfs.X_train_proba = np.concatenate((all_clfs.X_train_proba, X_feat_temp), axis = 1)
        #all_clfs.X_eval_proba =  np.concatenate((all_clfs.X_eval_proba, X_feat_temp_val), axis = 1)
        all_clfs.classify_all()

        if args.quality_of_masks:
            qm = Quality_masks(args, path_save_model, generator_data,
                               get_masks_gen, nn_model_ref, table_of_truth,
                               all_clfs)
            qm.start_all()
        else:
            qm = None
print("---" * 100)
#--------------------------------------------------------------------------------------------------------------------------------------------------------------------------
print("STEP 4 : CREATE DATA PROBA AND CLASSIFY")
print()
print("NEW DATA: "+ str(args.create_new_data_for_classifier))
print()

generator_data = Genrator_data_prob_classifier(args, nn_model_ref.net, path_save_model, rng, creator_data_binary, device, get_masks_gen.masks, nn_model_ref)
generator_data.create_data_g(table_of_truth)
args.inputs_type = ["ctdata0l", "ctdata0r", "ctdata1l", "ctdata1r"]
nn_model_ref.load_nn()
#EVALUATE GOHR NN ON NEW DATASET
liste_inputs = creator_data_binary.convert_data_inputs(args, generator_data.c0l_create_proba_train, generator_data.c0r_create_proba_train, generator_data.c1l_create_proba_train, generator_data.c1r_create_proba_train)
nn_model_ref.X_train_nn_binaire = creator_data_binary.convert_to_binary(liste_inputs)
liste_inputs = creator_data_binary.convert_data_inputs(args, generator_data.c0l_create_proba_val, generator_data.c0r_create_proba_val, generator_data.c1l_create_proba_val, generator_data.c1r_create_proba_val)
nn_model_ref.X_val_nn_binaire = creator_data_binary.convert_to_binary(liste_inputs)
nn_model_ref.Y_train_nn_binaire = generator_data.Y_create_proba_train
nn_model_ref.Y_val_nn_binaire = generator_data.Y_create_proba_val

if args.eval_nn_ref:
    nn_model_ref.eval_all(["train", "val"])
all_clfs = All_classifier(args, path_save_model, generator_data, get_masks_gen, nn_model_ref, table_of_truth)
#all_clfs.X_train_proba = np.concatenate((all_clfs.X_train_proba, X_feat_temp), axis = 1)
#all_clfs.X_eval_proba =  np.concatenate((all_clfs.X_eval_proba, X_feat_temp_val), axis = 1)
all_clfs.classify_all()

if args.quality_of_masks:
    qm = Quality_masks(args, path_save_model, generator_data, get_masks_gen, nn_model_ref, table_of_truth, all_clfs)
    qm.start_all()
else:
    qm = None
        #data = data_train #np.array(nn_model_ref2.intermediaires[phase]).astype(np.uint8).reshape(num1 * nn_model_ref2.batch_size, -1)
        #data2 = scaler1.fit_transform(data)
        # keep the flattened intermediate activations for the training split
        nn_model_ref2.all_intermediaire = data_train
        #nn_model_ref2.outputs_proba_train = np.array(nn_model_ref2.outputs_proba[phase]).astype(np.float16).reshape(num1 * nn_model_ref2.batch_size, -1)
        #nn_model_ref2.outputs_pred_train = np.array(nn_model_ref2.outputs_pred[phase]).astype(np.float16).reshape(num1 * nn_model_ref2.batch_size, -1)
        #if not nn_model_ref2.args.retrain_nn_ref:
        #del nn_model_ref2.all_intermediaire, data_train

    else:
        #scaler2 = StandardScaler()
        #data = data_val
        #data = np.array(nn_model_ref2.intermediaires[phase]).astype(np.uint8).reshape(num1 * nn_model_ref2.batch_size, -1)
        #data2 = scaler2.fit_transform(data)
        # keep the flattened intermediate activations for the validation split
        nn_model_ref2.all_intermediaire_val = data_val
        #nn_model_ref2.outputs_proba_val = np.array(nn_model_ref2.outputs_proba[phase]).astype(np.float16).reshape(num2 * nn_model_ref2.batch_size, -1)
        #nn_model_ref2.outputs_pred_val = np.array(nn_model_ref2.outputs_pred[phase]).astype(np.float16).reshape(
        #    num2 * nn_model_ref2.batch_size, -1)
        #if not nn_model_ref2.args.retrain_nn_ref:
        #del nn_model_ref2.all_intermediaire_val, data_val
        del nn_model_ref2.dataloaders[phase]
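
# The commented-out lines above suggest that `intermediaires[phase]` holds
# per-batch intermediate activations gathered during a forward pass. A minimal
# sketch of capturing such activations with a PyTorch forward hook; `layer`
# and `dataloader` are hypothetical stand-ins for the repo's own objects:
import torch

def collect_intermediates(net, layer, dataloader, device):
    feats = []
    # the hook stashes the layer's output for every batch
    handle = layer.register_forward_hook(
        lambda mod, inp, out: feats.append(out.detach().cpu()))
    net.eval()
    with torch.no_grad():
        for x, _ in dataloader:
            net(x.to(device))
    handle.remove()
    # one row per sample, flattened features per row
    return torch.cat(feats).flatten(1).numpy()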

# retrain one model per class predicted by the reference network,
# using boolean masks to select the matching samples (4 classes here)
for i in range(4):
    print(data_train_X[data_train_preds == i].shape)
    print(data_train_labels[data_train_preds == i].shape)

    nn_model_ref2.X_train_nn_binaire = data_train_X[data_train_preds == i]
    nn_model_ref2.X_val_nn_binaire = data_val_X[data_val_preds == i]
    nn_model_ref2.Y_train_nn_binaire = data_train_labels[data_train_preds == i]
    nn_model_ref2.Y_val_nn_binaire = data_val_labels[data_val_preds == i]
    nn_model_ref2.train_general(name_input)
    nn_model_ref2.choose_model()
vals, counts = np.unique(X_train, axis=1, return_counts=True)
print(vals.shape, counts.shape)
# np.unique(..., axis=1) returns the unique *columns*; key the frequency dict
# by column (as a hashable tuple), since raw ndarray rows are unhashable and
# do not line up with `counts`
sv = dict(zip(map(tuple, vals.T), counts / num_samples))
print(sv)
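
# Tiny check of np.unique's axis semantics as used above: axis=1 deduplicates
# whole columns, so frequencies must be keyed by column, not by row.
import numpy as np
X_demo = np.array([[0, 1, 0],
                   [1, 0, 1]])
vals_demo, counts_demo = np.unique(X_demo, axis=1, return_counts=True)
print(vals_demo)    # [[0 1]
                    #  [1 0]]  -> two unique columns
print(counts_demo)  # [2 1]   -> column (0, 1) appears twice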
"""

Y_eval_proba = nn_model_ref.Y_val_nn_binaire
Y_train_proba = nn_model_ref.Y_train_nn_binaire

#net = AE_binarize(args, X_train_f.shape[1], h1= 250).to(device)

net = NN_linear(args, X_train_f.shape[1]).to(device)

nn_model_ref.net = net
nn_model_ref.X_train_nn_binaire = X_train_f
nn_model_ref.X_val_nn_binaire = X_val_f
#nn_model_ref.Y_train_nn_binaire = X_train_f
#nn_model_ref.Y_val_nn_binaire = X_val_f
"""
args.load_nn_path = "./results/create_synth_masks_v2/speck/5/ctdata0l^ctdata1l_ctdata0r^ctdata1r^ctdata0l^ctdata1l_ctdata0l^ctdata0r_ctdata1l^ctdata1r/2020_07_21_17_26_59_603174/0.9966710913033485_bestacc.pth"
nn_model_ref.net.load_state_dict(torch.load(args.load_nn_path,
                map_location=device)['state_dict'], strict=False)
nn_model_ref.net.to(device)
nn_model_ref.net.eval()
"""
nn_model_ref.train_from_scractch("AE")

for global_sparsity in [0, 0.2, 0.4]:
    print(global_sparsity)
    flag2 = True
    acc_retain = []
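
# The sparsity sweep above is truncated; a minimal sketch of what such a loop
# typically does with torch.nn.utils.prune (the `evaluate` helper and the use
# of acc_retain are assumptions, not the repo's actual code):
import torch
import torch.nn.utils.prune as prune

for global_sparsity in [0, 0.2, 0.4]:
    # prune the smallest-magnitude weights globally across all linear layers
    params_to_prune = [(m, "weight") for m in nn_model_ref.net.modules()
                       if isinstance(m, torch.nn.Linear)]
    prune.global_unstructured(params_to_prune,
                              pruning_method=prune.L1Unstructured,
                              amount=global_sparsity)
    acc_retain.append(evaluate(nn_model_ref.net))  # hypothetical helper
    for m, name in params_to_prune:
        prune.remove(m, name)  # bake the mask in before the next sparsity level
    # in practice the unpruned weights would be reloaded between levels,
    # otherwise the sparsity levels compound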
# free the large truth-table buffers before generating the classifier data
del table_of_truth.c0l_create_ToT, table_of_truth.c0r_create_ToT
del table_of_truth.c1l_create_ToT, table_of_truth.c1r_create_ToT

print("STEP 3 : DONE")
print("---" * 100)
#--------------------------------------------------------------------------------------------------------------------------------------------------------------------------
print("STEP 4 : CREATE DATA PROBA AND CLASSIFY")
print()
print("NEW DATA: "+ str(args.create_new_data_for_classifier))
print()

generator_data = Genrator_data_prob_classifier(args, nn_model_ref.net, path_save_model, rng, creator_data_binary, device, get_masks_gen.masks, nn_model_ref)
generator_data.create_data_g(table_of_truth)
#EVALUATE GOHR NN ON NEW DATASET
nn_model_ref.X_train_nn_binaire = generator_data.X_bin_train
nn_model_ref.X_val_nn_binaire = generator_data.X_bin_val
nn_model_ref.Y_train_nn_binaire = generator_data.Y_create_proba_train
nn_model_ref.Y_val_nn_binaire = generator_data.Y_create_proba_val

nn_model_ref.eval_all(["train", "val"])


net_linear = NN_linear(args)

# transfer the trained weights; a bare tensor cannot be assigned to a module's
# .weight attribute, so copy into the existing Parameters instead
with torch.no_grad():
    net_linear.fc1.weight.copy_(nn_model_ref.net.fc1.weight)
    net_linear.fc2.weight.copy_(nn_model_ref.net.fc2.weight)
    net_linear.fc3.weight.copy_(nn_model_ref.net.fc3.weight)
    net_linear.BN5.weight.copy_(nn_model_ref.net.BN5.weight)
    net_linear.BN6.weight.copy_(nn_model_ref.net.BN6.weight)
net_linear.to(device)

net_all = nn_model_ref.net
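
# Only the weight matrices are copied above; biases and the BatchNorm running
# statistics stay at their initial values. A minimal sketch of a fuller
# transfer by name via state_dict (assumes net_linear reuses the layer names
# of nn_model_ref.net; non-matching entries are simply skipped):
ref_state = nn_model_ref.net.state_dict()
own_state = net_linear.state_dict()
compatible = {k: v for k, v in ref_state.items()
              if k in own_state and v.shape == own_state[k].shape}
net_linear.load_state_dict(compatible, strict=False)
net_linear.to(device).eval()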