# Exemplo n.º 1 (scraped example-page header; "Exemplo" = "Example" in Portuguese)
# 0 (scrape artifact — likely the example's vote/score counter)
        # End of STEP 3: announce completion and print a long visual separator.
        print("STEP 3 : DONE")
        print("---" * 100)
        #--------------------------------------------------------------------------------------------------------------------------------------------------------------------------
        # STEP 4: generate a probability-labelled dataset from the reference
        # network's masks, then run the classifier stage on it.
        print("STEP 4 : CREATE DATA PROBA AND CLASSIFY")
        print()
        print("NEW DATA: " + str(args.create_new_data_for_classifier))
        print()

        # Project-specific generator (note: "Genrator" typo is the actual class
        # name elsewhere in the project — do not "fix" it here without renaming
        # the class). Builds probability targets from the reference NN + masks.
        generator_data = Genrator_data_prob_classifier(
            args, nn_model_ref.net, path_save_model, rng, creator_data_binary,
            device, get_masks_gen.masks, nn_model_ref)
        generator_data.create_data_g(table_of_truth)
        #EVALUATE GOHR NN ON NEW DATASET
        # Re-point the reference model's train/val splits at the freshly
        # generated data so eval_all() below runs on the new dataset.
        nn_model_ref.X_train_nn_binaire = generator_data.X_bin_train
        nn_model_ref.X_val_nn_binaire = generator_data.X_bin_val
        nn_model_ref.Y_train_nn_binaire = generator_data.Y_create_proba_train
        nn_model_ref.Y_val_nn_binaire = generator_data.Y_create_proba_val

        # Optionally score the reference NN on both splits of the new data.
        if args.eval_nn_ref:
            nn_model_ref.eval_all(["train", "val"])
        # Run every configured classifier over the generated probability data.
        all_clfs = All_classifier(args, path_save_model, generator_data,
                                  get_masks_gen, nn_model_ref, table_of_truth,
                                  cpt)
        #all_clfs.X_train_proba = np.concatenate((all_clfs.X_train_proba, X_feat_temp), axis = 1)
        #all_clfs.X_eval_proba =  np.concatenate((all_clfs.X_eval_proba, X_feat_temp_val), axis = 1)
        all_clfs.classify_all()

        # Optional mask-quality report; Quality_masks presumably does its work
        # in __init__ since the instance `qm` is never used afterwards — TODO
        # confirm, otherwise this line is dead.
        if args.quality_of_masks:
            qm = Quality_masks(args, path_save_model, generator_data,
                               get_masks_gen, nn_model_ref, table_of_truth,
                               all_clfs)
        #data = data_train #np.array(nn_model_ref2.intermediaires[phase]).astype(np.uint8).reshape(num1 * nn_model_ref2.batch_size, -1)
        #data2 = scaler1.fit_transform(data)
        # Stash the training-phase intermediate activations on the second model.
        # NOTE(review): `data_train` is defined outside this excerpt — its shape
        # and dtype cannot be confirmed from here.
        nn_model_ref2.all_intermediaire = data_train
        #nn_model_ref2.outputs_proba_train = np.array(nn_model_ref2.outputs_proba[phase]).astype(np.float16).reshape(num1 * nn_model_ref2.batch_size, -1)
        #nn_model_ref2.outputs_pred_train = np.array(nn_model_ref2.outputs_pred[phase]).astype(np.float16).reshape(num1 * nn_model_ref2.batch_size, -1)
        #if not nn_model_ref2.args.retrain_nn_ref:
        #del nn_model_ref2.all_intermediaire, data_train
    else:
        # Validation-phase branch (the matching `if` precedes this excerpt):
        # keep the validation intermediates and free the phase's dataloader.
        #scaler2 = StandardScaler()
        #data = data_val
        #data = np.array(nn_model_ref2.intermediaires[phase]).astype(np.uint8).reshape(num1 * nn_model_ref2.batch_size, -1)
        #data2 = scaler2.fit_transform(data)
        # NOTE(review): `data_val` comes from outside this excerpt — shape and
        # dtype unverified here.
        nn_model_ref2.all_intermediaire_val = data_val
        #nn_model_ref2.outputs_proba_val = np.array(nn_model_ref2.outputs_proba[phase]).astype(np.float16).reshape(num2 * nn_model_ref2.batch_size, -1)
        #nn_model_ref2.outputs_pred_val = np.array(nn_model_ref2.outputs_pred[phase]).astype(np.float16).reshape(
        #    num2 * nn_model_ref2.batch_size, -1)
        #if not nn_model_ref2.args.retrain_nn_ref:
        #del nn_model_ref2.all_intermediaire_val, data_val
        # Drop this phase's dataloader to release its memory once consumed.
        del nn_model_ref2.dataloaders[phase]

# Retrain the second reference model once per predicted class: for each of the
# four classes, select the samples the first model assigned to that class and
# fit a dedicated model on just that subset.
for class_idx in range(4):
    # Boolean masks selecting the samples predicted as `class_idx`.
    train_mask = data_train_preds == class_idx
    val_mask = data_val_preds == class_idx

    print(data_train_X[train_mask].shape)
    print(data_train_labels[train_mask].shape)

    # Point the model at this class's train/val subset, then train and
    # checkpoint-select as usual.
    nn_model_ref2.X_train_nn_binaire = data_train_X[train_mask]
    nn_model_ref2.X_val_nn_binaire = data_val_X[val_mask]
    nn_model_ref2.Y_train_nn_binaire = data_train_labels[train_mask]
    nn_model_ref2.Y_val_nn_binaire = data_val_labels[val_mask]
    nn_model_ref2.train_general(name_input)
    nn_model_ref2.choose_model()