# In[7]:

# Build the ICLR'18 MLP encoder/decoder architecture and create the output
# directory for this experiment.
encoder, decoder, enc_args, dec_args = mlp_architecture_ala_iclr_18(
    n_pc_points, bneck_size)
train_dir = create_dir(osp.join(top_out_dir, experiment_name))

# In[8]:

# Assemble the training configuration from the default hyper-parameters,
# then persist it next to the checkpoints so runs are reproducible.
conf_kwargs = dict(
    n_input=[n_pc_points, 3],
    loss=ae_loss,
    training_epochs=train_params['training_epochs'],
    batch_size=train_params['batch_size'],
    denoising=train_params['denoising'],
    learning_rate=train_params['learning_rate'],
    train_dir=train_dir,
    loss_display_step=train_params['loss_display_step'],
    saver_step=train_params['saver_step'],
    z_rotate=train_params['z_rotate'],
    encoder=encoder,
    decoder=decoder,
    encoder_args=enc_args,
    decoder_args=dec_args,
    n_output=[2048, 3],
)
conf = Conf(**conf_kwargs)

conf.experiment_name = experiment_name
# How often (in epochs) to evaluate/print loss on held-out data,
# if such data are provided to ae.train().
conf.held_out_step = 5
conf.save(osp.join(train_dir, 'configuration'))
# If you ran the above lines, you can reload a saved model like this:
예제 #2
0
    """
    # construct the argument parser and parse the arguments
    '''
    ap = argparse.ArgumentParser()
    ap.add_argument("-e", "--enc", type=str,
                    help="numpy saved array")
    args = vars(ap.parse_args())

    enc = np.load(args["enc"])
    '''
    # Load the point clouds and their names exported by an earlier step.
    pcs = np.load("output/{}_pcs.npy".format(DATASET))
    names = np.load("output/{}_names.npy".format(DATASET))

    # Rebuild the trained autoencoder from its saved configuration.
    reset_tf_graph()
    ae_configuration = MODEL_DIR + '/configuration'
    ae_conf = Conf.load(ae_configuration)
    ae_conf.encoder_args['verbose'] = False  # silence per-layer prints
    ae_conf.decoder_args['verbose'] = False
    ae = PointNetAutoEncoder(ae_conf.experiment_name, ae_conf)

    # Restore weights from the checkpoint at RESTORE_EPOCH.
    ae.restore_model(MODEL_DIR, RESTORE_EPOCH, verbose=True)

    # Reconstruct each cloud one at a time and record its reconstruction loss
    # against itself as ground truth.
    recs = []
    rec_losses = []
    for pc in pcs:
        # Add a batch dimension -- presumably (1, n_points, 3); TODO confirm.
        pc = np.expand_dims(pc, axis=0)
        rec, l = ae.reconstruct(pc, GT=pc, compute_loss=True)
        recs.append(rec)
        rec_losses.append(l)

    # Persist only the losses; `recs` and `names` are unused below this point
    # in the visible fragment.
    np.save("output/{}_rec_loss".format(DATASET), np.array(rec_losses))
예제 #3
0
# dictionary
# Default training hyper-parameters; several are overridden inline below.
train_params = default_train_params()
encoder, decoder, enc_args, dec_args = mlp_architecture_ala_iclr_18(n_pc_points, bneck_size)


# Training configuration with separate train/val/test directories.
# Commented-out values show the defaults that were replaced.
conf = Conf(n_input = [n_pc_points, 3],
            loss = ae_loss,
            training_epochs = 3000, #train_params['training_epochs'],
            batch_size = train_params['batch_size'],
            denoising = train_params['denoising'],
            learning_rate = train_params['learning_rate'],
            train_dir = train_dir,
            test_dir = test_dir,
            val_dir = val_dir,
            loss_display_step = train_params['loss_display_step'],
            saver_step = train_params['saver_step'],
            # z_rotate arrives as a string (CLI flag, presumably) and is
            # converted to bool here -- TODO confirm against the caller.
            z_rotate = z_rotate == 'True', #train_params['z_rotate'],
            encoder = encoder,
            decoder = decoder,
            encoder_args = enc_args,
            decoder_args = dec_args,
            experiment_name = experiment_name,
            val_step = 5,
            test_step = 200
           )
            # How often to evaluate/print out loss on held_out data (if any). # epochs
conf.save(osp.join(train_dir, 'configuration'))

# Build the model from the saved configuration on a fresh graph.
reset_tf_graph()
ae = PointNetAutoEncoder(conf.experiment_name, conf)
예제 #4
0
# ---- Experiment: ShapeNet autoencoder with a 128-D bottleneck ----
experiment_name = 'shapenet_1024_ae_128'
train_dir = create_dir(os.path.join('data/', experiment_name))

# Multi-class defaults, shortened to a 5-epoch run.
train_params = default_train_params(single_class=False)
train_params['training_epochs'] = 5

# Encoder/decoder pair for the Washington XYZ+RGB data.
encoder, decoder, enc_args, dec_args = washington_xyz_rgb(
    n_pc_points, bneck_size)

# Wrap the training split without copying the underlying arrays.
pcd_dataset = PointCloudDataSet(X_train, labels=y_train, copy=False)

# Bundle everything into the training configuration and persist it.
conf = Conf(
    n_input=[n_pc_points, 3],
    loss=ae_loss,
    training_epochs=train_params['training_epochs'],
    batch_size=train_params['batch_size'],
    denoising=train_params['denoising'],
    learning_rate=train_params['learning_rate'],
    train_dir=train_dir,
    loss_display_step=train_params['loss_display_step'],
    saver_step=train_params['saver_step'],
    z_rotate=train_params['z_rotate'],
    encoder=encoder,
    decoder=decoder,
    encoder_args=enc_args,
    decoder_args=dec_args,
)
conf.experiment_name = experiment_name
conf.save(os.path.join(train_dir, 'configuration'))

# Fresh graph, then instantiate the autoencoder.
reset_tf_graph()
ae = PointNetAutoEncoder(conf.experiment_name, conf)

#buf_size = 1 # flush each line
#fout = open(os.path.join(conf.train_dir, 'train_stats.txt'), 'a', buf_size)
예제 #5
0
        # One sweep iteration: build and restore an AE whose bottleneck width
        # is the loop variable `size` (defined outside this fragment).
        n_pc_points = 2048  # Number of points per model.
        bneck_size = size  # Bottleneck-AE size
        ae_loss = 'emd'  # Loss to optimize: 'emd' or 'chamfer'

        train_dir = create_dir(osp.join(top_out_dir, experiment_name))
        train_params = default_train_params(single_class=False)
        encoder, decoder, enc_args, dec_args = mlp_architecture_ala_iclr_18(
            n_pc_points, bneck_size)

        # Training configuration assembled from the defaults above.
        conf = Conf(n_input=[n_pc_points, 3],
                    loss=ae_loss,
                    training_epochs=train_params['training_epochs'],
                    batch_size=train_params['batch_size'],
                    denoising=train_params['denoising'],
                    learning_rate=train_params['learning_rate'],
                    train_dir=train_dir,
                    loss_display_step=train_params['loss_display_step'],
                    saver_step=train_params['saver_step'],
                    z_rotate=train_params['z_rotate'],
                    encoder=encoder,
                    decoder=decoder,
                    encoder_args=enc_args,
                    decoder_args=dec_args)
        conf.experiment_name = experiment_name
        conf.held_out_step = 5  # How often to evaluate/print out loss on held_out data (if any).

        # NOTE(review): unlike sibling snippets, conf is never saved here.
        reset_tf_graph()
        ae = PointNetAutoEncoder(experiment_name, conf)

        # NOTE(review): hardcoded absolute local path -- should come from a
        # configurable data directory.
        t18 = Tork18('/home/ceteke/Documents/datasets/tork18', 'pour')
        X, y = t18.get_dataset()
예제 #6
0
n_out = [n_pc_points, 3]  # Dimensionality of generated samples.

discriminator = mlp_discriminator
# generator = point_cloud_generator
train_params = default_train_params()  # not actually used
encoder, decoder, enc_args, dec_args = mlp_architecture_ala_iclr_18(
    n_pc_points, bneck_size)

# AE configuration reused for the GAN setup below.
conf = Conf(
    # NOTE(review): 1948 input points vs n_output of 2048 -- plausibly
    # intentional for missing-points completion, but confirm it is not a
    # typo for 2048 (other snippets use [n_pc_points, 3]).
    n_input=[1948, 3],
    loss=ae_loss,
    training_epochs=train_params['training_epochs'],  # not actually used
    batch_size=train_params['batch_size'],
    denoising=train_params['denoising'],
    learning_rate=train_params['learning_rate'],
    train_dir=top_out_dir,
    loss_display_step=train_params['loss_display_step'],
    saver_step=train_params['saver_step'],
    z_rotate=train_params['z_rotate'],
    encoder=encoder,
    decoder=decoder,
    encoder_args=enc_args,
    decoder_args=dec_args,
    n_output=[2048, 3])
conf.experiment_name = experiment_name
# Generator that predicts the missing points conditioned on the partial input.
generator = conditional_missing_points_generator

# Optionally create a directory for dumping generated samples.
if save_synthetic_samples:
    synthetic_data_out_dir = osp.join(top_out_dir, 'OUT/synthetic_samples/',
                                      experiment_name)
    create_dir(synthetic_data_out_dir)
예제 #7
0
                                                 n_threads=8,
                                                 file_ending='.ply',
                                                 verbose=True)

# Output directory and default (multi-class) hyper-parameters.
train_dir = create_dir(osp.join(top_out_dir, experiment_name))
train_params = default_train_params(single_class=False)
encoder, decoder, enc_args, dec_args = mlp_architecture_ala_iclr_18(
    n_pc_points, bneck_size)

# Training configuration, persisted alongside the checkpoints.
conf = Conf(n_input=[n_pc_points, 3],
            loss=ae_loss,
            training_epochs=train_params['training_epochs'],
            batch_size=train_params['batch_size'],
            denoising=train_params['denoising'],
            learning_rate=train_params['learning_rate'],
            train_dir=train_dir,
            loss_display_step=train_params['loss_display_step'],
            saver_step=train_params['saver_step'],
            z_rotate=train_params['z_rotate'],
            encoder=encoder,
            decoder=decoder,
            encoder_args=enc_args,
            decoder_args=dec_args)
conf.experiment_name = experiment_name
conf.held_out_step = 5  # How often to evaluate/print out loss on held_out data (if any).
conf.save(osp.join(train_dir, 'configuration'))

# Fresh graph, instantiate the model, then open the training-stats log.
reset_tf_graph()
ae = PointNetAutoEncoder(conf.experiment_name, conf)
print("Training...")
buf_size = 1  # flush each line
# NOTE(review): fout is opened line-buffered and never closed in this
# fragment -- presumably closed after training; verify downstream.
fout = open(osp.join(conf.train_dir, 'train_stats.txt'), 'a', buf_size)
예제 #8
0
File: evaluate.py — Project: ajhamdi/AdvPC
def evaluate(setup, results, models, targets_list, victims_list):
    """Run the adversarial evaluation sweep over every (target, victim) pair.

    Rebuilds (and optionally restores) the latent_3d_points autoencoder used
    as a reconstruction defense, then for each target/victim class pair
    evaluates all shapes batch by batch, folding per-shape predictions and
    norms into a ListDict that is flushed to ``setup["results_file"]`` after
    every batch.

    Parameters
    ----------
    setup : dict
        Experiment settings. Mutated in place (the "target" and "victim"
        keys); must provide "batch_size" and "results_file".
    results : ListDict
        Previously accumulated results, appended to the final save.
    models : dict
        Model registry; the restored autoencoder is stored under key "ae".
    targets_list, victims_list : iterable
        Class labels to sweep; pairs with target == victim are skipped.

    Returns
    -------
    ListDict
        The accumulated evaluation results.
    """
    top_out_dir = osp.join(BASE_DIR, "latent_3d_points", "data")

    # Top-dir of where point-clouds are stored (not used below; kept for
    # parity with the training scripts).
    top_in_dir = osp.join(BASE_DIR, "latent_3d_points", "data",
                          "shape_net_core_uniform_samples_2048")

    # experiment_name = 'single_class_ae'
    experiment_name = 'new_ae'

    n_pc_points = 1024  # Number of points per model (reduced from 2048).
    bneck_size = 128  # Bottleneck-AE size
    ae_loss = 'chamfer'  # Loss to optimize: 'emd' or 'chamfer'
    train_params = default_train_params()
    encoder, decoder, enc_args, dec_args = mlp_architecture_ala_iclr_18(
        n_pc_points, bneck_size)
    train_dir = create_dir(osp.join(top_out_dir, experiment_name))

    conf = Conf(n_input=[n_pc_points, 3],
                loss=ae_loss,
                training_epochs=train_params['training_epochs'],
                batch_size=train_params['batch_size'],
                denoising=train_params['denoising'],
                learning_rate=train_params['learning_rate'],
                train_dir=train_dir,
                loss_display_step=train_params['loss_display_step'],
                saver_step=train_params['saver_step'],
                z_rotate=train_params['z_rotate'],
                encoder=encoder,
                decoder=decoder,
                encoder_args=enc_args,
                decoder_args=dec_args)
    conf.experiment_name = experiment_name
    # How often to evaluate/print loss on held-out data (if provided to
    # ae.train()).
    conf.held_out_step = 5
    conf.save(osp.join(train_dir, 'configuration'))

    load_pre_trained_ae = True
    restore_epoch = 500  # checkpoint epoch to restore
    if load_pre_trained_ae:
        # Reload the configuration from disk so evaluation exactly matches
        # what was saved at training time.
        conf = Conf.load(train_dir + '/configuration')
        reset_tf_graph()
        ae = PointNetAutoEncoder(conf.experiment_name, conf)
        ae.restore_model(conf.train_dir, epoch=restore_epoch, verbose=True)
        models["ae"] = ae

    # Human-readable descriptions for every metric column recorded below.
    # NOTE(review): in the *_r and *_o entries the "accuracy"/"success rate"
    # wordings look swapped relative to the suc/acc key suffixes; the
    # "sucess"/"adverserial" spellings are left as-is for continuity with
    # previously saved result files -- confirm before normalizing.
    accuracies_disc = {
        "orig_acc": "original accuracy on PointNet ",
        "adv_suc": "natural adverserial sucess rate on PointNet ",
        "adv_acc": "natural adverserial accuracy on PointNet ",
        "proj_acc": "projected accuracy on PointNet ",
        "rec_suc": "defended natural adverserial sucess rate on PointNet ",
        "rec_acc": "reconstructed defense accuracy on PointNet ",
        "orig_acc_pp": "original accuracy on PointNet_++ ",
        "orig_acc_gcn": "original accuracy on DGCN",
        "orig_acc_p": "original accuracy on PointNet_+ ",
        "adv_suc_pp": "natural adverserial sucess rate on PointNet_++ ",
        "adv_suc_gcn": "natural adverserial sucess rate on DGCN",
        "adv_suc_p": "natural adverserial sucess rate on PointNet_+ ",
        "adv_acc_pp": "natural adverserial accuracy on PointNet_++ ",
        "adv_acc_gcn": "natural adverserial accuracy on DGCN",
        "adv_acc_p": "natural adverserial accuracy on PointNet_+ ",
        "proj_acc_pp": "projected accuracy on PointNet_++ ",
        "proj_acc_gcn": "projected accuracy on DGCN",
        "proj_acc_p": "projected accuracy on PointNet_+ ",
        "rec_suc_pp":
        "defended natural adverserial sucess rate on PointNet_++ ",
        "rec_suc_gcn": "defended natural adverserial sucess rate on DGCN",
        "rec_suc_p": "defended natural adverserial sucess rate on PointNet_+ ",
        "rec_acc_pp": "reconstructed defense accuracy on PointNet_++ ",
        "rec_acc_gcn": "reconstructed defense accuracy on DGCN",
        "rec_acc_p": "reconstructed defense accuracy on PointNet_+ ",
        "b_adv_suc": "baseline adverserial sucess rate on PointNet ",
        "b_adv_acc": "baseline adverserial accuracy on PointNet ",
        "b_rec_suc":
        "baseline defended natural adverserial sucess rate on PointNet ",
        # BUG FIX: "baselin ereconstructed" -> "baseline reconstructed"
        # (misplaced space) in the four b_rec_acc* labels below.
        "b_rec_acc": "baseline reconstructed defense accuracy on PointNet ",
        "b_adv_suc_pp": "baseline adverserial sucess rate on PointNet_++ ",
        "b_adv_suc_gcn": "baseline adverserial sucess rate on DGCN",
        "b_adv_suc_p": "baseline adverserial sucess rate on PointNet_+ ",
        "b_adv_acc_pp": "baseline adverserial accuracy on PointNet_++ ",
        "b_adv_acc_gcn": "baseline adverserial accuracy on DGCN",
        "b_adv_acc_p": "baseline adverserial accuracy on PointNet_+ ",
        "b_rec_suc_pp":
        "baseline defended natural adverserial sucess rate on PointNet_++ ",
        "b_rec_suc_gcn":
        "baseline defended natural adverserial sucess rate on DGCN",
        "b_rec_suc_p":
        "baseline defended natural adverserial sucess rate on PointNet_+ ",
        "b_rec_acc_pp":
        "baseline reconstructed defense accuracy on PointNet_++ ",
        "b_rec_acc_gcn": "baseline reconstructed defense accuracy on DGCN",
        "b_rec_acc_p":
        "baseline reconstructed defense accuracy on PointNet_+ ",
        "orig_acc_r": "original accuracy under Random defense",
        "adv_suc_r": "natural adverserial accuracy under Random defense",
        "adv_acc_r": "natural adverserial sucess rate under Random defense",
        "b_adv_suc_r": "baseline  adverserial accuracy under Random defense",
        "b_adv_acc_r":
        "baseline  adverserial sucess rate under Random defense",
        "orig_acc_o": "original accuracy under Outlier defense",
        "adv_suc_o": "natural adverserial accuracy under Outlier defense",
        "adv_acc_o": "natural adverserial sucess rate under Outlier defense",
        "b_adv_suc_o": "baseline  adverserial accuracy under Outlier defense",
        "b_adv_acc_o":
        "baseline  adverserial sucess rate under Outlier defense",
        "orig_acc_bust": "original accuracy under Robust model",
        "adv_acc_bust": "natural adverserial accuracy under Robust model"
    }

    norms_names = ["natural_L_cham_norm_orig"]
    # BUG FIX: dict.keys() returns a view in Python 3 and cannot be
    # concatenated with a list via '+'; materialize it first.
    ev_results = ListDict(list(accuracies_disc.keys()) + norms_names)
    setups = ListDict(setup.keys())
    save_results(setup["results_file"], ev_results + setups)
    for target in targets_list:
        setup["target"] = target
        for victim in victims_list:
            if victim == setup["target"]:
                continue  # attacking a class with itself is meaningless
            setup["victim"] = victim
            for batch_indx in range(int(setup["batch_size"])):
                predictions, norms = evaluate_all_shapes_scale(
                    batch_indx=batch_indx, setup=setup, models=models)
                # One setup row per shape in the batch; plain loop instead of
                # the original side-effect list comprehension.
                # NOTE(review): batch_size is cast to int in the loop above
                # but used raw here -- confirm it is always an int.
                for _ in range(setup["batch_size"]):
                    setups.append(setup)
                # Drop columns missing from this batch, then merge the fresh
                # predictions and norms, and checkpoint the results file.
                ev_results.remove(ev_results - ListDict(predictions) -
                                  ListDict(norms))
                ev_results.partial_extend(
                    ListDict(predictions)).partial_extend(ListDict(norms))
                save_results(setup["results_file"], ev_results + setups)

    # Final save includes the caller-supplied accumulated results.
    save_results(setup["results_file"], ev_results + setups + results)
    return ev_results
예제 #9
0
# Number of training epochs whose checkpoint should be restored.
parser.add_argument('--epochs', type=int, default=1000, help="Restore epochs.")
args = parser.parse_args()
print(args)

top_in_dir = args.dataset_dir
n_pc_points = 2048  # Number of points per model.
class_name = args.class_name
assert class_name == 'all', "Not supporting other things yet."
restore_epoch = args.epochs

# Rebuild the model from the configuration saved during training and
# restore the requested checkpoint.
print("Build model")
train_dir = args.train_dir
print("Train dir:%s" % train_dir)
print("Load model configuration:")
conf_path = os.path.join(train_dir, 'configuration')
conf = Conf.load(conf_path)
print(conf)
reset_tf_graph()
ae = PointNetAutoEncoder(conf.experiment_name, conf)
ae.restore_model(args.train_dir, epoch=restore_epoch)

# Load Validation set
# Collect every <class>/val subdirectory under the dataset root.
print("Load data (train set)")
sub_dirs = []
for subdir in os.listdir(os.path.join(top_in_dir)):
    p = os.path.join(top_in_dir, subdir, 'val')
    print(p)
    sub_dirs.append(p)
all_pc_data = load_all_point_clouds_under_folders(
    sub_dirs,
예제 #10
0
# Load every .ply model of the class into a single in-memory dataset.
all_pc_data = load_all_point_clouds_under_folder(class_dir, n_threads=8, file_ending='.ply', verbose=True)


# Output directories for checkpoints and generated samples.
train_dir = create_dir(osp.join(top_out_dir, experiment_name))
out_dir = create_dir(osp.join(top_out_dir, "generated_planes"))
train_params = default_train_params()
encoder, decoder, enc_args, dec_args = mlp_architecture_ala_iclr_18(n_pc_points, bneck_size)

# Training configuration assembled from the default hyper-parameters.
conf = Conf(n_input = [n_pc_points, 3],
            loss = ae_loss,
            training_epochs = train_params['training_epochs'],
            batch_size = train_params['batch_size'],
            denoising = train_params['denoising'],
            learning_rate = train_params['learning_rate'],
            loss_display_step = train_params['loss_display_step'],
            saver_step = train_params['saver_step'],
            z_rotate = train_params['z_rotate'],
            train_dir = train_dir,
            encoder = encoder,
            decoder = decoder,
            encoder_args = enc_args,
            decoder_args = dec_args
           )
conf.experiment_name = experiment_name
# How often to evaluate/print loss on held-out data (if any), in epochs.
conf.held_out_step = 5

# Rebuild the graph and restore the pre-trained weights.
# BUG FIX: removed a leftover `pdb.set_trace()` debugging breakpoint that
# halted every non-interactive run at this point.
RESTORE_EPOCH = 500  # checkpoint epoch to restore
reset_tf_graph()
ae = PointNetAutoEncoder(conf.experiment_name, conf)
ae.restore_model(model_dir, RESTORE_EPOCH)