Ejemplo n.º 1
0
def load(model_path, z_rotate, num_points, point_dimension=3):
    """Rebuild an AE configuration from a checkpoint path and restore the model.

    Parameters
    ----------
    model_path : str
        Checkpoint path; its basename encodes the epoch ("...-<epoch>") and
        its parent directory name encodes the experiment ("train_<name>").
    z_rotate : str
        The string 'True' enables random z-rotation in the configuration.
    num_points : int
        Number of points per model.
    point_dimension : int
        Dimensionality of each point (default 3).

    Returns
    -------
    tuple
        (restored PointNetAutoEncoder, its Conf).
    """
    checkpoint_dir = osp.dirname(model_path)
    # Epoch number sits after the first '-' in the checkpoint filename.
    restore_epoch = int(osp.basename(model_path).split('-')[1])
    # Experiment name follows the 'train_' prefix of the parent directory.
    experiment_name = osp.basename(osp.dirname(model_path)).split('train_')[1]

    bneck_size = 128        # Bottleneck-AE size.
    ae_loss = 'chamfer'     # Loss to optimize: 'emd' or 'chamfer'.
    class_name = "airplane"
    syn_id = snc_category_to_synth_id()[class_name]
    class_dir = osp.join(top_in_dir, syn_id)  # kept for parity with the training script

    train_dir = create_dir(osp.join(top_out_dir, experiment_name))
    train_params = default_train_params()
    encoder, decoder, enc_args, dec_args = mlp_architecture_ala_iclr_18(
        num_points, bneck_size, point_dimension=point_dimension)

    conf = Conf(n_input=[num_points, point_dimension],
                loss=ae_loss,
                training_epochs=train_params['training_epochs'],
                batch_size=train_params['batch_size'],
                denoising=train_params['denoising'],
                learning_rate=train_params['learning_rate'],
                loss_display_step=train_params['loss_display_step'],
                saver_step=train_params['saver_step'],
                z_rotate=z_rotate == 'True',
                train_dir=train_dir,
                encoder=encoder,
                decoder=decoder,
                encoder_args=enc_args,
                decoder_args=dec_args,
                experiment_name=experiment_name,
                allow_gpu_growth=True)

    reset_tf_graph()
    ae = PointNetAutoEncoder(conf.experiment_name, conf)
    ae.restore_model(checkpoint_dir, restore_epoch)
    return ae, conf
Ejemplo n.º 2
0
                    batch_size=train_params['batch_size'],
                    denoising=train_params['denoising'],
                    learning_rate=train_params['learning_rate'],
                    train_dir=train_dir,
                    loss_display_step=train_params['loss_display_step'],
                    saver_step=train_params['saver_step'],
                    z_rotate=train_params['z_rotate'],
                    encoder=encoder,
                    decoder=decoder,
                    encoder_args=enc_args,
                    decoder_args=dec_args)
        conf.experiment_name = experiment_name
        conf.held_out_step = 5  # How often to evaluate/print out loss on held_out data (if any).

        reset_tf_graph()
        ae = PointNetAutoEncoder(experiment_name, conf)

        t18 = Tork18('/home/ceteke/Documents/datasets/tork18', 'pour')
        X, y = t18.get_dataset()

        if len(num_points) != len(train_sizes):
            num_points.append(int(len(X) * t))

        ae.restore_model(osp.join(top_out_dir, experiment_name), 350, True)

        scores = 0.0
        for _ in range(folds):
            X_train, X_test, y_train, y_test = train_test_split(X,
                                                                y,
                                                                train_size=t,
                                                                shuffle=True)
    learning_rate=train_params['learning_rate'],
    train_dir=train_dir,
    loss_display_step=train_params['loss_display_step'],
    saver_step=train_params['saver_step'],
    z_rotate=train_params['z_rotate'],
    encoder=encoder,
    decoder=decoder,
    encoder_args=enc_args,
    decoder_args=dec_args)
conf.experiment_name = experiment_name
conf.held_out_step = 5  # How often to evaluate/print out loss on
# held_out data (if they are provided in ae.train() ).
conf.save(osp.join(train_dir, 'configuration'))

# Build the AE graph from the configuration.
reset_tf_graph()
ae = PointNetAutoEncoder(conf.experiment_name, conf)

# Train the AE (save output to train_stats.txt)

# In[1]:
# Earlier checkpoints kept for reference; only the last restore is active.
# ae.restore_model('/home/shubham/latent_3d_points/data/single_class_ae/chair/',500)
# ae.restore_model('/home/shubham/latent_3d_points/data/single_class_ae/clean/',410)
# ae.restore_model('/home/shubham/latent_3d_points/data/single_class_ae/car_train/',660)
# ae.restore_model('/home/shubham/latent_3d_points/data/single_class_ae/airplane_full',600)
# Restore the pre-trained weights at epoch 600.
ae.restore_model(
    '/home/swami/deeprl/latent_3d_points/data/single_class_ae/airplane_train_ae',
    600)

#airplane = 'shape_net_core_uniform_samples_2048/02691156'
airplane = 'airplane_train'
# Directory holding the airplane point-cloud data used below.
class_dir = '/home/swami/deeprl/latent_3d_points/data/' + airplane
Ejemplo n.º 4
0
            denoising=train_params['denoising'],
            learning_rate=train_params['learning_rate'],
            train_dir=train_dir,
            loss_display_step=train_params['loss_display_step'],
            saver_step=train_params['saver_step'],
            z_rotate=train_params['z_rotate'],
            encoder=encoder,
            decoder=decoder,
            encoder_args=enc_args,
            decoder_args=dec_args)

conf.experiment_name = experiment_name
conf.save(os.path.join(train_dir, 'configuration'))

# Build the AE graph from the configuration.
reset_tf_graph()
ae = PointNetAutoEncoder(conf.experiment_name, conf)

# Training is disabled here; a pre-trained model is restored instead.
#buf_size = 1 # flush each line
#fout = open(os.path.join(conf.train_dir, 'train_stats.txt'), 'a', buf_size)
#train_stats = ae.train(pcd_dataset, conf, log_file=fout)
#fout.close()

ae.restore_model('data/shapenet_1024_ae_128', 90, True)

print("Transforming Training data")
# Encode the training set in batches of 100 and stack the latent codes.
X_train_trans = np.concatenate(
    [ae.transform(x_b) for x_b in batchify(X_train, 100)])

print("Transforming test data")
Ejemplo n.º 5
0
# Build the training configuration for a multi-class AE and train it.
train_dir = create_dir(osp.join(top_out_dir, experiment_name))
train_params = default_train_params(single_class=False)
encoder, decoder, enc_args, dec_args = mlp_architecture_ala_iclr_18(
    n_pc_points, bneck_size)

conf = Conf(n_input=[n_pc_points, 3],
            loss=ae_loss,
            training_epochs=train_params['training_epochs'],
            batch_size=train_params['batch_size'],
            denoising=train_params['denoising'],
            learning_rate=train_params['learning_rate'],
            train_dir=train_dir,
            loss_display_step=train_params['loss_display_step'],
            saver_step=train_params['saver_step'],
            z_rotate=train_params['z_rotate'],
            encoder=encoder,
            decoder=decoder,
            encoder_args=enc_args,
            decoder_args=dec_args)
conf.experiment_name = experiment_name
conf.held_out_step = 5  # How often to evaluate/print out loss on held_out data (if any).
conf.save(osp.join(train_dir, 'configuration'))

reset_tf_graph()
ae = PointNetAutoEncoder(conf.experiment_name, conf)
print("Training...")
# Line-buffered so each training-stat line is flushed as written; the
# context manager closes the file even if ae.train() raises.
buf_size = 1
with open(osp.join(conf.train_dir, 'train_stats.txt'), 'a', buf_size) as fout:
    train_stats = ae.train(all_pc_data, conf, log_file=fout)
Ejemplo n.º 6
0
            batch_size=train_params['batch_size'],
            denoising=train_params['denoising'],
            learning_rate=train_params['learning_rate'],
            train_dir=train_dir,
            loss_display_step=train_params['loss_display_step'],
            saver_step=train_params['saver_step'],
            z_rotate=train_params['z_rotate'],
            encoder=encoder,
            decoder=decoder,
            encoder_args=enc_args,
            decoder_args=dec_args)

conf.experiment_name = experiment_name

# Build the AE graph and restore the pre-trained weights at epoch 350.
reset_tf_graph()
ae = PointNetAutoEncoder(conf.experiment_name, conf)
ae.restore_model('data/{}'.format(experiment_name), 350, True)

# Load the demonstration point clouds and their file names; context
# managers ensure the pickle files are closed instead of leaking handles.
with open('/home/ceteke/Desktop/demonstrations/dummy_xyz_500ms.pk', 'rb') as f:
    dataset = pickle.load(f)
with open('/home/ceteke/Desktop/demonstrations/dummy_xyz_500ms_files.pk', 'rb') as f:
    files = pickle.load(f)

# Encode the demonstrations into the AE latent space.
dataset_trans = ae.transform(dataset)

gmm = GaussianMixture(5)

# Project the latent codes to 2-D before clustering.
pca = PCA(n_components=2)
dt = pca.fit_transform(dataset_trans)

gmm.fit(dt)
Ejemplo n.º 7
0
            loss_display_step = train_params['loss_display_step'],
            saver_step = train_params['saver_step'],
            z_rotate = z_rotate == 'True', #train_params['z_rotate'],
            encoder = encoder,
            decoder = decoder,
            encoder_args = enc_args,
            decoder_args = dec_args,
            experiment_name = experiment_name,
            val_step = 5,
            test_step = 200
           )
            # How often to evaluate/print out loss on held_out data (if any). # epochs
conf.save(osp.join(train_dir, 'configuration'))

reset_tf_graph()
ae = PointNetAutoEncoder(conf.experiment_name, conf)

# Line-buffered so each training-stat line hits disk immediately; the
# context manager guarantees the file is closed even if training fails.
buf_size = 1
with open(osp.join(conf.train_dir, 'train_stats.txt'), 'a', buf_size) as fout:
    train_stats = ae.train(train_pc, conf, log_file=fout, val_data=val_pc, test_data=test_pc)

print('On train hidden transform')
# Encode the (augmented) training split and persist the latent codes.
train_hidden, _, _ = train_pc.full_epoch_data()
train_hidden = apply_augmentations(train_hidden, conf)
train_hidden = ae.transform(train_hidden)
np.save(osp.join(train_dir, 'hidden.npy'), train_hidden)

print('On val hidden transform')
val_hidden, _, _ = val_pc.full_epoch_data()
val_hidden = apply_augmentations(val_hidden, conf)
Ejemplo n.º 8
0
    ap = argparse.ArgumentParser()
    ap.add_argument("-e", "--enc", type=str,
                    help="numpy saved array")
    args = vars(ap.parse_args())

    enc = np.load(args["enc"])
    '''
    pcs = np.load("output/{}_pcs.npy".format(DATASET))
    names = np.load("output/{}_names.npy".format(DATASET))

    reset_tf_graph()
    ae_configuration = MODEL_DIR + '/configuration'
    ae_conf = Conf.load(ae_configuration)
    ae_conf.encoder_args['verbose'] = False
    ae_conf.decoder_args['verbose'] = False
    ae = PointNetAutoEncoder(ae_conf.experiment_name, ae_conf)

    ae.restore_model(MODEL_DIR, RESTORE_EPOCH, verbose=True)

    recs = []
    rec_losses = []
    for pc in pcs:
        pc = np.expand_dims(pc, axis=0)
        rec, l = ae.reconstruct(pc, GT=pc, compute_loss=True)
        recs.append(rec)
        rec_losses.append(l)

    np.save("output/{}_rec_loss".format(DATASET), np.array(rec_losses))
    np.save("output/{}_recs".format(DATASET), np.array(recs))

    #for id in range(0,3):
Ejemplo n.º 9
0
    n_pc_points = 2048                # Number of points per model.
    bneck_size = 64                  # Bottleneck-AE size
    restore_epoch = 500

    #syn_id = snc_category_to_synth_id()[class_name]
    #class_dir = osp.join(top_in_dir , syn_id)
    class_dir = top_in_dir
    all_pc_data = load_all_point_clouds_under_folder(class_dir, n_threads=8, file_ending='.ply', verbose=True)



    reset_tf_graph()
    ae_conf = Conf.load(ae_configuration)
    ae_conf.encoder_args['verbose'] = False
    ae_conf.decoder_args['verbose'] = False
    ae = PointNetAutoEncoder(ae_conf.experiment_name, ae_conf)
    ae.restore_model(ae_conf.train_dir, restore_epoch, verbose=True)

    latent_codes = ae.get_latent_codes(all_pc_data.point_clouds)


    reconstructions = ae.decode(latent_codes)


    i = 1
    plot_3d_point_cloud(all_pc_data.point_clouds[i][:, 0],
                        all_pc_data.point_clouds[i][:, 1],
                        all_pc_data.point_clouds[i][:, 2], in_u_sphere=True);


    plot_3d_point_cloud(reconstructions[i][:, 0],
Ejemplo n.º 10
0
    with open(args["list"]) as f:
        pc_files = f.read().splitlines()

    test_pcs = np.empty([len(pc_files), n_points, 3], dtype=np.float32)
    for idx, point_file in enumerate(pc_files[:]):
        cloud = PyntCloud.from_file(point_file)
        test_pcs[idx, :, :] = cloud.points[:n_points]
    #print(test_pcs[0].shape)

    reset_tf_graph()
    ae_configuration = model_dir + '/configuration'
    ae_conf = Conf.load(ae_configuration)
    ae_conf.encoder_args['verbose'] = False
    ae_conf.decoder_args['verbose'] = False
    ae = PointNetAutoEncoder(ae_conf.experiment_name, ae_conf)

    ae.restore_model(model_dir, restore_epoch, verbose=True)

    #latent_code = ae.transform(test_pcs[:1])
    latent_codes = ae.get_latent_codes(test_pcs)

    for pc_idx in range(len(latent_codes) - 1):
        a = latent_codes[pc_idx]
        b = latent_codes[pc_idx + 1]  #aug_latent_codes[0]
        diff = a - b
        steps = np.linspace(0.0, 1.0, num=9)
        interpolations = []
        for step in steps[:-1]:
            interpolations.append(a - step * diff)
Ejemplo n.º 11
0
top_in_dir = args.dataset_dir
n_pc_points = 2048  # Number of points per model.
class_name = args.class_name
assert class_name == 'all', "Not supporting other things yet."
restore_epoch = args.epochs

print("Build model")
train_dir = args.train_dir
print("Train dir:%s" % train_dir)
print("Load model configuration:")
# Rebuild the AE from its saved configuration and restore the weights.
conf_path = os.path.join(train_dir, 'configuration')
conf = Conf.load(conf_path)
print(conf)
reset_tf_graph()
ae = PointNetAutoEncoder(conf.experiment_name, conf)
ae.restore_model(args.train_dir, epoch=restore_epoch)

# Load Validation set
print("Load data (train set)")
# Collect the per-class 'val' sub-directories under the dataset root.
sub_dirs = []
for subdir in os.listdir(os.path.join(top_in_dir)):
    p = os.path.join(top_in_dir, subdir, 'val')
    print(p)
    sub_dirs.append(p)

all_pc_data = load_all_point_clouds_under_folders(
    sub_dirs,
    n_threads=8,
    file_ending='.npy',
    max_num_points=2048,
Ejemplo n.º 12
0
train_dir = args.train_dir
top_in_dir = args.dataset_dir
n_pc_points = 2048                # Number of points per model.
bneck_size = args.bneck_size # Bottleneck-AE size
restore_epoch = args.epochs

print("Build model")
print("Train dir:%s"%train_dir)
print("Load model configuration:")
conf_path = os.path.join(train_dir, 'configuration')
conf = Conf.load(conf_path)
print(conf)
reset_tf_graph()
print("Build tensorflow graph")
ae = PointNetAutoEncoder(conf.experiment_name, conf)
ae.restore_model(args.train_dir, epoch=restore_epoch)

#####################
# Load Training Set
#####################
print("Load data (train set)")
tr_shape_lst = []
te_shape_lst = []
tr_lbl = []
te_lbl = []
class_lst = []
for i, f in enumerate(os.listdir(top_in_dir)):
    # Train
    tr_class_dir = os.path.join(top_in_dir, f, 'train')
    if not os.path.isdir(tr_class_dir):
            denoising=train_params['denoising'],
            learning_rate=train_params['learning_rate'],
            loss_display_step=train_params['loss_display_step'],
            saver_step=train_params['saver_step'],
            z_rotate=train_params['z_rotate'],
            train_dir=train_dir,
            encoder=encoder,
            decoder=decoder,
            encoder_args=enc_args,
            decoder_args=dec_args)
conf.experiment_name = experiment_name
conf.held_out_step = 5  # How often to evaluate/print out loss on held_out data (if any).

# pdb.set_trace()
reset_tf_graph()
ae = PointNetAutoEncoder(conf.experiment_name, conf)

# Encode the whole dataset batch-by-batch into latent codes.
n_examples = all_pc_data.num_examples
batch_size = conf.batch_size
n_batches = int(n_examples / batch_size)  # any trailing partial batch is dropped
latent_list = list()
for _ in xrange(n_batches):
    feed_pc, feed_model_names, _ = all_pc_data.next_batch(batch_size)
    num_points_to_pick = n_pc_points
    # Randomly permute the points of each cloud along axis 1.
    # NOTE(review): perm already has length num_points_to_pick, so the
    # [0:num_points_to_pick] slice is a no-op — confirm intent.
    perm = np.random.permutation(num_points_to_pick)
    feed_pc = np.take(feed_pc, perm[0:num_points_to_pick], axis=1)
    latent_codes = ae.transform(feed_pc)
    latent_list.append(latent_codes)

latent = np.concatenate(latent_list, axis=0)
# NOTE(review): weights are restored only after encoding — confirm the
# codes above were meant to come from the previously built graph.
ae.restore_model(model_dir, 250)
Ejemplo n.º 14
0
    Command:
        -p path/<filename>.npy
    """
    # construct the argument parser and parse the arguments
    '''
    ap = argparse.ArgumentParser()
    ap.add_argument("-e", "--enc", type=str,
                    help="numpy saved array")
    args = vars(ap.parse_args())

    enc = np.load(args["enc"])
    '''
    enc = np.load("output/{}_latent.npy".format(DATASET))
    names = np.load("output/{}_names.npy".format(DATASET))

    reset_tf_graph()
    ae_configuration = MODEL_DIR + '/configuration'
    ae_conf = Conf.load(ae_configuration)
    ae_conf.encoder_args['verbose'] = False
    ae_conf.decoder_args['verbose'] = False
    ae = PointNetAutoEncoder(ae_conf.experiment_name, ae_conf)

    ae.restore_model(MODEL_DIR, RESTORE_EPOCH, verbose=True)

    reconstructions = ae.decode(enc)

    for id in range(0, 3):
        #for id in range(0,len(reconstructions)):
        print(names[id])
        points2file(reconstructions[id], "output/rec_{}".format(names[id]))
Ejemplo n.º 15
0
                pose = [p[0], p[1], p[2], nlx, nly, nlz]
                '''
                pose = [
                    float(jp["pos"]["x"]),
                    float(jp["pos"]["y"]),
                    float(jp["pos"]["z"]),
                    float(jp["orn"]["x"]),
                    float(jp["orn"]["y"]),
                    float(jp["orn"]["z"])
                ]
                anno.append(pose)

    if enable_anno:
        np.save("output/{}_anno".format(DATASET), np.array(anno))
    np.save("output/{}_pcs".format(DATASET), pcs)
    np.save("output/{}_names".format(DATASET), np.array(names))

    reset_tf_graph()
    ae_configuration = MODEL_DIR + '/configuration'
    ae_conf = Conf.load(ae_configuration)
    ae_conf.encoder_args['verbose'] = False
    ae_conf.decoder_args['verbose'] = False
    ae = PointNetAutoEncoder(ae_conf.experiment_name, ae_conf)

    ae.restore_model(MODEL_DIR, RESTORE_EPOCH, verbose=True)

    latent_codes = ae.get_latent_codes(pcs)

    print(latent_codes.shape)
    np.save("output/{}_latent".format(DATASET), np.array(latent_codes))
            learning_rate=train_params['learning_rate'],
            train_dir=train_dir,
            loss_display_step=train_params['loss_display_step'],
            saver_step=train_params['saver_step'],
            z_rotate=train_params['z_rotate'],
            encoder=encoder,
            decoder=decoder,
            encoder_args=enc_args,
            decoder_args=dec_args)
conf.experiment_name = experiment_name
conf.held_out_step = 5  # How often to evaluate/print out loss on
# held_out data (if they are provided in ae.train() ).
conf.save(osp.join(train_dir, 'configuration'))

# Build the AE graph from the configuration.
reset_tf_graph()
ae = PointNetAutoEncoder(conf.experiment_name, conf)

# Earlier checkpoints kept for reference; only the restore below is active.
# ae.restore_model('/home/shubham/latent_3d_points/data/single_class_ae/chair/',500)
# ae.restore_model('/home/shubham/latent_3d_points/data/single_class_ae/airplane/',800)
ae.restore_model(
    '/home/swami/deeprl/latent_3d_points/data/single_class_ae/airplane_train_ae/',
    600)
# ae.restore_model('/home/shubham/latent_3d_points/data/single_class_ae/airplane_full_adv_g/',600)
# ae.restore_model('/home/shubham/latent_3d_points/data/single_class_ae/',900)
# ae.restore_model('/home/shubham/latent_3d_points/data/single_class_ae/with_global_with_upsampling/',890)
# ae.restore_model('/home/shubham/latent_3d_points/data/single_class_ae/with_global_with_upsampling/trials',1)
# ae.restore_model('/home/shubham/latent_3d_points/data/single_class_ae/clean',410)
print "Successfully loaded model"

# Get a batch of reconstuctions and their latent-codes.
reconstruct_from_latent_vectors = True
Ejemplo n.º 17
0
            learning_rate=train_params['learning_rate'],
            train_dir=train_dir,
            loss_display_step=train_params['loss_display_step'],
            saver_step=train_params['saver_step'],
            z_rotate=train_params['z_rotate'],
            encoder=encoder,
            decoder=decoder,
            encoder_args=enc_args,
            decoder_args=dec_args)
conf.experiment_name = experiment_name
conf.held_out_step = 5  # How often to evaluate/print out loss on
# held_out data (if they are provided in ae.train() ).
conf.save(osp.join(train_dir, 'configuration'))

# Build the AE graph from the configuration.
reset_tf_graph()
ae = PointNetAutoEncoder(conf.experiment_name, conf)

# Alternative checkpoints kept for reference; only the restore below is active.
# ae.restore_model('/home/shubham/latent_3d_points/data/single_class_ae/chair/',500)
# ae.restore_model('/home/shubham/latent_3d_points/data/single_class_ae/airplane/',800)
#ae.restore_model('/home/shubham/latent_3d_points/data/single_class_ae/airplane_full/',600)
# ae.restore_model('/home/shubham/latent_3d_points/data/single_class_ae/clean/',410)
ae.restore_model(
    '/home/swami/deeprl/latent_3d_points/data/single_class_ae/airplane_train_ae/',
    600)

##use best encoder and GAN:

num_pts_to_mask = 5
#latent_vec_file = '/home/shubham/latent_3d_points/notebooks/gt_noisy_airplane_full.txt'
# Latent vectors of the noisy airplane ground truth used downstream.
latent_vec_file = '/home/swami/deeprl/latent_3d_points/notebooks/gt_noisy_airplane_test_ae.txt'
Ejemplo n.º 18
0
def evaluate(setup, results, models, targets_list, victims_list):
    """Evaluate adversarial attacks and defenses for every (target, victim) pair.

    Builds (or reloads) the point-cloud autoencoder used as a defense, then
    runs `evaluate_all_shapes_scale` for each batch of every target/victim
    combination, accumulating per-shape predictions and norms and saving
    intermediate results after each batch.

    Parameters
    ----------
    setup : dict
        Experiment settings; mutated in place (the "target" and "victim"
        keys are rewritten per iteration). Must contain "batch_size" and
        "results_file".
    results : ListDict
        Previously collected results, appended to the final save only.
    models : dict
        Model registry; the restored AE is stored under models["ae"].
    targets_list, victims_list : iterable
        Target and victim class labels to sweep over.

    Returns
    -------
    ListDict
        The accumulated evaluation results.
    """
    top_out_dir = osp.join(BASE_DIR, "latent_3d_points", "data")

    # Top-dir of where point-clouds are stored (kept for parity with the
    # training scripts even though only top_out_dir is used below).
    top_in_dir = osp.join(BASE_DIR, "latent_3d_points", "data",
                          "shape_net_core_uniform_samples_2048")

    # experiment_name = 'single_class_ae'
    experiment_name = 'new_ae'

    n_pc_ppoints = 1024  # 2048                # Number of points per model.
    bneck_size = 128  # Bottleneck-AE size
    ae_loss = 'chamfer'  # Loss to optimize: 'emd' or 'chamfer'.
    train_params = default_train_params()
    encoder, decoder, enc_args, dec_args = mlp_architecture_ala_iclr_18(
        n_pc_ppoints, bneck_size)
    train_dir = create_dir(osp.join(top_out_dir, experiment_name))

    conf = Conf(n_input=[n_pc_ppoints, 3],
                loss=ae_loss,
                training_epochs=train_params['training_epochs'],
                batch_size=train_params['batch_size'],
                denoising=train_params['denoising'],
                learning_rate=train_params['learning_rate'],
                train_dir=train_dir,
                loss_display_step=train_params['loss_display_step'],
                saver_step=train_params['saver_step'],
                z_rotate=train_params['z_rotate'],
                encoder=encoder,
                decoder=decoder,
                encoder_args=enc_args,
                decoder_args=dec_args)
    conf.experiment_name = experiment_name
    conf.held_out_step = 5  # How often to evaluate/print out loss on
    # held_out data (if they are provided in ae.train() ).
    conf.save(osp.join(train_dir, 'configuration'))

    # Reload the saved configuration and restore the pre-trained AE weights.
    load_pre_trained_ae = True
    restore_epoch = 500
    if load_pre_trained_ae:
        conf = Conf.load(train_dir + '/configuration')
        reset_tf_graph()
        ae = PointNetAutoEncoder(conf.experiment_name, conf)
        ae.restore_model(conf.train_dir, epoch=restore_epoch, verbose=True)
        models["ae"] = ae

    # Human-readable descriptions for every metric column collected below.
    accuracies_disc = {
        "orig_acc": "original accuracy on PointNet ",
        "adv_suc": "natural adverserial sucess rate on PointNet ",
        "adv_acc": "natural adverserial accuracy on PointNet ",
        "proj_acc": "projected accuracy on PointNet ",
        "rec_suc": "defended natural adverserial sucess rate on PointNet ",
        "rec_acc": "reconstructed defense accuracy on PointNet ",
        "orig_acc_pp": "original accuracy on PointNet_++ ",
        "orig_acc_gcn": "original accuracy on DGCN",
        "orig_acc_p": "original accuracy on PointNet_+ ",
        "adv_suc_pp": "natural adverserial sucess rate on PointNet_++ ",
        "adv_suc_gcn": "natural adverserial sucess rate on DGCN",
        "adv_suc_p": "natural adverserial sucess rate on PointNet_+ ",
        "adv_acc_pp": "natural adverserial accuracy on PointNet_++ ",
        "adv_acc_gcn": "natural adverserial accuracy on DGCN",
        "adv_acc_p": "natural adverserial accuracy on PointNet_+ ",
        "proj_acc_pp": "projected accuracy on PointNet_++ ",
        "proj_acc_gcn": "projected accuracy on DGCN",
        "proj_acc_p": "projected accuracy on PointNet_+ ",
        "rec_suc_pp":
        "defended natural adverserial sucess rate on PointNet_++ ",
        "rec_suc_gcn": "defended natural adverserial sucess rate on DGCN",
        "rec_suc_p": "defended natural adverserial sucess rate on PointNet_+ ",
        "rec_acc_pp": "reconstructed defense accuracy on PointNet_++ ",
        "rec_acc_gcn": "reconstructed defense accuracy on DGCN",
        "rec_acc_p": "reconstructed defense accuracy on PointNet_+ ",
        "b_adv_suc": "baseline adverserial sucess rate on PointNet ",
        "b_adv_acc": "baseline adverserial accuracy on PointNet ",
        "b_rec_suc":
        "baseline defended natural adverserial sucess rate on PointNet ",
        "b_rec_acc": "baselin ereconstructed defense accuracy on PointNet ",
        "b_adv_suc_pp": "baseline adverserial sucess rate on PointNet_++ ",
        "b_adv_suc_gcn": "baseline adverserial sucess rate on DGCN",
        "b_adv_suc_p": "baseline adverserial sucess rate on PointNet_+ ",
        "b_adv_acc_pp": "baseline adverserial accuracy on PointNet_++ ",
        "b_adv_acc_gcn": "baseline adverserial accuracy on DGCN",
        "b_adv_acc_p": "baseline adverserial accuracy on PointNet_+ ",
        "b_rec_suc_pp":
        "baseline defended natural adverserial sucess rate on PointNet_++ ",
        "b_rec_suc_gcn":
        "baseline defended natural adverserial sucess rate on DGCN",
        "b_rec_suc_p":
        "baseline defended natural adverserial sucess rate on PointNet_+ ",
        "b_rec_acc_pp":
        "baselin ereconstructed defense accuracy on PointNet_++ ",
        "b_rec_acc_gcn": "baselin ereconstructed defense accuracy on DGCN",
        "b_rec_acc_p":
        "baselin ereconstructed defense accuracy on PointNet_+ ",
        "orig_acc_r": "original accuracy under Random defense",
        "adv_suc_r": "natural adverserial accuracy under Random defense",
        "adv_acc_r": "natural adverserial sucess rate under Random defense",
        "b_adv_suc_r": "baseline  adverserial accuracy under Random defense",
        "b_adv_acc_r":
        "baseline  adverserial sucess rate under Random defense",
        "orig_acc_o": "original accuracy under Outlier defense",
        "adv_suc_o": "natural adverserial accuracy under Outlier defense",
        "adv_acc_o": "natural adverserial sucess rate under Outlier defense",
        "b_adv_suc_o": "baseline  adverserial accuracy under Outlier defense",
        "b_adv_acc_o":
        "baseline  adverserial sucess rate under Outlier defense",
        "orig_acc_bust": "original accuracy under Robust model",
        "adv_acc_bust": "natural adverserial accuracy under Robust model"
    }

    norms_names = ["natural_L_cham_norm_orig"]
    # list(...) keeps this valid on both Python 2 and 3 (dict views cannot
    # be concatenated with '+' on Python 3).
    ev_results = ListDict(list(accuracies_disc.keys()) + norms_names)
    setups = ListDict(list(setup.keys()))
    save_results(setup["results_file"], ev_results + setups)
    for target in targets_list:
        setup["target"] = target
        for victim in victims_list:
            if victim == setup["target"]:
                continue  # never attack a class into itself
            setup["victim"] = victim
            for batch_indx in range(int(setup["batch_size"])):
                predictions, norms = evaluate_all_shapes_scale(
                    batch_indx=batch_indx, setup=setup, models=models)
                # Record the current setup once per shape in the batch; the
                # int() cast matches the loop bound above.
                for _ in range(int(setup["batch_size"])):
                    setups.append(setup)
                # Drop columns not reported this round, then extend with the
                # fresh predictions and norms.
                ev_results.remove(ev_results - ListDict(predictions) -
                                  ListDict(norms))
                ev_results.partial_extend(
                    ListDict(predictions)).partial_extend(ListDict(norms))
                # Checkpoint results after every batch.
                save_results(setup["results_file"], ev_results + setups)

    save_results(setup["results_file"], ev_results + setups + results)
    return ev_results
            encoder_args = enc_args,
            decoder_args = dec_args
           )
conf.experiment_name = experiment_name
conf.held_out_step = 5   # How often to evaluate/print out loss on
                         # held_out data (if they are provided in ae.train() ).
conf.save(osp.join(train_dir, 'configuration'))


# Build AE Model.

# In[11]:


reset_tf_graph()
ae = PointNetAutoEncoder(conf.experiment_name, conf)


# Train the AE (save output to train_stats.txt)

# In[1]:


# Line-buffered so each training-stat line is flushed as it is written;
# the context manager closes the file even if ae.train() raises.
buf_size = 1
with open(osp.join(conf.train_dir, 'train_stats.txt'), 'a', buf_size) as fout:
    train_stats = ae.train(all_pc_data, conf, log_file=fout, mask_type=0)


# Get a batch of reconstructions and their latent-codes.
Ejemplo n.º 20
0
    n_threads=8,
    file_ending='.npy',
    max_num_points=2048,
    verbose=True,
    normalize=args.normalize_shape,
    file_names=file_names)
print 'Shape of DATA =', all_pc_data.point_clouds.shape

#######################
# Load pre-trained AE #
#######################
reset_tf_graph()
ae_conf = Conf.load(ae_configuration)
ae_conf.encoder_args['verbose'] = False
ae_conf.decoder_args['verbose'] = False
ae = PointNetAutoEncoder(ae_conf.experiment_name, ae_conf)
ae.restore_model(ae_conf.train_dir, ae_epoch, verbose=True)

# Use AE to convert raw pointclouds to latent codes.
latent_codes = ae.get_latent_codes(all_pc_data.point_clouds)
latent_data = PointCloudDataSet(latent_codes)
print 'Shape of DATA =', latent_data.point_clouds.shape

#######################
# Set GAN parameters. #
#######################
use_wgan = True  # Wasserstein with gradient penalty, or not?
n_epochs = args.epochs  # Epochs to train.

plot_train_curve = True
save_gan_model = True
conf.experiment_name = experiment_name
conf.held_out_step = 5  # How often to evaluate/print out loss on
# held_out data (if they are provided in ae.train() ).
conf.save(osp.join(train_dir, 'configuration'))

# If you ran the above lines, you can reload a saved model like this:

# In[9]:

load_pre_trained_ae = True
restore_epoch = 400
if load_pre_trained_ae:
    conf = Conf.load(train_dir + '/configuration')
    reset_tf_graph()
    ae = PointNetAutoEncoder(conf.experiment_name, conf)
    ae.restore_model(conf.train_dir, epoch=restore_epoch)

# Build AE Model.

# In[10]:

reset_tf_graph()
ae = PointNetAutoEncoder(conf.experiment_name, conf)

# Train the AE (save output to train_stats.txt)

# In[ ]:

buf_size = 1  # Make 'training_stats' file to flush each output line regarding training.
fout = open(osp.join(conf.train_dir, 'train_stats.txt'), 'a', buf_size)
Ejemplo n.º 22
0
            batch_size = train_params['batch_size'],
            denoising = train_params['denoising'],
            learning_rate = train_params['learning_rate'],
            loss_display_step = train_params['loss_display_step'],
            saver_step = train_params['saver_step'],
            z_rotate = train_params['z_rotate'],
            train_dir = train_dir,
            encoder = encoder,
            decoder = decoder,
            encoder_args = enc_args,
            decoder_args = dec_args
           )
conf.experiment_name = experiment_name
conf.held_out_step = 5    

reset_tf_graph()
pdb.set_trace()
ae = PointNetAutoEncoder(conf.experiment_name, conf)
ae.restore_model(model_dir, 500)

model_path = '../data/lgan_plane/G_network_299.pth'
g = torch.load(model_path)
batch_size=50

for i in xrange(100):
    noise=Variable(torch.cuda.FloatTensor(batch_size, 128))
    generate_noise(noise)
    fake_x = g(noise).data.cpu().numpy()
    fake_pc = ae.decode(fake_x)
    print fake_pc.shape