Esempio n. 1
0
           )
            # How often to evaluate/print out loss on held_out data (if any). # epochs
# Persist the experiment configuration alongside the training artefacts.
conf.save(osp.join(train_dir, 'configuration'))

reset_tf_graph()
ae = PointNetAutoEncoder(conf.experiment_name, conf)

# Train the AE.  The stats file is line-buffered so every log line is flushed
# immediately; `with` guarantees the handle is closed even if ae.train raises
# (the original leaked it on error).
buf_size = 1  # flush each line
with open(osp.join(conf.train_dir, 'train_stats.txt'), 'a', buf_size) as fout:
    train_stats = ae.train(train_pc, conf, log_file=fout, val_data=val_pc, test_data=test_pc)


def _export_hidden(split, pc_data, out_dir):
    """Encode one data split with the trained AE and save its latent codes.

    Pulls a full epoch of point clouds from `pc_data`, applies the configured
    augmentations, transforms them through the AE, writes the result to
    <out_dir>/hidden.npy and returns the latent array.
    """
    print('On {} hidden transform'.format(split))
    points, _, _ = pc_data.full_epoch_data()
    points = apply_augmentations(points, conf)
    hidden = ae.transform(points)
    np.save(osp.join(out_dir, 'hidden.npy'), hidden)
    return hidden


# Keep the module-level names: later code may reference them.
train_hidden = _export_hidden('train', train_pc, train_dir)
val_hidden = _export_hidden('val', val_pc, val_dir)
test_hidden = _export_hidden('test', test_pc, test_dir)
# In[11]:


# Build a fresh graph and AE, then train on the full dataset.
reset_tf_graph()
ae = PointNetAutoEncoder(conf.experiment_name, conf)

# Append per-line-flushed training stats to train_stats.txt.  `with` closes
# the log even if ae.train raises (the original leaked the handle); also
# fixed the stray space in `mask_type =0`.
buf_size = 1  # line-buffered: flush each output line regarding training
with open(osp.join(conf.train_dir, 'train_stats.txt'), 'a', buf_size) as fout:
    train_stats = ae.train(all_pc_data, conf, log_file=fout, mask_type=0)


# Get a batch of reconstuctions and their latent-codes.

# In[13]:


# Fetch a mini-batch of 10 point clouds, then compute both their latent
# embeddings and their reconstructions from the trained AE.
feed_pc, feed_model_names, _ = all_pc_data.next_batch(10)
latent_codes = ae.transform(feed_pc)
reconstructions = ae.reconstruct(feed_pc)


# Use any plotting mechanism such as matplotlib to visualize the results.
Esempio n. 3
0
            # Record how many training samples a split fraction of t yields.
            num_points.append(int(len(X) * t))

        # Reload the AE weights (checkpoint epoch 350) before this evaluation.
        ae.restore_model(osp.join(top_out_dir, experiment_name), 350, True)

        # Cross-validate a linear SVM on the AE latent codes: accumulate the
        # test accuracy over `folds` independent random train/test splits.
        scores = 0.0
        for _ in range(folds):
            X_train, X_test, y_train, y_test = train_test_split(X,
                                                                y,
                                                                train_size=t,
                                                                shuffle=True)

            print(len(X_train), len(X_test))

            svm = LinearSVC()

            # Encode the raw point clouds into latent vectors for the SVM.
            X_train_trans = ae.transform(X_train)
            X_test_trans = ae.transform(X_test)

            #markers = {0: 'o', 1: '+'}
            #colors = {0: 'red', 1: 'blue'}
            #labels = {0: 'Once', 1: 'Sonra'}

            svm.fit(X_train_trans, y_train)
            scores += svm.score(X_test_trans, y_test)

        # Append this setting's mean fold accuracy under the current `size` key.
        a = accs[size]
        a.append(scores / folds)
        accs[size] = a

# Report the accuracies recorded for every training-size setting.
for size_key, fold_accuracies in accs.items():
    print(size_key, fold_accuracies, num_points)
# Output path for the latent vectors of the airplane training set.
latent_vec_file = '/home/swami/deeprl/latent_3d_points/data/single_class_ae/' + "airplane_train_ae.txt"

# NOTE(review): full_pc is never used below, but full_epoch_data() may
# advance the dataset's internal state, so the call is kept — confirm.
full_pc, _, _ = all_pc_data.full_epoch_data()
num_input = all_pc_data.num_examples

# Encode the whole dataset batch by batch.  The row count is rounded up to a
# whole number of batches; rows past num_input in the last batch stay zero.
num_iters = int(math.ceil(num_input / float(batch_size)))
array_row_size = int(num_iters * batch_size)
# Fixed: the original used a Python-2 print statement here (a SyntaxError
# under Python 3); the single-argument call form prints identically on both.
print("lv num rows:" + str(array_row_size))
lv_array = np.zeros([array_row_size, bneck_size])
for i in range(num_iters):
    feed_pc, feed_model_names, _ = all_pc_data.next_batch(batch_size)
    # This AE variant's transform returns (latent_codes, mask, noise).
    latent_codes, mask, noise = ae.transform(feed_pc)
    lv_array[i * batch_size:(i + 1) * batch_size, :] = latent_codes

np.savetxt(latent_vec_file, lv_array)
print("Latent codes:")
print(str(latent_codes))  # codes of the last batch only
print(mask)
# NOTE(review): interactive debugger breakpoint left in place to preserve
# the script's behavior — remove before running unattended.
pdb.set_trace()

# Reconstruct the last batch and dump two sample clouds as PLY files.
reconstructions = ae.reconstruct(feed_pc)
write_ply("airplane_ae.ply", reconstructions[0][1, :, :])
write_ply("airplane_ae2.ply", reconstructions[0][2, :, :])
Esempio n. 5
0
            decoder_args=dec_args)

# Attach the experiment name and persist the configuration for later runs.
conf.experiment_name = experiment_name
conf.save(os.path.join(train_dir, 'configuration'))

reset_tf_graph()
ae = PointNetAutoEncoder(conf.experiment_name, conf)

# Load pre-trained weights (checkpoint epoch 90) instead of training.
ae.restore_model('data/shapenet_1024_ae_128', 90, True)


def _encode_in_batches(clouds, size=100):
    """Run the AE encoder over `clouds` in fixed-size batches.

    Returns one array with a latent vector per encoded cloud.
    NOTE(review): if batchify drops a trailing partial batch, the output has
    fewer rows than `clouds` — the label slicing below accounts for that.
    """
    return np.concatenate([ae.transform(b) for b in batchify(clouds, size)])


print("Transforming Training data")
X_train_trans = _encode_in_batches(X_train)

print("Transforming test data")
X_test_trans = _encode_in_batches(X_test)

# Fit a linear SVM on the latent codes; labels are truncated to the number
# of rows actually encoded.
print("Fitting svm")
svm = LinearSVC()
svm.fit(X_train_trans, y_train[:len(X_train_trans)])
print(svm.score(X_test_trans, y_test[:len(X_test_trans)]))
Esempio n. 6
0
            decoder=decoder,
            encoder_args=enc_args,
            decoder_args=dec_args)

conf.experiment_name = experiment_name

# Rebuild the AE graph and load the checkpoint from epoch 350.
reset_tf_graph()
ae = PointNetAutoEncoder(conf.experiment_name, conf)
ae.restore_model('data/{}'.format(experiment_name), 350, True)

# Load the demonstration point clouds and their source file names.
# Context managers close both handles deterministically (the original
# leaked them).  NOTE(review): pickle.load is only safe on trusted files.
with open('/home/ceteke/Desktop/demonstrations/dummy_xyz_500ms.pk', 'rb') as f:
    dataset = pickle.load(f)
with open('/home/ceteke/Desktop/demonstrations/dummy_xyz_500ms_files.pk', 'rb') as f:
    files = pickle.load(f)

# Encode every demonstration into the AE's latent space.
dataset_trans = ae.transform(dataset)

# Cluster the latent codes: project to 2-D with PCA, then fit a
# 5-component Gaussian mixture on the projections.
gmm = GaussianMixture(5)

pca = PCA(n_components=2)
dt = pca.fit_transform(dataset_trans)

gmm.fit(dt)
clusters = gmm.predict(dt)

# One colour per GMM component for the scatter plot that follows.
colors = {0: 'blue', 1: 'red', 2: 'green', 3: 'black', 4: 'cyan'}

# Scatter each 2-D projection coloured by its GMM cluster assignment.
for i, d in enumerate(dt):
    # Derive "<parent_dir>/<file>" without the '_seg.pcd' suffix.
    # NOTE(review): fp is unused in the lines visible here — presumably it
    # labels points in code that continues past this chunk; confirm before
    # removing.
    fp = files[i].split('/')
    fp = (fp[-2] + '/' + fp[-1]).split('_seg.pcd')[0]
    plt.scatter(d[0], d[1], color=colors[clusters[i]])