def get_latent_vectors(self, pclouds, batch_size=50):
    '''Convenience wrapper of self.transform to get the latent (bottle-neck)
    codes for a set of input point clouds.

    Args:
        pclouds: (N, K, 3) numpy array of N point clouds with K points each.
    '''
    latent_codes = []
    idx = np.arange(len(pclouds))
    for b in iterate_in_chunks(idx, batch_size):
        latent_codes.append(self.transform(pclouds[b]))
    return np.vstack(latent_codes)
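# The batching above (and in the wrappers below) goes through an `iterate_in_chunks`
# helper imported elsewhere in the repo. A minimal sketch of what such a helper
# presumably does, assuming it simply yields successive fixed-size slices of its input:
#
#     def iterate_in_chunks(l, n):
#         '''Yield successive n-sized chunks from l.'''
#         for i in range(0, len(l), n):
#             yield l[i:i + n]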
def get_pre_symmetry_data(self, pclouds, batch_size=50):
    '''Convenience wrapper of self.get_pre_symmetry to get the data before the
    symmetry operation (i.e., before the bottle-neck) for a set of input point clouds.

    Args:
        pclouds: (N, K, 3) numpy array of N point clouds with K points each.
    '''
    pre_symmetry_data = []
    idx = np.arange(len(pclouds))
    for b in iterate_in_chunks(idx, batch_size):
        pre_symmetry_data.append(self.get_pre_symmetry(pclouds[b]))
    return np.vstack(pre_symmetry_data)
def get_reconstructions(self, pclouds, batch_size=50):
    '''Convenience wrapper of self.reconstruct to get reconstructions of the
    input point clouds.

    Args:
        pclouds: (N, K, 3) numpy array of N point clouds with K points each.
    '''
    reconstructions = []
    idx = np.arange(len(pclouds))
    for b in iterate_in_chunks(idx, batch_size):
        rcon, _ = self.reconstruct(pclouds[b], compute_loss=False)
        reconstructions.append(rcon)
    return np.vstack(reconstructions)
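# Hypothetical usage sketch for the two wrappers above. The `ae` instance, the
# cloud count, and the point count are placeholders for illustration, not values
# taken from this module:
#
#     pclouds = np.random.rand(100, 2048, 3).astype(np.float32)   # (N, K, 3)
#     codes = ae.get_latent_vectors(pclouds, batch_size=50)        # (N, bottleneck_size)
#     recons = ae.get_reconstructions(pclouds, batch_size=50)      # (N, K, 3)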
def embedding_at_tensor(self, dataset, conf, feed_original=True,
                        apply_augmentation=False, tensor_name='bottleneck'):
    '''
    Observation: the NN-neighborhoods seem more reasonable when we do not apply the augmentation.
    Observation: the next layer after the latent code (z) might be something interesting.
    tensor_name: e.g. model.name + '_1/decoder_fc_0/BiasAdd:0'
    '''
    batch_size = conf.batch_size
    original, ids, noise = dataset.full_epoch_data(shuffle=False)

    if feed_original:
        feed = original
    else:
        feed = noise
        if feed is None:
            feed = original

    feed_data = feed
    if apply_augmentation:
        feed_data = apply_augmentations(feed, conf)

    embedding = []
    if tensor_name == 'bottleneck':
        for b in iterate_in_chunks(feed_data, batch_size):
            embedding.append(self.transform(b.reshape([len(b)] + conf.n_input)))
    else:
        embedding_tensor = self.graph.get_tensor_by_name(tensor_name)
        for b in iterate_in_chunks(feed_data, batch_size):
            codes = self.sess.run(embedding_tensor,
                                  feed_dict={self.x: b.reshape([len(b)] + conf.n_input)})
            embedding.append(codes)

    embedding = np.vstack(embedding)
    return feed, embedding, ids
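# Hypothetical usage sketch for reading an intermediate layer instead of the
# bottleneck. The tensor name follows the docstring's example; `ae`, `dataset`,
# `conf`, and `model` are placeholders for illustration:
#
#     tname = model.name + '_1/decoder_fc_0/BiasAdd:0'
#     feed, emb, ids = ae.embedding_at_tensor(dataset, conf, feed_original=True,
#                                             apply_augmentation=False,
#                                             tensor_name=tname)
#     # emb has one row per input cloud, with the width of the requested layer.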
# add axis to keep the interface of dist_weight as the first dim
adversarial_pc_input = np.expand_dims(adversarial_pc_input, axis=0)

num_dist_weight, num_examples_curr, _, _ = adversarial_pc_input.shape

# get knn distances per point for adversarial point clouds
knn_dists_adversarial_pc_input = -1 * np.ones(
    list(adversarial_pc_input.shape[:-1]) + [flags.num_knn], dtype=np.float32)

for j in range(num_dist_weight):
    if flags.use_tf_knn:
        adv_pcs = adversarial_pc_input[j]
        adv_knn_dists_list = []
        idx = np.arange(num_examples_curr)
        for b in iterate_in_chunks(idx, knn_batch_size):
            print('shape class %d/%d dist weight %d/%d point cloud %d/%d' %
                  (i + 1, len(pc_classes), j + 1, num_dist_weight, b[-1] + 1, num_examples_curr))
            knn_dists_batch = sess.run(knn_dists, feed_dict={pc_pl: adv_pcs[b]})
            adv_knn_dists_list.append(knn_dists_batch)
        adv_knn_dists = np.vstack(adv_knn_dists_list)
        knn_dists_adversarial_pc_input[j] = adv_knn_dists
    else:
        for l in range(num_examples_curr):
            print('shape class %d/%d dist weight %d/%d point cloud %d/%d' %
                  (i + 1, len(pc_classes), j + 1, num_dist_weight, l + 1, num_examples_curr))
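            # The non-TF branch is truncated here. A minimal sketch of what a per-example
            # kNN distance computation could look like (assumption: scipy is available;
            # cKDTree and the slicing below are illustrative, not the original code):
            #
            #     from scipy.spatial import cKDTree
            #     pc = adversarial_pc_input[j][l]                # (K, 3) single cloud
            #     tree = cKDTree(pc)
            #     # query num_knn + 1 neighbors, since each point's nearest neighbor is itself
            #     dists, _ = tree.query(pc, k=flags.num_knn + 1)
            #     knn_dists_adversarial_pc_input[j][l] = dists[:, 1:]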