Example #1
def get_pcd(view, pcd_database, object_frame=False, verbose=False):
    '''
    Read in the point cloud for the requested view from its pcd file.
    '''

    pcd_filename = os.path.join(pcd_database, view + '.pcd')

    try:
        point_cloud = pypcd.PointCloud.from_path(pcd_filename)
    except IOError:
        print("File, " + str(pcd_filename) + " doesn't exist. Ignoring.")
        return None

    # Point cloud size.
    print("PC Size: ", len(point_cloud.pc_data))

    # Some objects end up filling the whole screen (640 x 480 = 307200
    # points, i.e. a full depth image) - this is not useful to us.
    if len(point_cloud.pc_data) == 307200:
        return None

    obj_cloud = np.ones((len(point_cloud.pc_data), 3), dtype=np.float32)
    obj_cloud[:, 0] = point_cloud.pc_data['x']
    obj_cloud[:, 1] = point_cloud.pc_data['y']
    obj_cloud[:, 2] = point_cloud.pc_data['z']

    # Get object frame for this point cloud.
    if object_frame:
        object_transform, world_frame_center = find_object_frame(
            obj_cloud, verbose)

        # Transform our point cloud. I.e. center and rotate to new frame.
        for i in range(obj_cloud.shape[0]):
            obj_cloud[i] = np.dot(
                object_transform,
                [obj_cloud[i][0], obj_cloud[i][1], obj_cloud[i][2], 1])[:3]

        centroid_diff = np.array([0.0, 0.0, 0.0])
    else:
        pca_operator = PCA(n_components=3, svd_solver='full')
        pca_operator.fit(obj_cloud)
        pca_centroid = np.matrix(pca_operator.mean_).T
        centroid = np.array([
            (np.amax(obj_cloud[:, 0]) + np.amin(obj_cloud[:, 0])) / 2,
            (np.amax(obj_cloud[:, 1]) + np.amin(obj_cloud[:, 1])) / 2,
            (np.amax(obj_cloud[:, 2]) + np.amin(obj_cloud[:, 2])) / 2,
        ])

        # Offset between the point centroid (PCA mean) and the bounding-box center.
        centroid_diff = np.array([
            float(pca_centroid[0]) - centroid[0],
            float(pca_centroid[1]) - centroid[1],
            float(pca_centroid[2]) - centroid[2],
        ])

        # Center.
        obj_cloud[:, 0] -= float(centroid[0])
        obj_cloud[:, 1] -= float(centroid[1])
        obj_cloud[:, 2] -= float(centroid[2])

    # Determine scaling size.
    max_dim = max(
        np.amax(obj_cloud[:, 0]) - np.amin(obj_cloud[:, 0]),
        np.amax(obj_cloud[:, 1]) - np.amin(obj_cloud[:, 1]),
        np.amax(obj_cloud[:, 2]) - np.amin(obj_cloud[:, 2]),
    )

    # Scale so that max dimension is about 1.
    scale = (1.0 / 1.03) / max_dim
    print("Scale, ", scale)

    # Scale every point.
    obj_cloud = obj_cloud * scale

    # Down/Up Sample cloud so everything has the same # of points.
    idxs = np.random.choice(obj_cloud.shape[0],
                            size=_POINT_CLOUD_SIZE,
                            replace=True)
    obj_cloud = obj_cloud[idxs, :]

    if verbose:
        plot_3d_points(obj_cloud)

    return obj_cloud, (max_dim * (1.03 / 1.0)), scale, centroid_diff
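
A minimal usage sketch, assuming get_pcd, plot_3d_points and _POINT_CLOUD_SIZE come from the same module; the view name and database path below are placeholders:

# Hypothetical call; the view name and pcd_database path are placeholders.
result = get_pcd('object_view_000', '/path/to/pcd_database', object_frame=False)
if result is not None:
    obj_cloud, max_dim, scale, centroid_diff = result
    # obj_cloud is centered and rescaled; dividing by scale recovers the
    # centered, unscaled coordinates.
    print(obj_cloud.shape, max_dim, scale, centroid_diff)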
Example #2
def run(get_model,
        train_path,
        validation_path,
        model_path,
        logs_path,
        batch_size=32,
        epoch_start=0,
        epochs=100,
        learning_rate=1e-4,
        optimizer='adam',
        train=True,
        warm_start=False,
        alpha=0.5,
        loss_function='mse',
        sdf_count=64):

    # Read in training and validation files.
    train_files = [
        os.path.join(train_path, filename)
        for filename in os.listdir(train_path) if ".tfrecord" in filename
    ]
    validation_files = [
        os.path.join(validation_path, filename)
        for filename in os.listdir(validation_path) if ".tfrecord" in filename
    ]

    # Fetch the data.
    train_dataset = get_sdf_dataset(train_files,
                                    batch_size=batch_size,
                                    sdf_count=sdf_count)
    validation_dataset = get_sdf_dataset(validation_files,
                                         batch_size=batch_size,
                                         sdf_count=sdf_count)

    # Setup iterators.
    # train_iterator = train_dataset.make_initializable_iterator()
    train_iterator = tf.compat.v1.data.make_initializable_iterator(
        train_dataset)
    train_next_point_cloud, train_next_xyz, train_next_label = train_iterator.get_next(
    )

    # val_iterator = validation_dataset.make_initializable_iterator()
    val_iterator = tf.compat.v1.data.make_initializable_iterator(
        validation_dataset)
    val_next_point_cloud, val_next_xyz, val_next_label = val_iterator.get_next(
    )

    # Setup optimizer.
    batch = tf.compat.v1.get_variable('batch', [],
                                      initializer=tf.constant_initializer(0),
                                      trainable=False)
    learn_rate = get_learning_rate(batch, batch_size, learning_rate)
    tf.compat.v1.summary.scalar('learning_rate', learn_rate)

    if optimizer == 'adam':
        # Note: Adam uses the base learning_rate; the decayed learn_rate
        # above is only used by the momentum optimizer.
        opt = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)
    elif optimizer == 'momentum':
        opt = tf.compat.v1.train.MomentumOptimizer(
            learning_rate=learn_rate,
            momentum=0.9)  # Make this another hyperparam?

    # Setup batch norm decay rate for PointConv/Net layers.
    bn_decay = get_bn_decay(batch, batch_size)

    # Setup model operations.
    points = tf.compat.v1.placeholder(tf.float32, name="point_cloud")
    xyz_in = tf.compat.v1.placeholder(tf.float32, name="query_points")
    sdf_labels = tf.compat.v1.placeholder(tf.float32, name="query_labels")
    is_training = tf.compat.v1.placeholder(tf.bool, name="is_training")
    # sdf_prediction, loss, debug = get_model(points, xyz_in, sdf_labels, is_training, bn_decay, batch_size=batch_size, alpha=alpha, loss_function=loss_function)
    sdf_prediction, loss, debug = get_model(points,
                                            xyz_in,
                                            sdf_labels,
                                            is_training,
                                            bn_decay,
                                            batch_size=batch_size,
                                            loss_function=loss_function)

    # Get update ops for the BN.
    update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)

    # Setup training operation.
    with tf.control_dependencies(update_ops):
        train_op = opt.minimize(loss, global_step=batch)

    # Setup tensorboard operation.
    merged = tf.compat.v1.summary.merge_all()

    print("Variable Counts: ")
    print("Encoder: " + str(get_num_trainable_variables('encoder')))
    print("SDF: " + str(get_num_trainable_variables('sdf')))
    init = tf.compat.v1.global_variables_initializer()

    # Save/Restore model.
    saver = tf.compat.v1.train.Saver()

    with tf.compat.v1.Session() as sess:

        # Setup tensorboard.
        f_writer = tf.compat.v1.summary.FileWriter(logs_path, sess.graph)

        # Init variables.
        if not train or warm_start:
            model_file = os.path.join(model_path, 'model.ckpt')
            saver.restore(sess, model_file)
            print("Model restored from: ", model_file)
        else:
            sess.run(init)

        validation_loss = float('inf')
        best_loss = float('inf')

        for epoch in range(epoch_start, epoch_start + epochs):
            print("Epoch: ", str(epoch))

            sess.run(train_iterator.initializer)

            # Track loss throughout updates.
            total_loss = 0.0
            examples = 0

            while True:
                try:
                    # Split the given features into batches.
                    point_clouds_, xyzs_, labels_ = sess.run(
                        (train_next_point_cloud, train_next_xyz,
                         train_next_label))

                    examples += 1
                    if train:
                        _, summary_, sdf_prediction_, loss_, step, _ = sess.run(
                            [
                                train_op, merged, sdf_prediction, loss, batch,
                                debug
                            ],
                            feed_dict={
                                points: point_clouds_,
                                xyz_in: xyzs_,
                                sdf_labels: labels_,
                                is_training: True,
                            })
                        f_writer.add_summary(summary_, step)
                    else:
                        sdf_prediction_, loss_ = sess.run(
                            [sdf_prediction, loss],
                            feed_dict={
                                points: point_clouds_,
                                xyz_in: xyzs_,
                                sdf_labels: labels_,
                                is_training: False,
                            })

                        pts = np.reshape(xyzs_[0], (sdf_count, 3))
                        truth = np.reshape(labels_[0], (sdf_count))
                        pred = np.reshape(sdf_prediction_[0], (sdf_count))

                        plot_3d_points(point_clouds_[0])
                        plot_3d_points(pts)
                        plot_3d_points(pts, truth)
                        plot_3d_points(pts, pred)

                    total_loss += loss_

                except tf.errors.OutOfRangeError:
                    break

            avg_loss = total_loss / float(examples)

            print('Average loss: {}'.format(avg_loss))

            if train:
                f_writer.add_summary(
                    tf.compat.v1.Summary(value=[
                        tf.compat.v1.Summary.Value(tag='epoch_loss',
                                                   simple_value=avg_loss)
                    ]), epoch)

            # Validation loop every epoch.
            sess.run(val_iterator.initializer)

            total_loss = 0.0
            examples = 0
            while True:
                try:
                    point_clouds_, xyzs_, labels_ = sess.run(
                        (val_next_point_cloud, val_next_xyz, val_next_label))

                    examples += 1
                    sdf_prediction_, loss_ = sess.run(
                        [sdf_prediction, loss],
                        feed_dict={
                            points: point_clouds_,
                            xyz_in: xyzs_,
                            sdf_labels: labels_,
                            is_training: False,
                        })

                    total_loss += loss_

                except tf.errors.OutOfRangeError:
                    break

            avg_loss = total_loss / float(examples)

            # Save model if it's the best one we've seen.
            if avg_loss < best_loss and train:
                best_loss = avg_loss
                save_path = saver.save(sess,
                                       os.path.join(model_path, 'model.ckpt'))
                print("Model saved to: %s" % save_path)

            if train:
                f_writer.add_summary(
                    tf.compat.v1.Summary(value=[
                        tf.compat.v1.Summary.Value(tag='epoch_validation_loss',
                                                   simple_value=avg_loss)
                    ]), epoch)
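
For reference, a hedged sketch of how run could be invoked, based only on the signature above; every path is a placeholder and get_model is assumed to be the project's model-building function:

# Hypothetical invocation; all paths below are placeholders.
run(get_model,
    train_path='/path/to/train',
    validation_path='/path/to/validation',
    model_path='/path/to/model',
    logs_path='/path/to/logs',
    batch_size=32,
    epochs=100,
    learning_rate=1e-4,
    optimizer='adam',
    train=True,
    loss_function='mse',
    sdf_count=64)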
Example #3
if __name__ == '__main__':
    train_folder = '/dataspace/ReconstructionData/SDF_Full_Fix/Train'
    train_files = [
        os.path.join(train_folder, filename)
        for filename in os.listdir(train_folder) if ".tfrecord" in filename
    ]

    dataset = get_sdf_dataset(train_files, batch_size=1, sdf_count=1024)

    for x, y, z in dataset:
        # x: partial point clouds, y: SDF query points, z: SDF labels.
        point_cloud_points = x.numpy()[0]
        plot_3d_points(point_cloud_points)

        # Query points with a non-positive SDF label lie on or inside the object.
        points_inside = y.numpy()[0][np.where(
            np.reshape(z.numpy()[0], (-1, )) <= 0)]
        plot_3d_points(points_inside)

        all_points = np.concatenate([point_cloud_points, points_inside],
                                    axis=0)

        # Color the observed cloud 0 and the interior points 1 for plotting.
        pt_cld_col = np.zeros(point_cloud_points.shape[0])
        true_cld_col = np.zeros(points_inside.shape[0]) + 1
        col = np.concatenate([pt_cld_col, true_cld_col])

        plot_3d_points(all_points, col)
Example #4
def extract_voxel(get_model, model_path, loss_function, train_path,
                  validation_path, mesh):

    # Read in training and validation files.
    validation_files = [
        os.path.join(validation_path, filename)
        for filename in os.listdir(validation_path) if ".tfrecord" in filename
    ]

    sdf_count_ = 2048
    voxel_resolution = 32

    # Fetch the data.
    validation_dataset = get_sdf_dataset(validation_files,
                                         batch_size=1,
                                         sdf_count=sdf_count_)

    # Setup iterators.
    val_iterator = validation_dataset.make_initializable_iterator()
    val_next_point_cloud, val_next_xyz, val_next_label = val_iterator.get_next(
    )

    # Setup model operations.
    points = tf.placeholder(tf.float32)
    xyz_in = tf.placeholder(tf.float32)
    sdf_labels = tf.placeholder(tf.float32)
    is_training = tf.placeholder(tf.bool)

    sdf_prediction, loss, _ = get_model(points,
                                        xyz_in,
                                        sdf_labels,
                                        is_training,
                                        None,
                                        batch_size=1,
                                        alpha=0.5,
                                        loss_function=loss_function,
                                        sdf_count=sdf_count_)

    # Generate a regular grid of query points spanning [-0.5, 0.5] on each axis.
    pts = []
    for x in range(voxel_resolution):
        for y in range(voxel_resolution):
            for z in range(voxel_resolution):
                x_ = -0.5 + ((1.0 / float(voxel_resolution - 1)) * x)
                y_ = -0.5 + ((1.0 / float(voxel_resolution - 1)) * y)
                z_ = -0.5 + ((1.0 / float(voxel_resolution - 1)) * z)
                pts.append([x_, y_, z_])
    pts = np.array(pts)
    # Split the grid into batches of sdf_count_ points for the network.
    pt_splits = np.split(pts, pts.shape[0] // sdf_count_)

    # Save/Restore model.
    saver = tf.train.Saver()

    with tf.Session() as sess:
        saver.restore(sess, os.path.join(model_path, 'model.ckpt'))

        # Setup function that predicts SDF for (x,y,z) given a point cloud.
        def get_sdf(point_cloud, pts):

            # The label placeholder is not needed at inference time, so it is
            # left unfed; only the SDF prediction is fetched.
            prediction = sess.run(sdf_prediction,
                                  feed_dict={
                                      points: point_cloud,
                                      xyz_in: pts,
                                      is_training: False,
                                  })

            # print(xyz)
            # print(prediction)

            return prediction

        sess.run(val_iterator.initializer)
        for i in range(20):
            point_clouds_, xyzs_, labels_ = sess.run(
                (val_next_point_cloud, val_next_xyz, val_next_label))

            # Setup a voxelization based on the SDF.
            voxelized = np.zeros(
                (voxel_resolution, voxel_resolution, voxel_resolution),
                dtype=np.float32)

            filled_pts = []

            # For every grid point, query the SDF given the point cloud and keep points that fall just inside the object surface.
            for pts_ in pt_splits:
                sdf_ = get_sdf(point_clouds_,
                               np.reshape(pts_, (1, sdf_count_, 3)))

                for pt_, sdf in zip(np.reshape(pts_, (sdf_count_, 3)),
                                    np.reshape(sdf_, (sdf_count_, ))):
                    if -0.05 <= sdf <= 0.0:
                        filled_pts.append(pt_)
                        x_ = int(
                            round(
                                (pt_[0] + 0.5) * float(voxel_resolution - 1)))
                        y_ = int(
                            round(
                                (pt_[1] + 0.5) * float(voxel_resolution - 1)))
                        z_ = int(
                            round(
                                (pt_[2] + 0.5) * float(voxel_resolution - 1)))
                        voxelized[x_, y_, z_] = 1.0

            # Plot.
            plot_3d_points(point_clouds_[0])
            plot_3d_points(np.reshape(filled_pts, (-1, 3)))
            if mesh:
                # Mesh w/ mcubes.
                #plot_voxel(convert_to_sparse_voxel_grid(voxelized), voxel_res=(voxel_resolution, voxel_resolution, voxel_resolution))
                vertices, triangles = mcubes.marching_cubes(voxelized, 0)
                mcubes.export_mesh(vertices, triangles, 'test.dae', 'test')

                meshed_object = trimesh.load('test.dae')
                meshed_object.show()
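
The meshing step can be exercised on its own. A small self-contained sketch, using PyMCubes and trimesh on a synthetic sphere SDF (the grid resolution and output filename are arbitrary):

import numpy as np
import mcubes
import trimesh

# Synthetic signed-distance field of a sphere on a 32^3 grid.
res = 32
xs, ys, zs = np.mgrid[:res, :res, :res]
center = (res - 1) / 2.0
sdf_grid = np.sqrt((xs - center) ** 2 + (ys - center) ** 2 +
                   (zs - center) ** 2) - res / 4.0

# Extract the zero level set with marching cubes and export a COLLADA mesh.
vertices, triangles = mcubes.marching_cubes(sdf_grid, 0)
mcubes.export_mesh(vertices, triangles, 'sphere_test.dae', 'sphere')

# View the result with trimesh, mirroring the end of extract_voxel.
trimesh.load('sphere_test.dae').show()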