Example #1
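This snippet is shown without its module preamble. A plausible set of imports, assuming a 3D-R2N2-style demo script (the exact module paths for Solver, voxel2obj, and ResidualGRUNet depend on the repository layout and are placeholders here; load_demo_images, DEFAULT_WEIGHTS, and export_obj are assumed to be defined elsewhere in the same script), might look like:

import os
import time

import numpy as np
import mcubes                              # PyMCubes: marching_cubes / export_mesh
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D    # noqa: F401  (registers the '3d' projection)

# Placeholder import paths -- adjust to the actual project layout:
from lib.solver import Solver
from lib.voxel import voxel2obj
from models.res_gru_net import ResidualGRUNet
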
def main():
    '''Main demo function'''
    # Predictions are saved to '<dirname>.obj'; the original CLI-argument handling is left commented out.
    # pred_file_name = sys.argv[1] if len(sys.argv) > 1 else 'prediction.obj'
    tic = time.perf_counter()  # time.clock() was removed in Python 3.8
    # Use the default network model
    net = ResidualGRUNet(compute_grad=False)
    net.load(DEFAULT_WEIGHTS)
    solver = Solver(net)

    toc = time.perf_counter()

    print('Time to load model: ' + str(toc - tic))

    tic = time.perf_counter()
    path = './test/'
    dirs = sorted(os.listdir(path))
    time_ims = []
    for dir_name in dirs:
        print(dir_name)
        time_ims.append(load_demo_images(path + dir_name + '/'))

    # Run the network on all loaded image sequences
    voxel_prediction, _ = solver.test_output(np.array(time_ims))
    # Name the output file after the last test directory
    pred_file_name = str(dir_name) + '.obj'

    # Save the prediction to an OBJ file (mesh file).
    voxel2obj(pred_file_name, voxel_prediction[0, :, 1, :, :] > 0.4)
    toc = time.perf_counter()

    print('Time per test directory: ' + str((toc - tic) / len(dirs)))
    def plotFromVoxels(voxels, title=''):
        #        print('plotfromvoxel')
        voxel2obj(title + 'voxels.obj', voxels)
        if len(voxels.shape) > 3:
            x_d = voxels.shape[0]
            y_d = voxels.shape[1]
            z_d = voxels.shape[2]
            v = voxels[:, :, :, 0]

            v = np.reshape(v, (x_d, y_d, z_d))
        else:
            v = voxels

        print("voxels_plot", v.shape)  # e.g. (32, 32, 32)

        # Run marching cubes on the 3-D volume prepared above (marching_cubes expects a 3-D array)
        vertices, triangles = mcubes.marching_cubes(v, 0)
        mcubes.export_mesh(vertices, triangles, title + "recon.dae",
                           "MySphere")
        export_obj(vertices, triangles, title + 'recon.obj')

        x, y, z = v.nonzero()
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.scatter(x, y, z, zdir='z', c='red')
        ax.set_xlabel('X')
        ax.set_ylabel('Y')
        ax.set_zlabel('Z')
        ax.set_aspect('equal')  # on 3-D axes this needs a recent matplotlib; older versions can use ax.set_box_aspect((1, 1, 1))
        ax.view_init(-90, 90)

        max_range = np.array(
            [x.max() - x.min(),
             y.max() - y.min(),
             z.max() - z.min()]).max()
        print("max_range", max_range)

        Xb = 0.5 * max_range * np.mgrid[
            -1:2:2, -1:2:2, -1:2:2][0].flatten() + 0.5 * (x.max() + x.min())
        Yb = 0.5 * max_range * np.mgrid[
            -1:2:2, -1:2:2, -1:2:2][1].flatten() + 0.5 * (y.max() + y.min())
        Zb = 0.5 * max_range * np.mgrid[
            -1:2:2, -1:2:2, -1:2:2][2].flatten() + 0.5 * (z.max() + z.min())
        # Plot invisible white corner points so the axes enclose a roughly cubic bounding box:
        for xb, yb, zb in zip(Xb, Yb, Zb):
            ax.plot([xb], [yb], [zb], 'w')
        plt.grid()

        plt.title(title)
        plt.show(block=False)
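The export_obj helper called above is not defined in this example. A minimal sketch of such a Wavefront OBJ writer, assuming vertices is an (N, 3) float array and triangles an (M, 3) index array as returned by mcubes.marching_cubes (an illustrative stand-in, not the original helper):

def export_obj(vertices, triangles, filename):
    # Sketch of an OBJ writer, not the project's original export_obj.
    with open(filename, 'w') as f:
        for vx, vy, vz in vertices:
            f.write('v %f %f %f\n' % (vx, vy, vz))
        for a, b, c in triangles:
            # OBJ face indices are 1-based
            f.write('f %d %d %d\n' % (a + 1, b + 1, c + 1))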
Example #3
def examples(model='syn'):
    from voxel import voxel2obj
    cates = ['chair']

    if model not in ('syn', 'real'):
        print("Unknown model '%s': expected 'syn' or 'real'" % model)
        return

    weight_path = os.path.join('models', '%s_model' % model, 'model.cptk')
    source_path = '%s_data' % model
    dest_path = 'res_%s_data' % model

    if model == 'syn':
        before, after, img_input = syn_model()
    else:
        before, after, img_input = real_model()

    params = tf.trainable_variables()
    saver = tf.train.Saver(var_list=params)

    if not os.path.exists(dest_path):
        os.makedirs(dest_path)

    # Collect the PNG inputs. Note: files is overwritten on each iteration,
    # so only the last category in cates is actually processed below.
    for cate in cates:
        files = [
            file for file in os.listdir(os.path.join(source_path, cate))
            if file.endswith('png')
        ]

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, weight_path)
        for file in files:
            filepath = os.path.join(source_path, 'chair', file)
            # Note: PIL's resize takes (width, height); this assumes img_h == img_w.
            img = Image.open(filepath).resize((img_h, img_w))
            img = np.array(img).astype(np.float32) / 255.
            img = img.reshape([1, img_h, img_w, 3])

            v_before, v_after = sess.run([before, after],
                                         feed_dict={img_input: img})
            v_before = v_before.squeeze() > threshold
            v_after = v_after.squeeze() > threshold

            voxel2obj('%s/%s_before.obj' % (dest_path, file[:-4]), v_before)
            voxel2obj('%s/%s_after.obj' % (dest_path, file[:-4]), v_after)
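Every example on this page relies on voxel2obj(filename, voxels) to dump an occupancy grid as a mesh; the real implementation lives in the respective project (e.g. a lib/voxel.py module). As a rough, self-contained stand-in with the same call signature, one can emit one unit cube per occupied cell:

import numpy as np

# Corner offsets and quad faces of an axis-aligned unit cube.
_CORNERS = np.array([(x, y, z) for x in (0, 1) for y in (0, 1) for z in (0, 1)], dtype=float)
_QUADS = [(0, 1, 3, 2), (4, 6, 7, 5), (0, 4, 5, 1),
          (2, 3, 7, 6), (0, 2, 6, 4), (1, 5, 7, 3)]

def voxel2obj_sketch(filename, voxels):
    # Illustrative stand-in for voxel2obj, not the original implementation:
    # writes one cube per occupied cell of a 3-D occupancy grid to an OBJ file.
    voxels = np.asarray(voxels).astype(bool)
    with open(filename, 'w') as f:
        n_cubes = 0
        for i, j, k in zip(*voxels.nonzero()):
            for cx, cy, cz in _CORNERS + (i, j, k):
                f.write('v %g %g %g\n' % (cx, cy, cz))
            base = n_cubes * 8
            for quad in _QUADS:
                f.write('f %d %d %d %d\n' % tuple(base + q + 1 for q in quad))
            n_cubes += 1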
Example #4
                        help="Visualize signed density field as voxels.")
    parser.add_argument("--export", "-e", help="Export to a pickle file.")

    args = parser.parse_args()
    filename = args.file
    if filename.endswith(".sdf"):
        sdf = SignedDensityField.from_sdf(filename)
    elif filename.endswith(".pkl"):
        sdf = SignedDensityField.from_pkl(filename)
    elif filename.endswith(".pth"):
        sdf = SignedDensityField.from_pth(filename)

    print(
        "sdf info:",
        sdf.delta,
        sdf.data.shape,
        sdf.origin,
        (sdf.data > 0.01).sum(),
        sdf.delta * np.array(sdf.data.shape),
    )
    if args.v:
        sdf.visualize()
    if args.export:
        sdf.dump(args.export)
    if args.n:
        from voxel import voxel2obj

        voxel = (np.abs(sdf.data) <= 0.01).astype(int)  # np.int was removed in NumPy 1.24
        voxel2obj("test.obj", voxel)
        call(["meshlab", "test.obj"])
Example #5
            # Two-channel one-hot target: channel 1 = occupied, channel 0 = empty
            batch_voxel = np.zeros((32, 32, 32, 1, 2), dtype=int)

            batch_voxel[:, :, :, 0, 1] = vox
            batch_voxel[:, :, :, 0, 0] = vox < 1

            # Run optimization op (backprop) and cost op (to get loss value)
            l, o, _ = sess.run([loss, output, optimizer], feed_dict={G: prev_state, Y: batch_voxel})

            # train_writer.add_summary(summary, i)

            if (i % 2 == 0):
                print("Creating prediction objects.")

                # Softmax over the two class channels. Note: building these ops inside the
                # training loop adds graph nodes on every logging step (a NumPy alternative
                # is sketched after this example).
                exp_x = tf.exp(o)  # 32, 32, 32, 1, 2
                sum_exp_x = tf.reduce_sum(exp_x, axis=4, keepdims=True)  # 32, 32, 32, 1, 1

                pred = exp_x / sum_exp_x

                # pred = 1 / (1 + tf.exp(tf.ones_like(o)-1))

                pred = pred.eval()

                pred_name = "test_pred_" + str(i) + ".obj"
                pred_name2 = "test_pred_" + str(i) + "_XD.obj"

                voxel.voxel2obj(pred_name2, pred[:, :, :, 0, 0] > 0.4)  # 'empty' channel
                voxel.voxel2obj(pred_name, pred[:, :, :, 0, 1] > 0.4)   # 'occupied' channel

            # Display logs per step
            print('Step %i: Loss: %f' % (i, l))
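Since o is already a NumPy array returned by sess.run, the per-voxel softmax above can also be computed outside the TensorFlow graph, avoiding the extra ops created on every logging step. A small sketch (the function name is illustrative):

import numpy as np

def softmax_last_axis(logits):
    # Numerically stable softmax over the last axis of a NumPy array.
    shifted = logits - logits.max(axis=-1, keepdims=True)
    exp = np.exp(shifted)
    return exp / exp.sum(axis=-1, keepdims=True)

# e.g.: pred = softmax_last_axis(o); occupied = pred[:, :, :, 0, 1] > 0.4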
Example #6
            y_test[:, :, :, i-1, 0] = (tf.ones_like(vox) - vox).eval()

            i += 1

        # Run optimization op (backprop) and cost op (to get loss value)
        l, o, _ = sess.run([loss, output, optimizer], feed_dict={G: x_test, Y: y_test})
        currentDT = datetime.datetime.now()
        print(str(currentDT) + " Batch: " + str(no) + " Loss: " + str(l))

        '''
        o = tf.convert_to_tensor(o)
        outputs = tf.contrib.layers.softmax(o)
        pred = tf.argmax(outputs, axis=4).eval().astype(np.float32)
        voxel.voxel2obj("test_pred_" + str(no) + ".obj", pred[:, :, :, 0])
        '''
        # Softmax over the two class channels (reduction_indices is deprecated; use axis)
        exp_x = tf.exp(o)  # 32, 32, 32, 1, 2
        sum_exp_x = tf.reduce_sum(exp_x, axis=4, keepdims=True)  # 32, 32, 32, 1, 1

        pred = exp_x / sum_exp_x

        pred = pred.eval()

        pred_name = "test_pred_" + str(no) + ".obj"
        pred_name2 = "test_pred_" + str(no) + "_XD.obj"

        # Note: these write the raw class probabilities without thresholding
        voxel.voxel2obj(pred_name2, pred[:, :, :, 0, 1])
        voxel.voxel2obj(pred_name, pred[:, :, :, 0, 0])

        x_train = dataset.train_data()
        y_train = dataset.train_labels()
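As a variant of the thresholding used in Example #5, the two-channel probabilities written above can also be collapsed to a single 0/1 occupancy grid with an argmax, mirroring the commented-out block earlier in this example. A minimal NumPy sketch (the function name is illustrative; channel 1 is treated as 'occupied', matching the encoding above):

import numpy as np

def probs_to_occupancy(pred):
    # Collapse (D, H, W, 1, 2) class probabilities to a 0/1 occupancy grid.
    return np.argmax(pred, axis=4)[:, :, :, 0].astype(np.float32)

# e.g.: voxel.voxel2obj("test_pred_argmax_" + str(no) + ".obj", probs_to_occupancy(pred))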