Example No. 1
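This excerpt begins in the middle of a coordinate array, so the imports and the line that opens `points` are cut off. A minimal sketch of the missing prelude, assuming simple_3dviz's standard module layout and a `__main__` guard (neither appears in the excerpt):

import numpy as np

from simple_3dviz import Spherecloud, Lines
from simple_3dviz.behaviours.misc import LightToCamera
from simple_3dviz.behaviours.movements import CameraTrajectory
from simple_3dviz.behaviours.trajectory import Circle
from simple_3dviz.window import show

if __name__ == "__main__":
    # Any coordinate rows before the ones shown below are not part of the excerpt.
    points = np.array([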
        [-0.08633196, -0.02354074, 0.02689737],
        [0.07980453, 0.08336258, -0.03348995],
        [0.00127693, 0.08648632, -0.06349777],
        [-0.00372324, 0.06101485, 0.05373947],
        [0.02931256, 0.13969943, 0.04758224],
        [-0.07085561, 0.1854981, -0.01251608],
        [-0.08501478, 0.08730197, -0.03007746],
        [-0.2747325, 0.11887977, -0.02393699],
        [-0.18173946, 0.19762556, -0.04584916],
        [-0.00075296, 0.44299334, -0.00646337],
        [0.00776821, 0.36928397, -0.01387711],
        [0.00775032, 0.27719203, -0.02083887],
        [-0.07311475, 0.2800464, -0.05349565],
        [0.09813186, 0.2514349, -0.05162171],
        [0.06995263, 0.22113597, 0.02499798],
        [0.2917657, 0.11937696, 0.00506067],
        [0.21196616, 0.17858423, -0.02221516],
    ])

    s = Spherecloud(points, colors=(0.8, 0.2, 0.4), sizes=0.03)
    l = Lines(points, (0.1, 0.1, 0.1), width=0.02)
    show([s, l],
         up_vector=(0, 1.0, 0),
         camera_position=(0, 0, 1.5),
         size=(256, 256),
         behaviours=[
             CameraTrajectory(Circle((0, 0, 0), (0, 0.0, 1.5), (0, 1.0, 0)),
                              speed=0.01),
             LightToCamera()
         ])
Example No. 2
# Imports reconstructed for the truncated header; module paths assume
# simple_3dviz's standard layout.
import numpy as np

from simple_3dviz import Spherecloud
from simple_3dviz.behaviours.misc import LightToCamera
from simple_3dviz.behaviours.movements import CameraTrajectory
from simple_3dviz.behaviours.trajectory import Circle, Repeat, BackAndForth, \
    Lines, QuadraticBezierCurves
from simple_3dviz.window import show

if __name__ == "__main__":
    # Make a random point cloud
    centers = np.random.randn(30, 3)
    colors = np.array([[1., 0, 0, 1], [0, 1, 1,
                                       1]])[np.random.randint(0, 2, size=30)]
    sizes = np.ones(30) * 0.2

    # Move in a circle around the points
    show(Spherecloud(centers, colors, sizes),
         behaviours=[
             CameraTrajectory(Circle([0, 0, 3], [3, 3, 3], [0, 0, 1]),
                              speed=0.001),
             LightToCamera(offset=[-1, -1, 0])
         ])

    # Move in an endless square
    show(Spherecloud(centers, colors, sizes),
         behaviours=[
             CameraTrajectory(Repeat(
                 Lines([-4, -4, 1], [-4, 4, 1], [4, 4, 1], [4, -4, 1],
                       [-4, -4, 1])),
                              speed=0.001),
             LightToCamera(offset=[-1, -1, 0])
         ])

    # Move back and forth on a line
    show(Spherecloud(centers, colors, sizes),
         behaviours=[
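             # The listing is cut off here; a plausible completion, assuming
             # simple_3dviz's BackAndForth trajectory (imported together with
             # the other trajectories above), would be:
             CameraTrajectory(BackAndForth(Lines([-4, -4, 1], [4, 4, 1])),
                              speed=0.001),
             LightToCamera(offset=[-1, -1, 0])
         ])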
Example No. 3
    reso = []
    for i in (1, 2, 3):
        reso.append(int(first_line[i]))
    wx, wy, wz = reso
    longest_axis = max(reso)

    arr_vox = np.fromfile(args.rawfile, dtype=np.uint8).astype(bool)
    arr_vox_3d = np.reshape(arr_vox, (wx, wy, wz), order='F')

    # Bug in simple_3dviz: the voxel spacing is not uniform when the voxel grid is not cubic, so pad the grid to a cube.
    arr_vox_3d = np.concatenate(
        (arr_vox_3d, np.zeros((longest_axis - wx, wy, wz), dtype=bool)),
        axis=0)
    arr_vox_3d = np.concatenate(
        (arr_vox_3d,
         np.zeros((longest_axis, longest_axis - wy, wz), dtype=bool)),
        axis=1)
    arr_vox_3d = np.concatenate(
        (arr_vox_3d,
         np.zeros(
             (longest_axis, longest_axis, longest_axis - wz), dtype=bool)),
        axis=2)

    half_edge = ((1 / longest_axis) * 0.5, (1 / longest_axis) * 0.5,
                 (1 / longest_axis) * 0.5)

    show(Mesh.from_voxel_grid(voxels=arr_vox_3d,
                              colors=(0.75, 0.75, 0.75),
                              sizes=half_edge),
         behaviours=[LightToCamera()],
         size=(1024, 1024))
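The three concatenations above pad the voxel grid with trailing zeros to make it a cube, one axis at a time. An equivalent, more compact sketch of the same padding using np.pad:

# Pad every axis with trailing False voxels up to the longest axis, in one call.
pad_width = [(0, longest_axis - d) for d in arr_vox_3d.shape]
arr_vox_3d = np.pad(arr_vox_3d, pad_width, mode="constant", constant_values=False)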
Example No. 4
def main(argv):
    parser = argparse.ArgumentParser(
        description="Do the forward pass and visualize the recovered parts")
    parser.add_argument(
        "config_file",
        help="Path to the file that contains the experiment configuration")
    parser.add_argument("--output_directory",
                        default="../demo/output/",
                        help="Save the output files in that directory")
    parser.add_argument(
        "--weight_file",
        default=None,
        help=("The path to a previously trained model to continue"
              " the training from"))
    parser.add_argument("--prediction_file",
                        default=None,
                        help="The path to the predicted primitives")
    parser.add_argument("--background",
                        type=lambda x: list(map(float, x.split(","))),
                        default="1,1,1,1",
                        help="Set the background of the scene")
    parser.add_argument("--up_vector",
                        type=lambda x: tuple(map(float, x.split(","))),
                        default="0,0,1",
                        help="Up vector of the scene")
    parser.add_argument("--camera_target",
                        type=lambda x: tuple(map(float, x.split(","))),
                        default="0,0,0",
                        help="Set the target for the camera")
    parser.add_argument("--camera_position",
                        type=lambda x: tuple(map(float, x.split(","))),
                        default="-2.0,-2.0,-2.0",
                        help="Camera position in the scene")
    parser.add_argument("--window_size",
                        type=lambda x: tuple(map(int, x.split(","))),
                        default="512,512",
                        help="Define the size of the scene and the window")
    parser.add_argument("--with_rotating_camera",
                        action="store_true",
                        help="Use a camera rotating around the object")
    parser.add_argument("--mesh",
                        action="store_true",
                        help="Visualize the target mesh")
    parser.add_argument("--n_vertices",
                        type=int,
                        default=10000,
                        help="How many vertices to use per part")

    add_dataset_parameters(parser)
    args = parser.parse_args(argv)

    if torch.cuda.is_available():
        device = torch.device("cuda:0")
    else:
        device = torch.device("cpu")
    print("Running code on", device)

    # Check if output directory exists and if it doesn't create it
    if not os.path.exists(args.output_directory):
        os.makedirs(args.output_directory)

    config = load_config(args.config_file)
    # Extract the number of primitives
    n_primitives = config["network"]["n_primitives"]

    # Dictionary to keep the predictions used for the evaluation
    predictions = {}

    if args.prediction_file is None:
        # Create a dataset instance to generate the input samples
        dataset_directory = config["data"]["dataset_directory"]
        dataset_type = config["data"]["dataset_type"]
        train_test_splits_file = config["data"]["splits_file"]
        dataset = dataset_factory(
            config["data"]["dataset_factory"],
            (ModelCollectionBuilder(config).with_dataset(dataset_type).
             filter_category_tags(args.category_tags).filter_tags(
                 args.model_tags).random_subset(
                     args.random_subset).build(dataset_directory)),
        )
        assert len(dataset) == 1

        # Build the network architecture to be used for training
        network, _, _ = build_network(config, args.weight_file, device=device)
        network.eval()

        # Create the prediction input
        with torch.no_grad():
            for sample in dataset:
                sample = [s[None] for s in sample]  # make a batch dimension
                X = sample[0].to(device)
                targets = [yi.to(device) for yi in sample[1:]]
                F = network.compute_features(X)
                phi_volume, _ = network.implicit_surface(F, targets[0])
                y_pred, faces = network.points_on_primitives(
                    F,
                    args.n_vertices,
                    random=False,
                    mesh=True,
                    union_surface=False)
            predictions["phi_volume"] = phi_volume
            predictions["y_prim"] = y_pred
    else:
        preds = torch.load(args.prediction_file, map_location="cpu")
        y_pred = preds[4]
        faces = preds[5]
        targets = preds[0]
        predictions["phi_volume"] = preds[1]
        predictions["y_prim"] = y_pred

    # Get the renderables from the deformed vertices and faces
    vertices = y_pred.detach()
    parts = range(n_primitives)
    renderables = [
        Mesh.from_faces(vertices[0, :, i], faces, colors=get_colors(i))
        for i in parts
    ]
    behaviours = [
        SceneInit(
            scene_init(
                load_ground_truth(dataset) if args.mesh else None,
                args.up_vector, args.camera_position, args.camera_target,
                args.background)),
        LightToCamera(),
    ]
    if args.with_rotating_camera:
        behaviours += [
            CameraTrajectory(Circle(args.camera_target, args.camera_position,
                                    args.up_vector),
                             speed=1 / 180)
        ]
    # Show the scene regardless of whether the rotating camera was requested.
    show(renderables,
         size=args.window_size,
         behaviours=behaviours + [SnapshotOnKey()])

    print("Saving renderables to file")
    for i in range(n_primitives):
        m = trimesh.Trimesh(vertices[0, :, i].detach(), faces)
        m.export(os.path.join(args.output_directory,
                              "part_{:03d}.obj".format(i)),
                 file_type="obj")
Example No. 5
def main(argv):
    parser = argparse.ArgumentParser(
        description="Do the forward pass and estimate a set of primitives")
    parser.add_argument(
        "config_file",
        help="Path to the file that contains the experiment configuration")
    parser.add_argument("output_directory",
                        help="Save the output files in that directory")
    parser.add_argument(
        "--weight_file",
        default=None,
        help="The path to the previously trainined model to be used")
    parser.add_argument("--run_on_gpu", action="store_true", help="Use GPU")
    parser.add_argument(
        "--qos_threshold",
        default=1.0,
        type=float,
        help="Split primitives if predicted qos less than this threshold")
    parser.add_argument(
        "--vol_threshold",
        default=0.0,
        type=float,
        help="Discard primitives with volume smaller than this threshold")
    parser.add_argument(
        "--prob_threshold",
        default=0.0,
        type=float,
        help="Discard primitives with probability smaller than this threshold")
    parser.add_argument("--with_post_processing",
                        action="store_true",
                        help="Remove overlapping primitives")
    parser.add_argument("--mesh",
                        type=load_ground_truth,
                        help="File of ground truth mesh")
    parser.add_argument("--save_frames",
                        help="Path to save the visualization frames to")
    parser.add_argument("--without_screen",
                        action="store_true",
                        help="Perform no screen rendering")
    parser.add_argument("--n_frames",
                        type=int,
                        default=200,
                        help="Number of frames to be rendered")
    parser.add_argument("--background",
                        type=lambda x: list(map(float, x.split(","))),
                        default="0,0,0,1",
                        help="Set the background of the scene")
    parser.add_argument("--up_vector",
                        type=lambda x: tuple(map(float, x.split(","))),
                        default="0,0,1",
                        help="Up vector of the scene")
    parser.add_argument("--camera_target",
                        type=lambda x: tuple(map(float, x.split(","))),
                        default="0,0,0",
                        help="Set the target for the camera")
    parser.add_argument("--camera_position",
                        type=lambda x: tuple(map(float, x.split(","))),
                        default="-2.0,-2.0,-2.0",
                        help="Camer position in the scene")
    parser.add_argument("--max_depth",
                        type=int,
                        default=3,
                        help="Maximum depth to visualize")
    parser.add_argument("--window_size",
                        type=lambda x: tuple(map(int, x.split(","))),
                        default="512,512",
                        help="Define the size of the scene and the window")
    parser.add_argument(
        "--from_fit",
        action="store_true",
        help="Visulize everything based on primitive_params.fit")
    parser.add_argument(
        "--from_flat_partition",
        action="store_true",
        help=("Visulize everything based on primitive_params.space_partition"
              " with a single depth"))
    parser.add_argument("--group_color",
                        action="store_true",
                        help="Color the active prims based on the group")
    parser.add_argument("--with_rotating_camera",
                        action="store_true",
                        help="Use a camera rotating around the object")
    parser.add_argument(
        "--visualize_sharpness",
        action="store_true",
        help="When set visualize the sharpness together with the prediction")

    add_dataset_parameters(parser)
    args = parser.parse_args(argv)

    # Check if output directory exists and if it doesn't create it
    if not os.path.exists(args.output_directory):
        os.makedirs(args.output_directory)

    if args.run_on_gpu and torch.cuda.is_available():
        device = torch.device("cuda:0")
    else:
        device = torch.device("cpu")
    print("Running code on", device)

    config = load_config(args.config_file)
    dataloader, network = build_dataloader_and_network_from_args(args,
                                                                 config,
                                                                 device=device)

    for sample in dataloader:
        # Do the forward pass and estimate the primitive parameters
        X = sample[0].to(device)
        y_hat = network(X)
        #import matplotlib.pyplot as plt
        #import seaborn as sns
        #import numpy as np
        #f = plt.figure(figsize=(8, 6))
        #sns.barplot(
        #    np.arange(y_hat.n_primitives),
        #    y_hat.sharpness_r.squeeze(0).detach().numpy()[:, 0]
        #)
        #plt.title("Epoch {}".format(args.weight_file.split("/")[-1].split("_")[-1]))
        #plt.ylim([0, 10.5])
        #plt.ylabel("Sharpness")
        #plt.xlabel("Primitive id")
        #plt.savefig("/tmp/sharpness_{:03d}.png".format(
        #    int(args.weight_file.split("/")[-1].split("_")[-1]))
        #)
        #plt.close()

        renderables, active_prims = get_renderables(y_hat, args)
        with open(os.path.join(args.output_directory, "renderables.pkl"),
                  "wb") as f:
            pickle.dump(renderables, f)
        print(active_prims)

        behaviours = [
            SceneInit(
                scene_init(args.mesh, args.up_vector, args.camera_position,
                           args.camera_target, args.background)),
            LightToCamera(),
        ]
        if args.with_rotating_camera:
            behaviours += [
                CameraTrajectory(Circle(args.camera_target,
                                        args.camera_position, args.up_vector),
                                 speed=1 / 180)
            ]
        if args.without_screen:
            behaviours += [
                SaveFrames(args.save_frames, 2),
                SaveGif("/tmp/out.gif", 2)
            ]
            render(renderables,
                   size=args.window_size,
                   behaviours=behaviours,
                   n_frames=args.n_frames)
        else:
            behaviours += [
                SnapshotOnKey(path=args.save_frames, keys={"<ctrl>", "S"})
            ]
            show(renderables, size=args.window_size, behaviours=behaviours)

        # Based on the active primitives report the metrics
        active_primitive_params = \
            get_primitive_parameters_from_indices(y_hat, active_prims, args)
        report_metrics(active_primitive_params, config,
                       config["data"]["dataset_type"], args.model_tags,
                       config["data"]["dataset_directory"])
        if args.with_post_processing:
            indices = get_non_overlapping_primitives(y_hat, active_prims)
        else:
            indices = None
        for i, m in enumerate(sq_meshes(y_hat, indices)):
            m.export(os.path.join(args.output_directory,
                                  "predictions-{}.ply").format(i),
                     file_type="ply")

        if y_hat.space_partition is not None:
            torch.save([y_hat.space_partition, y_hat.fit],
                       os.path.join(args.output_directory,
                                    "space_partition.pkl"))
        if args.visualize_sharpness:
            visualize_sharpness(
                y_hat.sharpness_r.squeeze(0).detach().numpy()[:, 0],
                int(args.weight_file.split("/")[-1].split("_")[-1]))
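The `visualize_sharpness` helper is not shown in this excerpt. A sketch consistent with the commented-out plotting code in the loop above (a bar plot of per-primitive sharpness saved under the epoch number); the function body and output path are illustrative assumptions:

import matplotlib.pyplot as plt
import numpy as np


def visualize_sharpness(sharpness, epoch):
    # Bar plot of the per-primitive sharpness values, mirroring the
    # commented-out seaborn snippet in the main loop.
    plt.figure(figsize=(8, 6))
    plt.bar(np.arange(len(sharpness)), sharpness)
    plt.title("Epoch {}".format(epoch))
    plt.ylim([0, 10.5])
    plt.ylabel("Sharpness")
    plt.xlabel("Primitive id")
    plt.savefig("/tmp/sharpness_{:03d}.png".format(epoch))
    plt.close()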