Code example #1
File: main.py  Project: tim-vu/bf3_nav_mesh
def visualize_layers(reachable_layers):

    layers = {}

    for (x, y, z), label in label_matrix.voxels.items():  # label_matrix is defined elsewhere in main.py and not shown here

        pos = Vec3(x, y, z)

        if label == 0 or label not in reachable_layers:
            continue

        if label in layers:
            layers[label][pos] = True
            continue

        layers[label] = np.zeros(label_matrix.shape, dtype=bool)
        layers[label][pos] = True

    color_gen = randomcolor.RandomColor()
    colors = color_gen.generate(count=len(layers), format_='rgb')

    for i in range(len(colors)):
        values = [int(e) for e in re.findall(r'\d+', colors[i])]
        colors[i] = (values[0] / 255, values[1] / 255, values[2] / 255)

    meshes = []

    for i, label in enumerate(layers):
        color = colors[i]
        meshes.append(Mesh.from_voxel_grid(layers[label], colors=color))

    show(meshes)
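
The function above depends on a module-level label_matrix and on the randomcolor package, so it cannot be run on its own. A minimal self-contained sketch of the same Mesh.from_voxel_grid / show flow (the random occupancy grid below is purely illustrative) would be:

import numpy as np
from simple_3dviz import Mesh
from simple_3dviz.window import show

# Illustrative stand-in for a single reachable layer: a sparse random occupancy grid
layer = np.random.rand(16, 16, 16) > 0.8
show(Mesh.from_voxel_grid(layer, colors=(0.2, 0.6, 0.9)))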
Code example #2
File: render_mesh.py  Project: ywcmaike/simple-3dviz
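The snippet below is taken from the middle of render_mesh.py and omits the file's imports; judging from the names it uses, it presumably relies on something like the following (paths taken from the other examples on this page):

import numpy as np
import matplotlib.pyplot as plt

from simple_3dviz import Mesh
from simple_3dviz.behaviours.keyboard import SnapshotOnKey
from simple_3dviz.window import show
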
    dphi, dtheta = np.pi / 250.0, np.pi / 250.0
    [phi, theta] = np.mgrid[0:np.pi + dphi * 1.5:dphi,
                            0:2 * np.pi + dtheta * 1.5:dtheta]
    # m0..m7 are the frequencies and exponents that shape the radial
    # function r(phi, theta) below
    m0 = 4
    m1 = 3
    m2 = 2
    m3 = 3
    m4 = 6
    m5 = 2
    m6 = 6
    m7 = 4
    r = np.sin(m0 * phi)**m1 + np.cos(m2 * phi)**m3
    r = r + np.sin(m4 * theta)**m5 + np.cos(m6 * theta)**m7
    x = r * np.sin(phi) * np.cos(theta)
    y = r * np.cos(phi)
    z = r * np.sin(phi) * np.sin(theta)
    m = Mesh.from_xyz(x, y, z)
    show(m,
         camera_position=(0, 2.32, 2.47),
         up_vector=(-0.7265758, -0.35081482, 0.12523057),
         behaviours=[SnapshotOnKey()],
         size=(256, 256))

    # It is also possible to load a mesh with a colormap
    m = Mesh.from_xyz(x, y, z, colormap=plt.cm.jet)
    show(m,
         camera_position=(0, 2.32, 2.47),
         up_vector=(-0.7265758, -0.35081482, 0.12523057),
         behaviours=[SnapshotOnKey()],
         size=(256, 256))
Code example #3
File: skeleton.py  Project: ywcmaike/simple-3dviz
    points = np.array([
        [-0.08633196, -0.02354074, 0.02689737],
        [0.07980453, 0.08336258, -0.03348995],
        [0.00127693, 0.08648632, -0.06349777],
        [-0.00372324, 0.06101485, 0.05373947],
        [0.02931256, 0.13969943, 0.04758224],
        [-0.07085561, 0.1854981, -0.01251608],
        [-0.08501478, 0.08730197, -0.03007746],
        [-0.2747325, 0.11887977, -0.02393699],
        [-0.18173946, 0.19762556, -0.04584916],
        [-0.00075296, 0.44299334, -0.00646337],
        [0.00776821, 0.36928397, -0.01387711],
        [0.00775032, 0.27719203, -0.02083887],
        [-0.07311475, 0.2800464, -0.05349565],
        [0.09813186, 0.2514349, -0.05162171],
        [0.06995263, 0.22113597, 0.02499798],
        [0.2917657, 0.11937696, 0.00506067],
        [0.21196616, 0.17858423, -0.02221516],
    ])

    s = Spherecloud(points, colors=(0.8, 0.2, 0.4), sizes=0.03)
    l = Lines(points, (0.1, 0.1, 0.1), width=0.02)
    show([s, l],
         up_vector=(0, 1.0, 0),
         camera_position=(0, 0, 1.5),
         size=(256, 256),
         behaviours=[
             CameraTrajectory(Circle((0, 0, 0), (0, 0.0, 1.5), (0, 1.0, 0)),
                              speed=0.01),
             LightToCamera()
         ])
Code example #4
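This snippet starts mid-function: the boolean voxel masks cube1 and cube2 are built a few lines earlier in the source. A plausible reconstruction of that setup, following the familiar three-objects voxel demo the variable names suggest (the 8x8x8 grid size and the thresholds are assumptions), is:

import numpy as np

# Assumed setup: two cubes on an 8x8x8 index grid (the exact values are not shown in the listing)
x, y, z = np.indices((8, 8, 8))
cube1 = (x < 3) & (y < 3) & (z < 3)
cube2 = (x >= 5) & (y >= 5) & (z >= 5)
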
    link = abs(x - y) + abs(y - z) + abs(z - x) <= 2
    
    # Combine the objects into a single boolean array
    voxels = cube1 | cube2 | link
    
    # Set the colors of each object
    colors = np.empty(voxels.shape + (3,), dtype=np.float32)
    colors[link] = (1, 0, 0)
    colors[cube1] = (0, 0, 1)
    colors[cube2] = (0, 1, 0)

    show(
        Mesh.from_voxel_grid(voxels=voxels, colors=colors),
        light=(-1, -1, 1),
        behaviours=[
            CameraTrajectory(
                Circle(center=(0, 0, 0), point=(2, -1, 0), normal=(0, 0, -1)),
                speed=0.004)
        ]
    )

    # Render scene to file
    # render(
    #     Mesh.from_voxel_grid(voxels=voxels, colors=colors),
    #     n_frames=256,
    #     light=(-1, -1, 1),
    #     behaviours=[
    #         CameraTrajectory(
    #             Circle(center=(0, 0, 0), point=(2, -1, 0), normal=(0, 0, -1)),
    #             speed=0.004
    #         ),
Code example #5
File: movements.py  Project: ywcmaike/simple-3dviz
import numpy as np

from simple_3dviz.renderables import Spherecloud  # used below but missing from the listing
from simple_3dviz.behaviours.misc import LightToCamera  # used below but missing from the listing; import path assumed
from simple_3dviz.behaviours.movements import CameraTrajectory, LightTrajectory
from simple_3dviz.behaviours.trajectory import Circle, Repeat, BackAndForth, \
    Lines, QuadraticBezierCurves
from simple_3dviz.window import show

if __name__ == "__main__":
    # Make a random point cloud
    centers = np.random.randn(30, 3)
    colors = np.array([[1., 0, 0, 1],
                       [0, 1, 1, 1]])[np.random.randint(0, 2, size=30)]
    sizes = np.ones(30) * 0.2

    # Move in a circle around the points
    show(Spherecloud(centers, colors, sizes),
         behaviours=[
             CameraTrajectory(Circle([0, 0, 3], [3, 3, 3], [0, 0, 1]),
                              speed=0.001),
             LightToCamera(offset=[-1, -1, 0])
         ])

    # Move in an endless square
    show(Spherecloud(centers, colors, sizes),
         behaviours=[
             CameraTrajectory(Repeat(
                 Lines([-4, -4, 1], [-4, 4, 1], [4, 4, 1], [4, -4, 1],
                       [-4, -4, 1])),
                              speed=0.001),
             LightToCamera(offset=[-1, -1, 0])
         ])

    # Move back and forth on a line
    show(Spherecloud(centers, colors, sizes),
Code example #6
def sign_unsigned(fname='./data/elephant.pwn',
                  K=5,
                  ndisc=50,
                  debug_viz=True,
                  R=15,
                  cache_sign=False,
                  use_sign_cache=False):
    points, tetvertices, trivertices, is_in_band, epsilon, dist = section1(
        fname, K, ndisc)
    print('Chosen Epsilon:', epsilon.item())

    # coarse sign estimate
    print('sign estimate...')
    v2v = get_v2v(tetvertices.numpy())
    outside_ixes = torch.arange(len(dist))[dist >= epsilon]
    is_in_band = dist < epsilon

    if use_sign_cache:
        print('using sign cache...')
        with open('sign_cache.json', 'r') as reader:
            guesses = json.load(reader, parse_int=int)
            guesses = {int(k): v for k, v in guesses.items()}
    else:
        guesses = {}
        for ix in tqdm(outside_ixes.numpy()):
            guesses[ix] = estimate_sign(ix.item(), points.numpy(), v2v,
                                        is_in_band.numpy(), R)
    if cache_sign:
        print('caching sign...')
        guesses = {int(k): [int(v) for v in vec] for k, vec in guesses.items()}
        with open('sign_cache.json', 'w') as writer:
            json.dump(guesses, writer)

    print(guesses)

    # cross-sections of the sign plot
    alldata = []
    for v, changes in guesses.items():
        coords = points[v]
        vals = [val % 2 for val in changes if val > 0]
        if len(vals) > 0:
            sign = 1 if np.mean(vals) > 0.5 else 0
            alldata.append(
                [coords[0], coords[1], coords[2],
                 np.mean(vals), sign])
    alldata = np.array(alldata)

    spec = np.sort(np.unique(alldata[:, 2]))
    for spei, spe in enumerate(spec):
        kept = alldata[:, 2] == spe

        fig = plt.figure()
        gs = mpl.gridspec.GridSpec(1, 2)

        ax = plt.subplot(gs[0, 0])
        plt.scatter(alldata[kept, 0],
                    alldata[kept, 1],
                    c=alldata[kept, 3],
                    vmin=0,
                    vmax=1,
                    cmap='winter')
        ax.set_aspect('equal')
        plt.xlim((np.min(alldata[:, 0]), np.max(alldata[:, 0])))
        plt.ylim((np.min(alldata[:, 1]), np.max(alldata[:, 1])))

        ax = plt.subplot(gs[0, 1])
        plt.scatter(alldata[kept, 0],
                    alldata[kept, 1],
                    c=alldata[kept, 4],
                    vmin=0,
                    vmax=1,
                    cmap='winter')
        ax.set_aspect('equal')
        plt.xlim((np.min(alldata[:, 0]), np.max(alldata[:, 0])))
        plt.ylim((np.min(alldata[:, 1]), np.max(alldata[:, 1])))

        imname = f'out{spei:04}.jpg'
        print('saving', imname)
        plt.savefig(imname)
        plt.close(fig)

    # visualize coarse mesh
    kept = ((dist[trivertices[:, 0]] < epsilon) &
            (dist[trivertices[:, 1]] < epsilon) &
            (dist[trivertices[:, 2]] < epsilon))
    m = Mesh.from_faces(points.numpy(),
                        trivertices.numpy()[kept],
                        colors=np.ones((len(points), 3)) * [1.0, 0.0, 0.0])
    show(m)
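
The sign decision in the loop above is a majority vote over the parities stored in guesses; a tiny illustration of the rule with made-up values:

import numpy as np

changes = [3, 2, 5, 0]                    # hypothetical crossing counts for one vertex; zero entries are skipped
vals = [c % 2 for c in changes if c > 0]  # parities -> [1, 0, 1]
sign = 1 if np.mean(vals) > 0.5 else 0    # majority vote -> 1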
Code example #7
def main(argv):
    parser = argparse.ArgumentParser(
        description="Do the forward pass and visualize the recovered parts")
    parser.add_argument(
        "config_file",
        help="Path to the file that contains the experiment configuration")
    parser.add_argument("--output_directory",
                        default="../demo/output/",
                        help="Save the output files in that directory")
    parser.add_argument(
        "--weight_file",
        default=None,
        help=("The path to a previously trained model to continue"
              " the training from"))
    parser.add_argument("--prediction_file",
                        default=None,
                        help="The path to the predicted primitives")
    parser.add_argument("--background",
                        type=lambda x: list(map(float, x.split(","))),
                        default="1,1,1,1",
                        help="Set the background of the scene")
    parser.add_argument("--up_vector",
                        type=lambda x: tuple(map(float, x.split(","))),
                        default="0,0,1",
                        help="Up vector of the scene")
    parser.add_argument("--camera_target",
                        type=lambda x: tuple(map(float, x.split(","))),
                        default="0,0,0",
                        help="Set the target for the camera")
    parser.add_argument("--camera_position",
                        type=lambda x: tuple(map(float, x.split(","))),
                        default="-2.0,-2.0,-2.0",
                        help="Camera position in the scene")
    parser.add_argument("--window_size",
                        type=lambda x: tuple(map(int, x.split(","))),
                        default="512,512",
                        help="Define the size of the scene and the window")
    parser.add_argument("--with_rotating_camera",
                        action="store_true",
                        help="Use a camera rotating around the object")
    parser.add_argument("--mesh",
                        action="store_true",
                        help="Visualize the target mesh")
    parser.add_argument("--n_vertices",
                        type=int,
                        default=10000,
                        help="How many vertices to use per part")

    add_dataset_parameters(parser)
    args = parser.parse_args(argv)

    if torch.cuda.is_available():
        device = torch.device("cuda:0")
    else:
        device = torch.device("cpu")
    print("Running code on", device)

    # Check if output directory exists and if it doesn't create it
    if not os.path.exists(args.output_directory):
        os.makedirs(args.output_directory)

    config = load_config(args.config_file)
    # Extract the number of primitives
    n_primitives = config["network"]["n_primitives"]

    # Dictionary to keep the predictions used for the evaluation
    predictions = {}

    if args.prediction_file is None:
        # Create a dataset instance to generate the input samples
        dataset_directory = config["data"]["dataset_directory"]
        dataset_type = config["data"]["dataset_type"]
        train_test_splits_file = config["data"]["splits_file"]
        dataset = dataset_factory(
            config["data"]["dataset_factory"],
            (ModelCollectionBuilder(config).with_dataset(dataset_type).
             filter_category_tags(args.category_tags).filter_tags(
                 args.model_tags).random_subset(
                     args.random_subset).build(dataset_directory)),
        )
        assert len(dataset) == 1

        # Build the network architecture to be used for training
        network, _, _ = build_network(config, args.weight_file, device=device)
        network.eval()

        # Create the prediction input
        with torch.no_grad():
            for sample in dataset:
                sample = [s[None] for s in sample]  # make a batch dimension
                X = sample[0].to(device)
                targets = [yi.to(device) for yi in sample[1:]]
                F = network.compute_features(X)
                phi_volume, _ = network.implicit_surface(F, targets[0])
                y_pred, faces = network.points_on_primitives(
                    F,
                    args.n_vertices,
                    random=False,
                    mesh=True,
                    union_surface=False)
            predictions["phi_volume"] = phi_volume
            predictions["y_prim"] = y_pred
    else:
        preds = torch.load(args.prediction_file, map_location="cpu")
        y_pred = preds[4]
        faces = preds[5]
        targets = preds[0]
        predictions["phi_volume"] = preds[1]
        predictions["y_prim"] = y_pred

    # Get the renderables from the deformed vertices and faces
    vertices = y_pred.detach()
    parts = range(n_primitives)
    renderables = [
        Mesh.from_faces(vertices[0, :, i], faces, colors=get_colors(i))
        for i in parts
    ]
    behaviours = [
        SceneInit(
            scene_init(
                load_ground_truth(dataset) if args.mesh else None,
                args.up_vector, args.camera_position, args.camera_target,
                args.background)),
        LightToCamera(),
    ]
    if args.with_rotating_camera:
        behaviours += [
            CameraTrajectory(Circle(args.camera_target, args.camera_position,
                                    args.up_vector),
                             speed=1 / 180)
        ]
    # Show the assembled scene; the behaviours list is built above whether or
    # not the rotating camera was requested.
    show(renderables,
         size=args.window_size,
         behaviours=behaviours + [SnapshotOnKey()])

    print("Saving renderables to file")
    for i in range(n_primitives):
        m = trimesh.Trimesh(vertices[0, :, i].detach(), faces)
        m.export(os.path.join(args.output_directory,
                              "part_{:03d}.obj".format(i)),
                 file_type="obj")
Code example #8
    reso = []
    for i in (1, 2, 3):
        reso.append(int(first_line[i]))
    wx, wy, wz = reso
    longest_axis = max(reso)

    arr_vox = np.fromfile(args.rawfile, dtype=np.uint8).astype(bool)  # np.bool was removed from NumPy; the builtin bool is equivalent
    arr_vox_3d = np.reshape(arr_vox, (wx, wy, wz), order='F')

    # Workaround for a bug in simple_3dviz: the voxel spacing is not uniform
    # unless the grid is a cube, so pad the array with empty voxels up to
    # longest_axis along every axis.
    arr_vox_3d = np.concatenate(
        (arr_vox_3d, np.zeros((longest_axis - wx, wy, wz), dtype=bool)),
        axis=0)
    arr_vox_3d = np.concatenate(
        (arr_vox_3d,
         np.zeros((longest_axis, longest_axis - wy, wz), dtype=bool)),
        axis=1)
    arr_vox_3d = np.concatenate(
        (arr_vox_3d,
         np.zeros(
             (longest_axis, longest_axis, longest_axis - wz), dtype=bool)),
        axis=2)

    half_edge = ((1 / longest_axis) * 0.5, (1 / longest_axis) * 0.5,
                 (1 / longest_axis) * 0.5)

    show(Mesh.from_voxel_grid(voxels=arr_vox_3d,
                              colors=(0.75, 0.75, 0.75),
                              sizes=half_edge),
         behaviours=[LightToCamera()],
         size=(1024, 1024))
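
The three concatenate calls above pad the voxel array up to a cube one axis at a time; np.pad expresses the same padding in a single call (a sketch using the same variable names):

    arr_vox_3d = np.pad(
        arr_vox_3d,
        ((0, longest_axis - wx), (0, longest_axis - wy), (0, longest_axis - wz)),
        mode='constant', constant_values=False)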
Code example #9
    def test_line(self):
        points = np.array([[-0.5, 0.5, -0.5], [0.5, 0.5, 0.5]])
        colors = np.array([[0., 0., 0., 1.], [0., 0., 0., 1.]])

        show(Lines(points, colors, width=0.1))
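
Outside its test class, the same call runs standalone; a minimal sketch with the imports the test file presumably has:

import numpy as np
from simple_3dviz import Lines
from simple_3dviz.window import show

points = np.array([[-0.5, 0.5, -0.5], [0.5, 0.5, 0.5]])
colors = np.array([[0., 0., 0., 1.], [0., 0., 0., 1.]])
show(Lines(points, colors, width=0.1))  # a single black segment between the two points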
Code example #10
def main(argv):
    parser = argparse.ArgumentParser(
        description="Do the forward pass and estimate a set of primitives")
    parser.add_argument(
        "config_file",
        help="Path to the file that contains the experiment configuration")
    parser.add_argument("output_directory",
                        help="Save the output files in that directory")
    parser.add_argument(
        "--weight_file",
        default=None,
        help="The path to the previously trainined model to be used")
    parser.add_argument("--run_on_gpu", action="store_true", help="Use GPU")
    parser.add_argument(
        "--qos_threshold",
        default=1.0,
        type=float,
        help="Split primitives if predicted qos less than this threshold")
    parser.add_argument(
        "--vol_threshold",
        default=0.0,
        type=float,
        help="Discard primitives with volume smaller than this threshold")
    parser.add_argument(
        "--prob_threshold",
        default=0.0,
        type=float,
        help="Discard primitives with probability smaller than this threshold")
    parser.add_argument("--with_post_processing",
                        action="store_true",
                        help="Remove overlapping primitives")
    parser.add_argument("--mesh",
                        type=load_ground_truth,
                        help="File of ground truth mesh")
    parser.add_argument("--save_frames",
                        help="Path to save the visualization frames to")
    parser.add_argument("--without_screen",
                        action="store_true",
                        help="Perform no screen rendering")
    parser.add_argument("--n_frames",
                        type=int,
                        default=200,
                        help="Number of frames to be rendered")
    parser.add_argument("--background",
                        type=lambda x: list(map(float, x.split(","))),
                        default="0,0,0,1",
                        help="Set the background of the scene")
    parser.add_argument("--up_vector",
                        type=lambda x: tuple(map(float, x.split(","))),
                        default="0,0,1",
                        help="Up vector of the scene")
    parser.add_argument("--camera_target",
                        type=lambda x: tuple(map(float, x.split(","))),
                        default="0,0,0",
                        help="Set the target for the camera")
    parser.add_argument("--camera_position",
                        type=lambda x: tuple(map(float, x.split(","))),
                        default="-2.0,-2.0,-2.0",
                        help="Camer position in the scene")
    parser.add_argument("--max_depth",
                        type=int,
                        default=3,
                        help="Maximum depth to visualize")
    parser.add_argument("--window_size",
                        type=lambda x: tuple(map(int, x.split(","))),
                        default="512,512",
                        help="Define the size of the scene and the window")
    parser.add_argument(
        "--from_fit",
        action="store_true",
        help="Visulize everything based on primitive_params.fit")
    parser.add_argument(
        "--from_flat_partition",
        action="store_true",
        help=("Visulize everything based on primitive_params.space_partition"
              " with a single depth"))
    parser.add_argument("--group_color",
                        action="store_true",
                        help="Color the active prims based on the group")
    parser.add_argument("--with_rotating_camera",
                        action="store_true",
                        help="Use a camera rotating around the object")
    parser.add_argument(
        "--visualize_sharpness",
        action="store_true",
        help="When set visualize the sharpness together with the prediction")

    add_dataset_parameters(parser)
    args = parser.parse_args(argv)

    # Check if output directory exists and if it doesn't create it
    if not os.path.exists(args.output_directory):
        os.makedirs(args.output_directory)

    if args.run_on_gpu and torch.cuda.is_available():
        device = torch.device("cuda:0")
    else:
        device = torch.device("cpu")
    print("Running code on", device)

    config = load_config(args.config_file)
    dataloader, network = build_dataloader_and_network_from_args(args,
                                                                 config,
                                                                 device=device)

    for sample in dataloader:
        # Do the forward pass and estimate the primitive parameters
        X = sample[0].to(device)
        y_hat = network(X)
        #import matplotlib.pyplot as plt
        #import seaborn as sns
        #import numpy as np
        #f = plt.figure(figsize=(8, 6))
        #sns.barplot(
        #    np.arange(y_hat.n_primitives),
        #    y_hat.sharpness_r.squeeze(0).detach().numpy()[:, 0]
        #)
        #plt.title("Epoch {}".format(args.weight_file.split("/")[-1].split("_")[-1]))
        #plt.ylim([0, 10.5])
        #plt.ylabel("Sharpness")
        #plt.xlabel("Primitive id")
        #plt.savefig("/tmp/sharpness_{:03d}.png".format(
        #    int(args.weight_file.split("/")[-1].split("_")[-1]))
        #)
        #plt.close()

        renderables, active_prims = get_renderables(y_hat, args)
        with open(os.path.join(args.output_directory, "renderables.pkl"),
                  "wb") as f:
            pickle.dump(renderables, f)
        print(active_prims)

        behaviours = [
            SceneInit(
                scene_init(args.mesh, args.up_vector, args.camera_position,
                           args.camera_target, args.background)),
            LightToCamera(),
        ]
        if args.with_rotating_camera:
            behaviours += [
                CameraTrajectory(Circle(args.camera_target,
                                        args.camera_position, args.up_vector),
                                 speed=1 / 180)
            ]
        if args.without_screen:
            behaviours += [
                SaveFrames(args.save_frames, 2),
                SaveGif("/tmp/out.gif", 2)
            ]
            render(renderables,
                   size=args.window_size,
                   behaviours=behaviours,
                   n_frames=args.n_frames)
        else:
            behaviours += [
                SnapshotOnKey(path=args.save_frames, keys={"<ctrl>", "S"})
            ]
            show(renderables, size=args.window_size, behaviours=behaviours)

        # Based on the active primitives report the metrics
        active_primitive_params = \
            get_primitive_parameters_from_indices(y_hat, active_prims, args)
        report_metrics(active_primitive_params, config,
                       config["data"]["dataset_type"], args.model_tags,
                       config["data"]["dataset_directory"])
        if args.with_post_processing:
            indices = get_non_overlapping_primitives(y_hat, active_prims)
        else:
            indices = None
        for i, m in enumerate(sq_meshes(y_hat, indices)):
            m.export(os.path.join(args.output_directory,
                                  "predictions-{}.ply").format(i),
                     file_type="ply")

        if y_hat.space_partition is not None:
            torch.save([y_hat.space_partition, y_hat.fit],
                       os.path.join(args.output_directory,
                                    "space_partition.pkl"))
        if args.visualize_sharpness:
            visualize_sharpness(
                y_hat.sharpness_r.squeeze(0).detach().numpy()[:, 0],
                int(args.weight_file.split("/")[-1].split("_")[-1]))
Code example #11
import numpy as np

from simple_3dviz import Mesh, Lines
from simple_3dviz.window import show


def heart_voxel_grid(N):
    """Create a NxNxN voxel grid with True if the voxel is inside a heart
    object and False otherwise."""
    x = np.linspace(-1.3, 1.3, N)
    y = np.linspace(-1.3, 1.3, N)
    z = np.linspace(-1.3, 1.3, N)
    x, y, z = np.meshgrid(x, y, z)
    return ((2 * x**2 + y**2 + z**2 - 1)**3
            - (1 / 10) * x**2 * z**3 - y**2 * z**3) < 0


if __name__ == "__main__":
    voxels = heart_voxel_grid(64)
    m = Mesh.from_voxel_grid(voxels, colors=(0.8, 0, 0))
    l = Lines.from_voxel_grid(voxels, colors=(0, 0, 0.), width=0.001)
    show([l, m])
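
To render the same heart offscreen to image files instead of opening a window, the render utility can be combined with a frame-saving behaviour. The import paths below match how these names are used in examples #10 and #12 on this page, but they are not shown there, so treat them as assumptions:

from simple_3dviz.utils import render
from simple_3dviz.behaviours.io import SaveFrames

# Assumed import paths; renders a single frame to /tmp/heart_000.png
render([l, m], behaviours=[SaveFrames("/tmp/heart_{:03d}.png", 1)],
       n_frames=1, size=(512, 512))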
Code example #12
                CameraTargetTrajectory(
                    BackAndForth(TrajectoryLine(
                        args.camera_target,
                        args.camera_target - np.array(args.up_vector)*5
                    )),
                    speed=0.001
                ),
                CameraTrajectory(
                    BackAndForth(TrajectoryLine(
                        args.camera_position,
                        args.camera_position + d*15
                    )),
                    speed=0.001
                )
            ]

        if args.sorting:
            behaviours.append(SortTriangles())

        # Behaviours to be considered while rendering
        if args.without_screen:
            behaviours += [
                SaveFrames(args.save_frames, 2),
                SaveGif("/tmp/out.gif", 2)
            ]
            render(renderables, size=args.window_size, behaviours=behaviours,
                   n_frames=args.n_frames)
        else:
            show(renderables, size=args.window_size,
                 behaviours=behaviours + [SnapshotOnKey()])
Code example #13
import numpy as np
import matplotlib.pyplot as plt

from simple_3dviz.renderables import Spherecloud
from simple_3dviz.behaviours.keyboard import SnapshotOnKey
from simple_3dviz.window import show

if __name__ == "__main__":
    t = np.linspace(0, 4 * np.pi, 20)
    x = np.sin(2 * t)
    y = np.cos(t)
    z = np.cos(2 * t)
    sizes = (2 + np.sin(t)) * 0.125
    centers = np.stack([x, y, z]).reshape(3, -1).T
    cmap = plt.cm.copper
    colors = cmap(np.random.choice(np.arange(500), centers.shape[0]))
    s = Spherecloud(centers=centers, sizes=sizes, colors=colors)

    from simple_3dviz import Mesh
    m = Mesh.from_file("models/baby_yoda.stl", color=(0.1, 0.8, 0.1))
    m.to_unit_cube()
    show([s, m], camera_position=(-2.8, -2.8, 0.1), size=(512, 512))
Code example #14
import numpy as np

from simple_3dviz.renderables import Mesh
from simple_3dviz.behaviours.keyboard import SnapshotOnKey
from simple_3dviz.window import show

if __name__ == "__main__":
    # Number of e1, e2 parameters to be tested
    N = 7
    # SQs shapes
    e2 = np.linspace(0.1, 1.9, N, endpoint=True)
    e1 = np.linspace(0.1, 1.9, N, endpoint=True)
    epsilon_1, epsilon_2 = np.meshgrid(e1, e2)
    epsilons = np.stack([epsilon_1, epsilon_2]).reshape(2, -1).T
    # SQs sizes
    alphas = np.ones((epsilons.shape[0], 3))
    # SQs translations
    s = np.ceil(N*2.5 / 2)
    x = np.linspace(-s, s, N)
    y = np.linspace(-s, s, N)
    z = np.array([0])
    X, Y, Z = np.meshgrid(x, y, z)
    translations = np.stack([X, Y, Z]).reshape(3, -1).T
    # SQs rotations
    rotations = np.eye(3)[np.newaxis] * np.ones((len(epsilons), 1, 1))

    colors = np.array([[1., 0, 0, 1],
                       [0, 1, 1, 1]])[np.random.randint(0, 2, size=epsilons.shape[0])]

    m = Mesh.from_superquadrics(alphas, epsilons, translations, rotations, colors)
    show(m, size=(512,512), light=(0, 0, 3), behaviours=[SnapshotOnKey()])
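
Stripped of the grid layout above, a single superquadric only needs one row per parameter array; a minimal sketch with arbitrary values:

import numpy as np
from simple_3dviz.renderables import Mesh
from simple_3dviz.behaviours.keyboard import SnapshotOnKey
from simple_3dviz.window import show

alpha = np.array([[1.0, 1.0, 1.0]])    # size along each axis
epsilon = np.array([[0.25, 1.5]])      # shape exponents e1, e2
translation = np.zeros((1, 3))
rotation = np.eye(3)[np.newaxis]
colors = np.array([[0.3, 0.6, 0.9, 1.0]])
m = Mesh.from_superquadrics(alpha, epsilon, translation, rotation, colors)
show(m, size=(512, 512), light=(0, 0, 3), behaviours=[SnapshotOnKey()])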