Example no. 1
def make_rigid_body(sample,
                    init_pos,
                    orientation,
                    linear_velocity,
                    angular_velocity,
                    device,
                    target_vertices=None):
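    """Build a RigidBody from a sample's mesh, returning the body together with
    the batched vertices, faces, and per-face textures used for rendering.

    If `target_vertices` is given, the source mesh is first run through
    `blender_process.Process` (presumably to resample it to that vertex count)
    before being loaded.
    """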
    # Load a triangle mesh obj file
    if target_vertices is not None:
        tmp_obj_file = '/tmp/obj.obj'
        from blender_process import Process
        p = Process(sample.obj, target_vertices, tmp_obj_file)
        mesh = TriangleMesh.from_obj(tmp_obj_file)
    else:
        mesh = TriangleMesh.from_obj(sample.obj)
    vertices = (meshutils.normalize_vertices(
        mesh.vertices).unsqueeze(0).to(device))
    faces = mesh.faces.unsqueeze(0).to(device)
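    # Per-face texture: each RGB channel is one component of sample.color,
    # scaled to [0, 1] and broadcast over every face.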
    textures = torch.cat(
        (
            sample.color[0] / 255 * torch.ones(
                1, faces.shape[1], 2, 1, dtype=torch.float32, device=device),
            sample.color[1] / 255 * torch.ones(
                1, faces.shape[1], 2, 1, dtype=torch.float32, device=device),
            sample.color[2] / 255 * torch.ones(
                1, faces.shape[1], 2, 1, dtype=torch.float32, device=device),
        ),
        dim=-1,
    )
    # (Uniform) Masses
    masses = (sample.mass / vertices.shape[-2]) * torch.nn.Parameter(
        torch.ones(vertices.shape[-2], dtype=vertices.dtype, device=device),
        requires_grad=True,
    )
    # Body
    body = RigidBody(
        vertices[0],
        position=torch.tensor(init_pos, dtype=torch.float32, device=device),
        masses=masses,
        orientation=torch.tensor(orientation).type(torch.float32).to(device),
        friction_coefficient=sample.fric,
        # linear_velocity=torch.tensor(linear_velocity).type(torch.float32).to(device),
        # angular_velocity=torch.tensor(angular_velocity).type(torch.float32).to(device),
    )
    return body, vertices, faces, textures
Example no. 2
        elevation = 30.0
        azimuth = 0.0
        # Initialize the renderer.
        renderer = SoftRenderer(image_size=image_size,
                                camera_mode=camera_mode,
                                device=device)
        renderer.set_eye_from_angles(camera_distance, elevation, azimuth)

        sim_duration = 2.0  # seconds
        fps = 30  # frames per second
        sim_steps = int((sim_duration * fps) / 2)

        # obj = get_primitive_obj(shape[0])
        obj = get_primitive_obj(INT_TO_PRIMITIVE[shape[0]])
        # print(obj)
        mesh = TriangleMesh.from_obj(obj)
        vertices = ((
            meshutils.normalize_vertices(mesh.vertices)  # + \
            # torch.from_numpy(init_pos[i]).float().unsqueeze(0)
        ).to(device).unsqueeze(0))
        faces = mesh.faces.unsqueeze(0).to(device)
        textures = torch.cat(
            (
                color[0][0] / 255.0 * torch.ones(1,
                                                 faces.shape[1],
                                                 2,
                                                 1,
                                                 dtype=torch.float32,
                                                 device=device),
                color[0][1] / 255.0 * torch.ones(1,
                                                 faces.shape[1],
Example no. 3
    # Camera settings.
    camera_distance = (
        2.0  # Distance of the camera from the origin (i.e., center of the object)
    )
    elevation = 30.0  # Angle of elevation
    azimuth = 0.0  # Azimuth angle

    # Directory in which sample data is located.
    DATA_DIR = Path(__file__).parent / "sampledata"

    # Directory in which logs (gifs) are saved.
    logdir = Path(__file__).parent / "cache" / "dibr"
    logdir.mkdir(exist_ok=True)

    # Read in the input mesh. TODO: Add filepath as argument.
    mesh = TriangleMesh.from_obj(DATA_DIR / "banana.obj")

    # Output filename to write out a rendered .gif to, showing the progress of optimization.
    progressfile = logdir / "texture_optimization_progress.gif"
    # Output filename to write out a rendered .gif file to, rendering the optimized mesh.
    outfile = logdir / "texture_optimization_output.gif"

    # Extract the vertices and faces, and texture the mesh (currently colored white).
    vertices = mesh.vertices
    faces = mesh.faces
    vertices = vertices[None, :, :].cuda()
    faces = faces[None, :, :].cuda()
    # textures = torch.ones(1, faces.shape[1], 2, 3, dtype=torch.float32, device=device)
    textures = torch.ones(1, vertices.shape[-2], 3, dtype=torch.float32, device=device)

    # Translate the mesh such that it is centered at the origin.
    torch.autograd.set_detect_anomaly(True)

    device = "cuda:0"

    logdir = os.path.join(args.logdir, args.expid)
    if args.log:
        os.makedirs(logdir, exist_ok=True)
        with open(os.path.join(logdir, "args.txt"), "w") as f:
            json.dump(args.__dict__, f, indent=2)

    sim_dt = (1.0 / args.physics_engine_rate) / args.sim_substeps
    sim_steps = int(args.sim_duration / sim_dt)
    sim_time = 0.0

    builder_gt = df.sim.ModelBuilder()
    obj = TriangleMesh.from_obj(args.mesh)
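    # Convert the normalized mesh into numpy points and a flat face-index list
    # for constructing the df.sim.Mesh below.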
    vertices = meshutils.normalize_vertices(obj.vertices).to(device)
    points = vertices.detach().cpu().numpy()
    indices = list(obj.faces.numpy().reshape((-1)))

    mesh = df.sim.Mesh(points, indices)

    pos_gt = (0.0, 4.0, 0.0)
    rot_gt = df.quat_from_axis_angle((0.0, 0.0, 1.0), math.pi * 0.3)
    # rot_gt = (0.0, 0.0, 0.0, 0.0)
    vel_gt = (0.0, 2.0, 0.0)
    omega_gt = (0.0, 0.0, 0.0)
    scale_gt = (1.0, 1.0, 1.0)
    density_gt = 5.0
    ke_gt = 4900.0
    kd_gt = 15.0
    # Device to run experiments on (MUST be CUDA-enabled, for render to work).
    device = "cuda:0"

    # Seed RNG for repeatability
    torch.manual_seed(args.seed)

    # Initialize the differentiable renderer.
    renderer = SoftRenderer(camera_mode="look_at", device=device)
    camera_distance = 8.0
    elevation = 0.0
    azimuth = 0.0
    renderer.set_eye_from_angles(camera_distance, elevation, azimuth)

    # Load the template sphere mesh (for the bob)
    sphere = TriangleMesh.from_obj(args.template)
    vertices_gt = meshutils.normalize_vertices(
        sphere.vertices.unsqueeze(0), scale_factor=args.radius).to(device)
    faces = sphere.faces.to(device).unsqueeze(0)
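    # Solid per-face colors: pure red here, with a blue variant constructed below.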
    textures_red = torch.cat(
        (
            torch.ones(
                1, faces.shape[1], 2, 1, dtype=torch.float32, device=device),
            torch.zeros(
                1, faces.shape[1], 2, 1, dtype=torch.float32, device=device),
            torch.zeros(
                1, faces.shape[1], 2, 1, dtype=torch.float32, device=device),
        ),
        dim=-1,
    )
    textures_blue = torch.cat(
Example no. 6
                             SemiImplicitEulerWithContacts)
from gradsim.forces import ConstantForce
from gradsim.renderutils import SoftRenderer, TriangleMesh
from gradsim.simulator import Simulator
from gradsim.utils import meshutils

if __name__ == "__main__":

    # Device to store tensors on (MUST be CUDA-capable, for renderer to work).
    device = "cuda:0"

    # Output (gif) file path
    outfile = Path("cache/demorestitution.gif")

    # Load a body (from a triangle mesh obj file).
    mesh = TriangleMesh.from_obj(Path("sampledata/cube.obj"))
    # mesh = TriangleMesh.from_obj(Path("sampledata/sphere.obj"))
    # mesh = TriangleMesh.from_obj(Path("sampledata/banana.obj"))
    vertices = meshutils.normalize_vertices(mesh.vertices.unsqueeze(0)).to(device)
    faces = mesh.faces.to(device).unsqueeze(0)
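    # Per-face texture with R = G = 1 and B = 0, i.e. a uniform yellow color.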
    textures = torch.cat(
        (
            torch.ones(1, faces.shape[1], 2, 1, dtype=torch.float32, device=device),
            torch.ones(1, faces.shape[1], 2, 1, dtype=torch.float32, device=device),
            torch.zeros(1, faces.shape[1], 2, 1, dtype=torch.float32, device=device),
        ),
        dim=-1,
    )
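    # Learnable per-vertex masses, initialized to 0.1 each.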
    masses = torch.nn.Parameter(
        0.1 * torch.ones(vertices.shape[1], dtype=vertices.dtype, device=device),
        requires_grad=True,
    )

    # Camera settings.
    camera_distance = (
        2.0  # Distance of the camera from the origin (i.e., center of the object)
    )
    elevation = 30.0  # Angle of elevation
    azimuth = 0.0  # Azimuth angle

    # Directory in which sample data is located.
    DATA_DIR = Path(__file__).parent / "sampledata"

    # Directory in which logs (gifs) are saved.
    logdir = Path(__file__).parent / "cache" / "dibr"
    logdir.mkdir(exist_ok=True)

    # Read in the input mesh. TODO: Add filepath as argument.
    mesh = TriangleMesh.from_obj(DATA_DIR / "dibr_sphere.obj")

    # Output filename to write out a rendered .gif to, showing the progress of optimization.
    progressfile = logdir / "vertex_optimization_progress.gif"
    # Output filename to write out a rendered .gif file to, rendering the optimized mesh.
    outfile = logdir / "vertex_optimization_output.gif"

    # Extract the vertices and faces, and texture the mesh (currently colored white).
    vertices = mesh.vertices
    faces = mesh.faces
    vertices = vertices[None, :, :].cuda()
    faces = faces[None, :, :].cuda()
    # Initialize all faces to yellow (to color the banana)!
    textures = torch.stack(
        (
            torch.ones(
Example no. 8
    if args.compare_every >= args.simsteps:
        raise ValueError(
            f"Arg --compare-every cannot be greater than or equal to {args.simsteps}."
        )

    # Seed RNG for repeatability.
    torch.manual_seed(args.seed)

    # We don't need gradients in this experiment
    torch.autograd.set_grad_enabled(False)

    # Device to store tensors on (MUST be CUDA-capable, for renderer to work).
    device = "cuda:0"

    # Load a body (from a triangle mesh obj file).
    mesh = TriangleMesh.from_obj(args.infile)
    vertices = meshutils.normalize_vertices(mesh.vertices.unsqueeze(0)).to(device)
    faces = mesh.faces.to(device).unsqueeze(0)
    textures = torch.cat(
        (
            torch.ones(1, faces.shape[1], 2, 1, dtype=torch.float32, device=device),
            torch.ones(1, faces.shape[1], 2, 1, dtype=torch.float32, device=device),
            torch.zeros(1, faces.shape[1], 2, 1, dtype=torch.float32, device=device),
        ),
        dim=-1,
    )
    # masses_gt = torch.nn.Parameter(
    #     1 * torch.ones(vertices.shape[1], dtype=vertices.dtype, device=device),
    #     requires_grad=False,
    # )
    masses_gt = torch.nn.Parameter(
Example no. 9
    args = parser.parse_args()

    if args.compare_every >= args.simsteps:
        raise ValueError(
            f"Arg --compare-every cannot be greater than or equal to {args.simsteps}."
        )

    # Seed RNG for repeatability.
    torch.manual_seed(args.seed)

    # Device to store tensors on (MUST be CUDA-capable, for renderer to work).
    device = "cuda:0"

    # Load a body (from a triangle mesh obj file).
    mesh_gt = TriangleMesh.from_obj(args.infile)
    vertices_gt = meshutils.normalize_vertices(
        mesh_gt.vertices.unsqueeze(0)).to(device)
    faces_gt = mesh_gt.faces.to(device).unsqueeze(0)
    textures_gt = torch.cat(
        (
            torch.ones(
                1, faces_gt.shape[1], 2, 1, dtype=torch.float32,
                device=device),
            torch.ones(
                1, faces_gt.shape[1], 2, 1, dtype=torch.float32,
                device=device),
            torch.zeros(
                1, faces_gt.shape[1], 2, 1, dtype=torch.float32,
                device=device),
        ),
Example no. 10
def main():

    ROOT_DIR = Path(__file__).parent.resolve()

    parser = argparse.ArgumentParser(description="DIB-R Example")

    parser.add_argument(
        "--mesh",
        type=str,
        default=ROOT_DIR / "sampledata" / "banana.obj",
        help="Path to the mesh OBJ file",
    )
    parser.add_argument("--use-texture",
                        action="store_true",
                        help="Whether to render a textured mesh.")
    parser.add_argument(
        "--texture",
        type=str,
        default=ROOT_DIR / "sampledata" / "texture.png",
        help="Specifies path to the texture to be used.",
    )
    parser.add_argument(
        "--output_path",
        type=str,
        default=ROOT_DIR / "cache" / "dibr",
        help="Output directory.",
    )

    args = parser.parse_args()

    CAMERA_DISTANCE = 2
    CAMERA_ELEVATION = 30
    MESH_SIZE = 5
    HEIGHT = 256
    WIDTH = 256

    mesh = TriangleMesh.from_obj(args.mesh)
    vertices = mesh.vertices.cuda()
    faces = mesh.faces.long().cuda()

    # Expand such that batch size = 1
    vertices = vertices.unsqueeze(0)

    ###########################
    # Normalize mesh position
    ###########################

    vertices_max = vertices.max()
    vertices_min = vertices.min()
    vertices_middle = (vertices_max + vertices_min) / 2.0
    vertices = (vertices - vertices_middle) * MESH_SIZE

    ###########################
    # Generate vertex color
    ###########################

    if not args.use_texture:
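        # Color each vertex by its position, normalized to [0, 1] independently per axis.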
        vert_min = torch.min(vertices, dim=1, keepdim=True)[0]
        vert_max = torch.max(vertices, dim=1, keepdim=True)[0]
        colors = (vertices - vert_min) / (vert_max - vert_min)

    ###########################
    # Generate texture mapping
    ###########################

    if args.use_texture:
        uv = get_spherical_coords_x(vertices[0].cpu().numpy())
        uv = torch.from_numpy(uv).cuda()

        # Expand such that batch size = 1
        uv = uv.unsqueeze(0)

    ###########################
    # Load texture
    ###########################

    if args.use_texture:
        # Load image as numpy array
        texture = np.array(Image.open(args.texture))

        # Convert numpy array to PyTorch tensor
        texture = torch.from_numpy(texture).cuda()

        # Convert from [0, 255] to [0, 1]
        texture = texture.float() / 255.0

        # Convert to NxCxHxW layout
        texture = texture.permute(2, 0, 1).unsqueeze(0)

    ###########################
    # Render
    ###########################

    if args.use_texture:
        renderer_mode = "Lambertian"

    else:
        renderer_mode = "VertexColor"

    renderer = DIBRenderer(HEIGHT, WIDTH, mode=renderer_mode)

    loop = tqdm.tqdm(list(range(0, 360, 4)))
    loop.set_description("Drawing")

    args.output_path.mkdir(exist_ok=True)
    savename = ("rendered_vertexcolor.gif"
                if not args.use_texture else "rendered_texture.gif")
    writer = imageio.get_writer(args.output_path / savename, mode="I")
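    # Render one frame every 4 degrees of azimuth and append it to the output gif.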
    for azimuth in loop:
        renderer.set_look_at_parameters([90 - azimuth], [CAMERA_ELEVATION],
                                        [CAMERA_DISTANCE])

        if args.use_texture:
            predictions, _, _ = renderer(points=[vertices,
                                                 faces.long()],
                                         uv_bxpx2=uv,
                                         texture_bx3xthxtw=texture)

        else:
            predictions, _, _ = renderer(points=[vertices,
                                                 faces.long()],
                                         colors_bxpx3=colors)

        image = predictions.detach().cpu().numpy()[0]
        writer.append_data((image * 255).astype(np.uint8))

    writer.close()
Example no. 11
from gradsim.utils import meshutils

if __name__ == "__main__":

    # Device to store tensors on (MUST be CUDA-capable, for renderer to work).
    device = "cuda:0"

    # Create cache if it does not previously exist
    cache = Path("cache")
    cache.mkdir(exist_ok=True)

    # Output (gif) file path
    outfile = cache / "hellogradsim.gif"

    # Load a body (from a triangle mesh obj file).
    mesh = TriangleMesh.from_obj(Path("sampledata/banana.obj"))
    vertices = meshutils.normalize_vertices(mesh.vertices.unsqueeze(0)).to(device)
    faces = mesh.faces.to(device).unsqueeze(0)
    textures = torch.cat(
        (
            torch.ones(1, faces.shape[1], 2, 1, dtype=torch.float32, device=device),
            torch.ones(1, faces.shape[1], 2, 1, dtype=torch.float32, device=device),
            torch.zeros(1, faces.shape[1], 2, 1, dtype=torch.float32, device=device),
        ),
        dim=-1,
    )
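    # Create a rigid body directly from the normalized vertices, leaving the
    # remaining RigidBody parameters (mass, pose, velocity, etc.) at their defaults.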
    body = RigidBody(vertices[0])

    # Create a force that applies gravity (g = 10 metres / second^2).
    # gravity = Gravity(device=device)
    gravity = ConstantForce(direction=torch.tensor([0.0, -1.0, 0.0]), device=device)