Example #1
        # Ground-truth triangle (material) parameters
        model_gt.tri_kd = 100.0
        model_gt.tri_lift = 10.0
        model_gt.tri_drag = 5.0

        # Ground-truth contact parameters
        model_gt.contact_ke = 1.0e4
        model_gt.contact_kd = 1000.0
        model_gt.contact_kf = 1000.0
        model_gt.contact_mu = 0.5

        model_gt.particle_radius = 0.01
        model_gt.ground = False
        state_gt = model_gt.state()
        device = "cuda:0"
        target_image = renderer.forward(
            state_gt.q.unsqueeze(0).to(device),
            faces.unsqueeze(0).to(device),
            textures.to(device),
        )
        if args.log:
            imageio.imwrite(
                os.path.join(logdir, "target_image.png"),
                (target_image[0].permute(1, 2, 0).detach().cpu().numpy() * 255).astype(
                    np.uint8
                ),
            )
            np.savetxt(
                os.path.join(logdir, "target_position.txt"),
                target_position.detach().cpu().numpy(),
            )
            os.makedirs(os.path.join(logdir, "gt"), exist_ok=True)
            np.savetxt(
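
A minimal sketch (an assumption, not part of the excerpt) of how the target image logged above could be reloaded as a float tensor in [0, 1] for later loss computation, inverting the uint8 conversion used in the imageio.imwrite call:

import imageio
import numpy as np
import torch

target_np = imageio.imread(os.path.join(logdir, "target_image.png"))  # H x W x C, uint8
target = torch.from_numpy(target_np.astype(np.float32) / 255.0)
target = target.permute(2, 0, 1).unsqueeze(0).to(device)  # back to 1 x C x H x W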
Example #2
    theta1_gt = ret[:, 0]
    theta2_gt = ret[:, 1]

    # Convert the pendulum angle into the Cartesian position of the bob

    x_gt = args.length * theta1_gt.sin()
    y_gt = -args.length * theta1_gt.cos()
    z_gt = torch.zeros_like(x_gt)
    pos_gt = torch.stack((x_gt, y_gt, z_gt), dim=-1)

    imgs_gt = []
    print("Rendering GT images...")
    for i in trange(numsteps):
        _vertices = vertices_gt.clone() + pos_gt[i]
        rgba = renderer.forward(_vertices, faces_gt, textures)
        imgs_gt.append(rgba)

    logdir = Path(args.logdir) / args.expid
    if args.log:
        logdir.mkdir(exist_ok=True)
        write_imglist_to_gif(imgs_gt,
                             logdir / "gt.gif",
                             imgformat="rgba",
                             verbose=False)

    if args.save_timelapse:
        timelapse = kaolin.visualize.Timelapse(args.logdir)
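
The helper write_imglist_to_gif is not shown in these excerpts; a minimal sketch of what it might look like, assuming each entry of imgs is a 1 x 4 x H x W RGBA tensor in [0, 1] (matching the renderer output used above):

import imageio
import numpy as np

def write_imglist_to_gif(imgs, outpath, imgformat="rgba", verbose=False):
    frames = []
    for img in imgs:
        frame = img[0].permute(1, 2, 0).detach().cpu().numpy()  # H x W x 4
        frames.append((255 * frame).astype(np.uint8))
        if verbose:
            print(f"Converted frame {len(frames)}")
    imageio.mimwrite(str(outpath), frames)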
Example #3
            state_gt = integrator.forward(model_gt, state_gt, sim_dt)
            sim_time += sim_dt

            # Render every `render_every` steps (and at the final step)
            if i % render_every == 0 or i == sim_steps - 1:
                device = "cuda:0"
                # Transform body-frame vertices into the world frame using the
                # rigid body's current rotation and position.
                vertices_current = get_world_vertices(
                    vertices, state_gt.rigid_r.view(-1), state_gt.rigid_x)
                rgba = renderer.forward(
                    vertices_current.unsqueeze(0).to(device),
                    faces.unsqueeze(0).to(device),
                    textures.to(device),
                )
                imgs_gt.append(rgba)
                positions_gt.append(state_gt.rigid_x)
                logvertices_gt.append(vertices_current.detach().cpu().numpy())

        if args.log:
            write_imglist_to_gif(imgs_gt, os.path.join(logdir, "gt.gif"))
            write_meshes_to_file(logvertices_gt,
                                 faces.detach().cpu().numpy(),
                                 os.path.join(logdir, "vertices_gt"))

    # """
    # Optimize for physical parameters.
    # """
Example #4
        # Add gravity
        gravity = ConstantForce(
            magnitude=10.0,
            direction=torch.tensor([0.0, -1.0, 0.0]),
            device=device,
        )
        body_gt.add_external_force(gravity)

        sim_gt = Simulator([body_gt])

        # 2 seconds; 30 fps
        imgs_gt = []
        with torch.no_grad():
            for t in range(sim_steps):
                sim_gt.step()
                rgba = renderer.forward(
                    body_gt.get_world_vertices().unsqueeze(0), faces, textures)
                imgs_gt.append(rgba)

        # Initial (uniform) guess for the per-particle masses.
        masses_est = torch.nn.Parameter(
            0.2 * torch.ones_like(masses_gt),
            requires_grad=True,
        )
        massmodel = MassModel(
            masses_est,
            uniform_density=True,
            minmass=1e-9,
            maxmass=1e9,
        )
        massmodel.to(device)

        # optimizer = torch.optim.Adam(massmodel.parameters(), lr=1)
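
MassModel is not defined in the excerpt; a minimal sketch of what it might look like, given the constructor arguments used above (the clamping and density-tying behavior are assumptions):

import torch

class MassModel(torch.nn.Module):
    def __init__(self, masses, uniform_density=False, minmass=1e-9, maxmass=1e9):
        super().__init__()
        self.masses = masses if isinstance(masses, torch.nn.Parameter) \
            else torch.nn.Parameter(masses)
        self.uniform_density = uniform_density
        self.minmass = minmass
        self.maxmass = maxmass

    def forward(self):
        masses = self.masses
        if self.uniform_density:
            # Tie all particles to a single (mean) mass value.
            masses = masses.mean() * torch.ones_like(masses)
        return masses.clamp(self.minmass, self.maxmass)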
Example #5
    # Convert to Cartesian coordinates of the two bob positions
    x1 = double_pendulum_gt.length1 * theta1_gt.sin()
    y1 = -double_pendulum_gt.length1 * theta1_gt.cos()
    x2 = x1 + double_pendulum_gt.length2 * theta2_gt.sin()
    y2 = y1 - double_pendulum_gt.length2 * theta2_gt.cos()

    # GT positions of the two bobs
    pos1_gt = torch.stack((x1, y1, torch.zeros_like(x1)), dim=-1)
    pos2_gt = torch.stack((x2, y2, torch.zeros_like(x2)), dim=-1)

    imgs1_gt = []
    imgs2_gt = []
    print("Rendering GT images...")
    for i in trange(numsteps):
        _vertices = vertices_gt.clone() + pos1_gt[i]
        rgba1 = renderer.forward(_vertices, faces, textures_red)
        imgs1_gt.append(rgba1)
        _vertices = vertices_gt.clone() + pos2_gt[i]
        rgba2 = renderer.forward(_vertices, faces, textures_blue)
        imgs2_gt.append(rgba2)

    if args.log:
        imgs_gt = [
            0.5 * (bob1 + bob2) for bob1, bob2 in zip(imgs1_gt, imgs2_gt)
        ]
        write_imglist_to_gif(
            imgs_gt,
            logdir / "gt.gif",
            imgformat="rgba",
            verbose=False,
        )
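
Averaging the two RGBA renders (0.5 * (bob1 + bob2)) halves the intensity of both bobs, which is fine for visualization when the bobs rarely overlap. Where overlap matters, standard "over" alpha compositing could be used instead; a sketch, assuming 1 x 4 x H x W tensors with straight (non-premultiplied) alpha in [0, 1]:

import torch

def composite_over(top, bottom):
    """Composite `top` over `bottom` (both 1 x 4 x H x W RGBA tensors)."""
    alpha = top[:, 3:4]
    rgb = alpha * top[:, :3] + (1.0 - alpha) * bottom[:, :3]
    a = alpha + (1.0 - alpha) * bottom[:, 3:4]
    return torch.cat((rgb, a), dim=1)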
Example #6
    # Create a 'model' (an nn.Module) that wraps around the textures, making them 'optimizable'.
    # TODO: Replace with a torch optimizer that takes the textures as a 'params' argument.
    model = Model(textures).cuda()
    optimizer = torch.optim.Adam(model.parameters(), 1.0, betas=(0.5, 0.99))
    renderer.set_eye_from_angles(camera_distance, elevation, azimuth)
    mseloss = torch.nn.MSELoss()

    # Perform texture optimization.
    if not args.no_viz:
        writer = imageio.get_writer(progressfile, mode="I")
    for i in trange(args.iters):
        optimizer.zero_grad()
        textures = model()
        rgba = renderer.forward(vertices, faces, textures)
        loss = mseloss(rgba, img_target)
        loss.backward()
        optimizer.step()
        if i % 5 == 0:
            # TODO: Add functionality to write to gif output file.
            tqdm.write(f"Loss: {loss.item():.5}")
            if not args.no_viz:
                img = rgba[0].permute(1, 2, 0).detach().cpu().numpy()
                writer.append_data((255 * img).astype(np.uint8))
    if not args.no_viz:
        writer.close()

        # Write turntable renders of the optimized result to the output file.
        writer = imageio.get_writer(outfile, mode="I")
        for azimuth in trange(0, 360, 6):
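
The Model wrapper used in this and the next example is not shown; a minimal sketch of one plausible definition, which registers the wrapped tensor (textures here, vertices below) as a learnable parameter and returns it from forward():

import torch

class Model(torch.nn.Module):
    def __init__(self, tensor):
        super().__init__()
        self.param = torch.nn.Parameter(tensor.clone())

    def forward(self):
        return self.param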
Example #7
    # Create a 'model' (an nn.Module) that wraps around the vertices, making them 'optimizable'.
    # TODO: Replace with a torch optimizer that takes vertices as a 'params' argument.
    # Deform the vertices slightly.
    model = Model(vertices.clone()).cuda()
    renderer.set_eye_from_angles(camera_distance, elevation, azimuth)
    optimizer = torch.optim.Adam(model.parameters(), 0.01, betas=(0.5, 0.99))
    mseloss = torch.nn.MSELoss()

    # Perform vertex optimization.
    if not args.no_viz:
        writer = imageio.get_writer(progressfile, mode="I")
    for i in trange(args.iters):
        optimizer.zero_grad()
        new_vertices = model()
        rgba = renderer.forward(new_vertices, faces, textures)
        loss = mseloss(rgba, img_target)
        loss.backward()
        optimizer.step()
        if i % 20 == 0:
            # TODO: Add functionality to write to gif output file.
            tqdm.write(f"Loss: {loss.item():.5}")
            if not args.no_viz:
                img = rgba[0].permute(1, 2, 0).detach().cpu().numpy()
                writer.append_data((255 * img).astype(np.uint8))
    if not args.no_viz:
        writer.close()

        # Write optimized mesh to output file.
        writer = imageio.get_writer(outfile, mode="I")
        for azimuth in trange(0, 360, 6):
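
Both excerpts cut off at the turntable loop; one plausible continuation, by analogy with the progress-writer loop above (an assumption, not the authors' code):

for azimuth in trange(0, 360, 6):
    renderer.set_eye_from_angles(camera_distance, elevation, azimuth)
    rgba = renderer.forward(model(), faces, textures)
    img = rgba[0].permute(1, 2, 0).detach().cpu().numpy()
    writer.append_data((255 * img).astype(np.uint8))
writer.close()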
Example #8
                phases[p] = math.sin(20.0 * (sim_time + 0.5 * p * math.pi))

            model.tri_activations = network(phases) * activation_strength
            state = integrator.forward(model, state, sim_dt)

            sim_time += sim_dt

            # Render every `render_every` steps (and at the final step)
            if i % render_every == 0 or i == sim_steps - 1:
                device = "cuda:0"
                rgba = renderer.forward(
                    state.q.unsqueeze(0).to(device),
                    faces.unsqueeze(0).to(device),
                    textures.to(device),
                )
                imgs.append(rgba)

            com_pos = torch.mean(state.q, 0)
            com_vel = torch.mean(state.u, 0)
            """
            TODO: Apart from the model.tri_activation variable, no other
            term (not even state.q.mean(0) or state.com.mean(0)) seems to
            affect the loss function. Look into this.
            """

            if args.method == "physics-only":
                # use integral of velocity over course of the run
                loss = (loss - com_vel[0] +
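
The `network` that maps gait phases to per-triangle activations is not shown in the excerpt; a minimal sketch of one possible definition (the layer sizes and Tanh squashing are assumptions; n_phases and n_tris would come from the setup code):

import torch

n_phases = 8    # assumed: length of the `phases` vector
n_tris = 1024   # assumed: number of mesh triangles

network = torch.nn.Sequential(
    torch.nn.Linear(n_phases, 64),
    torch.nn.Tanh(),
    torch.nn.Linear(64, n_tris),
    torch.nn.Tanh(),  # keep raw activations bounded before scaling
)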
Example #9
    logdir = Path("cache/bounce2d") / args.expid
    if args.log:
        logdir.mkdir(exist_ok=True)

    traj_gt = []
    imgs_gt = []
    print("Rendering GT images...")
    for t in trange(args.simsteps):
        ball2d_gt.step(args.dtime)
        traj_gt.append(ball2d_gt.position_cur)
        # Lift the 2D ball position into 3D for rendering (z stays zero).
        pos = torch.zeros(3, dtype=vertices_gt.dtype, device=device)
        pos[0] = ball2d_gt.position_cur[0]
        pos[1] = ball2d_gt.position_cur[1]
        _vertices = vertices_gt.clone() + pos
        imgs_gt.append(renderer.forward(_vertices, faces, textures_red))

    if args.log:
        write_imglist_to_gif(
            imgs_gt,
            logdir / "gt.gif",
            imgformat="rgba",
            verbose=False,
        )

    # Parameters to estimate
    speed_est = torch.nn.Parameter(
        torch.tensor([3.0], device=device))
    speedmodel = SimpleModel(speed_est).to(device)

    gravity_est = torch.nn.Parameter(
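
SimpleModel is not defined in the excerpt; a minimal sketch of one plausible definition, a thin nn.Module that exposes a single tensor as its learnable parameter:

import torch

class SimpleModel(torch.nn.Module):
    def __init__(self, param):
        super().__init__()
        self.param = param if isinstance(param, torch.nn.Parameter) \
            else torch.nn.Parameter(param)

    def forward(self):
        return self.param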