Example #1
    def make_patch(v_ctr, n_ctr):
        # All points within an L-infinity ball of radius ball_radius of the center
        idx_i = np.array(kdtree.query_ball_point(v_ctr, ball_radius, p=np.inf))

        # Keep only points whose normals agree with the center normal,
        # i.e. whose dot product exceeds the cosine threshold angle_thresh
        good_normals = np.squeeze(n[idx_i] @ n_ctr.reshape([3, 1]) > angle_thresh)
        idx_i = idx_i[good_normals]

        if len(idx_i) < min_pts_per_patch:
            print("Rejecting small patch with %d points" % len(idx_i))
            return

        # Mark points within distance r of the patch center as covered
        covered_indices = idx_i[np.linalg.norm(x[idx_i] - v_ctr, axis=1) < r]
        covered[covered_indices] = True

        # Blue-noise UV samples in the unit square, one per patch point
        uv_i = pcu.lloyd_2d(len(idx_i)).astype(np.float32)
        x_i = x[idx_i].astype(np.float32)
        translate_i = -np.mean(x_i, axis=0)

        # Assign patches to the available devices round-robin
        device = devices[len(patch_xs) % len(devices)]

        # Normalize the patch: center it, scale it into the unit ball, and
        # rotate it onto its principal axes (the left singular vectors)
        scale_i = np.array([1.0 / np.max(np.linalg.norm(x_i + translate_i, axis=1))], dtype=np.float32)
        rotate_i, _, _ = np.linalg.svd((x_i + translate_i).T, full_matrices=False)
        transform_i = (torch.from_numpy(translate_i).to(device),
                       torch.from_numpy(scale_i).to(device),
                       torch.from_numpy(rotate_i).to(device))

        x_i = torch.from_numpy((scale_i * (x_i + translate_i)) @ rotate_i).to(device)

        patch_transformations.append(transform_i)
        patch_indexes.append(torch.from_numpy(idx_i))
        patch_uvs.append(torch.tensor(uv_i, device=device, requires_grad=True))
        patch_xs.append(x_i)
        print("Computed patch with %d points" % x_i.shape[0])
Example #2
def lloyd_sample_im(im, npts):
    # Blue-noise samples in the unit square, mapped to pixel indices
    uv = pcu.lloyd_2d(npts)
    idx = np.floor(uv * np.array(im.shape)).astype(np.int32)
    # Guard against samples at exactly 1.0 indexing one past the last pixel
    idx = np.minimum(idx, np.array(im.shape) - 1)
    # Look up the image value at each sampled pixel
    h = im[idx[:, 0], idx[:, 1]]
    # Rows are (row, col, intensity) with coordinates normalized by the
    # longer image dimension
    return np.concatenate([idx / max(*im.shape), h[:, np.newaxis]], axis=-1)
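
A quick usage sketch for lloyd_sample_im, assuming im is a 2D grayscale NumPy array (the file name here is hypothetical):

import numpy as np
import imageio.v2 as imageio

im = imageio.imread("heightmap.png").astype(np.float32) / 255.0  # hypothetical input
pts = lloyd_sample_im(im, 1000)
# pts has shape [1000, 3]: normalized pixel coordinates plus the sampled intensity
print(pts.shape)
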
Example #3
def main():
    # NOTE: We're doing everything in float32 and numpy defaults to float64, so you need to make sure you cast
    # everything to the right type

    # x is a tensor of shape [n, 3] containing the positions of the points we are trying to fit
    x = torch.from_numpy(load_mesh_by_file_extension(args.mesh_filename)).to(
        args.device)

    # t starts as a tensor of shape [n, 2] containing a set of nicely distributed
    # samples in the unit square, lifted to [n, 3] by embed_3d
    t = embed_3d(
        torch.from_numpy(pcu.lloyd_2d(x.shape[0]).astype(np.float32)).to(
            args.device), 0)
    # t_mean = t.sum(0) / t.shape[0]
    # t = t - t_mean

    # The model is a fully connected network mapping the embedded parameter points to 3D
    # phi = ParametrizationNet(in_dim=3, out_dim=3, var_dim=4).to(args.device)
    vardim = 3
    phi = InjAugNODE(in_dim=3,
                     out_dim=3,
                     var_dim=vardim,
                     ker_dims=[1024, 1024, 1024, 1024],
                     device="cuda").to(args.device)
    # Eps is 1/lambda and max_iters is the maximum number of Sinkhorn iterations to do
    loss_fun = SinkhornLoss(eps=args.sinkhorn_eps,
                            max_iters=args.max_sinkhorn_iters)
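    # Note: smaller eps gives a tighter approximation to the exact optimal
    # transport cost, but typically needs more Sinkhorn iterations to converge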
    # Pad x into the var_dim-dimensional augmented space (extra coordinates
    # are initialized to one)
    dummy = torch.ones(x.shape[0], vardim).to(args.device)
    dummy[:, 0:x.shape[1]] = x
    x = dummy

    # Adam is used here just as an example; swap in your own optimizer if needed
    optimizer = torch.optim.Adam(phi.parameters(), lr=0.0001)
    # optimizer.add_param_group({"params": phi.augment_part})
    print("Number of Parameters=", count_parameters(phi))

    for epoch in range(1, args.num_epochs + 1):
        optimizer.zero_grad()

        # Do the forward pass of the neural net, evaluating the function at the parametric points
        y = phi(t)

        loss = loss_fun(y.unsqueeze(0), x.unsqueeze(0))

        loss.backward()
        optimizer.step()
        print("Epoch %d, loss = %f" % (epoch, loss.item()))
        # b = x[0:1,:]
        # print(phi.invert(x))

    # Inspect the last augmented coordinate of the inverted points
    print(phi.invert(x)[:, -1])
    # print(x)
    # Round-trip the weights through disk to check serialization
    torch.save(phi.state_dict(), "../models/phi.pt")
    phi.load_state_dict(torch.load("../models/phi.pt"))
    # The forward map should agree with the flow evaluated at time t = 1
    print("consistency check:", phi(t)[:, 0:3] - phi.event_t(1, t))
    plot_flow(x[:, 0:3], t, phi, 128, t.shape[0] // 100)
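
embed_3d is not shown in this example. A plausible minimal implementation, inferred from the call site embed_3d(t, 0) and the in_dim=3 model (this is an assumption, not the original helper):

import torch

def embed_3d(t, value=0.0):
    # Lift [n, 2] parametric samples to 3D by appending a constant coordinate
    pad = torch.full((t.shape[0], 1), float(value), dtype=t.dtype, device=t.device)
    return torch.cat([t, pad], dim=1)
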
Example #4
    def test_lloyd_relaxation(self):
        import point_cloud_utils as pcu

        # v is a nv by 3 NumPy array of vertices
        # f is an nf by 3 NumPy array of face indexes into v
        # n is an nv by 3 NumPy array of per-vertex normals
        v, f, n = pcu.read_obj(os.path.join(self.test_path, "cube_twist.obj"))

        # Generate 1000 points on the mesh with Lloyd's algorithm
        samples = pcu.sample_mesh_lloyd(v, f, 1000)

        # Generate 100 points on the unit square with Lloyd's algorithm
        samples_2d = pcu.lloyd_2d(100)
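
A few assertions that could round out this test, checking shapes and that the 2D samples stay in the unit square (a sketch, assuming this method lives in a unittest.TestCase):

        self.assertEqual(samples.shape, (1000, 3))
        self.assertEqual(samples_2d.shape, (100, 2))
        self.assertTrue(((samples_2d >= 0.0) & (samples_2d <= 1.0)).all())
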
Example #5
def main():
    argparser = argparse.ArgumentParser()
    argparser.add_argument("mesh_filename",
                           type=str,
                           help="Point cloud to reconstruct")
    argparser.add_argument("--plot",
                           action="store_true",
                           help="Plot the output when done training")
    argparser.add_argument("--local-epochs",
                           "-nl",
                           type=int,
                           default=128,
                           help="Number of local fitting iterations")
    argparser.add_argument("--global-epochs",
                           "-ng",
                           type=int,
                           default=128,
                           help="Number of global fitting iterations")
    argparser.add_argument("--learning-rate",
                           "-lr",
                           type=float,
                           default=1e-3,
                           help="Step size for gradient descent")
    argparser.add_argument(
        "--device",
        "-d",
        type=str,
        default="cuda",
        help="The device to use when fitting (either 'cpu' or 'cuda')")
    argparser.add_argument(
        "--exact-emd",
        "-e",
        action="store_true",
        help="Use exact optimal transport distance instead of sinkhorn")
    argparser.add_argument("--max-sinkhorn-iters",
                           "-si",
                           type=int,
                           default=32,
                           help="Maximum number of Sinkhorn iterations")
    argparser.add_argument(
        "--sinkhorn-epsilon",
        "-sl",
        type=float,
        default=1e-3,
        help=
        "The reciprocal (1/lambda) of the sinkhorn regularization parameter.")
    argparser.add_argument(
        "--output",
        "-o",
        type=str,
        default="out.pt",
        help=
        "Destination to save the output reconstruction. Note, the file produced by this script "
        "is not a mesh or a point cloud. To construct a dense point cloud, "
        "see export_point_cloud.py.")
    argparser.add_argument(
        "--seed",
        "-s",
        type=int,
        default=-1,
        help="Random seed to use when initializing network weights. "
        "If the seed not positive, a seed is selected at random.")
    argparser.add_argument("--use-best",
                           action="store_true",
                           help="Use the model with the lowest loss")
    argparser.add_argument("--print-every",
                           type=int,
                           default=16,
                           help="Print every N epochs")
    args = argparser.parse_args()

    # We'll populate this dictionary and save it as output
    output_dict = {
        "final_model": None,
        "uv": None,
        "x": None,
        "transform": None,
        "exact_emd": args.exact_emd,
        "global_epochs": args.global_epochs,
        "local_epochs": args.local_epochs,
        "learning_rate": args.learning_rate,
        "device": args.device,
        "sinkhorn_epsilon": args.sinkhorn_epsilon,
        "max_sinkhorn_iters": args.max_sinkhorn_iters,
        "seed": utils.seed_everything(args.seed),
    }

    # Read a point cloud and normals from a file
    x, n = utils.load_point_cloud_by_file_extension(args.mesh_filename,
                                                    compute_normals=True)

    # Center the point cloud about its mean and align it with its principal components
    x, transform = transform_pointcloud(x, args.device)

    # Generate an initial set of UV samples in the plane
    uv = torch.tensor(pcu.lloyd_2d(x.shape[0]).astype(np.float32),
                      requires_grad=True,
                      device=args.device)

    # Initialize the model for the surface
    # phi = mlp_ultra_shallow(2, 3, hidden=8192).to(args.device)
    phi = MLP(2, 3).to(args.device)
    # phi = MLPWideAndDeep(2, 3).to(args.device)

    output_dict["uv"] = uv
    output_dict["x"] = x
    output_dict["transform"] = transform

    optimizer = torch.optim.Adam(phi.parameters(), lr=args.learning_rate)
    uv_optimizer = torch.optim.Adam([uv], lr=args.learning_rate)
    sinkhorn_loss = SinkhornLoss(max_iters=args.max_sinkhorn_iters,
                                 return_transport_matrix=True)
    mse_loss = nn.MSELoss()

    # Cache correspondences to plot them later
    pi = None

    # Cache model with the lowest loss if --use-best is passed
    best_model = None
    best_loss = np.inf

    for epoch in range(args.local_epochs):
        optimizer.zero_grad()
        uv_optimizer.zero_grad()

        epoch_start_time = time.time()

        y = phi(uv)

        with torch.no_grad():
            if args.exact_emd:
                # Exact optimal transport between uniform point masses
                M = pairwise_distances(
                    x.unsqueeze(0),
                    y.unsqueeze(0)).squeeze().cpu().numpy()
                p = ot.emd(np.ones(x.shape[0]), np.ones(x.shape[0]), M)
                p = torch.from_numpy(p.astype(np.float32)).to(args.device)
            else:
                _, p = sinkhorn_loss(x.unsqueeze(0), y.unsqueeze(0))
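            # p is a soft transport matrix between x and y; for each generated
            # point y_j, pick the target index with the largest transport mass
            # to get hard correspondences for the MSE loss below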
            pi = p.squeeze().max(0)[1]

        loss = mse_loss(x[pi].unsqueeze(0), y.unsqueeze(0))

        loss.backward()

        if args.use_best and loss.item() < best_loss:
            best_loss = loss.item()
            best_model = copy.deepcopy(phi.state_dict())

        epoch_end_time = time.time()

        if epoch % args.print_every == 0:
            print("%d/%d: [Loss = %0.5f] [Time = %0.3f]" %
                  (epoch, args.local_epochs, loss.item(),
                   epoch_end_time - epoch_start_time))

        optimizer.step()
        uv_optimizer.step()

    if args.use_best:
        phi.load_state_dict(best_model)

    output_dict["final_model"] = copy.deepcopy(phi.state_dict())

    torch.save(output_dict, args.output)

    if args.plot:
        plot_reconstruction(uv, x, transform, phi, pad=1.0)
        plot_correspondences(phi, uv, x, pi)
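
The --output help above points at export_point_cloud.py for turning the saved file into a dense point cloud. A minimal sketch of how the saved dictionary might be consumed, assuming the MLP constructor used above (the 10x upsampling factor and the input path are assumptions):

import numpy as np
import point_cloud_utils as pcu
import torch

# Load the dictionary saved by main() above
output = torch.load("out.pt")

phi = MLP(2, 3).to(output["device"])
phi.load_state_dict(output["final_model"])

# Evaluate the fitted surface at a denser set of blue-noise UV samples
n_dense = 10 * output["x"].shape[0]  # 10x upsampling is an arbitrary choice
uv_dense = torch.from_numpy(pcu.lloyd_2d(n_dense).astype(np.float32))
uv_dense = uv_dense.to(output["device"])
with torch.no_grad():
    y_dense = phi(uv_dense).cpu().numpy()

# y_dense is still in the normalized frame; undo output["transform"] to map
# the points back to the input coordinate system before saving them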