def execute(self, x):
    batch_size = x.shape[0]
    x = nn.relu(self.fc1(x))
    x = nn.relu(self.fc2(x))

    # decoder follows NMR: predict a centroid and per-vertex bias,
    # then deform the base sphere vertices in logit space
    centroid = self.fc_centroid(x) * self.centroid_scale
    bias = self.fc_bias(x) * self.bias_scale
    bias = bias.view(-1, self.nv, 3)

    base = self.vertices_base * self.obj_scale
    sign = nn.sign(base)
    base = base.abs()
    base = jt.log(base / (1 - base))  # logit, the inverse of sigmoid

    centroid = jt.tanh(centroid[:, None, :])
    scale_pos = 1 - centroid
    scale_neg = centroid + 1

    # squash the deformed vertices into the box allowed by the centroid
    vertices = (base + bias).sigmoid() * sign
    vertices = nn.relu(vertices) * scale_pos - nn.relu(-vertices) * scale_neg
    vertices = vertices + centroid
    vertices = vertices * 0.5
    faces = self.faces[None, :, :].repeat(batch_size, 1, 1)

    return vertices, faces
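# For reference, a minimal sketch of the __init__ this decoder assumes.
# Attribute names (fc1, fc2, fc_centroid, fc_bias, vertices_base, faces, nv,
# the scale constants) are taken from execute() above; the loader call,
# hidden sizes, and default values are illustrative assumptions, not the
# repository's definitive implementation.
class DecoderSketch(nn.Module):
    def __init__(self, filename_obj, dim_in=512, dim_hidden=1024,
                 centroid_scale=0.1, bias_scale=1.0, obj_scale=0.5):
        super(DecoderSketch, self).__init__()
        # base template mesh (e.g. a sphere) whose vertices are deformed;
        # nr.load_obj is an assumed loader returning (vertices, faces)
        vertices_base, faces = nr.load_obj(filename_obj)
        self.vertices_base = vertices_base
        self.faces = faces
        self.nv = self.vertices_base.shape[0]
        self.obj_scale = obj_scale
        self.centroid_scale = centroid_scale
        self.bias_scale = bias_scale
        self.fc1 = nn.Linear(dim_in, dim_hidden)
        self.fc2 = nn.Linear(dim_hidden, dim_hidden)
        self.fc_centroid = nn.Linear(dim_hidden, 3)        # 3D centroid offset
        self.fc_bias = nn.Linear(dim_hidden, self.nv * 3)  # per-vertex bias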
def execute(self):
    # render from a random azimuth each step so the texture is supervised
    # from many viewpoints
    azimuth = np.random.uniform(0, 360)
    self.renderer.eye = nr.get_points_from_angles(2.732, 0, azimuth)
    image, _, _ = self.renderer(self.vertices, self.faces, jt.tanh(self.textures))
    loss = jt.sum((image - self.image_ref).sqr())
    return loss
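# A minimal sketch of the Model state this loss assumes. Attribute names
# (vertices, faces, textures, image_ref, renderer) follow execute() above;
# the texture resolution, the imread-based reference loading, and the
# renderer settings are illustrative assumptions.
class ModelSketch(nn.Module):
    def __init__(self, filename_obj, filename_ref):
        super(ModelSketch, self).__init__()
        # load the .obj template; geometry is fixed, only textures are trained
        vertices, faces = nr.load_obj(filename_obj)
        self.vertices = vertices[None, :, :]
        self.faces = faces[None, :, :]
        # learnable per-face texture cube (texture_size = 4 is an assumption)
        texture_size = 4
        self.textures = jt.zeros((1, self.faces.shape[1], texture_size,
                                  texture_size, texture_size, 3))
        # reference image in CHW layout, matching the rendered output
        self.image_ref = jt.array(
            imread(filename_ref).astype('float32').transpose(2, 0, 1)[None] / 255.)
        # look-at renderer; eye position is randomized per call in execute()
        self.renderer = nr.Renderer(camera_mode='look_at')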
def execute(self, x):
    x = self.fc1(x)
    x = nn.relu(x)
    x = self.fc2(x)
    x = nn.relu(x)
    x = self.fc3(x)
    return jt.tanh(x)
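# Assumed layer definitions for the MLP above; the layer names come from
# execute(), while the dimensions are placeholders.
class MLPSketch(nn.Module):
    def __init__(self, dim_in, dim_hidden, dim_out):
        super(MLPSketch, self).__init__()
        self.fc1 = nn.Linear(dim_in, dim_hidden)
        self.fc2 = nn.Linear(dim_hidden, dim_hidden)
        self.fc3 = nn.Linear(dim_hidden, dim_out)  # tanh keeps output in [-1, 1]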
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-io', '--filename_obj', type=str, default=os.path.join(data_dir, 'teapot.obj'))
    parser.add_argument('-ir', '--filename_ref', type=str, default=os.path.join(data_dir, 'example4_ref.png'))
    parser.add_argument('-or', '--filename_output', type=str, default=os.path.join(data_dir, 'example4_result.gif'))
    parser.add_argument('-mr', '--make_reference_image', type=int, default=0)
    parser.add_argument('-g', '--gpu', type=int, default=0)
    args = parser.parse_args()

    if args.make_reference_image:
        make_reference_image(args.filename_ref, args.filename_obj)

    model = Model(args.filename_obj, args.filename_ref)

    # optimize the textures against the reference image, saving a frame
    # per step; stop early once the loss is low enough
    optimizer = nn.Adam(model.parameters(), lr=0.1)
    loop = tqdm.tqdm(range(1000))
    for i in loop:
        loss = model()
        optimizer.step(loss)
        images, _, _ = model.renderer(model.vertices, model.faces, jt.tanh(model.textures))
        image = images.numpy()[0].transpose(1, 2, 0)
        imsave('/tmp/_tmp_%04d.png' % i, image)
        loop.set_description('Optimizing (loss %.4f)' % loss.data)
        if loss.data < 70:
            break
    make_gif(args.filename_output)
def make_reference_image(filename_ref, filename_obj):
    model = Model(filename_obj)
    model.renderer.eye = nr.get_points_from_angles(2.732, 30, -15)
    images, _, _ = model.renderer.render(model.vertices, model.faces, jt.tanh(model.textures))
    # convert from CHW to HWC before saving, as in the optimization loop
    image = images.numpy()[0].transpose((1, 2, 0))
    imsave(filename_ref, image)
def execute(self, batch_size):
    # deform the template in logit space so vertices stay inside (-1, 1)
    base = jt.log(self.vertices.abs() / (1 - self.vertices.abs()))
    centroid = jt.tanh(self.center)
    vertices = (base + self.displace).sigmoid() * nn.sign(self.vertices)
    vertices = nn.relu(vertices) * (1 - centroid) - nn.relu(-vertices) * (centroid + 1)
    vertices = vertices + centroid

    # apply Laplacian and flatten geometry constraints
    laplacian_loss = self.laplacian_loss(vertices).mean()
    flatten_loss = self.flatten_loss(vertices).mean()

    return jr.Mesh(vertices.repeat(batch_size, 1, 1),
                   self.faces.repeat(batch_size, 1, 1),
                   dr_type='n3mr'), laplacian_loss, flatten_loss
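# A minimal sketch of the deformation Model's assumed state. Names
# (vertices, faces, displace, center, laplacian_loss, flatten_loss) follow
# execute() above; the template loading and loss constructors mirror
# jrender's mesh-deformation example but are assumptions here, not the
# repository's definitive code.
class DeformModelSketch(nn.Module):
    def __init__(self, template_path):
        super(DeformModelSketch, self).__init__()
        # fixed template mesh (e.g. a sphere) with vertices inside (-1, 1)
        self.template_mesh = jr.Mesh.from_obj(template_path, dr_type='n3mr')
        self.vertices = (self.template_mesh.vertices * 0.5).stop_grad()
        self.faces = self.template_mesh.faces.stop_grad()
        # learnable per-vertex displacement (logit space) and global center
        self.displace = jt.zeros(self.template_mesh.vertices.shape)
        self.center = jt.zeros((1, 1, 3))
        # smoothness regularizers consumed in execute()
        self.laplacian_loss = jr.LaplacianLoss(self.vertices[0], self.faces[0])
        self.flatten_loss = jr.FlattenLoss(self.faces[0])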
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-io', '--filename_obj', type=str, default=os.path.join(data_dir, 'obj/spot/spot_triangulated.obj'))
    parser.add_argument('-ir', '--filename_ref', type=str, default=os.path.join(data_dir, 'ref/ref_texture.png'))
    parser.add_argument('-or', '--filename_output', type=str, default=os.path.join(data_dir, 'results/output_optim_textures'))
    parser.add_argument('-g', '--gpu', type=int, default=0)
    args = parser.parse_args()
    os.makedirs(args.filename_output, exist_ok=True)

    model = Model(args.filename_obj, args.filename_ref)

    optimizer = nn.Adam([model.textures], lr=0.1, betas=(0.5, 0.999))
    loop = tqdm.tqdm(range(300))
    for num in loop:
        loop.set_description('Optimizing')
        loss = model()
        optimizer.step(loss)

    # draw object
    loop = tqdm.tqdm(range(0, 360, 4))
    for num, azimuth in enumerate(loop):
        loop.set_description('Drawing')
        model.renderer.transform.set_eyes_from_angles(2.732, 0, azimuth)
        images = model.renderer(model.vertices, model.faces, jt.tanh(model.textures))
        image = images.numpy()[0].transpose((1, 2, 0))
        imsave('/tmp/_tmp_%04d.png' % num, image)
    make_gif(os.path.join(args.filename_output, 'result.gif'))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-io', '--filename_obj', type=str, default=os.path.join(data_dir, 'teapot.obj'))
    parser.add_argument('-ir', '--filename_ref', type=str, default=os.path.join(data_dir, 'example3_ref.png'))
    parser.add_argument('-or', '--filename_output', type=str, default=os.path.join(data_dir, 'example3_result.gif'))
    parser.add_argument('-g', '--gpu', type=int, default=0)
    args = parser.parse_args()

    model = Model(args.filename_obj, args.filename_ref)

    optimizer = nn.Adam(model.parameters(), lr=0.1, betas=(0.5, 0.999))
    loop = tqdm.tqdm(range(300))
    for num in loop:
        loop.set_description('Optimizing')
        loss = model()
        optimizer.step(loss)

    # draw object
    loop = tqdm.tqdm(range(0, 360, 4))
    for num, azimuth in enumerate(loop):
        loop.set_description('Drawing')
        model.renderer.eye = nr.get_points_from_angles(2.732, 0, azimuth)
        images, _, _ = model.renderer(model.vertices, model.faces, jt.tanh(model.textures))
        image = images.numpy()[0].transpose((1, 2, 0))
        imsave('/tmp/_tmp_%04d.png' % num, image)
    make_gif(args.filename_output)
def execute(self, x):
    self.x = x
    # Mish activation: x * tanh(softplus(x)) = x * tanh(ln(1 + exp(x)))
    return x * jt.tanh(nn.softplus(x))
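# Quick usage check for the Mish activation above (the module name `Mish`
# is an assumption; only its execute() appears in this file). Mish stays
# close to the identity for large positive inputs and saturates towards 0
# for large negative ones.
if __name__ == '__main__':
    mish = Mish()
    x = jt.array(np.linspace(-3, 3, 7).astype('float32'))
    print(mish(x).numpy())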