def show_models():
    TRANSITION_TIME = 2  # seconds spent blending between two adjacent samples
    viewer = MeshRenderer()

    while True:
        for sample_index in range(SAMPLE_COUNT):
            try:
                start = time.perf_counter()
                end = start + TRANSITION_TIME

                while time.perf_counter() < end:
                    # Progress of the current transition, clamped to [0, 1].
                    progress = min(
                        (time.perf_counter() - start) / TRANSITION_TIME, 1.0)

                    if ROTATE_MODEL:
                        viewer.rotation = (
                            147 + (sample_index + progress) / SAMPLE_COUNT * 360 * 6,
                            40)

                    # Evaluate the latent-space spline at a continuous position
                    # and extract the corresponding mesh from the SDF network.
                    code = torch.tensor(spline(float(sample_index) + progress),
                                        dtype=torch.float32, device=device)
                    viewer.set_mesh(sdf_net.get_mesh(
                        code,
                        voxel_resolution=64,
                        sphere_only=False,
                        level=SURFACE_LEVEL))
            except KeyboardInterrupt:
                viewer.stop()
                return
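# The spline evaluated in show_models() is not defined in this excerpt; it is
# assumed to map a continuous position in [0, SAMPLE_COUNT] to a latent code.
# A minimal sketch of how it could be built with SciPy, assuming the codes of
# the displayed shapes are stacked into a NumPy array latent_codes of shape
# (SAMPLE_COUNT, latent_size) (array name hypothetical):

import numpy as np
from scipy.interpolate import CubicSpline

# Repeat the first code at position SAMPLE_COUNT so the curve closes into a
# loop, matching the endless while/for loop in show_models().
closed_codes = np.concatenate([latent_codes, latent_codes[:1]], axis=0)
spline = CubicSpline(np.arange(SAMPLE_COUNT + 1), closed_codes, axis=0,
                     bc_type='periodic')

# spline(t) now returns one interpolated latent code for any continuous t,
# which show_models() converts to a tensor and passes to sdf_net.get_mesh().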
# Fill the second half of the batch with points that have negative SDF values
# (inside the surface); the first half is filled before this excerpt.
indices[BATCH_SIZE // 2:] = torch.tensor(
    np.random.choice(negative_indices, BATCH_SIZE // 2), device=device)

sdf_net.zero_grad()
predicted_sdf = sdf_net(points[indices, :], latent_code)
batch_sdf = sdf[indices]

# L1 loss between predicted and ground-truth SDF values at the sampled points.
loss = torch.mean(torch.abs(predicted_sdf - batch_sdf))
loss.backward()
optimizer.step()

# Each time the loss drops below the target for the current frame, extract a
# mesh and optionally save a screenshot of it.
if loss.item() < error_targets[image_index]:
    try:
        viewer.set_mesh(sdf_net.get_mesh(
            latent_code[0, :], voxel_resolution=64, raise_on_empty=True))
        if save_images:
            image = viewer.get_image(flip_red_blue=True)
            cv2.imwrite("images/frame-{:05d}.png".format(image_index), image)
            image_index += 1
    except ValueError:
        # No surface crossed the voxel grid yet; skip this frame.
        pass

step += 1
print('Step {:04d}, Image {:04d}, loss: {:.6f}, target: {:.6f}'.format(
    step, image_index, loss.item(), error_targets[image_index]))

# This except clause closes the try block that wraps the training iteration
# above (opened before this excerpt).
except KeyboardInterrupt:
    viewer.stop()
    break
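# For context, a minimal sketch of the setup this reconstruction loop assumes.
# The names positive_indices, sdf_cpu and FRAME_COUNT, the choice of optimizer,
# and the concrete threshold values in error_targets are assumptions made for
# illustration, not taken from the excerpt above:

import numpy as np
import torch

# Split the precomputed sample points by the sign of their SDF value so each
# batch can be drawn half from outside (positive) and half from inside
# (negative) the surface.
sdf_cpu = sdf.cpu().numpy()
positive_indices = np.nonzero(sdf_cpu > 0)[0]
negative_indices = np.nonzero(sdf_cpu < 0)[0]
indices = torch.zeros(BATCH_SIZE, dtype=torch.int64, device=device)

# One loss threshold per video frame; a frame is rendered the first time the
# training loss falls below the current threshold.
error_targets = np.logspace(np.log10(0.01), np.log10(0.0005), num=FRAME_COUNT)

# Here the network weights are optimized; the latent code could be optimized
# jointly by adding it to the parameter list.
optimizer = torch.optim.Adam(sdf_net.parameters(), lr=1e-4)
step, image_index = 0, 0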