def main():
    import sys
    filenames = sys.argv[1:]
    assert len(filenames) == 2
    fromfile, tofile = filenames
    print("Converting '%s' to '%s'" % (fromfile, tofile))
    image = imread(fromfile)
    imwrite(image, tofile)
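A minimal sketch of how this converter would be invoked as a script; the `__main__` guard and the exit-code plumbing are assumptions, not part of the original:

if __name__ == '__main__':
    import sys
    sys.exit(main())  # hypothetical entry point; main() returning None exits with 0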
def main():
    filenames = sys.argv[1:]
    if len(filenames) < 2:
        print("At least two files are required to perform the subtraction.")
        return 1
    files = [(filename, imread(filename)) for filename in filenames]
    name, left = files.pop(0)
    for right_name, right in files:
        left = subtract(left, right)
        filename_tail = path.split(right_name)[1]
        name += "-%s" % filename_tail
    print("Writing to %s" % name)
    imwrite(left, name)
def main():
    import sys
    filenames = sys.argv[1:]
    if not filenames:
        print("No filenames were specified")
        return 1

    pea = PEA()
    pea.resolution_limit = 0  # do not use img_resize
    pea.unwrapper = unwrap_qg  # a better algorithm
    pea.phase_denoise = 4

    for filename in filenames:
        print("\n%s:" % filename)
        if "-h" in filename:
            affix = "-h"
        elif "-c" in filename:
            affix = "-c"
        else:
            print("Invalid filename, must be of the /.*[hc].[.*]/ form")
            print("Ignoring '%s'" % filename)
            continue

        pea.filename_holo = filename
        # module_filename = filename.replace(affix, "-module")
        # imwrite(pea.module, module_filename)
        # phase_filename = filename.replace(affix, "-phase")
        # imwrite(pea.phase, phase_filename)
        # phasediff_filename = filename.replace(affix, "-phasediff")
        # imwrite(wrapped_diff(pea.phase), phasediff_filename)
        uphase_filename = filename.replace(affix, "-unwrapped phase qg")
        imwrite(pea.unwrapped_phase, uphase_filename)
    return 0
def forward(ctx, seed, *args):
    # Unpack arguments, in the order produced by the serializer.
    args_iter = iter(args)
    num_materials = next(args_iter)
    num_shapes = next(args_iter)
    num_lights = next(args_iter)
    cam_to_world = next(args_iter)
    world_to_cam = next(args_iter)
    sample_to_cam = next(args_iter)
    cam_to_sample = next(args_iter)
    fov_factor = next(args_iter)
    aspect_ratio = next(args_iter)
    clip_near = next(args_iter)
    fisheye = next(args_iter)

    diffuse_reflectance_list = []
    specular_reflectance_list = []
    roughness_list = []
    diffuse_uv_scale_list = []
    specular_uv_scale_list = []
    roughness_uv_scale_list = []
    two_sided_list = []
    for i in range(num_materials):
        diffuse_reflectance_list.append(next(args_iter))
        specular_reflectance_list.append(next(args_iter))
        roughness_list.append(next(args_iter))
        diffuse_uv_scale_list.append(next(args_iter))
        specular_uv_scale_list.append(next(args_iter))
        roughness_uv_scale_list.append(next(args_iter))
        two_sided_list.append(next(args_iter))

    vertices_list = []
    indices_list = []
    uvs_list = []
    normals_list = []
    material_id_list = []
    for i in range(num_shapes):
        vertices_list.append(next(args_iter))
        indices_list.append(next(args_iter))
        uvs_list.append(next(args_iter))
        normals_list.append(next(args_iter))
        material_id_list.append(next(args_iter))

    light_shape_id_list = []
    light_intensity_list = []
    for i in range(num_lights):
        light_shape_id_list.append(next(args_iter))
        light_intensity_list.append(next(args_iter))

    resolution = next(args_iter)
    num_samples = next(args_iter)
    max_bounces = next(args_iter)

    cam = delta_ray.Camera(cam_to_world.data.numpy(),
                           world_to_cam.data.numpy(),
                           sample_to_cam.data.numpy(),
                           cam_to_sample.data.numpy(),
                           fov_factor, aspect_ratio, clip_near, fisheye)
    materials = []
    for diffuse_reflectance, specular_reflectance, roughness, \
            diffuse_uv_scale, specular_uv_scale, roughness_uv_scale, two_sided in \
            zip(diffuse_reflectance_list, specular_reflectance_list, roughness_list,
                diffuse_uv_scale_list, specular_uv_scale_list,
                roughness_uv_scale_list, two_sided_list):
        materials.append(delta_ray.Material(
            diffuse_reflectance.data.numpy(),
            specular_reflectance.data.numpy(),
            roughness.data.numpy(),
            diffuse_uv_scale.data.numpy(),
            specular_uv_scale.data.numpy(),
            roughness_uv_scale.data.numpy(),
            two_sided))
    shapes = []
    for vertices, indices, uvs, normals, material_id in \
            zip(vertices_list, indices_list, uvs_list, normals_list, material_id_list):
        mat = materials[material_id]
        if uvs is not None:
            uvs = uvs.numpy()
        if normals is not None:
            normals = normals.data.numpy()
        shapes.append(delta_ray.Shape(
            vertices.data.numpy(), indices.data.numpy(), uvs, normals, mat, None))
    lights = []
    for light_shape_id, light_intensity in zip(light_shape_id_list,
                                               light_intensity_list):
        light_mesh = shapes[light_shape_id]
        light = delta_ray.Light(light_mesh, light_intensity.data.numpy())
        light_mesh.light = light
        lights.append(light)
    # d_img = np.ones([resolution[1], resolution[0], 3], dtype=np.float32)
    d_img = np.array(0.0, dtype=np.float32)
    print('forward pass')
    result = delta_ray.render(cam, shapes, materials, lights,
                              resolution, d_img, num_samples, max_bounces,
                              seed, True)
    if False:  # debugging: dump the screen-space x derivatives
        import matplotlib.cm as cm
        dx = result.dx_image
        image.imwrite(dx, 'dx.exr')
        # width = 0.02
        # dx = np.clip(dx, -width, width)
        # dx = (dx + width) / (2.0 * width)
        # dx = cm.viridis(dx[:, :, 0])
        # image.imwrite(dx, 'dx.png')
        exit()
    # dy = result.dy_image
    # print('max(dy):', np.max(dy))
    # print('min(dy):', np.min(dy))
    # print('sum(dy):', np.sum(dy))
    # dy = dy  # / np.max(dy)
    # image.imwrite(dy, 'fwd_dy.exr')
    # dy = -dy  # / np.min(dy)
    # image.imwrite(dy, 'fwd_inv_dy.exr')
    # dx = result.dx_image
    # print('max(dx):', np.max(dx))
    # print('min(dx):', np.min(dx))
    # print('sum(dx):', np.sum(dx))
    # dx = dx  # / np.max(dx)
    # image.imwrite(dx, 'fwd_dx.exr')
    # dx = -dx  # / np.min(dx)
    # image.imwrite(dx, 'fwd_inv_dx.exr')
    # exit()

    # Stash everything backward() needs on the autograd context.
    ctx.cam = cam
    ctx.shapes = shapes
    ctx.materials = materials
    ctx.lights = lights
    ctx.resolution = resolution
    ctx.num_samples = num_samples
    ctx.max_bounces = max_bounces
    ctx.seed = seed
    img = torch.from_numpy(result.image)
    return img
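This forward pass is not called directly; it is reached through torch.autograd.Function.apply, which the test scripts below alias as render. A minimal usage sketch, with args built by serialize_scene:

render = render_pytorch.RenderFunction.apply
img = render(0, *args)  # seed 0; gradients flow back into the scene tensors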
import load_mitsuba
import render_pytorch
import image
import transform
import torch
import torch.optim
from torch.autograd import Variable
import numpy as np
import scipy.ndimage.filters
import scipy.special

cam, materials, shapes, lights, resolution = \
    load_mitsuba.load_mitsuba('test/scenes/room_0/room.xml')
args = render_pytorch.RenderFunction.serialize_scene(
    cam, materials, shapes, lights, resolution,
    num_samples = 625, max_bounces = 1)
render = render_pytorch.RenderFunction.apply
img = render(0, *args)
image.imwrite(img.data.numpy(), 'test/results/room_0/target.exr')
image.imwrite(img.data.numpy(), 'test/results/room_0/target.png')

diffuse_reflectance_bases = []
mat_variables = []
# Don't optimize the last 3 materials
for mat_id in range(len(materials)):
    mat = materials[mat_id]
    d = np.array([0.5, 0.5, 0.5], dtype=np.float32)
    diffuse_reflectance_bases.append(
        Variable(torch.from_numpy(scipy.special.logit(d)),
                 requires_grad = True))
    mat_variables.append(diffuse_reflectance_bases[-1])
lgt_variables = []
lgt_intensity_bases = []
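The logit here is the inverse of the sigmoid: the optimizer updates an unconstrained base variable, and squashing it back through a sigmoid keeps the reflectance inside (0, 1). A small illustrative sketch; how the bases are mapped back to reflectances is an assumption, since that code is not shown here:

base = diffuse_reflectance_bases[0]   # logit([0.5, 0.5, 0.5]) == [0, 0, 0]
reflectance = torch.sigmoid(base)     # recovers [0.5, 0.5, 0.5], always in (0, 1)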
def forward(self, input):
    tanh = nn.Tanh()
    height_batch = self.preprocess(input)
    height_batch = height_batch.view(-1, 4 * self.ngf, 4, 4)
    _4x4 = height_batch
    _8x8 = self._4_to_8(_4x4)
    _16x16 = self._8_to_16(_8x8)
    upsample = nn.Upsample(size=(32, 32), mode='bilinear')
    # Average the multi-resolution branches into one 32x32 heightfield.
    height_batch = (tanh(self._16_to_32(_16x16)) +
                    upsample(tanh(self._16_to_16(_16x16))) +
                    upsample(tanh(self._8_to_8(_8x8))) +
                    upsample(tanh(self._4_to_4(_4x4)))) / 4.0
    height_batch = height_batch.permute(0, 3, 2, 1)
    if np.any(np.isnan(height_batch.data.numpy())):
        print('NANNANNAN')
        exit()
    if self.save_heightfield:
        height_batch_np = height_batch.data.numpy()
        height_flatten = np.zeros([32 * 8, 32 * 8, 1])
        for i in range(8):
            for j in range(8):
                img = height_batch_np[8 * i + j, :, :, :]
                height_flatten[32 * i:32 * (i + 1), 32 * j:32 * (j + 1), :] = img
        image.imwrite(height_flatten.squeeze(),
                      'results/heightfield_gan/heightfield_%06d.png' % self.iteration)
    output = Variable(torch.zeros([input.shape[0], 1, 32, 32]))
    for i in range(input.shape[0]):
        # Lift the heightfield into 3D: x and z stay zero, y is the height.
        height = torch.stack([
            Variable(torch.from_numpy(np.zeros(heightfield_res, dtype=np.float32))),
            height_batch[i, :, :, 0],
            Variable(torch.from_numpy(np.zeros(heightfield_res, dtype=np.float32)))],
            dim=-1)
        height = height.view([-1, 3])
        shape_plane.vertices = plane_vertices + height
        if self.save_heightfield:
            v = shape_plane.vertices.data.numpy()
            ind = shape_plane.indices.data.numpy() + 1
            with open('results/heightfield_gan/model_%06d_%03d.obj'
                      % (self.iteration, i), 'w') as f:
                for vid in range(v.shape[0]):
                    f.write('v %f %f %f\n' % (v[vid, 0], v[vid, 1], v[vid, 2]))
                for iid in range(ind.shape[0]):
                    f.write('f %d %d %d\n' % (ind[iid, 0], ind[iid, 1], ind[iid, 2]))
        shape_plane.normals = compute_vertex_normal(
            shape_plane.vertices, shape_plane.indices)
        cam = camera.Camera(
            position = Variable(torch.from_numpy(
                np.array([self.xz[i][0], 3, self.xz[i][1]], dtype=np.float32))),
            look_at = Variable(torch.from_numpy(
                np.array([0, 0, 0], dtype=np.float32))),
            up = Variable(torch.from_numpy(
                np.array([0, 1, 0], dtype=np.float32))),
            cam_to_world = None,
            fov = Variable(torch.from_numpy(np.array([45.0], dtype=np.float32))),
            clip_near = Variable(torch.from_numpy(np.array([0.01], dtype=np.float32))),
            clip_far = Variable(torch.from_numpy(np.array([10000.0], dtype=np.float32))),
            resolution = self.resolution)
        args = render_pytorch.RenderFunction.serialize_scene(
            cam, materials, shapes, lights, self.resolution, 4, 1)
        render = render_pytorch.RenderFunction.apply
        img = render(random.randint(0, 1048576), *args)
        img = img.permute([2, 1, 0])
        output[i, :, :, :] = img[0, :, :]
    return output
def backward(ctx, grad_img):
    # Restore the scene stashed by forward().
    cam = ctx.cam
    shapes = ctx.shapes
    materials = ctx.materials
    lights = ctx.lights
    resolution = ctx.resolution
    num_samples = ctx.num_samples
    max_bounces = ctx.max_bounces
    seed = ctx.seed
    print('backward pass')
    result = delta_ray.render(cam, shapes, materials, lights,
                              resolution, grad_img.data.numpy(),
                              num_samples, max_bounces, seed, True)
    if False:  # debugging: dump gradient images
        image.imwrite(result.image, 'img.exr')
        n = grad_img.data.numpy().copy()
        n = n / np.max(n)
        image.imwrite(n, 'grad_img.exr')
        n = n / np.min(n)
        image.imwrite(n, 'inv_grad_img.exr')
        # dy = result.dy_image
        # print('max(dy):', np.max(dy))
        # print('min(dy):', np.min(dy))
        # print('sum(dy):', np.sum(dy))
        # dy = dy / np.max(dy)
        # image.imwrite(dy, 'dy.exr')
        # dy = dy / np.min(dy)
        # image.imwrite(dy, 'inv_dy.exr')
        dx = result.dx_image
        print('max(dx):', np.max(dx))
        print('min(dx):', np.min(dx))
        print('sum(dx):', np.sum(dx))
        dx = dx  # / np.max(dx)
        image.imwrite(dx, 'dx.exr')
        dx = -dx  # / np.min(dx)
        image.imwrite(dx, 'inv_dx.exr')
        exit()

    # One gradient per forward() argument, in the same order.
    ret_list = []
    ret_list.append(None)  # seed
    ret_list.append(None)  # num_materials
    ret_list.append(None)  # num_shapes
    ret_list.append(None)  # num_lights
    ret_list.append(Variable(torch.from_numpy(
        result.d_camera.d_cam_to_world)))   # cam_to_world
    ret_list.append(Variable(torch.from_numpy(
        result.d_camera.d_world_to_cam)))   # world_to_cam
    ret_list.append(Variable(torch.from_numpy(
        result.d_camera.d_sample_to_cam)))  # sample_to_cam
    ret_list.append(Variable(torch.from_numpy(
        result.d_camera.d_cam_to_sample)))  # cam_to_sample
    ret_list.append(None)  # fov_factor
    ret_list.append(None)  # aspect_ratio
    ret_list.append(None)  # clip_near
    ret_list.append(None)  # fisheye
    for d_material in result.d_materials:
        d_diffuse = Variable(torch.from_numpy(d_material.diffuse_reflectance))
        d_specular = Variable(torch.from_numpy(d_material.specular_reflectance))
        d_roughness = Variable(torch.from_numpy(d_material.roughness))
        d_diffuse_uv_scale = Variable(torch.from_numpy(d_material.diffuse_uv_scale))
        d_specular_uv_scale = Variable(torch.from_numpy(d_material.specular_uv_scale))
        d_roughness_uv_scale = Variable(torch.from_numpy(d_material.roughness_uv_scale))
        ret_list.append(d_diffuse)    # diffuse_reflectance
        ret_list.append(d_specular)   # specular_reflectance
        ret_list.append(d_roughness)  # roughness
        ret_list.append(d_diffuse_uv_scale)
        ret_list.append(d_specular_uv_scale)
        ret_list.append(d_roughness_uv_scale)
        ret_list.append(None)         # two_sided
    for d_shape in result.d_shapes:
        d_vertices = Variable(torch.from_numpy(d_shape.vertices))
        ret_list.append(d_vertices)   # vertices
        ret_list.append(None)         # indices
        ret_list.append(None)         # uvs
        if d_shape.normals.ndim != 2:
            ret_list.append(None)     # normals
        else:
            d_normals = Variable(torch.from_numpy(d_shape.normals))
            ret_list.append(d_normals)  # normals
        ret_list.append(None)         # material_id
    for d_light in result.d_lights:
        ret_list.append(None)         # light shape id
        # intensity
        ret_list.append(Variable(torch.from_numpy(d_light.intensity)))
    ret_list.append(None)  # resolution
    ret_list.append(None)  # num_samples
    ret_list.append(None)  # max_bounces
    return tuple(ret_list)
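PyTorch requires backward() to return exactly one gradient per argument of forward(), with None for non-differentiable inputs such as seed or resolution, which is why ret_list mirrors the unpacking order above. A self-contained toy analogue of the same contract:

import torch
from torch.autograd import Function

class Scale(Function):
    @staticmethod
    def forward(ctx, x, factor):
        ctx.factor = factor
        return x * factor

    @staticmethod
    def backward(ctx, grad_out):
        # one entry per forward() input: d(loss)/dx, and None for `factor`
        return grad_out * ctx.factor, None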
materials[-1].diffuse_reflectance = \
    Variable(torch.from_numpy(np.array([0.15, 0.2, 0.15], dtype=np.float32)))
materials[-1].specular_reflectance = \
    Variable(torch.from_numpy(np.array([0.8, 0.8, 0.8], dtype=np.float32)))
materials[-1].roughness = \
    Variable(torch.from_numpy(np.array([0.0001], dtype=np.float32)))
args = render_pytorch.RenderFunction.serialize_scene(
    cam, materials, shapes, lights, resolution,
    num_samples = 256, max_bounces = 2)
render = render_pytorch.RenderFunction.apply
# img = render(0, *args)
# image.imwrite(img.data.numpy(), 'test/results/teapot_specular/target.exr')
target = Variable(
    torch.from_numpy(image.imread('test/results/teapot_specular/target.exr')))
image.imwrite(target.data.numpy(), 'test/results/teapot_specular/target.png')
ref_pos = shapes[-1].vertices
translation = Variable(torch.from_numpy(
    np.array([20.0, 0.0, 2.0], dtype=np.float32)), requires_grad=True)
shapes[-1].vertices = ref_pos + translation
args = render_pytorch.RenderFunction.serialize_scene(
    cam, materials, shapes, lights, resolution,
    num_samples = 256, max_bounces = 2)
# img = render(1, *args)
# image.imwrite(img.data.numpy(), 'test/results/teapot_specular/init.png')
# diff = torch.abs(target - img)
# image.imwrite(diff.data.numpy(), 'test/results/teapot_specular/init_diff.png')
optimizer = torch.optim.Adam([translation], lr=0.5)
if iteration % 100 == 0:
    netG.xz = fixed_xz
    netG.iteration = iteration
    netG.save_heightfield = True
    fake = netG(fixed_noise).data.numpy()
    netG.save_heightfield = False
    # Tile the 64 generated 32x32 heightfields into one 256x256 preview image.
    fake_flatten = np.zeros([32 * 8, 32 * 8, 1])
    for i in range(8):
        for j in range(8):
            img = fake[8 * i + j, :, :, :].transpose([2, 1, 0])
            fake_flatten[32 * i:32 * (i + 1), 32 * j:32 * (j + 1), :] = img
    if np.any(np.isnan(fake_flatten)):
        print('NANNANNAN')
        exit()
    image.imwrite(fake_flatten.squeeze(),
                  'results/heightfield_gan/generated_%06d.png' % iteration)
    # netG.resolution = [256, 256]
    # netG.x = 0.0
    # netG.z = -6.0
    # fake = netG(fixed_noise)
    # netG.resolution = resolution
    # image.imwrite(fake.data.numpy(),
    #               'results/heightfield_gan/highres_%06d.exr' % iteration)

# render a "real" data sample
# height = generate_heightfield(heightfield_res,
#                               0.2 * random.random() + 0.5,
#                               0.5 * random.random() + 0.5,
#                               0.5 * random.random() + 0.5,
#                               math.pi * random.random(),
import load_mitsuba
import render_pytorch
import image
import transform
import torch
import torch.optim
from torch.autograd import Variable
import numpy as np

cam, materials, shapes, lights, resolution = \
    load_mitsuba.load_mitsuba('results/living-room-3/scene.xml')
args = render_pytorch.RenderFunction.serialize_scene(
    cam, materials, shapes, lights, resolution, 64, 32)
render = render_pytorch.RenderFunction.apply
img = render(0, *args)
image.imwrite(img.data.numpy(), 'results/test_living_room/living_room.exr')
# Rec. 709 relative-luminance weights for linear RGB.
target_luminance = torch.mean(0.212671 * img[:, :, 0] +
                              0.715160 * img[:, :, 1] +
                              0.072169 * img[:, :, 2])
print('target_luminance:', target_luminance)
light_translation = Variable(torch.from_numpy(
    np.array([0.0, 0.0, 0.0], dtype=np.float32)), requires_grad=True)
light_rotation = Variable(torch.from_numpy(
    np.array([0.0, 0.0, 0.0], dtype=np.float32)), requires_grad=True)
light_vertices = shapes[-1].vertices.clone()
optimizer = torch.optim.Adam([light_translation, light_rotation], lr=5e-2)
for t in range(100):
    print('iteration:', t)
    print('light_translation', light_translation)
    print('light_rotation', light_rotation)
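The 0.212671 / 0.715160 / 0.072169 weights are the standard Rec. 709 relative-luminance coefficients; factored out as a helper (hypothetical, for readability only):

def luminance(img):
    # img: H x W x 3 linear-RGB tensor
    return 0.212671 * img[:, :, 0] + \
           0.715160 * img[:, :, 1] + \
           0.072169 * img[:, :, 2]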
cam, materials, shapes, lights, resolution = \
    load_mitsuba.load_mitsuba('test/scenes/teapot.xml')
materials[-1].diffuse_reflectance = \
    Variable(torch.from_numpy(np.array([0.3, 0.2, 0.2], dtype=np.float32)))
materials[-1].specular_reflectance = \
    Variable(torch.from_numpy(np.array([0.6, 0.6, 0.6], dtype=np.float32)))
materials[-1].roughness = \
    Variable(torch.from_numpy(np.array([0.05], dtype=np.float32)))
args = render_pytorch.RenderFunction.serialize_scene(
    cam, materials, shapes, lights, resolution,
    num_samples = 256, max_bounces = 2)
render = render_pytorch.RenderFunction.apply
img = render(0, *args)
image.imwrite(img.data.numpy(), 'test/results/teapot_reflectance/target.exr')
cam_position = cam.position
cam_translation = Variable(torch.from_numpy(
    np.array([-0.1, 0.1, -0.1], dtype=np.float32)), requires_grad=True)
materials[-1].diffuse_reflectance = \
    Variable(torch.from_numpy(np.array([0.5, 0.5, 0.5], dtype=np.float32)),
             requires_grad = True)
materials[-1].specular_reflectance = \
    Variable(torch.from_numpy(np.array([0.5, 0.5, 0.5], dtype=np.float32)),
             requires_grad = True)
materials[-1].roughness = \
    Variable(torch.from_numpy(np.array([0.2], dtype=np.float32)),
             requires_grad = True)
target = Variable(
    torch.from_numpy(
light_vertices = Variable(torch.from_numpy(
    np.array([[-0.1, 5, 6.9], [-0.1, 5, 7.1], [0.1, 5, 6.9], [0.1, 5, 7.1]],
             dtype=np.float32)))
light_indices = torch.from_numpy(
    np.array([[0, 2, 1], [1, 2, 3]], dtype=np.int32))
shape_light = shape.Shape(light_vertices, light_indices, None, None, 1)
shapes = [shape_floor, shape_light]
light_intensity = torch.from_numpy(
    np.array([100, 100, 100], dtype=np.float32))
light = light.Light(1, light_intensity)
lights = [light]
args = render_pytorch.RenderFunction.serialize_scene(
    cam, materials, shapes, lights, resolution, 256, 1)
render = render_pytorch.RenderFunction.apply
img = render(0, *args)
image.imwrite(img.data.numpy(), 'test/results/test_glossy/target.exr')
exit()  # stop after writing the target; the optimization below is skipped

light_translation = Variable(torch.from_numpy(
    np.array([-2.0, -0.5, -0.5], dtype=np.float32)), requires_grad=True)
optimizer = torch.optim.Adam([light_translation], lr=5e-2)
for t in range(200):
    print('iteration:', t)
    shape_light.vertices = light_vertices + light_translation
    args = render_pytorch.RenderFunction.serialize_scene(
        cam, materials, shapes, lights, resolution, 4, 1)
    # To apply our Function, we use Function.apply method. We alias this as 'render'.
    render = render_pytorch.RenderFunction.apply
    optimizer.zero_grad()
    # Forward pass: render the image
light_intensity = torch.from_numpy(
    np.array([10000, 10000, 10000], dtype=np.float32))
light = light.Light(3, light_intensity)
lights = [light]
optimizer = torch.optim.Adam([light_rotation], lr=1e-2)
for t in range(100):
    print('iteration:', t)
    print('light_rotation', light_rotation)
    light_rotation_matrix = transform.torch_rotate_matrix(light_rotation)
    shape_light.vertices = light_vertices @ torch.t(
        light_rotation_matrix) + light_translation
    args = render_pytorch.RenderFunction.serialize_scene(
        cam, materials, shapes, lights, resolution, 4, 32)
    # To apply our Function, we use Function.apply method. We alias this as 'render'.
    render = render_pytorch.RenderFunction.apply
    optimizer.zero_grad()
    # Forward pass: render the image
    img = render(t, *args)
    image.imwrite(img.data.numpy(), 'results/test_gi/iter_{}.png'.format(t))
    target = Variable(
        torch.from_numpy(image.imread('results/test_gi/target.exr')))
    loss = (img - target).pow(2).sum()
    print('loss:', loss.data[0])
    loss.backward()
    print('grad:', light_rotation.grad)
    optimizer.step()
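Because the vertices are stored as row vectors, multiplying by the transposed matrix applies the rotation to every vertex at once: rotating a column vector with R is the same as multiplying the corresponding row vector by R^T. A quick numpy sanity check of that identity:

import numpy as np
R = np.array([[0., -1., 0.], [1., 0., 0.], [0., 0., 1.]])  # 90 degrees about z
v = np.array([[1., 0., 0.], [0., 1., 0.]])                 # two row vertices
assert np.allclose(v @ R.T, (R @ v.T).T)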
        org_target, (1.0 / downscale_factor, 1.0 / downscale_factor, 1.0),
        order=1)
else:
    downscale_factor = 1
    res = 512
    target = org_target
print('target.shape:', target.shape)

# Circular mask: weight is 1 inside the unit disk, 0 outside.
x = np.linspace(-1, 1, res)
y = np.linspace(-1, 1, res)
xv, yv = np.meshgrid(x, y)
weight = (xv * xv + yv * yv < 1.0).astype(np.float32)
weight = Variable(
    torch.from_numpy(np.stack([weight, weight, weight], axis=-1)))
# image.imwrite(weight, 'weight.exr')
# exit()
image.imwrite(target, 'test/results/perception_lab/target_{}.exr'.format(scale))
target = Variable(torch.from_numpy(target))
cam_optimizer = torch.optim.Adam(cam_variables, lr=2e-3)
mat_optimizer = torch.optim.Adam(mat_variables, lr=2e-3)
lgt_optimizer = torch.optim.Adam(lgt_variables, lr=2e-3)
base_num_iter = 50
num_iter = base_num_iter
if scale == num_scales - 1:
    num_iter = 200
for t in range(num_iter):
    print('iteration: ({}, {})'.format(scale, t))
    cam_optimizer.zero_grad()
    mat_optimizer.zero_grad()
    lgt_optimizer.zero_grad()
    cam.position = 100 * position_base
shape_triangle = shape.Shape(vertices, indices, None, None, 0)
light_vertices = Variable(torch.from_numpy(
    np.array([[-1, -1, -9], [1, -1, -9], [-1, 1, -9], [1, 1, -9]],
             dtype=np.float32)))
light_indices = torch.from_numpy(
    np.array([[0, 1, 2], [1, 3, 2]], dtype=np.int32))
shape_light = shape.Shape(light_vertices, light_indices, None, None, 0)
shapes = [shape_triangle, shape_light]
light_intensity = torch.from_numpy(
    np.array([30, 30, 30], dtype=np.float32))
light = light.Light(1, light_intensity)
lights = [light]
args = render_pytorch.RenderFunction.serialize_scene(
    cam, materials, shapes, lights, resolution, 4, 1)
render = render_pytorch.RenderFunction.apply
img = render(0, *args)
image.imwrite(img.data.numpy(),
              'test/results/test_single_triangle_fisheye/target.exr')
target = Variable(
    torch.from_numpy(
        image.imread('test/results/test_single_triangle_fisheye/target.exr')))
position = Variable(torch.from_numpy(
    np.array([0.5, -0.5, -3.0], dtype=np.float32)), requires_grad=True)
optimizer = torch.optim.Adam([position], lr=5e-2)
for t in range(200):
    print('iteration:', t)
    # To apply our Function, we use Function.apply method. We alias this as 'render'.
    cam = camera.Camera(position=position,
                        look_at=look_at,
                        up=up,
                        cam_to_world=None,
optimizer = torch.optim.Adam([bunny_translation, bunny_rotation], lr=1e-2)
for t in range(200):
    print('iteration:', t)
    optimizer.zero_grad()
    # Forward pass: render the image
    bunny_rotation_matrix = transform.torch_rotate_matrix(bunny_rotation)
    # Rotate about the centroid, then translate.
    shapes[-1].vertices = \
        (bunny_vertices - torch.mean(bunny_vertices, 0)) @ \
        torch.t(bunny_rotation_matrix) + \
        torch.mean(bunny_vertices, 0) + bunny_translation
    args = render_pytorch.RenderFunction.serialize_scene(
        cam, materials, shapes, lights, resolution,
        num_samples = 4, max_bounces = 6)
    img = render(t + 1, *args)
    image.imwrite(img.data.numpy(), 'test/results/bunny_box/iter_{}.png'.format(t))

    # Build a per-channel Gaussian blur kernel from a Dirac impulse.
    dirac = np.zeros([7, 7], dtype=np.float32)
    dirac[3, 3] = 1.0
    gf = scipy.ndimage.filters.gaussian_filter(dirac, 1.0)
    f = np.zeros([3, 3, 7, 7], dtype=np.float32)
    f[0, 0, :, :] = gf
    f[1, 1, :, :] = gf
    f[2, 2, :, :] = gf
    f = Variable(torch.from_numpy(f))
    m = torch.nn.AvgPool2d(2)
    res = 256
    diff_0 = (img - target).view(1, res, res, 3).permute(0, 3, 2, 1)
    diff_1 = m(torch.nn.functional.conv2d(diff_0, f, padding=3))  # 128 x 128
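The 3x3x7x7 kernel f blurs each color channel independently: only the diagonal f[c, c] slices carry the Gaussian, so conv2d never mixes channels. A self-contained check of that property (a box filter stands in for the Gaussian):

import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 16, 16)
f = torch.zeros(3, 3, 7, 7)
f[0, 0], f[1, 1], f[2, 2] = 1.0 / 49, 1.0 / 49, 1.0 / 49  # diagonal box blur
y = F.conv2d(x, f, padding=3)
# channel 0 of the output depends only on channel 0 of the input
x2 = x.clone()
x2[:, 1:, :, :] = 0.0
assert torch.allclose(y[:, 0], F.conv2d(x2, f, padding=3)[:, 0])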
light_vertices = Variable(torch.from_numpy(
    np.array([[-0.1, 5, -0.1], [-0.1, 5, 0.1], [0.1, 5, -0.1], [0.1, 5, 0.1]],
             dtype=np.float32)))
light_indices = torch.from_numpy(
    np.array([[0, 2, 1], [1, 2, 3]], dtype=np.int32))
shape_light = shape.Shape(light_vertices, light_indices, None, None, 1)
shapes = [shape_floor, shape_blocker, shape_light]
light_intensity = torch.from_numpy(
    np.array([1000, 1000, 1000], dtype=np.float32))
light = light.Light(2, light_intensity)
lights = [light]
args = render_pytorch.RenderFunction.serialize_scene(
    cam, materials, shapes, lights, resolution, 256, 1)
render = render_pytorch.RenderFunction.apply
img = render(0, *args)
image.imwrite(img.data.numpy(), 'test/results/test_shadow/target.exr')
image.imwrite(img.data.numpy(), 'test/results/test_shadow/target.png')
target = Variable(
    torch.from_numpy(image.imread('test/results/test_shadow/target.exr')))
shape_blocker.vertices = Variable(torch.from_numpy(
    np.array([[-0.2, 3.5, -0.8], [-0.8, 3.0, 0.3], [0.4, 2.8, -0.8], [0.3, 3.2, 1.0]],
             dtype=np.float32)), requires_grad=True)
args = render_pytorch.RenderFunction.serialize_scene(
    cam, materials, shapes, lights, resolution, 256, 1)
img = render(1, *args)
image.imwrite(img.data.numpy(), 'test/results/test_shadow/init.png')
diff = torch.abs(target - img)
image.imwrite(diff.data.numpy(), 'test/results/test_shadow/init_diff.png')
optimizer = torch.optim.Adam([shape_blocker.vertices], lr=1e-2)
light_vertices = Variable(torch.from_numpy(
    np.array([[-1, -1, -7], [1, -1, -7], [-1, 1, -7], [1, 1, -7]],
             dtype=np.float32)))
light_indices = torch.from_numpy(
    np.array([[0, 1, 2], [1, 3, 2]], dtype=np.int32))
shape_light = shape.Shape(light_vertices, light_indices, None, None, 0)
shapes = [shape_triangle, shape_light]
light_intensity = torch.from_numpy(
    np.array([20, 20, 20], dtype=np.float32))
light = light.Light(1, light_intensity)
lights = [light]
args = render_pytorch.RenderFunction.serialize_scene(
    cam, materials, shapes, lights, resolution, 256, 1)
# To apply our Function, we use Function.apply method. We alias this as 'render'.
render = render_pytorch.RenderFunction.apply
img = render(0, *args)
image.imwrite(img.data.numpy(), 'test/results/test_single_triangle/target.exr')
image.imwrite(img.data.numpy(), 'test/results/test_single_triangle/target.png')
target = Variable(
    torch.from_numpy(
        image.imread('test/results/test_single_triangle/target.exr')))
shape_triangle.vertices = Variable(torch.from_numpy(
    np.array([[-2.0, 1.5, 0.3], [0.9, 1.2, -0.3], [-0.4, -1.4, 0.2]],
             dtype=np.float32)), requires_grad=True)
args = render_pytorch.RenderFunction.serialize_scene(
    cam, materials, shapes, lights, resolution, 16, 1)
img = render(1, *args)
image.imwrite(img.data.numpy(), 'test/results/test_single_triangle/init.png')
diff = torch.abs(target - img)
image.imwrite(diff.data.numpy(), 'test/results/test_single_triangle/init_diff.png')
def _ImageToFile(self, img):
    (filehandle, tmp_file) = tempfile.mkstemp(suffix=".nii", dir=image.tmp_dir)
    image.imwrite(tmp_file, img)
    return tmp_file
        look_at = cam_lookat,
        up = cam_up,
        cam_to_world = None,
        fov = cam.fov,
        clip_near = cam.clip_near,
        clip_far = cam.clip_far,
        resolution = (224, 224))
shapes[0].vertices = org_light_pos + light_translation
lights[0].intensity = org_intensity0 + intensity0
lights[1].intensity = org_intensity1 + intensity1
args = render_pytorch.RenderFunction.serialize_scene(
    cam_, materials, shapes, lights, cam.resolution,
    num_samples = 4, max_bounces = 1)
img = render(t, *args)
image.imwrite(img.data.numpy(), 'test/results/stop_sign/render_%04d.exr' % (t))
nimg = img.permute(2, 1, 0)
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
nimg = normalize(nimg)
nimg = nimg.unsqueeze(0)
vec = m(net(nimg))
tk = torch.topk(vec, 5)
loss = vec[0, 919] + vec[0, 920]
loss.backward()
print('light_translation:', light_translation)
print('intensity0:', intensity0)
print('intensity1:', intensity1)
print('tk:', tk)
    resolution=resolution)
mat_grey = material.Material(diffuse_reflectance=torch.from_numpy(
    np.array([0.5, 0.5, 0.5], dtype=np.float32)))
mat_black = material.Material(diffuse_reflectance=torch.from_numpy(
    np.array([0.0, 0.0, 0.0], dtype=np.float32)))
materials = [mat_grey, mat_black]
# plane_vertices, plane_indices = generate_plane([32, 32])
# shape_plane = shape.Shape(plane_vertices, plane_indices, None, None, 0)
indices, vertices, uvs, normals = load_obj.load_obj(
    'results/heightfield_gan/model.obj')
indices = Variable(torch.from_numpy(indices.astype(np.int64)))
vertices = Variable(torch.from_numpy(vertices))
normals = compute_vertex_normal(vertices, indices)
shape_plane = shape.Shape(vertices, indices, None, normals, 0)
light_vertices = Variable(torch.from_numpy(
    np.array([[-0.1, 50, -0.1], [-0.1, 50, 0.1], [0.1, 50, -0.1], [0.1, 50, 0.1]],
             dtype=np.float32)))
light_indices = torch.from_numpy(
    np.array([[0, 2, 1], [1, 2, 3]], dtype=np.int32))
shape_light = shape.Shape(light_vertices, light_indices, None, None, 1)
shapes = [shape_plane, shape_light]
light_intensity = torch.from_numpy(
    np.array([100000, 100000, 100000], dtype=np.float32))
light = light.Light(1, light_intensity)
lights = [light]
render = render_pytorch.RenderFunction.apply
args = render_pytorch.RenderFunction.serialize_scene(
    cam, materials, shapes, lights, resolution, 4, 1)
img = render(random.randint(0, 1048576), *args)
image.imwrite(img.data.numpy(), 'results/heightfield_gan/test.exr')
light_vertices = Variable(torch.from_numpy(
    np.array([[-1, -1, -7], [1, -1, -7], [-1, 1, -7], [1, 1, -7]],
             dtype=np.float32)))
light_indices = torch.from_numpy(
    np.array([[0, 1, 2], [1, 3, 2]], dtype=np.int32))
shape_light = shape.Shape(light_vertices, light_indices, None, None, 2)
shapes = [shape_tri0, shape_tri1, shape_light]
light_intensity = torch.from_numpy(
    np.array([20, 20, 20], dtype=np.float32))
light = light.Light(2, light_intensity)
lights = [light]
args = render_pytorch.RenderFunction.serialize_scene(
    cam, materials, shapes, lights, resolution, 256, 1)
render = render_pytorch.RenderFunction.apply
img = render(0, *args)
image.imwrite(img.data.numpy(), 'test/results/test_two_triangles/target.exr')
image.imwrite(img.data.numpy(), 'test/results/test_two_triangles/target.png')
shape_tri0.vertices = Variable(torch.from_numpy(
    np.array([[-1.3, 1.5, 0.1], [1.5, 0.7, -0.2], [-0.8, -1.1, 0.2]],
             dtype=np.float32)), requires_grad=True)
shape_tri1.vertices = Variable(torch.from_numpy(
    np.array([[-0.5, 1.2, 1.2], [0.3, 1.7, 1.0], [0.5, -1.8, 1.3]],
             dtype=np.float32)), requires_grad=True)
args = render_pytorch.RenderFunction.serialize_scene(
    cam, materials, shapes, lights, resolution, 256, 1)
img = render(1, *args)
image.imwrite(img.data.numpy(), 'test/results/test_two_triangles/init.png')
args = render_pytorch.RenderFunction.serialize_scene(
    cam, materials, shapes, lights, resolution, 4, 1)
target = Variable(
    torch.from_numpy(