import torch
import pyredner

vertices, indices, uvs, normals = pyredner.generate_sphere(64, 128)
m = pyredner.Material(diffuse_reflectance=torch.tensor(
    (0.5, 0.5, 0.5), device=pyredner.get_device()))
obj = pyredner.Object(vertices=vertices,
                      indices=indices,
                      uvs=uvs,
                      normals=normals,
                      material=m)
cam = pyredner.automatic_camera_placement([obj], resolution=(480, 640))
scene = pyredner.Scene(objects=[obj], camera=cam)
img = pyredner.render_g_buffer(
    scene, channels=[pyredner.channels.uv, pyredner.channels.shading_normal])
# The uv channel has only two components; pad a zero blue channel so the
# result can be saved as an RGB image. The zeros must live on the same
# device as the rendered image.
uv_img = torch.cat([img[:, :, :2],
                    torch.zeros(480, 640, 1, device=pyredner.get_device())],
                   dim=2)
normal_img = img[:, :, 2:]
pyredner.imwrite(uv_img, 'results/test_sphere/uv.png')
pyredner.imwrite(normal_img, 'results/test_sphere/normal.png')
def parse_material(node, two_sided=False):
    node_id = None
    if 'id' in node.attrib:
        node_id = node.attrib['id']
    if node.attrib['type'] == 'diffuse':
        diffuse_reflectance = torch.tensor([0.5, 0.5, 0.5])
        diffuse_uv_scale = torch.tensor([1.0, 1.0])
        specular_reflectance = torch.tensor([0.0, 0.0, 0.0])
        specular_uv_scale = torch.tensor([1.0, 1.0])
        roughness = torch.tensor([1.0])
        for child in node:
            if child.attrib['name'] == 'reflectance':
                if child.tag == 'texture':
                    for grandchild in child:
                        if grandchild.attrib['name'] == 'filename':
                            diffuse_reflectance = pyredner.imread(grandchild.attrib['value'])
                        elif grandchild.attrib['name'] == 'uscale':
                            diffuse_uv_scale[0] = float(grandchild.attrib['value'])
                        elif grandchild.attrib['name'] == 'vscale':
                            diffuse_uv_scale[1] = float(grandchild.attrib['value'])
                elif child.tag == 'rgb' or child.tag == 'spectrum':
                    diffuse_reflectance = parse_vector(child.attrib['value'])
            elif child.attrib['name'] == 'specular':
                if child.tag == 'texture':
                    for grandchild in child:
                        if grandchild.attrib['name'] == 'filename':
                            specular_reflectance = pyredner.imread(grandchild.attrib['value'])
                        elif grandchild.attrib['name'] == 'uscale':
                            specular_uv_scale[0] = float(grandchild.attrib['value'])
                        elif grandchild.attrib['name'] == 'vscale':
                            specular_uv_scale[1] = float(grandchild.attrib['value'])
                elif child.tag == 'rgb' or child.tag == 'spectrum':
                    specular_reflectance = parse_vector(child.attrib['value'])
            elif child.attrib['name'] == 'roughness':
                roughness = torch.tensor([float(child.attrib['value'])])
        if pyredner.get_use_gpu():
            # Copy to GPU
            diffuse_reflectance = diffuse_reflectance.cuda()
            specular_reflectance = specular_reflectance.cuda()
            roughness = roughness.cuda()
        return (node_id, pyredner.Material(
            diffuse_reflectance=pyredner.Texture(diffuse_reflectance, diffuse_uv_scale),
            specular_reflectance=pyredner.Texture(specular_reflectance, specular_uv_scale),
            roughness=pyredner.Texture(roughness),
            two_sided=two_sided))
    elif node.attrib['type'] == 'roughplastic':
        diffuse_reflectance = torch.tensor([0.5, 0.5, 0.5])
        diffuse_uv_scale = torch.tensor([1.0, 1.0])
        specular_reflectance = torch.tensor([0.0, 0.0, 0.0])
        specular_uv_scale = torch.tensor([1.0, 1.0])
        roughness = torch.tensor([1.0])
        for child in node:
            if child.attrib['name'] == 'diffuseReflectance':
                if child.tag == 'texture':
                    for grandchild in child:
                        if grandchild.attrib['name'] == 'filename':
                            diffuse_reflectance = pyredner.imread(grandchild.attrib['value'])
                        elif grandchild.attrib['name'] == 'uscale':
                            diffuse_uv_scale[0] = float(grandchild.attrib['value'])
                        elif grandchild.attrib['name'] == 'vscale':
                            diffuse_uv_scale[1] = float(grandchild.attrib['value'])
                elif child.tag == 'rgb' or child.tag == 'spectrum':
                    diffuse_reflectance = parse_vector(child.attrib['value'])
            elif child.attrib['name'] == 'specularReflectance':
                if child.tag == 'texture':
                    for grandchild in child:
                        if grandchild.attrib['name'] == 'filename':
                            specular_reflectance = pyredner.imread(grandchild.attrib['value'])
                        elif grandchild.attrib['name'] == 'uscale':
                            specular_uv_scale[0] = float(grandchild.attrib['value'])
                        elif grandchild.attrib['name'] == 'vscale':
                            specular_uv_scale[1] = float(grandchild.attrib['value'])
                elif child.tag == 'rgb' or child.tag == 'spectrum':
                    specular_reflectance = parse_vector(child.attrib['value'])
            elif child.attrib['name'] == 'alpha':
                alpha = float(child.attrib['value'])
                roughness = torch.tensor([alpha * alpha])
        if pyredner.get_use_gpu():
            # Copy to GPU
            diffuse_reflectance = diffuse_reflectance.cuda()
            specular_reflectance = specular_reflectance.cuda()
            roughness = roughness.cuda()
        return (node_id, pyredner.Material(
            diffuse_reflectance=pyredner.Texture(diffuse_reflectance, diffuse_uv_scale),
            specular_reflectance=pyredner.Texture(specular_reflectance, specular_uv_scale),
            roughness=pyredner.Texture(roughness),
            two_sided=two_sided))
    elif node.attrib['type'] == 'twosided':
        ret = parse_material(node[0], True)
        return (node_id, ret[1])
    else:
        print('Unsupported material type:', node.attrib['type'])
        assert False
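# For context, a hypothetical driver for parse_material, assuming the scene is
# a Mitsuba-style XML file parsed with xml.etree.ElementTree. 'scene.xml' and
# the bookkeeping containers below are illustrative, not part of the parser.
import xml.etree.ElementTree as etree

root = etree.parse('scene.xml').getroot()
materials = []
material_dict = {}
for child in root:
    if child.tag == 'bsdf':
        node_id, material = parse_material(child)
        if node_id is not None:
            material_dict[node_id] = len(materials)
        materials.append(material)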
mesh = mesh_list[0][1]
shapes = [pyredner.Shape(
    vertices=mesh.vertices,
    indices=mesh.indices,
    uvs=mesh.uvs,
    normals=mesh.normals,
    material_id=0)]
tex_path = '../tutorials/teapot.png'
tex_tensor = pyredner.imread(tex_path)
if pyredner.get_use_gpu():
    tex_tensor = tex_tensor.cuda(device=pyredner.get_device())
diffuse_reflectance = tex_tensor
materials = [pyredner.Material(diffuse_reflectance=diffuse_reflectance)]

# Construct the scene.
# Don't set up any light sources; only use primary visibility.
scene = pyredner.Scene(cam, shapes, materials, area_lights=[], envmap=None)

# TEST 1: render (test the forward function)
scene_args = pyredner.RenderFunction.serialize_scene(
    scene=scene,
    num_samples=16,
    max_bounces=0,
    channels=[redner.channels.position,
              redner.channels.shading_normal,
              redner.channels.diffuse_reflectance])
# Duplicate the serialized scene to form a batch of two identical scenes.
scene_args = [2] + 2 * scene_args
# Use GPU if available
pyredner.set_use_gpu(torch.cuda.is_available())

cam = pyredner.Camera(
    position=torch.tensor([0.0, 0.0, -5.0]),
    look_at=torch.tensor([0.0, 0.0, 0.0]),
    up=torch.tensor([0.0, 1.0, 0.0]),
    fov=torch.tensor([45.0]),  # in degrees
    clip_near=1e-2,  # needs to be > 0
    resolution=(256, 256),
    fisheye=False)

mat_grey = pyredner.Material(
    diffuse_reflectance=torch.tensor([0.4, 0.4, 0.4], device=pyredner.get_device()),
    specular_reflectance=torch.tensor([0.5, 0.5, 0.5], device=pyredner.get_device()),
    roughness=torch.tensor([0.05], device=pyredner.get_device()))
materials = [mat_grey]

vertices, indices, uvs, normals = pyredner.generate_sphere(128, 64)
shape_sphere = pyredner.Shape(
    vertices=vertices,
    indices=indices,
    uvs=uvs,
    normals=normals,
    material_id=0)
shapes = [shape_sphere]
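# The snippet above stops after building the shape list. A minimal sketch of
# how it could continue, following the serialize/render pattern used by the
# other snippets here; the 'sunsky.exr' environment map path is a placeholder.
envmap = pyredner.imread('sunsky.exr')
if pyredner.get_use_gpu():
    envmap = envmap.cuda()
envmap = pyredner.EnvironmentMap(envmap)
scene = pyredner.Scene(cam, shapes, materials, area_lights=[], envmap=envmap)
scene_args = pyredner.RenderFunction.serialize_scene(
    scene=scene, num_samples=256, max_bounces=1)
render = pyredner.RenderFunction.apply
img = render(0, *scene_args)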
    resolution=(512, 512),
    fisheye=False)
cam3 = pyredner.Camera(position=torch.tensor([2.5, 0.0, -3.0]),
                       look_at=torch.tensor([0.0, 0.0, -3.0]),
                       up=torch.tensor([0.0, 1.0, 0.0]),
                       camera_type=redner.CameraType.perspective,
                       fov=torch.tensor([45.0]),
                       clip_near=1e-2,  # needs to be > 0
                       resolution=(512, 512),
                       fisheye=False)

for obj in range(1, num_iters):
    target_obj1 = pyredner.load_obj('results/shadow_art/multitarget/' + step +
                                    '/iter_' + str(obj) + '.obj',
                                    return_objects=True)
    target_obj1[0].material = pyredner.Material(
        diffuse_reflectance=torch.tensor([1.0, 1.0, 1.0]), two_sided=True)
    target_obj1[0].normals = pyredner.compute_vertex_normal(
        target_obj1[0].vertices, target_obj1[0].indices)
    shapes = []
    shapes.append(target_obj1[0])
    numShapes = len(shapes)
    shapes.extend(lights)
    area_lights = []
    for i in range(numShapes, len(shapes)):
        area_lights.append(pyredner.AreaLight(
            shape_id=numShapes,
            intensity=torch.tensor([light_intensity,
                                    light_intensity,
                                    light_intensity])))
        area_lights.append(pyredner.AreaLight(
            shape_id=numShapes,
            intensity=torch.tensor([light_intensity * 10,
                                    light_intensity * 10,
                                    light_intensity * 10])))
    scene = pyredner.Scene(cam,
                           objects=[shapes[0], shapes[1]],
                           area_lights=[area_lights[0]],
                           envmap=None)
clip_near = 1e-2
resolution = (256, 256)
cam = pyredner.Camera(position=position,
                      look_at=look_at,
                      up=up,
                      fov=fov,
                      clip_near=clip_near,
                      resolution=resolution)

if pyredner.get_use_gpu():
    diffuse = diffuse.cuda()
    specular = specular.cuda()
    roughness = roughness.cuda()
print(roughness.dim())
mat_perlin = pyredner.Material(
    diffuse_reflectance=diffuse,
    specular_reflectance=specular,
    roughness=roughness)
mat_black = pyredner.Material(
    diffuse_reflectance=torch.tensor([0.0, 0.0, 0.0], device=pyredner.get_device()))
materials = [mat_perlin, mat_black]

vertices = torch.tensor([[-1.5, -1.5, 0.0],
                         [-1.5, 1.5, 0.0],
                         [1.5, -1.5, 0.0],
                         [1.5, 1.5, 0.0]],
                        device=pyredner.get_device())
indices = torch.tensor([[0, 1, 2], [1, 3, 2]],
                       dtype=torch.int32,
                       device=pyredner.get_device())
uvs = torch.tensor([[0.05, 0.05], [0.05, 0.95], [0.95, 0.05], [0.95, 0.95]],
                   device=pyredner.get_device())
shape_plane = pyredner.Shape(vertices, indices, uvs, None, 0)
light_vertices = torch.tensor([[-1.0, -1.0, -7.0],
                               [1.0, -1.0, -7.0],
                               [-1.0, 1.0, -7.0],
                               [1.0, 1.0, -7.0]],
        smooth_losses.append(smooth_loss.data.item())

        total_loss.backward()
        ver_optimizer.step()
        vertices.data = vertices.data * bound.reshape(-1, 1).expand(-1, 3) + boundary
        # normals_optimizer.step()
        # normals.data = normals.data / normals.data.norm(dim=1).reshape(-1, 1).expand(-1, 3)
        if 1:  # smooth_scheme != 'none' and t > 20
            for num_of_smooth in range(2):
                pyredner.smooth(vertices, indices, smooth_lmd, smooth_scheme, bound)
                pyredner.smooth(vertices, indices, -smooth_lmd, smooth_scheme, bound)
        print("{:.^10}total_loss:{:.6f}...img_loss:{:.6f}...smooth_loss:{:.6f}".format(
            t, total_loss, img_loss, smooth_loss))
        print()

m = pyredner.Material(diffuse_reflectance=torch.tensor([0.5, 0.5, 0.5]))
obj = pyredner.Object(vertices=vertices, indices=indices, normals=normals,
                      material=m)  # , colors=colors)
pyredner.save_obj(obj, output_path + '/final.obj')

if num_iters_2 > 0:
    for t in range(num_iters_2):
        total_loss = 0
        normals = pyredner.compute_vertex_normal(vertices, indices, normal_scheme)
        for i in range(len(cam_poses)):
            cam_pos = torch.tensor(cam_poses[i])
            dir_light_direction = torch.tensor(
                dir_light_directions[i % len(dir_light_directions)])
            img = model(cam_pos, cam_look_at, vertices, ambient_color,
                        dir_light_intensity, dir_light_direction, normals)
            loss = (img - target[i]).pow(2).mean()
            losses[i].append(loss.data.item())
            total_loss += loss
#for _, mesh1 in mesh_list1:
#    mesh1.normals = pyredner.compute_vertex_normal(mesh1.vertices / 1, mesh1.indices)
#    print(_)

for _, mesh2 in mesh_list2:
    mesh2.normals = pyredner.compute_vertex_normal(mesh2.vertices / 3, mesh2.indices)

#for _, mesh3 in mesh_list3:
#    mesh3.normals = pyredner.compute_vertex_normal(mesh3.vertices / 5, mesh3.indices)

#diffuse_reflectance_green = torch.tensor([0.0, 1.0, 0.0], device=pyredner.get_device())
diffuse_reflectance_green = torch.tensor([0.65, 0.32, 0.16], device=pyredner.get_device())
mat_green = pyredner.Material(diffuse_reflectance_green)

#diffuse_reflectance_red = torch.tensor([1.0, 0.0, 0.0], device=pyredner.get_device())
diffuse_reflectance_red = torch.tensor([0.65, 0.32, 0.16], device=pyredner.get_device())
mat_red = pyredner.Material(diffuse_reflectance_red)

#diffuse_reflectance_blue = torch.tensor([0.0, 0.0, 1.0], device=pyredner.get_device())
diffuse_reflectance_blue = torch.tensor([0.65, 0.32, 0.16], device=pyredner.get_device())
mat_blue = pyredner.Material(diffuse_reflectance_blue)

#diffuse_reflectance_purple = torch.tensor([0.50, 0.0, 0.50], device=pyredner.get_device())
diffuse_reflectance_purple = torch.tensor([0.65, 0.32, 0.16], device=pyredner.get_device())
mat_purple = pyredner.Material(diffuse_reflectance_purple)
face_target = int(sys.argv[4])

# Load Target model
target_objects = pyredner.load_obj(target_obj_file, return_objects=True)
print(target_objects[0].vertices.shape)

#diffuse = pyredner.imread('resources/wood_diffuse.jpg')
#specular = pyredner.imread('resources/wood_specular.jpg') / 100.0  # None  # pyredner.imread('resources/checkerboard.png')
#normal_map = pyredner.imread('resources/GroundForest003_NRM_3K.jpg', gamma=1.0)
#roughness = (1.0 - specular) / 10.0
normal_map = None
diffuse = torch.tensor([0.7, 0.7, 0.7])
specular = torch.tensor([0.0, 0.0, 0.0])
roughness = torch.tensor([0.6])
target_objects[0].material = pyredner.Material(diffuse_reflectance=diffuse,
                                               specular_reflectance=specular,
                                               roughness=roughness,
                                               normal_map=normal_map)

resolution = (64, 64)
num_cameras = 8
radius = float(sys.argv[5])
lightLocs = None
camera0 = pyredner.automatic_camera_placement(target_objects, resolution)
camLocs = fibonacci_sphere(num_cameras, False)
target_scenes = generate_scenes(camLocs, target_objects, None, lightLocs)
max_bounces_targets = 4
max_bounces_optim = 2
use_deferred_rendering = False

# Render Targets
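# fibonacci_sphere and generate_scenes are helpers defined elsewhere in this
# script. A plausible sketch of fibonacci_sphere, assuming its second argument
# toggles randomization of the spiral offset: it spreads `samples` near-uniform
# directions over the unit sphere using the golden-angle spiral, which the
# caller can then scale by `radius` to place cameras.
import math
import random

def fibonacci_sphere(samples, randomize=True):
    rnd = random.random() * samples if randomize else 1.0
    points = []
    offset = 2.0 / samples
    increment = math.pi * (3.0 - math.sqrt(5.0))  # golden angle
    for i in range(samples):
        y = ((i * offset) - 1.0) + (offset / 2.0)
        r = math.sqrt(1.0 - y * y)       # radius of the circle at height y
        phi = ((i + rnd) % samples) * increment
        points.append(torch.tensor([math.cos(phi) * r, y, math.sin(phi) * r]))
    return points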
def load_obj(filename, obj_group=True, is_load_mtl=True):
    """
        Load from a Wavefront obj file as PyTorch tensors.
        XXX: this is slow, maybe move to C++?
    """
    print('obj:' + filename)
    vertices_pool = []
    uvs_pool = []
    normals_pool = []
    indices = []
    vertices = []
    normals = []
    uvs = []
    vertices_map = {}
    material_map = {}
    current_mtllib = {}
    current_material_name = None

    mesh_list = []
    light_map = {}

    f = open(filename, 'r')
    d = os.path.dirname(filename)
    cwd = os.getcwd()
    if d != '':
        os.chdir(d)
    for line in f:
        line = line.strip()
        splitted = re.split(r'\ +', line)
        if splitted[0] == 'mtllib' and is_load_mtl:
            current_mtllib = load_mtl(splitted[1])
        elif splitted[0] == 'usemtl':
            if len(indices) > 0 and obj_group is True:
                # Flush
                mesh_list.append((current_material_name,
                                  create_mesh(indices, vertices, normals, uvs)))
                indices = []
                vertices = []
                normals = []
                uvs = []
                vertices_map = {}
            if not is_load_mtl:
                continue
            mtl_name = splitted[1]
            current_material_name = mtl_name
            if mtl_name not in material_map:
                m = current_mtllib[mtl_name]
                if m.map_Kd is None:
                    diffuse_reflectance = torch.tensor(m.Kd,
                                                       dtype=torch.float32,
                                                       device=pyredner.get_device())
                else:
                    diffuse_reflectance = pyredner.imread(m.map_Kd)
                    if pyredner.get_use_gpu():
                        diffuse_reflectance = diffuse_reflectance.cuda()
                if m.map_Ks is None:
                    specular_reflectance = torch.tensor(m.Ks,
                                                        dtype=torch.float32,
                                                        device=pyredner.get_device())
                else:
                    specular_reflectance = pyredner.imread(m.map_Ks)
                    if pyredner.get_use_gpu():
                        specular_reflectance = specular_reflectance.cuda()
                if m.map_Ns is None:
                    roughness = torch.tensor([2.0 / (m.Ns + 2.0)],
                                             dtype=torch.float32,
                                             device=pyredner.get_device())
                else:
                    roughness = 2.0 / (pyredner.imread(m.map_Ns) + 2.0)
                    if pyredner.get_use_gpu():
                        roughness = roughness.cuda()
                if m.Ke != (0.0, 0.0, 0.0):
                    light_map[mtl_name] = torch.tensor(m.Ke, dtype=torch.float32)
                material_map[mtl_name] = pyredner.Material(
                    diffuse_reflectance, specular_reflectance, roughness)
        elif splitted[0] == 'v':
            vertices_pool.append(
                [float(splitted[1]), float(splitted[2]), float(splitted[3])])
        elif splitted[0] == 'vt':
            uvs_pool.append([float(splitted[1]), float(splitted[2])])
        elif splitted[0] == 'vn':
            normals_pool.append(
                [float(splitted[1]), float(splitted[2]), float(splitted[3])])
        elif splitted[0] == 'f':
            def num_indices(x):
                return len(re.split('/', x))

            def get_index(x, i):
                return int(re.split('/', x)[i])

            def parse_face_index(x, i):
                f = get_index(x, i)
                if f < 0:
                    # Negative indices are relative to the vertices declared so far.
                    if (i == 0):
                        f += len(vertices_pool)
                    if (i == 1):
                        # f += len(uvs)
                        f = None
                else:
                    f -= 1
                return f

            assert (len(splitted) <= 5)

            def get_vertex_id(indices):
                pi = parse_face_index(indices, 0)
                uvi = None
                if (num_indices(indices) > 1 and re.split('/', indices)[1] != ''):
                    uvi = parse_face_index(indices, 1)
                ni = None
                if (num_indices(indices) > 2 and re.split('/', indices)[2] != ''):
                    ni = parse_face_index(indices, 2)
                key = (pi, uvi, ni)
                if key in vertices_map:
                    return vertices_map[key]
                vertex_id = len(vertices)
                vertices_map[key] = vertex_id
                vertices.append(vertices_pool[pi])
                if uvi is not None:
                    uvs.append(uvs_pool[uvi])
                if ni is not None:
                    normals.append(normals_pool[ni])
                return vertex_id

            vid0 = get_vertex_id(splitted[1])
            vid1 = get_vertex_id(splitted[2])
            vid2 = get_vertex_id(splitted[3])
            indices.append([vid0, vid1, vid2])
            if (len(splitted) == 5):
                vid3 = get_vertex_id(splitted[4])
                indices.append([vid0, vid2, vid3])
    f.close()

    mesh_list.append(
        (current_material_name, create_mesh(indices, vertices, normals, uvs)))
    if d != '':
        os.chdir(cwd)
    return material_map, mesh_list, light_map
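# A hypothetical caller of the load_obj above; 'teapot.obj' is a placeholder
# path, and material ids here are simply assigned from the insertion order of
# material_map.
material_map, mesh_list, light_map = load_obj('teapot.obj')
material_ids = {name: i for i, name in enumerate(material_map.keys())}
materials = list(material_map.values())
shapes = []
for mtl_name, mesh in mesh_list:
    shapes.append(pyredner.Shape(vertices=mesh.vertices,
                                 indices=mesh.indices,
                                 uvs=mesh.uvs,
                                 normals=mesh.normals,
                                 material_id=material_ids.get(mtl_name, 0)))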
    def render(self, scene, svbrdf):
        imgs = []

        svbrdf = svbrdf.unsqueeze(0) if len(svbrdf.shape) == 3 else svbrdf
        sensor_size = (svbrdf.shape[-1], svbrdf.shape[-2])

        for svbrdf_single in torch.split(svbrdf, 1, dim=0):
            normals, diffuse, roughness, specular = utils.unpack_svbrdf(
                svbrdf_single.squeeze(0))
            # Redner expects the normal map to be in range [0, 1]
            normals = utils.encode_as_unit_interval(normals)
            # Redner expects the roughness to have one channel only.
            # We also need to convert from GGX roughness to Blinn-Phong power.
            # See: https://github.com/iondune/csc473/blob/master/lectures/07-cook-torrance.md
            roughness = torch.mean(torch.clamp(roughness, min=0.001),
                                   dim=0,
                                   keepdim=True)**4

            # Convert from [c, h, w] to [h, w, c] for redner
            normals = normals.permute(1, 2, 0)
            diffuse = diffuse.permute(1, 2, 0)
            roughness = roughness.permute(1, 2, 0)
            specular = specular.permute(1, 2, 0)

            material = pyredner.Material(
                diffuse_reflectance=pyredner.Texture(
                    diffuse.to(self.redner_device)),
                specular_reflectance=pyredner.Texture(
                    specular.to(self.redner_device)),
                roughness=pyredner.Texture(roughness.to(self.redner_device)),
                normal_map=pyredner.Texture(normals.to(self.redner_device)))

            material_patch = pyredner.Object(vertices=self.patch_vertices,
                                             uvs=self.patch_uvs,
                                             indices=self.patch_indices,
                                             material=material)

            # Define the camera parameters (focused at the middle of the patch)
            # and make sure we always have a valid 'up' direction.
            position = np.array(scene.camera.pos)
            lookat = np.array([0.0, 0.0, 0.0])
            cz = lookat - position  # Principal axis
            up = np.array([0.0, 0.0, 1.0])
            if np.linalg.norm(np.cross(cz, up)) == 0.0:
                up = np.array([0.0, 1.0, 0.0])
            camera = pyredner.Camera(
                position=torch.FloatTensor(position).to(self.redner_device),
                look_at=torch.FloatTensor(lookat).to(self.redner_device),
                up=torch.FloatTensor(up).to(self.redner_device),
                fov=torch.FloatTensor([90]),
                resolution=sensor_size,
                camera_type=self.camera_type)

            # # The deferred rendering path.
            # # It does not have a specular model and is therefore of limited use for us.
            # full_scene = pyredner.Scene(camera=camera, objects=[material_patch])
            # light = pyredner.PointLight(
            #     position=torch.tensor(scene.light.pos).to(self.redner_device),
            #     intensity=torch.tensor(scene.light.color).to(self.redner_device))
            # img = pyredner.render_deferred(scene=full_scene, lights=[light])

            light = pyredner.generate_quad_light(
                position=torch.Tensor(scene.light.pos).to(self.redner_device),
                look_at=torch.zeros(3).to(self.redner_device),
                size=torch.Tensor([0.6, 0.6]).to(self.redner_device),
                intensity=torch.Tensor(scene.light.color).to(self.redner_device))
            full_scene = pyredner.Scene(camera=camera,
                                        objects=[material_patch, light])
            img = pyredner.render_pathtracing(full_scene, num_samples=(16, 8))

            # Transform the rendered image back to something torch can interpret
            imgs.append(img.permute(2, 0, 1).to(svbrdf.device))

        return torch.stack(imgs)
vertices = obj.vertices.detach().clone()
v = vertices.clone()
ideal_shift = pyredner.smooth(vertices, indices, 0., smooth_scheme,
                              return_shift=True)

tex = obj.material.diffuse_reflectance.texels
texels = tex.clone()  # pyredner.imresize(tex, (200, 200))
ideal_lap = compute_lap(texels)
ideal_nab = compute_nab(texels)
pyredner.imwrite(ideal_nab.unsqueeze(2).expand(-1, -1, 3), 'nab.png')
print('texels size: ', texels.size())
texels.requires_grad = True
uvs = obj.uvs
uv_indices = obj.uv_indices
m = pyredner.Material(diffuse_reflectance=texels,
                      specular_reflectance=torch.tensor([0.05, 0.05, 0.05]),
                      roughness=torch.tensor([0.02]))
vertices.requires_grad = True

cam_poses, cam_look_ats, resolution = np.load(target_data_path + "env_data.npy",
                                              allow_pickle=True)
num_views = len(cam_poses)
target = []
for i in range(num_views):
    target.append(pyredner.imread(
        target_data_path + 'tgt_img{:0>2d}.png'.format(i)).to(pyredner.get_device()))
tgt_envmap_img = pyredner.imread(target_data_path + 'env_map.png')
envmap_img = (torch.zeros((64, 128, 3), dtype=torch.float32) + 0.5).detach()
envmap_img.requires_grad = True
envmap = pyredner.EnvironmentMap(envmap_img)
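# vertices, texels, and envmap_img are all leaf tensors with requires_grad
# set, so a joint optimizer over them is the natural next step; the learning
# rate below is illustrative, not taken from the original script.
optimizer = torch.optim.Adam([vertices, texels, envmap_img], lr=1e-2)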
# Setup camera: We place the camera at (0, 0, -1), with look vector (0, 0, 1).
# We also use an orthographic camera just to make the projection more "2D":
# the depth is only used for determining the order of the meshes.
cam = pyredner.Camera(position=torch.tensor([0.0, 0.0, -1.0]),
                      look_at=torch.tensor([0.0, 0.0, 0.0]),
                      up=torch.tensor([0.0, 1.0, 0.0]),
                      fov=torch.tensor([45.0]),  # in degrees
                      clip_near=1e-2,  # needs to be > 0
                      resolution=(256, 256),
                      camera_type=redner.CameraType.orthographic)

# The materials:
mat_quad = pyredner.Material(
    diffuse_reflectance=torch.tensor([0.75, 0.75, 0.25], device=pyredner.get_device()))
mat_tri = pyredner.Material(
    diffuse_reflectance=torch.tensor([0.9, 0.35, 0.35], device=pyredner.get_device()))
materials = [mat_quad, mat_tri]

# We'll have a quad and a triangle as our meshes.
# First we define the 2D coordinates. The screen ranges from -1.0 to 1.0,
# with Y pointing up.
quad_vertices_2d = torch.tensor([[-0.3, 0.5], [0.2, 0.6], [-0.5, -0.3], [0.5, -0.4]],
                                device=pyredner.get_device())
tri_vertices_2d = torch.tensor([[-0.6, 0.3], [0.4, 0.5], [-0.1, -0.2]],
                               device=pyredner.get_device())
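# In the full example these 2D coordinates are lifted to 3D by padding a
# constant z per mesh; under the orthographic camera the z value only decides
# which mesh occludes the other. A sketch (the 0.5 depth for the triangle is
# an assumption, chosen so the quad is drawn in front):
quad_vertices = torch.cat(
    (quad_vertices_2d, torch.zeros(4, 1, device=pyredner.get_device())), dim=1)
tri_vertices = torch.cat(
    (tri_vertices_2d, 0.5 * torch.ones(3, 1, device=pyredner.get_device())), dim=1)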
# Next, we set up the materials for the scene. All materials in the scene are
# stored in a single Python list; the index of a material in the list is its
# material id. Our simple scene only has a single grey material with
# reflectance 0.5.
# This material gives color to the initial guess as well as the target image.
# [0.5, 0.5, 0.5] is the color for grey.
# Let's try green with [0, 255, 0] (has to be floats, as [0.0, 1.0, 0.0]).
#mat_grey = pyredner.Material(
#    diffuse_reflectance=torch.tensor([0.5, 0.5, 0.5], device=pyredner.get_device()))
mat_grey = pyredner.Material(
    diffuse_reflectance=torch.tensor([1.0, 0.0, 0.0], device=pyredner.get_device()))

# The material list of the scene
# materials = [mat_grey]

# Shape of the target image
#shape_triangle = pyredner.Shape(
#    vertices=torch.tensor([[-1.7, 1.0, 0.0], [1.0, 1.0, 0.0], [-0.5, -1.0, 0.0]],
#                          device=pyredner.get_device()),
#    indices=torch.tensor([[0, 1, 2]], dtype=torch.int32,
#                         device=pyredner.get_device()),
#    uvs=None,
#    normals=None,
#    material_id=0)
def load_obj(filename: str,
             obj_group: bool = True,
             flip_tex_coords: bool = True,
             use_common_indices: bool = False,
             return_objects: bool = False,
             device: Optional[torch.device] = None):
    """
        Load from a Wavefront obj file as PyTorch tensors.

        Args
        ====
        filename: str
            Path to the obj file.
        obj_group: bool
            Split the meshes based on materials.
        flip_tex_coords: bool
            Flip the v coordinate of uv by applying v' = 1 - v.
        use_common_indices: bool
            Use the same indices for position, uvs, normals.
            Not recommended since texture seams in the objects sharing
            the same positions would cause the optimization to "tear" the object.
        return_objects: bool
            Output a list of Object instead.
            If there is no corresponding material for a shape, assign a grey material.
        device: Optional[torch.device]
            Which device should we store the data in.
            If set to None, use the device from pyredner.get_device().

        Returns
        =======
        if return_objects == True, return a list of Object
        if return_objects == False, return (material_map, mesh_list, light_map),
        material_map -> Map[mtl_name, WavefrontMaterial]
        mesh_list -> List[TriangleMesh]
        light_map -> Map[mtl_name, torch.Tensor]
    """
    if device is None:
        device = pyredner.get_device()

    vertices_pool = []
    uvs_pool = []
    normals_pool = []
    indices = []
    uv_indices = []
    normal_indices = []
    vertices = []
    uvs = []
    normals = []
    vertices_map = {}
    uvs_map = {}
    normals_map = {}
    material_map = {}
    current_mtllib = {}
    current_material_name = None

    def create_mesh(indices, uv_indices, normal_indices, vertices, uvs, normals):
        indices = torch.tensor(indices, dtype=torch.int32, device=device)
        if len(uv_indices) == 0:
            uv_indices = None
        else:
            uv_indices = torch.tensor(uv_indices, dtype=torch.int32, device=device)
        if len(normal_indices) == 0:
            normal_indices = None
        else:
            normal_indices = torch.tensor(normal_indices, dtype=torch.int32, device=device)
        vertices = torch.tensor(vertices, device=device)
        if len(uvs) == 0:
            uvs = None
        else:
            uvs = torch.tensor(uvs, device=device)
        if len(normals) == 0:
            normals = None
        else:
            normals = torch.tensor(normals, device=device)
        return TriangleMesh(indices, uv_indices, normal_indices,
                            vertices, uvs, normals)

    mesh_list = []
    light_map = {}

    with open(filename, 'r') as f:
        d = os.path.dirname(filename)
        cwd = os.getcwd()
        if d != '':
            os.chdir(d)
        for line in f:
            line = line.strip()
            splitted = re.split(r'\ +', line)
            if splitted[0] == 'mtllib':
                current_mtllib = load_mtl(splitted[1])
            elif splitted[0] == 'usemtl':
                if len(indices) > 0 and obj_group is True:
                    # Flush
                    mesh_list.append((current_material_name,
                                      create_mesh(indices, uv_indices, normal_indices,
                                                  vertices, uvs, normals)))
                    indices = []
                    uv_indices = []
                    normal_indices = []
                    vertices = []
                    normals = []
                    uvs = []
                    vertices_map = {}
                    uvs_map = {}
                    normals_map = {}

                mtl_name = splitted[1]
                current_material_name = mtl_name
                if mtl_name not in material_map:
                    m = current_mtllib[mtl_name]
                    if m.map_Kd is None:
                        diffuse_reflectance = torch.tensor(m.Kd,
                                                           dtype=torch.float32,
                                                           device=device)
                    else:
                        diffuse_reflectance = pyredner.imread(m.map_Kd)
                        if pyredner.get_use_gpu():
                            diffuse_reflectance = diffuse_reflectance.cuda(device=device)
                    if m.map_Ks is None:
                        specular_reflectance = torch.tensor(m.Ks,
                                                            dtype=torch.float32,
                                                            device=device)
                    else:
                        specular_reflectance = pyredner.imread(m.map_Ks)
                        if pyredner.get_use_gpu():
                            specular_reflectance = specular_reflectance.cuda(device=device)
                    if m.map_Ns is None:
                        roughness = torch.tensor([2.0 / (m.Ns + 2.0)],
                                                 dtype=torch.float32,
                                                 device=device)
                    else:
                        roughness = 2.0 / (pyredner.imread(m.map_Ns) + 2.0)
                        if pyredner.get_use_gpu():
                            roughness = roughness.cuda(device=device)
                    if m.Ke != (0.0, 0.0, 0.0):
                        light_map[mtl_name] = torch.tensor(m.Ke, dtype=torch.float32)
                    material_map[mtl_name] = pyredner.Material(
                        diffuse_reflectance, specular_reflectance, roughness)
            elif splitted[0] == 'v':
                vertices_pool.append([float(splitted[1]),
                                      float(splitted[2]),
                                      float(splitted[3])])
            elif splitted[0] == 'vt':
                u = float(splitted[1])
                v = float(splitted[2])
                if flip_tex_coords:
                    v = 1 - v
                uvs_pool.append([u, v])
            elif splitted[0] == 'vn':
                normals_pool.append([float(splitted[1]),
                                     float(splitted[2]),
                                     float(splitted[3])])
            elif splitted[0] == 'f':
                def num_indices(x):
                    return len(re.split('/', x))

                def get_index(x, i):
                    return int(re.split('/', x)[i])

                def parse_face_index(x, i):
                    f = get_index(x, i)
                    if f > 0:
                        f -= 1
                    return f

                assert (len(splitted) <= 5)

                def get_vertex_id(indices):
                    pi = parse_face_index(indices, 0)
                    uvi = None
                    if (num_indices(indices) > 1 and re.split('/', indices)[1] != ''):
                        uvi = parse_face_index(indices, 1)
                    ni = None
                    if (num_indices(indices) > 2 and re.split('/', indices)[2] != ''):
                        ni = parse_face_index(indices, 2)
                    if use_common_indices:
                        # Vertex, uv, normals share the same indexing.
                        key = (pi, uvi, ni)
                        if key in vertices_map:
                            vertex_id = vertices_map[key]
                            return vertex_id, vertex_id, vertex_id
                        vertex_id = len(vertices)
                        vertices_map[key] = vertex_id
                        vertices.append(vertices_pool[pi])
                        if uvi is not None:
                            uvs.append(uvs_pool[uvi])
                        if ni is not None:
                            normals.append(normals_pool[ni])
                        return vertex_id, vertex_id, vertex_id
                    else:
                        # Vertex, uv, normals use separate indexing.
                        vertex_id = None
                        uv_id = None
                        normal_id = None
                        if pi in vertices_map:
                            vertex_id = vertices_map[pi]
                        else:
                            vertex_id = len(vertices)
                            vertices.append(vertices_pool[pi])
                            vertices_map[pi] = vertex_id
                        if uvi is not None:
                            if uvi in uvs_map:
                                uv_id = uvs_map[uvi]
                            else:
                                uv_id = len(uvs)
                                uvs.append(uvs_pool[uvi])
                                uvs_map[uvi] = uv_id
                        if ni is not None:
                            if ni in normals_map:
                                normal_id = normals_map[ni]
                            else:
                                normal_id = len(normals)
                                normals.append(normals_pool[ni])
                                normals_map[ni] = normal_id
                        return vertex_id, uv_id, normal_id

                vid0, uv_id0, n_id0 = get_vertex_id(splitted[1])
                vid1, uv_id1, n_id1 = get_vertex_id(splitted[2])
                vid2, uv_id2, n_id2 = get_vertex_id(splitted[3])
                indices.append([vid0, vid1, vid2])
                if uv_id0 is not None:
                    assert (uv_id1 is not None and uv_id2 is not None)
                    uv_indices.append([uv_id0, uv_id1, uv_id2])
                if n_id0 is not None:
                    assert (n_id1 is not None and n_id2 is not None)
                    normal_indices.append([n_id0, n_id1, n_id2])
                if (len(splitted) == 5):
                    vid3, uv_id3, n_id3 = get_vertex_id(splitted[4])
                    indices.append([vid0, vid2, vid3])
                    if uv_id0 is not None:
                        assert (uv_id3 is not None)
                        uv_indices.append([uv_id0, uv_id2, uv_id3])
                    if n_id0 is not None:
                        assert (n_id3 is not None)
                        normal_indices.append([n_id0, n_id2, n_id3])

    mesh_list.append((current_material_name,
                      create_mesh(indices, uv_indices, normal_indices,
                                  vertices, uvs, normals)))
    if d != '':
        os.chdir(cwd)

    if return_objects:
        objects = []
        for mtl_name, mesh in mesh_list:
            if mtl_name in material_map:
                m = material_map[mtl_name]
            else:
                m = pyredner.Material(
                    diffuse_reflectance=torch.tensor((0.5, 0.5, 0.5), device=device))
            if mtl_name in light_map:
                l = light_map[mtl_name]
            else:
                l = None
            objects.append(pyredner.Object(vertices=mesh.vertices,
                                           indices=mesh.indices,
                                           material=m,
                                           light_intensity=l,
                                           uvs=mesh.uvs,
                                           normals=mesh.normals,
                                           uv_indices=mesh.uv_indices,
                                           normal_indices=mesh.normal_indices))
        return objects
    else:
        return material_map, mesh_list, light_map
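# With return_objects=True, the output can feed pyredner.Scene directly.
# A small usage sketch; 'model.obj' is a placeholder path.
objects = load_obj('model.obj', return_objects=True)
camera = pyredner.automatic_camera_placement(objects, resolution=(256, 256))
scene = pyredner.Scene(camera=camera, objects=objects)
img = pyredner.render_albedo(scene)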
ideal_shift = pyredner.smooth(vertices, indices,
                              0., smooth_scheme, return_shift=True)
ideal_quad_shift = pyredner.smooth(ideal_shift, indices,
                                   0., smooth_scheme, return_shift=True)

tex = obj.material.diffuse_reflectance.texels
texels = pyredner.imresize(tex, (200, 200))
print('texels size: ', texels.size())
texels.requires_grad = True
uvs = obj.uvs
uv_indices = obj.uv_indices
m = pyredner.Material(diffuse_reflectance=texels)
vertices.requires_grad = True

cam_pos, cam_look_at, num_views, all_euler_angles, center, resolution = np.load(
    target_data_path + "env_data.npy", allow_pickle=True)
# cam_poses = cam_poses[:1]
center = center.to(pyredner.get_device())
target = []
for i in range(num_views):
    target.append(pyredner.imread(
        target_data_path + 'tgt_img{:0>2d}.png'.format(i)).to(pyredner.get_device()))
envmap_img = pyredner.imread(target_data_path + 'env_map.png')
folder_name = 'Cube_1'
material_map, mesh_list, light_map = pyredner.load_obj(
    'ReferenceOutputMeshes/cylinderHighNVO.obj')

# The mesh we loaded is relatively low-poly and doesn't have vertex normals.
# Fortunately we can compute the vertex normals from the neighboring vertices
# with pyredner.compute_vertex_normal:
# (Try commenting out the following two lines to see the differences in target images!)
for _, mesh in mesh_list:
    mesh.normals = pyredner.compute_vertex_normal(mesh.vertices, mesh.indices)
    print(_)

diffuse_reflectance = torch.tensor([0.0, 0.4, 0.6], device=pyredner.get_device())
mat_grey = pyredner.Material(diffuse_reflectance)

# The material list of the scene
# materials = [mat_grey]

shape_sphere = pyredner.Shape(vertices=mesh.vertices,
                              indices=mesh.indices,
                              uvs=None,
                              normals=mesh.normals,
                              material_id=0)
shape_light = pyredner.Shape(
    vertices=torch.tensor([[-1.0, -1.0, -7.0],
                           [2.0, -1.0, -7.0],
                           [-1.0, 1.0, -7.0],
                           [2.0, 1.0, -7.0]],
                          device=pyredner.get_device()),
    indices=torch.tensor([[0, 1, 2], [1, 3, 2]],
fov = torch.tensor([45.0])
clip_near = 1e-2
resolution = (256, 256)
cam = pyredner.Camera(position=position,
                      look_at=look_at,
                      up=up,
                      fov=fov,
                      clip_near=clip_near,
                      resolution=resolution)

checkerboard_texture = pyredner.imread('checkerboard.exr')
if pyredner.get_use_gpu():
    checkerboard_texture = checkerboard_texture.cuda()
mat_checkerboard = pyredner.Material(diffuse_reflectance=checkerboard_texture)
mat_black = pyredner.Material(
    diffuse_reflectance=torch.tensor([0.0, 0.0, 0.0], device=pyredner.get_device()))
materials = [mat_checkerboard, mat_black]

vertices = torch.tensor([[-1.0, -1.0, 0.0],
                         [-1.0, 1.0, 0.0],
                         [1.0, -1.0, 0.0],
                         [1.0, 1.0, 0.0]],
                        device=pyredner.get_device())
indices = torch.tensor([[0, 1, 2], [1, 3, 2]],
                       dtype=torch.int32,
                       device=pyredner.get_device())
uvs = torch.tensor([[0.05, 0.05], [0.05, 0.95], [0.95, 0.05], [0.95, 0.95]],
                   device=pyredner.get_device())
shape_plane = pyredner.Shape(vertices, indices, uvs, None, 0)
light_vertices = torch.tensor([[-1.0, -1.0, -7.0],
                               [1.0, -1.0, -7.0],
                               [-1.0, 1.0, -7.0],
                               [1.0, 1.0, -7.0]],
position = torch.tensor([0.0, 2.0, -4.0])
look_at = torch.tensor([0.0, -2.0, 0.0])
up = torch.tensor([0.0, 1.0, 0.0])
fov = torch.tensor([45.0])
clip_near = 1e-2
resolution = (256, 256)
cam = pyredner.Camera(position=position,
                      look_at=look_at,
                      up=up,
                      fov=fov,
                      clip_near=clip_near,
                      resolution=resolution)

mat_shiny = pyredner.Material(
    diffuse_reflectance=torch.tensor([0.0, 0.0, 0.0], device=pyredner.get_device()),
    specular_reflectance=torch.tensor([1.0, 1.0, 1.0], device=pyredner.get_device()),
    roughness=torch.tensor([0.0005], device=pyredner.get_device()))
mat_grey = pyredner.Material(
    diffuse_reflectance=torch.tensor([0.5, 0.5, 0.5], device=pyredner.get_device()))
mat_black = pyredner.Material(
    diffuse_reflectance=torch.tensor([0.0, 0.0, 0.0], device=pyredner.get_device()))
materials = [mat_shiny, mat_grey, mat_black]

floor_vertices = torch.tensor([[-4.0, 0.0, -4.0],
                               [-4.0, 0.0, 4.0],
                               [4.0, 0.0, -4.0],
                               [4.0, 0.0, 4.0]],
                              device=pyredner.get_device())
floor_indices = torch.tensor([[0, 1, 2], [1, 3, 2]],
                             device=pyredner.get_device(),
                             dtype=torch.int32)
def parse_material(node, two_sided=False):
    def parse_material_bitmap(node, scale=None):
        reflectance_texture = None
        uv_scale = torch.tensor([1.0, 1.0])
        for grandchild in node:
            if grandchild.attrib['name'] == 'filename':
                reflectance_texture = pyredner.imread(grandchild.attrib['value'])
                if scale:
                    reflectance_texture = reflectance_texture * scale
            elif grandchild.attrib['name'] == 'uscale':
                uv_scale[0] = float(grandchild.attrib['value'])
            elif grandchild.attrib['name'] == 'vscale':
                uv_scale[1] = float(grandchild.attrib['value'])
        assert reflectance_texture is not None
        return reflectance_texture, uv_scale

    # Support the Mitsuba 'scale' plugin for textures.
    def parse_texture(node):
        if node.attrib['type'] == 'scale':
            scale_value = None
            for grandchild in node:
                if grandchild.attrib['name'] == 'scale' and grandchild.tag == 'float':
                    scale_value = float(grandchild.attrib['value'])
                elif grandchild.attrib['type'] == 'bitmap' and grandchild.tag == 'texture':
                    # Guard against the 'scale' element being declared after the 'bitmap'.
                    assert scale_value is not None
                    return parse_material_bitmap(grandchild, scale_value)
                else:
                    raise NotImplementedError(
                        'Unsupported scale param type {}'.format(
                            grandchild.attrib['type']))
        elif node.attrib['type'] == 'bitmap':
            return parse_material_bitmap(node)
        else:
            raise NotImplementedError(
                'Unsupported Texture type {}'.format(node.attrib['type']))

    node_id = None
    if 'id' in node.attrib:
        node_id = node.attrib['id']
    if node.attrib['type'] == 'diffuse':
        diffuse_reflectance = torch.tensor([0.5, 0.5, 0.5])
        diffuse_uv_scale = torch.tensor([1.0, 1.0])
        specular_reflectance = torch.tensor([0.0, 0.0, 0.0])
        specular_uv_scale = torch.tensor([1.0, 1.0])
        roughness = torch.tensor([1.0])
        for child in node:
            if child.attrib['name'] == 'reflectance':
                if child.tag == 'texture':
                    diffuse_reflectance, diffuse_uv_scale = parse_texture(child)
                elif child.tag == 'rgb' or child.tag == 'spectrum':
                    diffuse_reflectance = parse_vector(child.attrib['value'])
            elif child.attrib['name'] == 'specular':
                if child.tag == 'texture':
                    specular_reflectance, specular_uv_scale = parse_texture(child)
                elif child.tag == 'rgb' or child.tag == 'spectrum':
                    specular_reflectance = parse_vector(child.attrib['value'])
            elif child.attrib['name'] == 'roughness':
                roughness = torch.tensor([float(child.attrib['value'])])
        if pyredner.get_use_gpu():
            # Copy to GPU
            diffuse_reflectance = diffuse_reflectance.cuda()
            specular_reflectance = specular_reflectance.cuda()
            roughness = roughness.cuda()
        return (node_id, pyredner.Material(
            diffuse_reflectance=pyredner.Texture(diffuse_reflectance, diffuse_uv_scale),
            specular_reflectance=pyredner.Texture(specular_reflectance, specular_uv_scale),
            roughness=pyredner.Texture(roughness),
            two_sided=two_sided))
    elif node.attrib['type'] == 'roughplastic':
        diffuse_reflectance = torch.tensor([0.5, 0.5, 0.5])
        diffuse_uv_scale = torch.tensor([1.0, 1.0])
        specular_reflectance = torch.tensor([0.0, 0.0, 0.0])
        specular_uv_scale = torch.tensor([1.0, 1.0])
        roughness = torch.tensor([1.0])
        roughness_uv_scale = torch.tensor([1.0, 1.0])
        for child in node:
            if child.attrib['name'] == 'diffuseReflectance':
                if child.tag == 'texture':
                    diffuse_reflectance, diffuse_uv_scale = parse_texture(child)
                elif child.tag == 'rgb' or child.tag == 'spectrum':
                    diffuse_reflectance = parse_vector(child.attrib['value'])
            elif child.attrib['name'] == 'specularReflectance':
                if child.tag == 'texture':
                    specular_reflectance, specular_uv_scale = parse_texture(child)
                elif child.tag == 'rgb' or child.tag == 'spectrum':
                    specular_reflectance = parse_vector(child.attrib['value'])
            elif child.attrib['name'] == 'alpha':
                # 'alpha texture' support
                if child.tag == 'texture':
                    roughness, roughness_uv_scale = parse_texture(child)
                    # ? not sure whether to square here
                elif child.tag == 'float':
                    alpha = float(child.attrib['value'])
                    roughness = torch.tensor([alpha * alpha])
        if pyredner.get_use_gpu():
            # Copy to GPU
            diffuse_reflectance = diffuse_reflectance.cuda()
            specular_reflectance = specular_reflectance.cuda()
            roughness = roughness.cuda()
        return (node_id, pyredner.Material(
            diffuse_reflectance=pyredner.Texture(diffuse_reflectance, diffuse_uv_scale),
            specular_reflectance=pyredner.Texture(specular_reflectance, specular_uv_scale),
            roughness=pyredner.Texture(roughness, roughness_uv_scale),
            two_sided=two_sided))
    elif node.attrib['type'] == 'twosided':
        ret = parse_material(node[0], True)
        return (node_id, ret[1])
    elif node.attrib['type'] == 'mask':
        # Simply bypass the mask's opacity.
        # TODO: add opacity!
        ret = parse_material(node[0])
        return (node_id, ret[1])
    else:
        print('Unsupported material type:', node.attrib['type'])
        assert False
# Example of optimizing the vertex colors of a sphere.

# Use GPU if available
pyredner.set_use_gpu(torch.cuda.is_available())

cam = pyredner.Camera(
    position=torch.tensor([0.0, 0.0, -5.0]),
    look_at=torch.tensor([0.0, 0.0, 0.0]),
    up=torch.tensor([0.0, 1.0, 0.0]),
    fov=torch.tensor([45.0]),  # in degrees
    clip_near=1e-2,  # needs to be > 0
    resolution=(256, 256))

# Set use_vertex_color = True to use vertex colors.
mat_vertex_color = pyredner.Material(use_vertex_color=True)
materials = [mat_vertex_color]

vertices, indices, uvs, normals = pyredner.generate_sphere(128, 64)
# For the target we randomize the vertex colors.
vertex_color = torch.zeros_like(vertices).uniform_(0.0, 1.0)
shape_sphere = pyredner.Shape(
    vertices=vertices,
    indices=indices,
    uvs=uvs,
    normals=normals,
    colors=vertex_color,  # use the 'colors' field in Shape to store the color
    material_id=0)
shapes = [shape_sphere]

envmap = pyredner.imread('sunsky.exr')
position = torch.tensor([0.0, 2.0, -5.0])
look_at = torch.tensor([0.0, 0.0, 0.0])
up = torch.tensor([0.0, 1.0, 0.0])
fov = torch.tensor([45.0])
clip_near = 1e-2
resolution = (256, 256)
cam = pyredner.Camera(position=position,
                      look_at=look_at,
                      up=up,
                      fov=fov,
                      clip_near=clip_near,
                      resolution=resolution)

mat_grey = pyredner.Material(
    diffuse_reflectance=torch.tensor([0.5, 0.5, 0.5], device=pyredner.get_device()))
mat_black = pyredner.Material(
    diffuse_reflectance=torch.tensor([0.0, 0.0, 0.0], device=pyredner.get_device()))
materials = [mat_grey, mat_black]

floor_vertices = torch.tensor([[-2.0, 0.0, -2.0],
                               [-2.0, 0.0, 2.0],
                               [2.0, 0.0, -2.0],
                               [2.0, 0.0, 2.0]],
                              device=pyredner.get_device())
floor_indices = torch.tensor([[0, 1, 2], [1, 3, 2]],
                             device=pyredner.get_device(),
                             dtype=torch.int32)
shape_floor = pyredner.Shape(floor_vertices, floor_indices, material_id=0)
blocker_vertices = torch.tensor([[-0.5, 3.0, -0.5],
                                 [-0.5, 3.0, 0.5],
                                 [0.5, 3.0, -0.5],
                                 [0.5, 3.0, 0.5]],
face_max = int(sys.argv[5])

# Load Target model
target_objects = pyredner.load_obj(target_obj_file, return_objects=True)
print(target_objects[0].vertices.shape)
target_objects[0].uvs, target_objects[0].uv_indices = pyredner.compute_uvs(
    target_objects[0].vertices, target_objects[0].indices)

diffuse = pyredner.imread('resources/wood_diffuse_low_res.jpg')
specular_uv = torch.tensor([0.01, 0.01, 0.01])
for_roughness = pyredner.imread('resources/wood_specular_low_res.jpg') / 100.0
roughness = (1.0 - for_roughness) / 10.0
normal_map = None
target_objects[0].material = pyredner.Material(
    diffuse_reflectance=torch.tensor([1.0, 1.0, 1.0], device=pyredner.get_device()))
#, specular_reflectance=specular_uv, roughness=roughness, normal_map=normal_map)

resolution = (512, 512)
num_cameras = 32
radius = 1.4
camera0 = pyredner.automatic_camera_placement(target_objects, resolution)
camLocs = fibonacci_sphere(num_cameras, False)
target_scenes = generate_scenes(camLocs, target_objects, None)

# Render Targets
#targets = pyredner.render_pathtracing(scene=target_scenes, num_samples=(128, 0), max_bounces=2, seed=None)
targets = pyredner.render_albedo(scene=target_scenes, num_samples=(64, 0))
for ind, img in enumerate(targets):
    img = img[:, :, 0].data.cpu()