def model(cam_pos, cam_look_at, shape_coeffs, color_coeffs, resolution, center, all_euler_angles, all_translations):
    """Render one image per pose of a sphere mesh rotated about *center*.

    For each pose i, the base sphere vertices are rotated around ``center`` by
    ``all_euler_angles[i]`` and the scene is path-traced with the module-level
    environment map, returning the list of rendered images.

    Parameters
    ----------
    cam_pos, cam_look_at : camera position / target passed to pyredner.Camera.
    shape_coeffs, color_coeffs : unused in the active code path — only
        referenced by the commented-out 3DMM lines below.
    resolution : (height, width) tuple forwarded to pyredner.Camera.
    center : tensor; pivot point of the per-pose rotation.
    all_euler_angles : per-pose Euler angles fed to pyredner.gen_rotate_matrix.
    all_translations : only its length is used to count poses; the translation
        itself is applied only in the commented-out line (see NOTE below).

    Returns
    -------
    list of rendered images (one pyredner render per pose).

    NOTE(review): relies on module-level globals ``name`` (output path) and
    ``envmap`` — confirm both are defined before calling.
    """
    # First rotate around center, then translation
    imgs = []
    #obj = pyredner.load_obj('p_ones30/final.obj', return_objects=True)[0]
    # Base geometry: a UV sphere scaled to radius 80 with a constant
    # white diffuse texture (2x2x3 texel map).
    vertices, indices, uvs, normals = pyredner.generate_sphere(128, 64)
    vertices *= 80
    m = pyredner.Material(diffuse_reflectance=torch.ones(2, 2, 3, dtype=torch.float32))
    obj = pyredner.Object(vertices=vertices, indices=indices, normals=normals, uvs=uvs, material=m)
    # Keep an untransformed copy so each pose rotates the ORIGINAL vertices,
    # not the result of the previous iteration.
    v = obj.vertices.clone()
    for i in range(len(all_translations)):
        rotation_matrix = pyredner.gen_rotate_matrix(all_euler_angles[i]).to(pyredner.get_device())
        center = center.to(pyredner.get_device())
        # vertices = ((shape_mean + shape_basis @ shape_coeffs).view(-1, 3) - center) @ torch.t(rotation_matrix) + center + all_translations[i].to(pyredner.get_device())
        # NOTE(review): unlike the commented-out line above, the active line
        # does NOT add all_translations[i] — poses are rotation-only. Confirm
        # this is intentional.
        obj.vertices = (v - center) @ torch.t(rotation_matrix) + center
        # Recompute shading normals for the rotated geometry.
        obj.normals = pyredner.compute_vertex_normal(obj.vertices, indices)
        # colors = (color_mean + color_basis @ color_coeffs).view(-1, 3)
        # m = pyredner.Material(diffuse_reflectance = torch.tensor([0.5, 0.5, 0.5]))
        # NOTE(review): this material is created but never assigned to obj, so
        # rendering still uses the diffuse material from above — dead store
        # unless obj.material = m was meant here.
        m = pyredner.Material(use_vertex_color=True)
        # obj = pyredner.Object(vertices=vertices, indices=indices, normals=normals, material=m, colors=colors)
        if i == 0:
            # Save the first (reference) pose as the target mesh on disk.
            pyredner.save_obj(obj, "generated/env_dataset_" + name + '/tgt_obj.obj')
        cam = pyredner.Camera(
            position=cam_pos,
            look_at=cam_look_at,  # Center of the vertices
            up=torch.tensor([0.0, 1.0, 0.0]),
            fov=torch.tensor([45.0]),
            resolution=resolution)
        scene = pyredner.Scene(camera=cam, objects=[obj], envmap=envmap)
        # Path-trace with 128 samples forward / 4 samples backward pass.
        img = pyredner.render_pathtracing(scene=scene, num_samples=(128, 4))
        imgs.append(img)
    return imgs
fov=torch.tensor([45.0]), # in degree clip_near=1e-2, # needs to > 0 resolution=(256, 256), fisheye=False) mat_grey = pyredner.Material(\ diffuse_reflectance = \ torch.tensor([0.4, 0.4, 0.4], device = pyredner.get_device()), specular_reflectance = \ torch.tensor([0.5, 0.5, 0.5], device = pyredner.get_device()), roughness = \ torch.tensor([0.05], device = pyredner.get_device())) materials = [mat_grey] vertices, indices, uvs, normals = pyredner.generate_sphere(128, 64) shape_sphere = pyredner.Shape(\ vertices = vertices, indices = indices, uvs = uvs, normals = normals, material_id = 0) shapes = [shape_sphere] envmap = pyredner.imread('sunsky.exr') if pyredner.get_use_gpu(): envmap = envmap.cuda(device=pyredner.get_device()) envmap = pyredner.EnvironmentMap(envmap) scene = pyredner.Scene(cam, shapes, materials, [], envmap) scene_args = pyredner.RenderFunction.serialize_scene(\ scene = scene,
print(vars(args)) os.chdir('..') os.system("rm -rf " + output_path) pyredner.set_print_timing(False) shape_mean, shape_basis, triangle_list, color_mean, color_basis = np.load("3dmm.npy", allow_pickle=True) #indices = triangle_list.permute(1, 0).contiguous() #vertices = shape_mean.view(-1, 3) obj = pyredner.load_obj("new_init.obj", return_objects=True)[0] indices = obj.indices.detach() vertices = obj.vertices.detach() if 0: vertices, indices, uvs, normals = pyredner.generate_sphere(theta_steps=256, phi_steps=512) vertices = vertices * 120 vertices.requires_grad = True target_data_path = "generated/dataset2/" cam_poses, cam_look_at, dir_light_intensity, dir_light_directions = np.load(target_data_path + "env_data.npy", allow_pickle=True) #cam_poses = cam_poses[:1] def model(cam_pos, cam_look_at, vertices, ambient_color, dir_light_intensity, dir_light_direction, normals): # normals = pyredner.compute_vertex_normal(vertices, indices, normal_scheme) m = pyredner.Material(diffuse_reflectance=torch.tensor([0.5, 0.5, 0.5])) obj = pyredner.Object(vertices=vertices, indices=indices, normals=normals, material=m) # , colors=colors) cam = pyredner.Camera(position=cam_pos, look_at=cam_look_at, # Center of the vertices
resolution = (256, 256), fisheye = False) # Setup material mat_grey = pyredner.Material(\ diffuse_reflectance = \ torch.tensor([0.4, 0.4, 0.4], device = pyredner.get_device()), specular_reflectance = \ torch.tensor([0.5, 0.5, 0.5], device = pyredner.get_device()), roughness = \ torch.tensor([0.02], device = pyredner.get_device())) materials = [mat_grey] # Setup scene geometry: we use the utility function "generate_sphere" to generate a sphere # triangle mesh vertices, indices, uvs, normals = pyredner.generate_sphere(128, 64) shape_sphere = pyredner.Shape(\ vertices = vertices, indices = indices, uvs = uvs, normals = normals, material_id = 0) shapes = [shape_sphere] # Setup lighting: the scene is lit by a single environment map, parameterized by 3rd-order # spherical harmonics coefficients. # First we setup the target coefficients for r, g, b, # taken from https://cseweb.ucsd.edu/~ravir/papers/envmap/envmap.pdf # Last 7 coefficients are randomly determined coeffs = torch.tensor([[ 0.79, 0.39, -0.35, -0.34,