import torch
import pyredner as pyr


def init_renderer(mesh):
    """Load the mesh's .obj file and build a fixed camera plus two directional lights."""
    obj_fp = mesh.filename
    pyr.set_print_timing(False)
    objects = pyr.load_obj(obj_fp, return_objects=True)
    # camera = pyr.automatic_camera_placement(objects, (256, 256))
    camera = pyr.Camera(position=torch.tensor([1.2, 0, 0], dtype=torch.float32),
                        look_at=torch.tensor([0, 0, 0], dtype=torch.float32),
                        up=torch.tensor([0, 1, 0], dtype=torch.float32),
                        fov=torch.tensor([60], dtype=torch.float32),
                        resolution=(256, 256),
                        camera_type=pyr.camera_type.perspective)
    # Two opposing directional lights so both sides of the mesh are lit.
    lights = [
        pyr.DirectionalLight(direction=torch.tensor([-1, 0, 0], dtype=torch.float32, device=pyr.get_device()),
                             intensity=torch.tensor([1, 1, 1], dtype=torch.float32, device=pyr.get_device())),
        pyr.DirectionalLight(direction=torch.tensor([1, 0, 0], dtype=torch.float32, device=pyr.get_device()),
                             intensity=torch.tensor([1, 1, 1], dtype=torch.float32, device=pyr.get_device())),
    ]
    return objects, camera, lights
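# Not part of the original snippet: a minimal usage sketch showing how the returned
# objects, camera, and lights could be fed to pyredner's deferred renderer. The `mesh`
# object (anything with a `.filename` attribute pointing at a valid .obj) is an assumption.
objects, camera, lights = init_renderer(mesh)
scene = pyr.Scene(camera=camera, objects=objects)
img = pyr.render_deferred(scene=scene, lights=lights)  # HWC image tensor on pyr.get_device()
pyr.imwrite(img.cpu(), 'render.png')  # gamma-corrects and writes to disk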
import torch
import pyredner


def model(cam_pos, cam_look_at, vertices, color_coeffs, ambient_color,
          dir_light_intensity, dir_light_direction):
    """Render the mesh with a fixed grey material; `indices`, `color_mean`, and
    `color_basis` are module-level globals from the morphable model."""
    # vertices = (shape_mean + shape_basis @ torch.zeros(199, device=pyredner.get_device())).view(-1, 3)
    normals = pyredner.compute_vertex_normal(vertices, indices)
    colors = (color_mean + color_basis @ color_coeffs).view(-1, 3)
    # m = pyredner.Material(use_vertex_color=True)
    m = pyredner.Material(diffuse_reflectance=torch.tensor([0.5, 0.5, 0.5]))
    obj = pyredner.Object(vertices=vertices, indices=indices, normals=normals,
                          material=m, colors=colors)
    cam = pyredner.Camera(position=cam_pos,
                          look_at=cam_look_at,  # center of the vertices
                          up=torch.tensor([0.0, 1.0, 0.0]),
                          fov=torch.tensor([45.0]),
                          resolution=(512, 512))
    scene = pyredner.Scene(camera=cam, objects=[obj])
    ambient_light = pyredner.AmbientLight(ambient_color)
    dir_light = pyredner.DirectionalLight(dir_light_direction, dir_light_intensity)
    img = pyredner.render_deferred(scene=scene, lights=[ambient_light, dir_light])
    return img, obj
import torch
import pyredner


def model(cam_pos, cam_look_at, vertices, indices, ambient_color,
          dir_light_intensity, dir_light_direction, normals, colors):
    """Render the mesh using per-vertex colors instead of a uniform material."""
    # normals = pyredner.compute_vertex_normal(vertices, indices, normal_scheme)
    # m = pyredner.Material(diffuse_reflectance=torch.tensor([0.5, 0.5, 0.5]))
    m = pyredner.Material(use_vertex_color=True)
    obj = pyredner.Object(vertices=vertices, indices=indices, normals=normals,
                          material=m, colors=colors)
    cam = pyredner.Camera(position=cam_pos,
                          look_at=cam_look_at,  # center of the vertices
                          up=torch.tensor([0.0, 1.0, 0.0]),
                          fov=torch.tensor([45.0]),
                          resolution=(1000, 1000))
    scene = pyredner.Scene(camera=cam, objects=[obj])
    ambient_light = pyredner.AmbientLight(ambient_color)
    dir_light = pyredner.DirectionalLight(dir_light_direction, dir_light_intensity)
    img = pyredner.render_deferred(scene=scene, lights=[ambient_light, dir_light],
                                   aa_samples=1)
    return img
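# Not part of the original snippets: a minimal sketch of how a `model` function like this
# is typically driven in a gradient-based fitting loop. It assumes a target image
# `target_img` and pre-loaded `cam_pos`, `cam_look_at`, `vertices`, `indices`, `normals`,
# and `colors` tensors (colors on pyredner's device); only the per-vertex colors are optimized.
colors = colors.detach().requires_grad_(True)
optimizer = torch.optim.Adam([colors], lr=1e-2)
ambient_color = torch.tensor([0.3, 0.3, 0.3], device=pyredner.get_device())
dir_light_intensity = torch.tensor([1.0, 1.0, 1.0], device=pyredner.get_device())
dir_light_direction = torch.tensor([0.0, 0.0, -1.0], device=pyredner.get_device())
for it in range(200):
    optimizer.zero_grad()
    img = model(cam_pos, cam_look_at, vertices, indices, ambient_color,
                dir_light_intensity, dir_light_direction, normals, colors)
    loss = (img - target_img).pow(2).mean()  # pixel-wise MSE against the target
    loss.backward()
    optimizer.step()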
def parse_one_light(dict_light):
    """Build a pyredner light from a config dict; only directional lights are supported."""
    if dict_light['type'] == 'directional':
        return pyr.DirectionalLight(
            direction=gpu(dict_light['direction']),   # `gpu` is presumably a project helper that puts the value on pyr.get_device()
            intensity=gpu(dict_light['intensity'])
        )
    else:
        return None
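# Not part of the original snippet: a hypothetical call illustrating the dict layout
# this parser expects (the exact config format is an assumption).
light_cfg = {'type': 'directional', 'direction': [0.0, -1.0, 0.0], 'intensity': [2.0, 2.0, 2.0]}
light = parse_one_light(light_cfg)  # returns None for unsupported light types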
import torch
import pyredner
import numpy as np
import os
import sys
import argparse

dir_light_directions = torch.tensor([[-1.0, -1.0, -1.0],
                                     [1.0, -0.0, -1.0],
                                     [0.0, 0.0, -1.0]])
dir_light_intensities = torch.ones(3, dtype=torch.float32).expand(3, 3)
dir_lights = [
    pyredner.DirectionalLight(dir_light_directions[i], dir_light_intensities[i])
    for i in range(len(dir_light_directions))
]
# DirectionalLight instances are Python objects, so this is an object-dtype array
# that np.save stores via pickling.
data = np.array(dir_lights)
np.save("data.npy", data)
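# Not part of the original snippet: a short round-trip check. Because the saved array holds
# pickled Python objects, np.load needs allow_pickle=True to read it back.
loaded = np.load("data.npy", allow_pickle=True)
for light in loaded:
    print(light.direction, light.intensity)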