Example #1
def init_renderer(mesh):
    obj_fp = mesh.filename
    pyr.set_print_timing(False)
    objects = pyr.load_obj(obj_fp, return_objects=True)
    #camera = pyr.automatic_camera_placement(objects, (256, 256))
    camera = pyr.Camera(position=torch.tensor([1.2, 0, 0],
                                              dtype=torch.float32),
                        look_at=torch.tensor([0, 0, 0], dtype=torch.float32),
                        up=torch.tensor([0, 1, 0], dtype=torch.float32),
                        fov=torch.tensor([60], dtype=torch.float32),
                        resolution=(256, 256),
                        camera_type=pyr.camera_type.perspective)
    lights = [
        pyr.DirectionalLight(direction=torch.tensor([-1, 0, 0],
                                                    dtype=torch.float32,
                                                    device=pyr.get_device()),
                             intensity=torch.tensor([1, 1, 1],
                                                    dtype=torch.float32,
                                                    device=pyr.get_device())),
        pyr.DirectionalLight(direction=torch.tensor([1, 0, 0],
                                                    dtype=torch.float32,
                                                    device=pyr.get_device()),
                             intensity=torch.tensor([1, 1, 1],
                                                    dtype=torch.float32,
                                                    device=pyr.get_device()))
    ]
    return objects, camera, lights
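A minimal usage sketch for init_renderer (the MeshHandle stand-in and the output path are assumptions; pyredner's render_deferred is the deferred-shading renderer these directional lights are built for):

import torch
import pyredner as pyr

class MeshHandle:  # hypothetical stand-in for whatever object carries the .obj path
    filename = 'teapot.obj'

objects, camera, lights = init_renderer(MeshHandle())
scene = pyr.Scene(camera=camera, objects=objects)
img = pyr.render_deferred(scene=scene, lights=lights)
pyr.imwrite(img.cpu(), 'results/preview.png')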
Example #2
def decimate(obj: pyr.Object, verbose=False):
    """
    Decimates a mesh, reducing the number of faces by half.
    This is EXTREMELY inefficient, and not differentiable - use it sparingly!
    Modifies the input mesh.
    """
    # Let's make a temporary directory
    intermediate_dir = tempfile.mkdtemp()

    orig_out = os.path.join(intermediate_dir, "orig.obj")
    new_out = os.path.join(intermediate_dir, "decim.obj")

    if verbose:
        print("Made temp dir:")
        print(intermediate_dir)

    # First, save the redner object to disk
    pyr.save_obj(obj, orig_out)
    # Now, let's load in openmesh
    mesh = openmesh.read_trimesh(orig_out)
    # Now, decimate by half
    orig_nfaces = mesh.n_faces()

    if verbose:
        print("Original # of faces:", orig_nfaces)

    decimator = openmesh.TriMeshDecimater(mesh)
    algorithm = openmesh.TriMeshModQuadricHandle()

    decimator.add(algorithm)
    decimator.initialize()
    decimator.decimate_to_faces(n_faces=round(orig_nfaces / 2))

    mesh.garbage_collection()

    if verbose:
        print("New # of faces:", mesh.n_faces())

    openmesh.write_mesh(new_out, mesh)

    # Now, we have it. Load it back into redner
    decim_obj = pyr.load_obj(new_out, return_objects=True)[0]
    # And set the faces/indices
    obj.vertices = decim_obj.vertices
    obj.indices = decim_obj.indices

    # Recompute normals - the face normals have been broken
    recompute_normals(obj)

    # Finally, clean up the dir
    files_to_delete = os.listdir(intermediate_dir)
    for each_file in files_to_delete:
        apath = os.path.join(intermediate_dir, each_file)
        if verbose:
            print("Deleting", apath)
        os.remove(apath)
    if verbose:
        print("Deleting", intermediate_dir)
    os.rmdir(intermediate_dir)
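A hedged usage sketch for decimate (the path is illustrative; recompute_normals, called inside decimate, must already be in scope):

import pyredner as pyr

obj = pyr.load_obj('bunny.obj', return_objects=True)[0]
decimate(obj, verbose=True)  # obj.vertices / obj.indices now hold roughly half the faces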
Example #3
def load_3d(mesh_name):
    '''
    Loads a 3D model, computing vertex normals as needed
    '''
    dpath = os.path.join(mydir, mesh_name)
    fpath = os.path.join(dpath, "mesh.obj")
    if os.path.isfile(fpath):
        obj = pyr.load_obj(fpath, return_objects=True)[0]
        recompute_normals(obj)
        texpath = os.path.join(dpath, "texture.png")
        if os.path.isfile(texpath):
            tex_img = pyr.imread(texpath)
            obj.material.diffuse_reflectance = pyr.Texture(tex_img)
        return obj
    else:
        raise FileNotFoundError(f"Could not find {fpath}")
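Usage sketch (the module-level mydir and the recompute_normals helper that load_3d relies on are assumed to be defined elsewhere in the original module):

obj = load_3d('bunny')  # expects <mydir>/bunny/mesh.obj, plus an optional texture.png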
Example #4
def load_obj(cls, path, learn_tex_label, device):
    mtl_map, mesh_list, _ = pyredner.load_obj(path)
    mtl_id_map = dict()
    materials = list()
    cnt = 0
    for k, v in mtl_map.items():
        mtl_id_map[k] = cnt
        cnt += 1
        materials.append(v)
    assert (learn_tex_label in mtl_id_map)
    learn_tex_idx = mtl_id_map[learn_tex_label]
    shapes = list()
    for mtl_name, mesh in mesh_list:
        shapes.append(
            pyredner.Shape(vertices=mesh.vertices,
                           indices=mesh.indices,
                           uvs=mesh.uvs,
                           normals=mesh.normals,
                           material_id=mtl_id_map[mtl_name]))
    return cls(materials, shapes, learn_tex_idx)
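This is a classmethod on some scene-wrapper class; a hedged sketch of a call site (the SceneWrapper name and the material label are hypothetical, and the label must match a material name in the .mtl file):

import torch

wrapper = SceneWrapper.load_obj('model.obj',
                                learn_tex_label='face_material',
                                device=torch.device('cpu'))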
Example #5
def model(cam_pos, cam_look_at, shape_coeffs, color_coeffs, resolution,
          center, all_euler_angles, all_translations):
    # First rotate around center, then translation

    imgs = []

    obj = pyredner.load_obj('p_ones30/final.obj', return_objects=True)[0]
    #vertices, indices, uvs, normals = pyredner.generate_sphere(128, 64)
    #vertices *= 80
    #m = pyredner.Material(diffuse_reflectance=torch.ones(2, 2, 3, dtype=torch.float32))
    #obj = pyredner.Object(vertices=vertices, indices=indices, normals=normals, uvs=uvs, material=m)
    v = obj.vertices.clone()

    for i in range(len(all_translations)):
        rotation_matrix = pyredner.gen_rotate_matrix(all_euler_angles[i]).to(pyredner.get_device())
        center = center.to(pyredner.get_device())
        # vertices = ((shape_mean + shape_basis @ shape_coeffs).view(-1, 3) - center) @ torch.t(rotation_matrix) + center + all_translations[i].to(pyredner.get_device())
        obj.vertices = (v - center) @ torch.t(rotation_matrix) + center
        # normals = pyredner.compute_vertex_normal(vertices, indices)
        # colors = (color_mean + color_basis @ color_coeffs).view(-1, 3)
        # m = pyredner.Material(diffuse_reflectance = torch.tensor([0.5, 0.5, 0.5]))
        m = pyredner.Material(use_vertex_color=True)
        # obj = pyredner.Object(vertices=vertices, indices=indices, normals=normals, material=m, colors=colors)

        if i == 0:
            pyredner.save_obj(obj, "generated/env_dataset_" + name + '/tgt_obj.obj')

        cam = pyredner.Camera(position=cam_pos,
                              look_at=cam_look_at,  # Center of the vertices
                              up=torch.tensor([0.0, 1.0, 0.0]),
                              fov=torch.tensor([45.0]),
                              resolution=resolution)
        scene = pyredner.Scene(camera=cam, objects=[obj], envmap=envmap)

        img = pyredner.render_pathtracing(scene=scene, num_samples=(128, 4))
        imgs.append(img)
    return imgs
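A hedged call sketch for the model above (the module globals envmap and name are assumed to be set; the shape/color coefficients are unused by this particular body, and all tensors are illustrative):

import torch

imgs = model(cam_pos=torch.tensor([0.0, 0.0, 300.0]),
             cam_look_at=torch.tensor([0.0, 0.0, 0.0]),
             shape_coeffs=None,
             color_coeffs=None,
             resolution=(256, 256),
             center=torch.zeros(3),
             all_euler_angles=[torch.zeros(3)],
             all_translations=[torch.zeros(3)])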
Example #6
def model(cam_poses, cam_look_ats, shape_coeffs, color_coeffs, resolution):
    # First rotate around center, then translation

    imgs = []

    vertices = (shape_mean + shape_basis @ shape_coeffs).view(-1, 3)
    normals = pyredner.compute_vertex_normal(vertices, indices)
    colors = (color_mean + color_basis @ color_coeffs).view(-1, 3)
    m = pyredner.Material(use_vertex_color=False,
                          specular_reflectance=torch.tensor(
                              [1., 1., 1.], device=pyredner.get_device()),
                          roughness=torch.tensor([0.02]))
    obj = pyredner.Object(vertices=vertices,
                          indices=indices,
                          normals=normals,
                          material=m,
                          colors=colors)
    obj = pyredner.load_obj('generated/env_dataset_oness_n/tgt_obj.obj',
                            return_objects=True)[0]
    obj.material.specular_reflectance = pyredner.Texture(
        torch.tensor([0.05, 0.05, 0.05], device=pyredner.get_device()))
    obj.material.roughness = pyredner.Texture(torch.tensor([0.02]))
    pyredner.save_obj(obj, "generated/senv_dataset_" + name + '/tgt_obj.obj')

    for i in range(len(cam_poses)):
        cam = pyredner.Camera(
            position=cam_poses[i],
            look_at=cam_look_ats[i %
                                 len(cam_look_ats)],  # Center of the vertices
            up=torch.tensor([0.0, 1.0, 0.0]),
            fov=torch.tensor([45.0]),
            resolution=resolution)
        scene = pyredner.Scene(camera=cam, objects=[obj], envmap=envmap)

        img = pyredner.render_pathtracing(scene=scene, num_samples=(128, 4))
        imgs.append(img)
    return imgs
Example #7
def fibonacci_sphere(samples, randomize=True):
    # NOTE: the listing was truncated here; this head is reconstructed from the
    # standard fibonacci-sphere snippet so the tail below parses. It assumes
    # `import math` and `import random` at the top of the original script.
    rnd = random.random() * samples if randomize else 1.0
    points = []
    offset = 2.0 / samples
    increment = math.pi * (3.0 - math.sqrt(5.0))
    for i in range(samples):
        y = (i * offset - 1) + offset / 2
        r = math.sqrt(1 - y * y)
        phi = ((i + rnd) % samples) * increment
        x = math.cos(phi) * r
        z = math.sin(phi) * r

        points.append([x, y, z])

    return points

lights = []
num_cameras = 1
fov = 45.0
resolution = 1
look_at = torch.tensor([0.0, 0.0, 0.0])
camLocs = fibonacci_sphere(num_cameras, False)


cams = []
target_objects = pyredner.load_obj('resources/monkey.obj', return_objects=True)
camera0 = pyredner.automatic_camera_placement(target_objects, resolution)
for ind, pos in enumerate(camLocs):
    pos = torch.tensor([0.5, 0.0, 100.0])
    normal = pos.div(torch.norm(pos))                                                     
    pos = normal * radius2    
    lights.append(pyredner.generate_quad_light(position = pos + torch.tensor([0.0,0.0,-15.0]), \
                                     look_at = camera0.look_at, \
                                     size = torch.tensor([2.0, 2.0]), \
                                     intensity = torch.tensor([light_intensity, light_intensity, light_intensity])))
print("LIGHT ONE DONE")
for ind, pos in enumerate(camLocs):
    pos = torch.tensor([100, 0.0, -3.0]) 
    normal = pos.div(torch.norm(pos - torch.tensor([-3.0, 0.0, -3.0]) ))                                                   
    pos = normal * radius2   
Example #8
cam = pyredner.Camera(
    position=torch.tensor([0.0, -0.0, -5.0]),  # -8.5
    look_at=torch.tensor([0.0, 0.0, 0.0]),
    up=torch.tensor([0.0, 1.0, 0.0]),
    fov=torch.tensor([45.0]),  # in degree
    clip_near=1e-2,  # needs to > 0
    resolution=(1650, 2843),
    fisheye=False)

folder_name = 'HDR_Cube_2'

#%
#material_map, mesh_list, light_map = pyredner.load_obj('ReferenceOutputMeshes/cylinderHighNVO.obj')
#material_map1, mesh_list1, light_map1 = pyredner.load_obj('hemisphere.obj')

material_map2, mesh_list2, light_map2 = pyredner.load_obj(
    'ReferenceOutputMeshes/cubeNVO.obj')

#material_map3, mesh_list3, light_map3 = pyredner.load_obj('cone.obj')
#%%
# The mesh we loaded is relatively low-poly and doesn't have vertex normals.
# Fortunately we can compute the vertex normal from the neighbor vertices.
# We can use pyredner.compute_vertex_normal for this task:
# (Try commenting out the following two lines to see the differences in target images!)

#for _, mesh in mesh_list:
#    mesh.normals = pyredner.compute_vertex_normal(mesh.vertices/2.5, mesh.indices)
#    print (_) # None

#for _, mesh1 in mesh_list1:
#    mesh1.normals = pyredner.compute_vertex_normal(mesh1.vertices/1, mesh1.indices)
#    print (_)
Example #9
        scene = pyredner.Scene(camera=cam, objects=[obj])
        grad_imgs.append(pyredner.render_albedo(scene=scene))
    for d in range(3):
        pyredner.imwrite(
            grad_imgs[d].cpu(), output_path +
            '/grad_imgs/{:0>2d}{:0>2d}.png'.format(d, visual_vertex_grad.x))
    return grad_imgs


# <editor-fold desc="LOADING DATA">
os.chdir('..')
os.system("rm -rf " + output_path)

pyredner.set_print_timing(False)

obj = pyredner.load_obj("init/final.obj", return_objects=True)[0]
indices = obj.indices.detach().clone()
vertices = obj.vertices.detach().clone()
v = vertices.clone()
ideal_shift = pyredner.smooth(vertices,
                              indices,
                              0.,
                              smooth_scheme,
                              return_shift=True)
ideal_quad_shift = pyredner.smooth(ideal_shift,
                                   indices,
                                   0.,
                                   smooth_scheme,
                                   return_shift=True)
tex = obj.material.diffuse_reflectance.texels
texels = pyredner.imresize(tex, (200, 200))
Example #10
smooth_lmd = args.smooth_lmd
output_path = args.output_path
num_iters_1 = args.num_iters_1
print(vars(args))

#</editor-fold>

os.chdir('..')
#os.system("rm -rf " + output_path)

pyredner.set_print_timing(False)

#shape_mean, shape_basis, triangle_list, color_mean, color_basis = np.load("3dmm.npy", allow_pickle=True)
#indices = triangle_list.permute(1, 0).contiguous()
#vertices = shape_mean.view(-1, 3)
obj = pyredner.load_obj("p_ones30/final.obj", return_objects=True)[0]
indices = obj.indices.detach()
vertices = obj.vertices.detach()

tex = obj.material.diffuse_reflectance.texels

texels = pyredner.imresize(tex, (200, 200))
print('texels size: ', texels.size())
texels.requires_grad = True
m = pyredner.Material(diffuse_reflectance=texels)

vertices.requires_grad = True

target_data_path = "generated/dataset_ones30/"
cam_poses, cam_look_at, lights_list = np.load(target_data_path +
                                              "env_data.npy",
                                              allow_pickle=True)
Example #11
def parse_shape(node, material_dict, shape_id, device, shape_group_dict=None):
    if node.attrib['type'] == 'obj' or node.attrib['type'] == 'serialized':
        to_world = torch.eye(4)
        serialized_shape_id = 0
        mat_id = -1
        light_intensity = None
        filename = ''
        max_smooth_angle = -1
        for child in node:
            if 'name' in child.attrib:
                if child.attrib['name'] == 'filename':
                    filename = child.attrib['value']
                elif child.attrib['name'] == 'toWorld':
                    to_world = parse_transform(child)
                elif child.attrib['name'] == 'shapeIndex':
                    serialized_shape_id = int(child.attrib['value'])
                elif child.attrib['name'] == 'maxSmoothAngle':
                    max_smooth_angle = float(child.attrib['value'])
            if child.tag == 'ref':
                mat_id = material_dict[child.attrib['id']]
            elif child.tag == 'emitter':
                for grandchild in child:
                    if grandchild.attrib['name'] == 'radiance':
                        light_intensity = parse_vector(
                            grandchild.attrib['value'])
                        if light_intensity.shape[0] == 1:
                            light_intensity = torch.tensor(\
                                         [light_intensity[0],
                                          light_intensity[0],
                                          light_intensity[0]])

        if node.attrib['type'] == 'obj':
            # Load in CPU for rebuild_topology
            _, mesh_list, _ = pyredner.load_obj(filename,
                                                obj_group=False,
                                                device=torch.device('cpu'))
            vertices = mesh_list[0][1].vertices
            indices = mesh_list[0][1].indices
            uvs = mesh_list[0][1].uvs
            normals = mesh_list[0][1].normals
            uv_indices = mesh_list[0][1].uv_indices
            normal_indices = mesh_list[0][1].normal_indices
        else:
            assert (node.attrib['type'] == 'serialized')
            mitsuba_tri_mesh = redner.load_serialized(filename,
                                                      serialized_shape_id)
            vertices = torch.from_numpy(mitsuba_tri_mesh.vertices)
            indices = torch.from_numpy(mitsuba_tri_mesh.indices)
            uvs = torch.from_numpy(mitsuba_tri_mesh.uvs)
            normals = torch.from_numpy(mitsuba_tri_mesh.normals)
            if uvs.shape[0] == 0:
                uvs = None
            if normals.shape[0] == 0:
                normals = None
            uv_indices = None  # Serialized doesn't use different indices for UV & normal
            normal_indices = None

        # Transform the vertices and normals
        vertices = torch.cat((vertices, torch.ones(vertices.shape[0], 1)),
                             dim=1)
        vertices = vertices @ torch.transpose(to_world, 0, 1)
        vertices = vertices / vertices[:, 3:4]
        vertices = vertices[:, 0:3].contiguous()
        if normals is not None:
            normals = normals @ (torch.inverse(torch.transpose(to_world, 0,
                                                               1))[:3, :3])
            normals = normals.contiguous()
        assert (vertices is not None)
        assert (indices is not None)
        if max_smooth_angle >= 0:
            if normals is None:
                normals = torch.zeros_like(vertices)
            new_num_vertices = redner.rebuild_topology(\
                redner.float_ptr(vertices.data_ptr()),
                redner.int_ptr(indices.data_ptr()),
                redner.float_ptr(uvs.data_ptr() if uvs is not None else 0),
                redner.float_ptr(normals.data_ptr() if normals is not None else 0),
                redner.int_ptr(uv_indices.data_ptr() if uv_indices is not None else 0),
                int(vertices.shape[0]),
                int(indices.shape[0]),
                max_smooth_angle)
            print('Rebuilt topology, original vertices size: {}, new vertices size: {}'.format(\
                int(vertices.shape[0]), new_num_vertices))
            vertices.resize_(new_num_vertices, 3)
            if uvs is not None:
                uvs.resize_(new_num_vertices, 2)
            if normals is not None:
                normals.resize_(new_num_vertices, 3)

        lgt = None
        if light_intensity is not None:
            lgt = pyredner.AreaLight(shape_id, light_intensity)

        vertices = vertices.to(device)
        indices = indices.to(device)
        if uvs is not None:
            uvs = uvs.to(device)
        if normals is not None:
            normals = normals.to(device)
        if uv_indices is not None:
            uv_indices = uv_indices.to(device)
        if normal_indices is not None:
            normal_indices = normal_indices.to(device)
        return pyredner.Shape(vertices,
                              indices,
                              uvs=uvs,
                              normals=normals,
                              uv_indices=uv_indices,
                              normal_indices=normal_indices,
                              material_id=mat_id), lgt
    elif node.attrib['type'] == 'rectangle':
        indices = torch.tensor([[0, 2, 1], [1, 2, 3]], dtype=torch.int32)
        vertices = torch.tensor([[-1.0, -1.0, 0.0], [-1.0, 1.0, 0.0],
                                 [1.0, -1.0, 0.0], [1.0, 1.0, 0.0]])
        uvs = None
        normals = None
        to_world = torch.eye(4)
        mat_id = -1
        light_intensity = None
        for child in node:
            if 'name' in child.attrib:
                if child.attrib['name'] == 'toWorld':
                    to_world = parse_transform(child)
            if child.tag == 'ref':
                mat_id = material_dict[child.attrib['id']]
            elif child.tag == 'emitter':
                for grandchild in child:
                    if grandchild.attrib['name'] == 'radiance':
                        light_intensity = parse_vector(
                            grandchild.attrib['value'])
                        if light_intensity.shape[0] == 1:
                            light_intensity = torch.tensor(\
                                         [light_intensity[0],
                                          light_intensity[0],
                                          light_intensity[0]])
        # Transform the vertices
        # Transform the vertices and normals
        vertices = torch.cat((vertices, torch.ones(vertices.shape[0], 1)),
                             dim=1)
        vertices = vertices @ torch.transpose(to_world, 0, 1)
        vertices = vertices / vertices[:, 3:4]
        vertices = vertices[:, 0:3].contiguous()
        if normals is not None:
            normals = normals @ (torch.inverse(torch.transpose(to_world, 0,
                                                               1))[:3, :3])
            normals = normals.contiguous()
        assert (vertices is not None)
        assert (indices is not None)
        lgt = None
        if light_intensity is not None:
            lgt = pyredner.AreaLight(shape_id, light_intensity)

        vertices = vertices.to(device)
        indices = indices.to(device)
        if uvs is not None:
            uvs = uvs.to(device)
        if normals is not None:
            normals = normals.to(device)
        return pyredner.Shape(vertices,
                              indices,
                              uvs=uvs,
                              normals=normals,
                              material_id=mat_id), lgt
    # Add instance support
    # TODO (simply transform & create a new shape now)
    elif node.attrib['type'] == 'instance':
        shape = None
        for child in node:
            if 'name' in child.attrib:
                if child.attrib['name'] == 'toWorld':
                    to_world = parse_transform(child)
            if child.tag == 'ref':
                shape = shape_group_dict[child.attrib['id']]
        # transform instance
        vertices = shape.vertices
        normals = shape.normals
        vector1 = torch.ones(vertices.shape[0], 1, device=vertices.device)
        to_world = to_world.to(vertices.device)
        vertices = torch.cat((vertices, vector1), dim=1)
        vertices = vertices @ torch.transpose(to_world, 0, 1)
        vertices = vertices / vertices[:, 3:4]
        vertices = vertices[:, 0:3].contiguous()
        if normals is not None:
            normals = normals @ (torch.inverse(torch.transpose(to_world, 0,
                                                               1))[:3, :3])
            normals = normals.contiguous()
        # assert(vertices is not None)
        # assert(indices is not None)
        # lgt = None
        # if light_intensity is not None:
        #     lgt = pyredner.AreaLight(shape_id, light_intensity)

        return pyredner.Shape(vertices,
                              shape.indices,
                              uvs=shape.uvs,
                              normals=normals,
                              material_ids=shape.material_id), None
    else:
        print('Shape type {} is not supported!'.format(node.attrib['type']))
        assert (False)
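A hedged sketch of driving this parser with a Mitsuba-style XML node (the rectangle branch needs no external assets):

import torch
import xml.etree.ElementTree as ET

node = ET.fromstring('<shape type="rectangle"></shape>')
shape, light = parse_shape(node, material_dict={}, shape_id=0,
                           device=torch.device('cpu'))
print(shape.vertices.shape)  # torch.Size([4, 3]); light is None since there is no emitter child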
Example #12
import pyredner
import redner
import torch
import math

# Use GPU if available
pyredner.set_use_gpu(torch.cuda.is_available())

# Load from the teapot Wavefront object file
material_map, mesh_list, light_map = pyredner.load_obj(
    '../tutorials/teapot.obj')
# Compute shading normal
for _, mesh in mesh_list:
    mesh.normals = pyredner.compute_vertex_normal(mesh.vertices, mesh.indices)

# Setup camera
cam = pyredner.Camera(
    position=torch.tensor([0.0, 30.0, 200.0]),
    look_at=torch.tensor([0.0, 30.0, 0.0]),
    up=torch.tensor([0.0, 1.0, 0.0]),
    fov=torch.tensor([45.0]),  # in degree
    clip_near=1e-2,  # needs to > 0
    resolution=(256, 256),
    fisheye=False)

# Get a list of shapes
shapes = []
for mtl_name, mesh in mesh_list:
    shapes.append(pyredner.Shape(\
        vertices = mesh.vertices,
        indices = mesh.indices,
Example #13
def generate_poses(model_path, output_path):
    # Init logger
    log = dict()

    # Load renderer configs
    material_map, mesh_list, light_map = pyredner.load_obj(model_path)
    material_id_map = {}
    materials = []
    count = 0
    for key, value in material_map.items():
        material_id_map[key] = count
        count += 1
        materials.append(value)

    shapes = []
    for mtl_name, mesh in mesh_list:
        shapes.append(
            pyredner.Shape(vertices=mesh.vertices,
                           indices=mesh.indices,
                           uvs=mesh.uvs,
                           normals=mesh.normals,
                           material_id=material_id_map[mtl_name]))

    envmap = pyredner.EnvironmentMap(
        torch.tensor(imread('./datasets/envmaps/one/sunsky.exr'),
                     dtype=torch.float32,
                     device=pyredner.get_device()))

    # Object pose parameters
    euler_angles = [0.0, 0.0, 0.0]
    translation = [0.0, -0.75, 0.0]
    up = [0.0, 1.0, 0.0]
    distance = 7.0

    # Setup base scene to modify during iterations
    cam_params = camera_parameters(euler_angles, translation, distance, up)

    camera = pyredner.Camera(position=torch.tensor(cam_params[0],
                                                   dtype=torch.float32),
                             look_at=torch.tensor(cam_params[1],
                                                  dtype=torch.float32),
                             up=torch.tensor(cam_params[2],
                                             dtype=torch.float32),
                             fov=torch.tensor([45.0]),
                             clip_near=1e-2,
                             resolution=(opt.resolution, opt.resolution),
                             fisheye=False)

    scene = pyredner.Scene(camera,
                           shapes,
                           materials,
                           area_lights=[],
                           envmap=envmap)

    # Generate alphamasks
    for i in range(opt.num_elev):
        # Set elevation angle
        elev_pc = i / opt.num_elev
        elevation = opt.max_elev * elev_pc + opt.min_elev * (1 - elev_pc)
        euler_angles[1] = elevation

        # Calculate number of azimuthal iterations
        num_azimuth = int(opt.num_elev * math.sin(math.pi / 2 - elevation))
        for j in range(num_azimuth):
            # Set azimuthal angle
            azimuth_pc = j / num_azimuth
            azimuth = math.pi * 2 * azimuth_pc

            euler_angles[0] = azimuth

            print('Params: Elevation - {:.4f}\tAzimuth - {:.4f}'\
                    .format(elevation, azimuth))

            # Set Camera params
            cam_params = camera_parameters(euler_angles, translation, distance,
                                           up)

            # Update scene params
            scene.camera = pyredner.Camera(
                position=torch.tensor(cam_params[0], dtype=torch.float32),
                look_at=torch.tensor(cam_params[1], dtype=torch.float32),
                up=torch.tensor(cam_params[2], dtype=torch.float32),
                fov=torch.tensor([45.0]),
                clip_near=1e-2,
                resolution=(opt.resolution, opt.resolution),
                fisheye=False)
            args = pyredner.RenderFunction.serialize_scene(
                scene=scene,
                num_samples=1,
                max_bounces=1,
                channels=[redner.channels.alpha])

            out = pyredner.RenderFunction.apply(1, *args)

            fn = gen_hash(6)
            imwrite(out, os.path.join(output_path, '{}.png'.format(fn)))
            log[fn] = {'elevation': elevation, 'azimuth': azimuth}
    return log
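Hedged usage sketch (generate_poses additionally relies on the module-level opt namespace and the camera_parameters/gen_hash helpers from the original script; paths are illustrative):

log = generate_poses('./datasets/models/chair.obj', './out/alphamasks')
print(len(log), 'alpha masks rendered')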
Example #14
import pyredner
import torch

# Test the sample pixel center flag

pyredner.set_use_gpu(torch.cuda.is_available())
objects = pyredner.load_obj('scenes/teapot.obj', return_objects=True)
camera = pyredner.automatic_camera_placement(objects, resolution=(128, 128))
scene = pyredner.Scene(camera=camera, objects=objects)
img = pyredner.render_albedo(scene, sample_pixel_center=True)
pyredner.imwrite(img.cpu(), 'results/test_sample_pixel_center/img_no_aa.exr')
img = pyredner.render_albedo(scene, sample_pixel_center=False)
pyredner.imwrite(img.cpu(), 'results/test_sample_pixel_center/img_with_aa.exr')
Example #15
normal_scheme = args.normal_scheme
smooth_scheme = args.smooth_scheme
smooth_lmd = args.smooth_lmd
output_path = args.output_path
num_iters_1 = args.num_iters_1
print(vars(args))

os.chdir('..')
os.system("rm -rf " + output_path)

pyredner.set_print_timing(False)

shape_mean, shape_basis, triangle_list, color_mean, color_basis = np.load("3dmm.npy", allow_pickle=True)
#indices = triangle_list.permute(1, 0).contiguous()
#vertices = shape_mean.view(-1, 3)
obj = pyredner.load_obj("new_init.obj", return_objects=True)[0]
indices = obj.indices.detach()
vertices = obj.vertices.detach()

if 0:
    vertices, indices, uvs, normals = pyredner.generate_sphere(theta_steps=256, phi_steps=512)
    vertices = vertices * 120
vertices.requires_grad = True

target_data_path = "generated/dataset2/"
cam_poses, cam_look_at, dir_light_intensity, dir_light_directions = np.load(target_data_path + "env_data.npy",
                                                                            allow_pickle=True)
#cam_poses = cam_poses[:1]

def model(cam_pos, cam_look_at, vertices, ambient_color, dir_light_intensity, dir_light_direction, normals):
    # normals = pyredner.compute_vertex_normal(vertices, indices, normal_scheme)
Example #16
    def __init__(self,
                 framework,
                 filename,
                 dims,
                 label_names,
                 normalize_params,
                 background,
                 pose,
                 num_classes,
                 attack_type="benign"):

        self.NUM_CLASSES = num_classes
        self.framework = framework.to(pyredner.get_device())
        self.image_dims = dims
        self.label_names = label_names
        self.framework_params = normalize_params

        # self.objects = pyredner.load_obj(filename, return_objects=True)
        self.material_map, mesh_list, self.light_map = pyredner.load_obj(
            filename)
        for _, mesh in mesh_list:
            mesh.normals = pyredner.compute_vertex_normal(
                mesh.vertices, mesh.indices)

        vertices = []

        self.modifiers = []
        self.input_adv_list = []
        self.input_orig_list = []
        self.targeted = False
        self.clamp_fn = "tanh"

        self.attack_type = attack_type

        if attack_type == "CW":
            for _, mesh in mesh_list:
                vertices.append(mesh.vertices)
                modifier = torch.zeros(mesh.vertices.size(),
                                       requires_grad=True,
                                       device=pyredner.get_device())
                self.modifiers.append(modifier)
                self.input_orig_list.append(
                    tanh_rescale(torch_arctanh(mesh.vertices)))
                mesh.vertices = tanh_rescale(
                    torch_arctanh(mesh.vertices) + modifier)

                self.input_adv_list.append(mesh.vertices)
                mesh.vertices.retain_grad()
        else:
            for _, mesh in mesh_list:
                vertices.append(mesh.vertices)
                mesh.vertices = Variable(mesh.vertices, requires_grad=True)
                mesh.vertices.retain_grad()

        material_id_map = {}
        self.materials = []
        count = 0
        for key, value in self.material_map.items():
            material_id_map[key] = count
            count += 1
            self.materials.append(value)

        self.shapes = []
        self.cw_shapes = []
        for mtl_name, mesh in mesh_list:
            # assert(mesh.normal_indices is None)
            self.shapes.append(
                pyredner.Shape(vertices=mesh.vertices,
                               indices=mesh.indices,
                               material_id=material_id_map[mtl_name],
                               uvs=mesh.uvs,
                               normals=mesh.normals,
                               uv_indices=mesh.uv_indices))

        self.camera = pyredner.automatic_camera_placement(self.shapes,
                                                          resolution=(512,
                                                                      512))
        # Compute the center of the teapot
        self.center = torch.mean(torch.cat(vertices), 0)
        self.translation = torch.tensor([0., 0., 0.],
                                        device=pyredner.get_device(),
                                        requires_grad=True)

        self.angle_input_adv_list = []
        self.angle_input_orig_list = []
        self.pose = pose
        if attack_type == "CW":
            self.euler_angles_modifier = torch.tensor(
                [0., 0., 0.], device=pyredner.get_device(), requires_grad=True)
            if pose == 'forward':
                self.euler_angles = tanh_rescale(
                    torch_arctanh(
                        torch.tensor([0., 0., 0.],
                                     device=pyredner.get_device())) +
                    self.euler_angles_modifier)
                self.angle_input_orig_list.append(
                    tanh_rescale(
                        torch_arctanh(
                            torch.tensor([0., 0., 0.],
                                         device=pyredner.get_device()))))
            elif pose == 'top':
                self.euler_angles = tanh_rescale(
                    torch_arctanh(
                        torch.tensor([0.35, 0., 0.],
                                     device=pyredner.get_device())) +
                    self.euler_angles_modifier)
                self.angle_input_orig_list.append(
                    tanh_rescale(
                        torch_arctanh(
                            torch.tensor([0., 0., 0.],
                                         device=pyredner.get_device()))))
            elif pose == 'left':
                self.euler_angles = tanh_rescale(
                    torch_arctanh(
                        torch.tensor([0., 0.50, 0.],
                                     device=pyredner.get_device())) +
                    self.euler_angles_modifier)
                self.angle_input_orig_list.append(
                    tanh_rescale(
                        torch_arctanh(
                            torch.tensor([0., 0., 0.],
                                         device=pyredner.get_device()))))
            elif pose == 'right':
                self.euler_angles = tanh_rescale(
                    torch_arctanh(
                        torch.tensor([0., -0.50, 0.],
                                     device=pyredner.get_device())) +
                    self.euler_angles_modifier)
                self.angle_input_orig_list.append(
                    tanh_rescale(
                        torch_arctanh(
                            torch.tensor([0., 0., 0.],
                                         device=pyredner.get_device()))))

            self.angle_input_adv_list.append(self.euler_angles)
        else:
            if pose == 'forward':
                self.euler_angles = torch.tensor([0., 0., 0.],
                                                 device=pyredner.get_device(),
                                                 requires_grad=True)
            elif pose == 'top':
                self.euler_angles = torch.tensor([0.35, 0., 0.],
                                                 device=pyredner.get_device(),
                                                 requires_grad=True)
            elif pose == 'left':
                self.euler_angles = torch.tensor([0., 0.50, 0.],
                                                 device=pyredner.get_device(),
                                                 requires_grad=True)
            elif pose == 'right':
                self.euler_angles = torch.tensor([0., -0.50, 0.],
                                                 device=pyredner.get_device(),
                                                 requires_grad=True)

        self.light_init_vals = torch.tensor([20000.0, 30000.0, 20000.0],
                                            device=pyredner.get_device())
        if attack_type == "CW":
            self.light_input_orig_list = []
            self.light_input_adv_list = []
            delta = 1e-6  # constant for stability
            self.light_modifier = torch.tensor([0., 0., 0.],
                                               device=pyredner.get_device(),
                                               requires_grad=True)
            # redner can't accept negative light intensities, so we have to be a bit creative and work with lighting norms instead and then rescale them afterwards...
            tanh_factor = tanh_rescale(
                torch_arctanh(self.light_init_vals /
                              torch.norm(self.light_init_vals)) +
                self.light_modifier / torch.norm(self.light_modifier + delta))
            self.light_intensity = torch.norm(
                self.light_init_vals) * torch.clamp(tanh_factor, 0, 1)

            self.light_input_orig_list.append(self.light_init_vals /
                                              torch.norm(self.light_init_vals))
            self.light_input_adv_list.append(self.light_intensity)
            self.light = pyredner.PointLight(
                position=(self.camera.position + torch.tensor(
                    (0.0, 0.0, 100.0))).to(pyredner.get_device()),
                intensity=self.light_intensity)
        else:
            self.light = pyredner.PointLight(
                position=(self.camera.position + torch.tensor(
                    (0.0, 0.0, 100.0))).to(pyredner.get_device()),
                intensity=Variable(torch.tensor((20000.0, 30000.0, 20000.0),
                                                device=pyredner.get_device()),
                                   requires_grad=True))

        background = pyredner.imread(background)
        self.background = background.to(pyredner.get_device())
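A hedged construction sketch for this __init__ (the AdversarialRenderer class name and all argument values are assumptions; normalize_params is only stored, so its exact format is not pinned down by the code shown):

import torchvision

attack = AdversarialRenderer(
    framework=torchvision.models.resnet50(),
    filename='teapot.obj',
    dims=(512, 512),
    label_names=[str(i) for i in range(1000)],
    normalize_params={'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]},
    background='background.png',
    pose='forward',
    num_classes=1000,
    attack_type='CW')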
Example #17
        torch.sub(torch.add(pos, tangent), bitangent)
    ]
    for i in range(4):
        lightPos[i] = lightPos[i] + torch.tensor([10.0, 0.0, -3.0])
    lightPos = torch.cat(lightPos, 0)
    print(lightPos)

    lights.append(pyredner.Shape(\
        vertices = lightPos.to(pyredner.get_device()),
        indices = torch.tensor([[0, 1,2],[1, 3, 2]],
            dtype = torch.int32, device = pyredner.get_device()),
        uvs = None,
        normals = None,
        material_id = 0))

material_map, mesh_list, light_map = pyredner.load_obj(
    'resources/shadow_cube.obj')
material_map2, mesh_list2, light_map2 = pyredner.load_obj(
    'resources/monkey.obj')  #<-- target

material_map1, mesh_list1, light_map1 = pyredner.load_obj(
    'resources/sphere_subdiv.obj')

for _, mesh in mesh_list:
    mesh.normals = pyredner.compute_vertex_normal(mesh.vertices, mesh.indices)

# Setup camera
cam = pyredner.Camera(
    position=torch.tensor([0.0, 0.0, -2.5]),
    look_at=torch.tensor([0.0, 0.0, 0.0]),
    up=torch.tensor([0.0, 1.0, 0.0]),
    camera_type=redner.CameraType.orthographic,
Example #18
        out_channels=channels,
        groups=channels,
        kernel_size=kernel_size,
        bias=False,
        stride=2,
        padding=[int((kernel_size - 1) / 2),
                 int((kernel_size - 1) / 2)])  # with torch.no_grad():

    gaussian_filter.weight.data = gaussian_kernel
    gaussian_filter.weight.requires_grad = True
    return gaussian_filter


gaussian_func = getGaussianFilter()

target_objects = pyredner.load_obj('resources/bunny-uv.obj',
                                   return_objects=True)
print(target_objects[0].vertices.shape)

diffuse = pyredner.imread('resources/wood_diffuse.jpg')
specular = pyredner.imread('resources/wood_specular.jpg') / 100.0
#normal_map = pyredner.imread('resources/GroundForest003_NRM_3K.jpg', gamma=1.0)
roughness = (1.0 - specular) / 10.0
normal_map = None
target_objects[0].material = pyredner.Material(diffuse_reflectance=diffuse,
                                               specular_reflectance=specular,
                                               roughness=roughness,
                                               normal_map=normal_map)

resolution = (256, 256)
num_cameras = 32
radius = 1.0
Example #19
    return new_v + vertices * (1 - lmd)

def dot(v1, v2):
    return torch.sum(v1 * v2, dim=1)

def squared_length(v):
    return torch.sum(v * v, dim=1)

def length(v):
    return torch.sqrt(squared_length(v))

def safe_asin(v):
    # Hack: asin(1)' is infinite, so we want to clamp the contribution
    return torch.asin(v.clamp(0, 1 - 1e-6))

objs = pyredner.load_obj('smoothed.obj', return_objects=True)
obj = objs[0]
vertices = obj.vertices
indices = obj.indices

normals = torch.zeros(vertices.shape, dtype=torch.float32, device=vertices.device)
v = [vertices[indices[:, 0].long(), :],  # all the 0th vertices of triangles
     vertices[indices[:, 1].long(), :],  # all the 1st vertices of triangles
     vertices[indices[:, 2].long(), :]]
for i in range(3):  # 0th, 1st and 2nd
    v0 = v[i]
    v1 = v[(i + 1) % 3]
    v2 = v[(i + 2) % 3]
    e1 = v1 - v0
    e2 = v2 - v0
    e1_len = length(e1)  # lengths of the first edges
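The truncated loop above is accumulating angle/area-weighted face contributions per vertex; a hedged one-line equivalent is the library helper used throughout these examples:

normals = pyredner.compute_vertex_normal(vertices, indices)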
Example #20
def parse_shape(node, material_dict, shape_id, shape_group_dict=None):
    if node.attrib['type'] == 'obj' or node.attrib['type'] == 'serialized':
        to_world = torch.eye(4)
        serialized_shape_id = 0
        mat_id = -1
        light_intensity = None
        filename = ''
        for child in node:
            if 'name' in child.attrib:
                if child.attrib['name'] == 'filename':
                    filename = child.attrib['value']
                elif child.attrib['name'] == 'toWorld':
                    to_world = parse_transform(child)
                elif child.attrib['name'] == 'shapeIndex':
                    serialized_shape_id = int(child.attrib['value'])
            if child.tag == 'ref':
                mat_id = material_dict[child.attrib['id']]
            elif child.tag == 'emitter':
                for grandchild in child:
                    if grandchild.attrib['name'] == 'radiance':
                        light_intensity = parse_vector(
                            grandchild.attrib['value'])
                        if light_intensity.shape[0] == 1:
                            light_intensity = torch.tensor(\
                                         [light_intensity[0],
                                          light_intensity[0],
                                          light_intensity[0]])

        if node.attrib['type'] == 'obj':
            _, mesh_list, _ = pyredner.load_obj(filename)
            vertices = mesh_list[0][1].vertices.cpu()
            indices = mesh_list[0][1].indices.cpu()
            uvs = mesh_list[0][1].uvs
            normals = mesh_list[0][1].normals
            if uvs is not None:
                uvs = uvs.cpu()
            if normals is not None:
                normals = normals.cpu()
        else:
            assert (node.attrib['type'] == 'serialized')
            mitsuba_tri_mesh = redner.load_serialized(filename,
                                                      serialized_shape_id)
            vertices = torch.from_numpy(mitsuba_tri_mesh.vertices)
            indices = torch.from_numpy(mitsuba_tri_mesh.indices)
            uvs = torch.from_numpy(mitsuba_tri_mesh.uvs)
            normals = torch.from_numpy(mitsuba_tri_mesh.normals)
            if uvs.shape[0] == 0:
                uvs = None
            if normals.shape[0] == 0:
                normals = None

        # Transform the vertices and normals
        vertices = torch.cat((vertices, torch.ones(vertices.shape[0], 1)),
                             dim=1)
        vertices = vertices @ torch.transpose(to_world, 0, 1)
        vertices = vertices / vertices[:, 3:4]
        vertices = vertices[:, 0:3].contiguous()
        if normals is not None:
            normals = normals @ (torch.inverse(torch.transpose(to_world, 0,
                                                               1))[:3, :3])
            normals = normals.contiguous()
        assert (vertices is not None)
        assert (indices is not None)
        lgt = None
        if light_intensity is not None:
            lgt = pyredner.AreaLight(shape_id, light_intensity)

        if pyredner.get_use_gpu():
            # Copy to GPU
            vertices = vertices.cuda()
            indices = indices.cuda()
            if uvs is not None:
                uvs = uvs.cuda()
            if normals is not None:
                normals = normals.cuda()
        return pyredner.Shape(vertices, indices, uvs, normals, mat_id), lgt
    elif node.attrib['type'] == 'rectangle':
        indices = torch.tensor([[0, 2, 1], [1, 2, 3]], dtype=torch.int32)
        vertices = torch.tensor([[-1.0, -1.0, 0.0], [-1.0, 1.0, 0.0],
                                 [1.0, -1.0, 0.0], [1.0, 1.0, 0.0]])
        uvs = None
        normals = None
        to_world = torch.eye(4)
        mat_id = -1
        light_intensity = None
        for child in node:
            if 'name' in child.attrib:
                if child.attrib['name'] == 'toWorld':
                    to_world = parse_transform(child)
            if child.tag == 'ref':
                mat_id = material_dict[child.attrib['id']]
            elif child.tag == 'emitter':
                for grandchild in child:
                    if grandchild.attrib['name'] == 'radiance':
                        light_intensity = parse_vector(
                            grandchild.attrib['value'])
                        if light_intensity.shape[0] == 1:
                            light_intensity = torch.tensor(\
                                         [light_intensity[0],
                                          light_intensity[0],
                                          light_intensity[0]])
        # Transform the vertices
        # Transform the vertices and normals
        vertices = torch.cat((vertices, torch.ones(vertices.shape[0], 1)),
                             dim=1)
        vertices = vertices @ torch.transpose(to_world, 0, 1)
        vertices = vertices / vertices[:, 3:4]
        vertices = vertices[:, 0:3].contiguous()
        if normals is not None:
            normals = normals @ (torch.inverse(torch.transpose(to_world, 0,
                                                               1))[:3, :3])
            normals = normals.contiguous()
        assert (vertices is not None)
        assert (indices is not None)
        lgt = None
        if light_intensity is not None:
            lgt = pyredner.AreaLight(shape_id, light_intensity)

        if pyredner.get_use_gpu():
            # Copy to GPU
            vertices = vertices.cuda()
            indices = indices.cuda()
            if uvs is not None:
                uvs = uvs.cuda()
            if normals is not None:
                normals = normals.cuda()
        return pyredner.Shape(vertices, indices, uvs, normals, mat_id), lgt
    # Add instance support
    # TODO (simply transform & create a new shape now)
    elif node.attrib['type'] == 'instance':
        shape = None
        for child in node:
            if 'name' in child.attrib:
                if child.attrib['name'] == 'toWorld':
                    to_world = parse_transform(child)
                    if pyredner.get_use_gpu():
                        to_world = to_world.cuda()
            if child.tag == 'ref':
                shape = shape_group_dict[child.attrib['id']]
        # transform instance
        vertices = shape.vertices
        normals = shape.normals
        vector1 = torch.ones(vertices.shape[0], 1)
        vertices = torch.cat(
            (vertices, vector1.cuda() if pyredner.get_use_gpu() else vector1),
            dim=1)
        vertices = vertices @ torch.transpose(to_world, 0, 1)
        vertices = vertices / vertices[:, 3:4]
        vertices = vertices[:, 0:3].contiguous()
        if normals is not None:
            normals = normals @ (torch.inverse(torch.transpose(to_world, 0,
                                                               1))[:3, :3])
            normals = normals.contiguous()
        # assert(vertices is not None)
        # assert(indices is not None)
        # lgt = None
        # if light_intensity is not None:
        #     lgt = pyredner.AreaLight(shape_id, light_intensity)

        return pyredner.Shape(vertices, shape.indices, shape.uvs, normals,
                              shape.material_id), None
    else:
        print('Shape type {} is not supported!'.format(node.attrib['type']))
        assert (False)
Example #21
# I think this is known and not optimized

cam = pyredner.Camera(
    position=torch.tensor([0.0, 0.0, -5.0]),
    look_at=torch.tensor([0.0, 0.0, 0.0]),
    up=torch.tensor([0.0, 1.0, 0.0]),
    fov=torch.tensor([45.0]),  # in degree
    clip_near=1e-2,  # needs to > 0
    resolution=(256, 256),
    fisheye=False)

folder_name = 'Uranus_lr4_Color'

#%%
material_map, mesh_list, light_map = pyredner.load_obj('sphere.obj')
# The mesh we loaded is relatively low-poly and doesn't have vertex normals.
# Fortunately we can compute the vertex normal from the neighbor vertices.
# We can use pyredner.compute_vertex_normal for this task:
# (Try commenting out the following two lines to see the differences in target images!)
for _, mesh in mesh_list:
    mesh.normals = pyredner.compute_vertex_normal(mesh.vertices / 10,
                                                  mesh.indices)
    print(_)  # None

diffuse_reflectance = torch.tensor([1.0, 1.0, 0.0],
                                   device=pyredner.get_device())

mat_grey = pyredner.Material(diffuse_reflectance)

# The material list of the scene #
Example #22
# I think this is known and not optimized

cam = pyredner.Camera(position = torch.tensor([0.0, 0.0, -5.0]),
                      look_at = torch.tensor([0.0, 0.0, 0.0]),
                      up = torch.tensor([0.0, 1.0, 0.0]),
                      fov = torch.tensor([45.0]), # in degree
                      clip_near = 1e-2, # needs to > 0
                      resolution = (256, 256),
                      fisheye = False)


folder_name = 'Cube_1'


material_map, mesh_list, light_map = pyredner.load_obj('ReferenceOutputMeshes/cylinderHighNVO.obj')
# The mesh we loaded is relatively low-poly and doesn't have vertex normals.
# Fortunately we can compute the vertex normal from the neighbor vertices.
# We can use pyredner.compute_vertex_normal for this task:
# (Try commenting out the following two lines to see the differences in target images!)
for _, mesh in mesh_list:
    mesh.normals = pyredner.compute_vertex_normal(mesh.vertices, mesh.indices)
    print (_) # None
    
    
diffuse_reflectance = torch.tensor([0.0, 0.4, 0.6], device=pyredner.get_device())

mat_grey = pyredner.Material(diffuse_reflectance)

# The material list of the scene # 
materials = [mat_grey]
Example #23
os.system("rm -rf " + output_path)

pyredner.set_print_timing(False)
'''
tgt_vertices = torch.tensor([[-5, -1, -3],
                         [0, -5, 2],
                         [0, 4, 3],
                         [4, 0, -4]], dtype=torch.float32, device=pyredner.get_device(), requires_grad=False)
vertices = torch.tensor([[-2.4, -2.6, 0.2],
                         [3.1, -1.8, 0],
                         [0, 2.8, 1.1],
                         [4.1, 6.8, 2.1]], dtype=torch.float32, device=pyredner.get_device(), requires_grad=True)
indices = torch.tensor([[0, 1, 2],
                        [2, 1, 3]], dtype=torch.int32, device=pyredner.get_device())
'''
obj = pyredner.load_obj('cube.obj', return_objects=True)[0]
vertices = obj.vertices * 8. - 4.
vertices.requires_grad = True
indices = obj.indices
tgt_vertices = vertices.detach() * 1.1 + 0.5 * torch.randn(vertices.shape, device=pyredner.get_device())
print(tgt_vertices)
cam_poses = torch.tensor([[0, 0, 20], [-12, 0, 16], [12, 0, 16]], dtype=torch.float32, device=pyredner.get_device(), requires_grad=False)
cam_look_at = torch.tensor([0, 0, 0], dtype=torch.float32, device=pyredner.get_device(), requires_grad=False)
dir_light_direction = torch.tensor([-0.0, -0.0, -1.0], device=pyredner.get_device(), requires_grad=False)
dir_light_intensity = torch.ones(3, device=pyredner.get_device(), requires_grad=False)
normals = pyredner.compute_vertex_normal(tgt_vertices, indices, normal_scheme)
print("finish loading")

def model(cam_pos, cam_look_at, vertices, ambient_color, dir_light_intensity, dir_light_direction, normals):
   #normals = pyredner.compute_vertex_normal(vertices, indices, normal_scheme)
    m = pyredner.Material(diffuse_reflectance=torch.tensor([0.5, 0.5, 0.5]))
Example #24
    gaussian_filter = torch.nn.Conv2d(in_channels=channels, out_channels=channels, groups=channels, kernel_size=kernel_size, bias=False, stride=2, padding=[int((kernel_size - 1)/2), int((kernel_size - 1)/2)])    # with torch.no_grad():

    gaussian_filter.weight.data = gaussian_kernel
    gaussian_filter.weight.requires_grad = True
    return gaussian_filter
gaussian_func = getGaussianFilter()

# Parse cmdline
path = sys.argv[1]
path = path + "/"
target_obj_file = sys.argv[2]
init_obj_file = sys.argv[3]
face_target = int(sys.argv[4])

# Load Target model
target_objects = pyredner.load_obj(target_obj_file, return_objects=True)
print(target_objects)

normal_map = None
diffuse = torch.tensor([0.7, 0.0, 0.0])
specular_target = torch.tensor([0.0, 0.0, 0.0])
roughness = torch.tensor([0.6])

diffuse = torch.tensor([0.0, 0.0, 1.0])
target_objects[0].material = pyredner.Material(diffuse_reflectance=diffuse, specular_reflectance=specular_target, roughness=roughness, normal_map=normal_map, two_sided=True)

diffuse = torch.tensor([1.0, 0.0, 0.0])
target_objects[1].material = pyredner.Material(diffuse_reflectance=diffuse, specular_reflectance=specular_target, roughness=roughness, normal_map=normal_map, two_sided=True)

diffuse = torch.tensor([0.7,0.7,0.7])
target_objects[2].material = pyredner.Material(diffuse_reflectance=diffuse, specular_reflectance=specular_target, roughness=roughness, normal_map=normal_map, two_sided=True)
Example #25
import pyredner
import redner
import numpy as np
import torch
import skimage.transform

# Optimize depth and normal of a teapot

# Use GPU if available
pyredner.set_use_gpu(torch.cuda.is_available())

# Set up the pyredner scene for rendering:
material_map, mesh_list, light_map = pyredner.load_obj('scenes/teapot.obj')
for _, mesh in mesh_list:
    mesh.normals = pyredner.compute_vertex_normal(mesh.vertices, mesh.indices)

# Setup camera
cam = pyredner.Camera(
    position=torch.tensor([0.0, 30.0, 200.0]),
    look_at=torch.tensor([0.0, 30.0, 0.0]),
    up=torch.tensor([0.0, 1.0, 0.0]),
    fov=torch.tensor([45.0]),  # in degree
    clip_near=1e-2,  # needs to > 0
    resolution=(256, 256),
    fisheye=False)

# Setup materials
material_id_map = {}
materials = []
count = 0
for key, value in material_map.items():
Example #26
# For loop progress bar.
from tqdm import tqdm

#from chamferdist.chamferdist import ChamferDistance

# GROUND TRUTH.
meshfile1 = sys.argv[1]
# RECONSTRUCTED.
meshfile2 = sys.argv[2]

#torch.cuda.init()

########################## LOAD TARGET #########################

print("Loading target: " + meshfile1)
_, mesh_list1, _ = pyredner.load_obj(meshfile1)

print("Loading target: " + meshfile2)
_, mesh_list2, _ = pyredner.load_obj(meshfile2)

_, mesh1 = mesh_list1[0]
_, mesh2 = mesh_list2[0]

print("Putting vertices on GPU...")
verts1 = mesh1.vertices.cuda().contiguous()
verts2 = mesh2.vertices.cuda().contiguous()
print("Done")

print("Computing chamfer distance from ground truth -> reconstructed")
v12_mins = torch.zeros(verts1.shape[0])
for i in tqdm(range(verts1.shape[0])):
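    # The listing truncates here; a hedged completion of the loop body:
    # nearest-neighbour (chamfer) distance from each ground-truth vertex.
    v12_mins[i] = torch.min(torch.norm(verts2 - verts1[i], dim=1))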
Example #27
                 int((kernel_size - 1) / 2)])  # with torch.no_grad():

    gaussian_filter.weight.data = gaussian_kernel
    gaussian_filter.weight.requires_grad = True
    return gaussian_filter


# Load everything in from command line.
output_path = sys.argv[1]
target_path = sys.argv[2]
diffuse_texels_path = sys.argv[3]
specular_texels_path = sys.argv[4]

gaussian_func = getGaussianFilter()

target_objects = pyredner.load_obj(target_path, return_objects=True)
print(target_objects[0].vertices.shape)

# Load in the mesh colors info.
mesh_colors_resolution = 1
diffuse_texels = torch.tensor(torch.load(diffuse_texels_path),
                              device=pyredner.get_device())
specular_texels = torch.tensor(torch.load(specular_texels_path),
                               device=pyredner.get_device())
target_diffuse = pyredner.Texture(
    diffuse_texels, mesh_colors_resolution=mesh_colors_resolution)
target_specular = pyredner.Texture(
    specular_texels, mesh_colors_resolution=mesh_colors_resolution)
target_roughness = torch.tensor([0.6])  # For now, roughness is fixed.
target_objects[0].material = pyredner.Material(
    diffuse_reflectance=target_diffuse,
Example #28
# Use GPU if available
pyredner.set_use_gpu(torch.cuda.is_available())

# Load from the teapot Wavefront object file.
# load_obj function returns three lists/dicts
# material_map is a dict containing all the materials used in the obj file,
# where the key is the name of material, and the value is a pyredner.Material
#
# mesh_list is a list containing all the meshes in the obj file, grouped by use_mtl calls.
# Each element in the list is a tuple with length 2, the first is the name of material,
# the second is a pyredner.TriangleMesh with mesh information.
#
# light_map is a Python dict, where the keys are the names of materials with nonzero Ke,
# and the values are the Ke values.
material_map, mesh_list, light_map = pyredner.load_obj('teapot.obj')
# The teapot we loaded is relatively low-poly and doesn't have vertex normal.
# Fortunately we can compute the vertex normal from the neighbor vertices.
# We can use pyredner.compute_vertex_normal for this task:
# (Try commenting out the following two lines to see the differences in target images!)
for _, mesh in mesh_list:
    mesh.normals = pyredner.compute_vertex_normal(mesh.vertices, mesh.indices)

# Setup camera
cam = pyredner.Camera(position = torch.tensor([0.0, 30.0, 200.0]),
                      look_at = torch.tensor([0.0, 30.0, 0.0]),
                      up = torch.tensor([0.0, 1.0, 0.0]),
                      fov = torch.tensor([45.0]), # in degree
                      clip_near = 1e-2, # needs to > 0
                      resolution = (256, 256),
                      fisheye = False)
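A hedged sketch of inspecting the three return values described in the comments above:

import pyredner

material_map, mesh_list, light_map = pyredner.load_obj('teapot.obj')
for mtl_name, mesh in mesh_list:
    print(mtl_name, mesh.vertices.shape[0], 'vertices')
for mtl_name, ke in light_map.items():
    print('emissive material', mtl_name, 'Ke =', ke)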
Example #29
#First, we setup a camera, by constructing a pyredner.Camera object

# I think this is known and not optimized

cam = pyredner.Camera(
    position=torch.tensor([0.0, 0.0, -5.0]),
    look_at=torch.tensor([0.0, 0.0, 0.0]),
    up=torch.tensor([0.0, 1.0, 0.0]),
    fov=torch.tensor([45.0]),  # in degree
    clip_near=1e-2,  # needs to > 0
    resolution=(256, 256),
    fisheye=False)

#%%
material_map, mesh_list, light_map = pyredner.load_obj('diamond.obj')
# The mesh we loaded is relatively low-poly and doesn't have vertex normals.
# Fortunately we can compute the vertex normal from the neighbor vertices.
# We can use pyredner.compute_vertex_normal for this task:
# (Try commenting out the following two lines to see the differences in target images!)
for _, mesh in mesh_list:
    mesh.normals = pyredner.compute_vertex_normal(mesh.vertices / 20,
                                                  mesh.indices)
    print(_)  # None

diffuse_reflectance = torch.tensor([0.0, 1.0, 0.0],
                                   device=pyredner.get_device(),
                                   requires_grad=True)

mat_grey = pyredner.Material(diffuse_reflectance)
#background = pyredner.imread('mandrill.tiff')
background = pyredner.imread('Target_Images_Cropped/background_cropped.exr')
# Visualize background
from matplotlib.pyplot import imshow
# %matplotlib inline
# Redner's imread automatically gamma decompress the image to linear space.
# You'll have to compress it back to sRGB space for display.
pyredner.imwrite(
    torch.pow(background.data, 1.0 / 2.2).cpu(),
    'results/' + folder_name + '/background.png')  # saves an exr image as png

#imshow(torch.pow(background, 1.0/2.2))
# Convert background to current device
background = background.to(pyredner.get_device())

objects = pyredner.load_obj('ReferenceOutputMeshes/cubeNVO.obj',
                            return_objects=True)
#camera = pyredner.automatic_camera_placement(objects, resolution=(512, 512))

#"""Next, we define a `model` function that takes the objects, camera, and pose parameters, and output an image."""

# Obtain the teapot vertices we want to apply the transformation on.

#material_map2, mesh_list2, light_map2 = pyredner.load_obj('ReferenceOutputMeshes/cubeNVO.obj')
#for _, mesh2 in mesh_list2:
#    mesh2.normals = pyredner.compute_vertex_normal(mesh2.vertices/3, mesh2.indices)
#%%
#diffuse_reflectance_green =torch.tensor([0.65, 0.32, 0.16], device = pyredner.get_device())
#mat_green = pyredner.Material(diffuse_reflectance_green)
#materials = [mat_green]

#shape_cube = pyredner.Shape(vertices = mesh2.vertices/3,