Example #1
def load_envmap(envmap_path, signal_mean, rangle, device):
    # Load an HDR environment map, scale it so its mean equals signal_mean,
    # and rotate it about the y-axis by rangle (radians).
    envmap = imread(envmap_path)
    envmap = envmap / np.mean(envmap) * signal_mean
    env_to_world = torch.tensor(get_rotation_matrix_y(rangle),
                                dtype=torch.float32)
    return pyredner.EnvironmentMap(torch.tensor(envmap,
                                                dtype=torch.float32,
                                                device=device),
                                   env_to_world=env_to_world)
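
# A minimal usage sketch for load_envmap, assuming the function and its
# helpers (imread, get_rotation_matrix_y) are in scope; the path and the
# parameter values below are illustrative, not taken from the original.
import math
import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
envmap = load_envmap('resources/grace-new.exr',
                     signal_mean=0.5,       # desired mean radiance (assumed)
                     rangle=math.pi / 2.0,  # 90-degree rotation about y
                     device=device)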
Example #2
def parse_scene(node):
    cam = None
    resolution = None
    materials = []
    material_dict = {}
    shapes = []
    lights = []
    shape_group_dict = {}
    envmap = None

    for child in node:
        if child.tag == 'sensor':
            cam = parse_camera(child)
        elif child.tag == 'bsdf':
            node_id, material = parse_material(child)
            if node_id is not None:
                material_dict[node_id] = len(materials)
                materials.append(material)
        # shapegroup for instancing
        elif child.tag == 'shape' and child.attrib['type'] == 'shapegroup':
            for child_s in child:
                if child_s.tag == 'shape':
                    shape_group_dict[child.attrib['id']] = parse_shape(
                        child_s, material_dict, None)[0]
        elif child.tag == 'shape':
            shape, light = parse_shape(
                child, material_dict, len(shapes), shape_group_dict
                if child.attrib['type'] == 'instance' else None)
            if isinstance(shape, list):
                shapes = shapes + shape
            else:
                shapes.append(shape)
            if light is not None:
                lights.append(light)
        # Add envmap loading support
        elif child.tag == 'emitter' and child.attrib['type'] == 'envmap':
            # read envmap params from xml
            scale = 1.0
            envmap_filename = None
            to_world = torch.eye(4)
            for child_s in child:
                if child_s.attrib['name'] == 'scale':
                    assert child_s.tag == 'float'
                    scale = float(child_s.attrib['value'])
                if child_s.attrib['name'] == 'filename':
                    assert child_s.tag == 'string'
                    envmap_filename = child_s.attrib['value']
                if child_s.attrib['name'] == 'toWorld':
                    to_world = parse_transform(child_s)
            # load envmap
            envmap = scale * pyredner.imread(envmap_filename)
            if pyredner.get_use_gpu():
                envmap = envmap.cuda()
            envmap = pyredner.EnvironmentMap(envmap, env_to_world=to_world)
    return pyredner.Scene(cam, shapes, materials, lights, envmap)
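
# A hedged sketch of driving parse_scene on a Mitsuba-style scene file;
# 'scene.xml' is an illustrative filename.
import xml.etree.ElementTree as etree

tree = etree.parse('scene.xml')
scene = parse_scene(tree.getroot())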
Example #3
def fibonacci_sphere(samples, rnd):
    # Distribute `samples` points quasi-uniformly over the unit sphere
    # using golden-spiral (Fibonacci) spacing; `rnd` jitters the phase.
    points = []
    offset = 2.0 / samples
    increment = math.pi * (3.0 - math.sqrt(5.0))
    for i in range(samples):
        y = ((i * offset) - 1) + (offset / 2)
        r = math.sqrt(1 - pow(y, 2))

        phi = ((i + rnd) % samples) * increment

        x = math.cos(phi) * r
        z = math.sin(phi) * r

        points.append(torch.tensor([x, y, z]))

    return points
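
# A minimal sketch: place 16 camera positions on a sphere of radius 3
# (both values illustrative) to pass as camLocs to generate_scenes below.
import random

camLocs = [3.0 * p for p in fibonacci_sphere(16, random.random() * 16)]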
envmap_cathedral = pyredner.imread('resources/grace-new.exr')
# The loaded image is immediately replaced by a constant white map of the
# same shape, so only its resolution is reused here.
envmap_cathedral = torch.ones(envmap_cathedral.shape, device=pyredner.get_device())
if pyredner.get_use_gpu():
    envmap_cathedral = envmap_cathedral.cuda()
envmap_cathedral = pyredner.EnvironmentMap(envmap_cathedral)

def generate_scenes(camLocs, objects, envmap=None, lightLocs=None):
  scenes = []
  up = torch.tensor([0.0, 1.0, 0.0])
  offset_factor = 0.0
  light_intensity = 100.0

  for ind, loc in enumerate(camLocs):
    # camera0 is a reference camera defined elsewhere in the source file.
    camera = pyredner.Camera(position = loc,
                             look_at = torch.tensor([0.0, 0.0, 0.0]),
                             up = camera0.up,
                             fov = torch.tensor([90.0]), # fov = camera0.fov,
                             resolution = camera0.resolution)
    
    normal = camera.position.div(torch.norm(camera.position))
Example #4
materials = [mat_grey]

vertices, indices, uvs, normals = pyredner.generate_sphere(128, 64)
shape_sphere = pyredner.Shape(
    vertices = vertices,
    indices = indices,
    uvs = uvs,
    normals = normals,
    material_id = 0)
shapes = [shape_sphere]

envmap = pyredner.imread('sunsky.exr')
if pyredner.get_use_gpu():
    envmap = envmap.cuda(device=pyredner.get_device())
envmap = pyredner.EnvironmentMap(envmap)
scene = pyredner.Scene(cam, shapes, materials, [], envmap)
scene_args = pyredner.RenderFunction.serialize_scene(
    scene = scene,
    num_samples = 256,
    max_bounces = 1)
render = pyredner.RenderFunction.apply
img = render(0, *scene_args)
pyredner.imwrite(img.cpu(), 'results/test_envmap/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_envmap/target.png')
target = pyredner.imread('results/test_envmap/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda()

envmap_texels = torch.tensor(
    0.5 * torch.ones([32, 64, 3], device=pyredner.get_device()),
    requires_grad=True)
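
# A hedged sketch of the optimization this example sets up: re-render with
# the current texels and fit them to the target with Adam. The iteration
# count, learning rate, and sample count below are illustrative.
optimizer = torch.optim.Adam([envmap_texels], lr=1e-2)
for t in range(200):
    optimizer.zero_grad()
    scene.envmap = pyredner.EnvironmentMap(envmap_texels)
    scene_args = pyredner.RenderFunction.serialize_scene(
        scene=scene, num_samples=4, max_bounces=1)
    img = render(t + 1, *scene_args)  # first argument is the RNG seed
    loss = (img - target).pow(2).sum()
    loss.backward()
    optimizer.step()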
Example #5
for mtl_name, mesh in mesh_list:
    shapes.append(pyredner.Shape(
        vertices = mesh.vertices,
        indices = mesh.indices,
        material_id = material_id_map[mtl_name],
        uvs = mesh.uvs,
        normals = mesh.normals,
        uv_indices = mesh.uv_indices))

# The previous tutorial used a mesh area light for the scene lighting,
# here we use an environment light,
# which is a texture representing infinitely far away light sources in
# spherical coordinates.
envmap = pyredner.imread('sunsky.exr')
if pyredner.get_use_gpu():
    envmap = envmap.cuda()
envmap = pyredner.EnvironmentMap(envmap)

# Finally we construct our scene using all the variables we setup previously.
scene = pyredner.Scene(cam, shapes, materials, area_lights=[], envmap=envmap)
# Like the previous tutorial, we serialize and render the scene,
# save it as our target
scene_args = pyredner.RenderFunction.serialize_scene(
    scene = scene,
    num_samples = 512,
    max_bounces = 1)
render = pyredner.RenderFunction.apply
img = render(0, *scene_args)
pyredner.imwrite(img.cpu(), 'results/pose_estimation/target.exr')
pyredner.imwrite(img.cpu(), 'results/pose_estimation/target.png')
target = pyredner.imread('results/pose_estimation/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda()
Example #6
def parse_xml(node, device, param_dict,
              cam, materials, material_dict, shapes, shape_id, lights,
              shape_group_dict, envmap):

    for child in node:
        # print(child.tag)
        if child.tag == 'include':
            filename = check_default(child.attrib['filename'], param_dict)
            tree = etree.parse(filename)
            root = tree.getroot()
            cam, materials, material_dict, shapes, shape_id, lights, shape_group_dict, envmap = \
                            parse_xml(root, device, param_dict, 
                                cam, materials, material_dict, shapes, shape_id, lights, shape_group_dict, envmap)
        elif child.tag == 'default':
            # check if it is already in param_dict
            default_n = child.attrib['name']
            # print("default check for %s"%default_n)
            if default_n not in param_dict:
                param_dict[default_n] = check_default(child.attrib['value'], param_dict)
        elif child.tag == 'sensor':
            cam = parse_camera(child, param_dict)
        elif child.tag == 'bsdf':
            node_id, material = parse_material(child, device, param_dict)
            if node_id is not None:
                material_dict[node_id] = len(materials)
                materials.append(material)
        # shapegroup for instancing
        elif child.tag == 'shape' and child.attrib['type'] == 'shapegroup':
            for child_s in child:
                if child_s.tag == 'shape':
                    shape_group_dict[child.attrib['id']] = parse_shape(child_s, material_dict, param_dict, None)[0]
        elif child.tag == 'shape':
            shape, light = parse_shape(
                child, material_dict, param_dict, len(shapes), device,
                shape_group_dict if child.attrib['type'] == 'instance' else None)
            shapes.append(shape)
            # record the shape's index, keyed by its XML id if present
            if 'id' in child.attrib:
                shape_id[child.attrib['id']] = len(shapes) - 1
            else:
                shape_id[len(shapes)] = len(shapes) - 1

            if light is not None:
                lights.append(light)
        # Add envmap loading support
        elif child.tag == 'emitter' and child.attrib['type'] == 'envmap':
            # read envmap params from xml
            scale = 1.0
            envmap_filename = None
            to_world = torch.eye(4)
            for child_s in child:
                if child_s.attrib['name'] == 'scale':
                    assert child_s.tag == 'float'
                    scale = float(child_s.attrib['value'])
                if child_s.attrib['name'] == 'filename':
                    assert child_s.tag == 'string'
                    envmap_filename = child_s.attrib['value']
                if child_s.attrib['name'] == 'toWorld' or child_s.attrib['name'] == 'to_world':
                    to_world = parse_transform(child_s, param_dict)
            # load envmap
            envmap = scale * pyredner.imread(envmap_filename).to(device)
            envmap = pyredner.EnvironmentMap(envmap, env_to_world=to_world)

    return cam, materials, material_dict, shapes, shape_id, lights, shape_group_dict, envmap
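
# A hedged sketch of kicking off parse_xml with empty accumulators; the
# filename is illustrative and etree is assumed to be xml.etree.ElementTree.
import xml.etree.ElementTree as etree
import torch

device = torch.device('cpu')
root = etree.parse('scene.xml').getroot()
cam, materials, material_dict, shapes, shape_id, lights, shape_group_dict, envmap = \
    parse_xml(root, device, param_dict={},
              cam=None, materials=[], material_dict={}, shapes=[],
              shape_id={}, lights=[], shape_group_dict={}, envmap=None)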
Example #7
m = pyredner.Material(diffuse_reflectance=texels,
                      specular_reflectance=torch.tensor([0.05, 0.05, 0.05]),
                      roughness=torch.tensor([0.02]))

vertices.requires_grad = True


cam_poses, cam_look_ats, resolution = np.load(target_data_path + "env_data.npy", allow_pickle=True)
num_views = len(cam_poses)

target = []
for i in range(num_views):
    target.append(pyredner.imread(target_data_path + 'tgt_img{:0>2d}.png'.format(i)).to(pyredner.get_device()))

tgt_envmap_img = pyredner.imread(target_data_path + 'env_map.png')
envmap_img = (torch.zeros((64, 128, 3), dtype=torch.float32) + 0.5).detach()
envmap_img.requires_grad = True
envmap = pyredner.EnvironmentMap(envmap_img)

print('Finish loading')

def deringing(coeffs, window):
    # Attenuate the higher spherical-harmonic bands with a sinc^4 window
    # to suppress ringing artifacts; band 0 passes through unchanged.
    deringed_coeffs = torch.zeros_like(coeffs)
    deringed_coeffs[:, 0] += coeffs[:, 0]
    deringed_coeffs[:, 1:1 + 3] += \
        coeffs[:, 1:1 + 3] * math.pow(math.sin(math.pi * 1.0 / window) / (math.pi * 1.0 / window), 4.0)
    deringed_coeffs[:, 4:4 + 5] += \
        coeffs[:, 4:4 + 5] * math.pow(math.sin(math.pi * 2.0 / window) / (math.pi * 2.0 / window), 4.0)
    # deringed_coeffs[:, 9:9 + 7] += \
    #     coeffs[:, 9:9 + 7] * math.pow(math.sin(math.pi * 3.0 / window) / (math.pi * 3.0 / window), 4.0)
    return deringed_coeffs
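
# A minimal sketch of applying deringing to order-2 SH coefficients
# (shape [3 channels, 9 coefficients]); the window value is illustrative.
import math
import torch

coeffs = torch.randn(3, 9)
smooth = deringing(coeffs, window=6.0)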
Example #8
def generate_poses(model_path, output_path):
    # Init logger
    log = dict()

    # Load renderer configs
    material_map, mesh_list, light_map = pyredner.load_obj(model_path)
    material_id_map = {}
    materials = []
    count = 0
    for key, value in material_map.items():
        material_id_map[key] = count
        count += 1
        materials.append(value)

    shapes = []
    for mtl_name, mesh in mesh_list:
        shapes.append(
            pyredner.Shape(vertices=mesh.vertices,
                           indices=mesh.indices,
                           uvs=mesh.uvs,
                           normals=mesh.normals,
                           material_id=material_id_map[mtl_name]))

    envmap = pyredner.EnvironmentMap(
        torch.tensor(imread('./datasets/envmaps/one/sunsky.exr'),
                     dtype=torch.float32,
                     device=pyredner.get_device()))

    # Object pose parameters
    euler_angles = [0.0, 0.0, 0.0]
    translation = [0.0, -0.75, 0.0]
    up = [0.0, 1.0, 0.0]
    distance = 7.0

    # Setup base scene to modify during iterations
    cam_params = camera_parameters(euler_angles, translation, distance, up)

    camera = pyredner.Camera(position=torch.tensor(cam_params[0],
                                                   dtype=torch.float32),
                             look_at=torch.tensor(cam_params[1],
                                                  dtype=torch.float32),
                             up=torch.tensor(cam_params[2],
                                             dtype=torch.float32),
                             fov=torch.tensor([45.0]),
                             clip_near=1e-2,
                             resolution=(opt.resolution, opt.resolution),
                             fisheye=False)

    scene = pyredner.Scene(camera,
                           shapes,
                           materials,
                           area_lights=[],
                           envmap=envmap)

    # Generate alphamasks
    for i in range(opt.num_elev):
        # Set elevation angle
        elev_pc = i / opt.num_elev
        elevation = opt.max_elev * elev_pc + opt.min_elev * (1 - elev_pc)
        euler_angles[1] = elevation

        # Calculate number of azimuthal iterations
        num_azimuth = int(opt.num_elev * math.sin(math.pi / 2 - elevation))
        for j in range(num_azimuth):
            # Set azimuthal angle
            azimuth_pc = j / num_azimuth
            azimuth = math.pi * 2 * azimuth_pc

            euler_angles[0] = azimuth

            print('Params: Elevation - {:.4f}\tAzimuth - {:.4f}'\
                    .format(elevation, azimuth))

            # Set Camera params
            cam_params = camera_parameters(euler_angles, translation, distance,
                                           up)

            # Update scene params
            scene.camera = pyredner.Camera(
                position=torch.tensor(cam_params[0], dtype=torch.float32),
                look_at=torch.tensor(cam_params[1], dtype=torch.float32),
                up=torch.tensor(cam_params[2], dtype=torch.float32),
                fov=torch.tensor([45.0]),
                clip_near=1e-2,
                resolution=(opt.resolution, opt.resolution),
                fisheye=False)
            args = pyredner.RenderFunction.serialize_scene(
                scene=scene,
                num_samples=1,
                max_bounces=1,
                channels=[redner.channels.alpha])

            out = pyredner.RenderFunction.apply(1, *args)

            fn = gen_hash(6)
            imwrite(out, os.path.join(output_path, '{}.png'.format(fn)))
            log[fn] = {'elevation': elevation, 'azimuth': azimuth}
    return log
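
# A hedged sketch of driving generate_poses and saving its log; the paths
# are illustrative, and the `opt` settings and helper functions the function
# reads (camera_parameters, gen_hash) must already be defined.
import json

log = generate_poses('./datasets/models/model.obj', './outputs/masks')
with open('./outputs/masks/log.json', 'w') as f:
    json.dump(log, f, indent=2)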