Example #1
def visual_vertex_grad(vertices: torch.Tensor,
                       indices: torch.Tensor,
                       cam: pyredner.Camera = None):
    if not hasattr(visual_vertex_grad, 'x'):
        visual_vertex_grad.x = 0
    else:
        visual_vertex_grad.x += 1
    cam = pyredner.Camera(
        position=cam_pos,
        look_at=cam_look_at,  # Center of the vertices
        up=torch.tensor([0.0, 1.0, 0.0]),
        fov=torch.tensor([45.0]),
        resolution=(1000, 1000))
    m = pyredner.Material(use_vertex_color=True)
    obj = pyredner.Object(vertices=vertices, indices=indices, material=m)
    coe = 500000.
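    # Map each gradient component to a color: positive values use the primary
    # color of that axis (R/G/B for x/y/z), negative values the complementary color.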
    color_reps = torch.tensor([[[1., 0., 0.], [0., -1., -1.]],
                               [[0., 1., 0.], [-1., 0., -1.]],
                               [[0., 0., 1.], [-1., -1., 0.]]]).to(pyredner.get_device())
    grad_imgs = []
    for d in range(3):
        colors = torch.where(
            vertices.grad[:, d:d + 1].expand(-1, 3) > 0,
            vertices.grad[:, d:d + 1].expand(-1, 3) * color_reps[d, 0],
            vertices.grad[:, d:d + 1].expand(-1, 3) * color_reps[d, 1]) * coe

        obj.colors = colors
        scene = pyredner.Scene(camera=cam, objects=[obj])
        grad_imgs.append(pyredner.render_albedo(scene=scene))
    for d in range(3):
        pyredner.imwrite(
            grad_imgs[d].cpu(), output_path +
            '/grad_imgs/{:0>2d}{:0>2d}.png'.format(d, visual_vertex_grad.x))
    return grad_imgs
Example #2
def view_face(cam_pos, cam_look_at, vertices, indices, ambient_color,
              dir_light_intensity, dir_light_directions, normals, colors):
    cam_pos = torch.tensor(cam_pos)
    cam_look_at = torch.tensor(cam_look_at)
    up = torch.tensor([0., 0., 1.])
    pos = cam_pos - cam_look_at
    relx = -torch.cross(pos, up)
    relx = relx / relx.norm()
    rely = torch.cross(pos, relx)
    rely = rely / rely.norm()
    len = pos.norm()
    angle = torch.tensor(0.5236)
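    # Render a 5x5 grid of views by rotating the camera around the look-at point
    # in 30-degree (0.5236 rad) steps along two orthogonal directions.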
    for i in range(-2, 3):
        pos_i = pos * torch.cos(i * angle) + len * relx * torch.sin(i * angle)
        for j in range(-2, 3):
            pos_j = pos_i * torch.cos(j * angle) + len * rely * torch.sin(j * angle)

            for k in range(1):
                dir_light_direction = torch.tensor(dir_light_directions[k])
                img = model(cam_look_at + pos_j, cam_look_at, vertices,
                            indices, ambient_color, dir_light_intensity,
                            dir_light_direction, normals, colors)
                pyredner.imwrite(
                    img.data.cpu(),
                    'views/view{}_{}_{}.png'.format(i + 2, j + 2, k))
Example #3
def save_mtl(m: pyredner.Material, filename: str):
    if filename[-4:] != '.mtl':
        filename = filename + '.mtl'
    path = os.path.dirname(filename)
    if path != '' and not os.path.exists(path):
        os.makedirs(path)
    with open(filename, 'w') as f:
        f.write('newmtl mtl_1\n')

        if m.diffuse_reflectance is not None:
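            # A single RGB texel is written as a constant Kd color; a full texture
            # is saved to Kd_texels.png and referenced via map_Kd.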
            texels = m.diffuse_reflectance.texels
            if len(texels.size()) == 1:
                f.write('Kd {} {} {}\n'.format(texels[0], texels[1],
                                               texels[2]))
            else:
                f.write('map_Kd Kd_texels.png\n')
                pyredner.imwrite(texels.data.cpu(), os.path.join(path, 'Kd_texels.png'))
Example #4
shapes = [shape_plane, shape_light]
light_intensity = torch.tensor([20.0, 20.0, 20.0])
# The first argument is the shape id of the light
light = pyredner.AreaLight(1, light_intensity)
area_lights = [light]
scene = pyredner.Scene(cam, shapes, materials, area_lights)
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 16,
    max_bounces = 1)

# Alias of the render function
render = pyredner.RenderFunction.apply
# Render our target
img = render(0, *args)
pyredner.imwrite(img.cpu(), 'results/test_texture/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_texture/target.png')
target = pyredner.imread('results/test_texture/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda(device = pyredner.get_device())

# Perturb the scene, this is our initial guess
shape_plane.vertices = torch.tensor(\
    [[-1.1,-1.2,0.0], [-1.3,1.1,0.0], [1.1,-1.1,0.0], [0.8,1.2,0.0]],
    device = pyredner.get_device(),
    requires_grad=True)
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 16,
    max_bounces = 1)
# Render the initial guess
Example #5
# Serialize the scene
# Here we specify the output channels as "depth", "shading_normal"
scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 16,
    max_bounces = 0,
    channels = [redner.channels.depth, redner.channels.shading_normal])

# Render the scene as our target image.
render = pyredner.RenderFunction.apply
# Render. The first argument is the seed for RNG in the renderer.
img = render(0, *scene_args)
# Save the images.
depth = img[:, :, 0]
normal = img[:, :, 1:4]
pyredner.imwrite(depth.cpu(), 'results/test_g_buffer/target_depth.exr')
pyredner.imwrite(depth.cpu(),
                 'results/test_g_buffer/target_depth.png',
                 normalize=True)
pyredner.imwrite(normal.cpu(), 'results/test_g_buffer/target_normal.exr')
pyredner.imwrite(normal.cpu(),
                 'results/test_g_buffer/target_normal.png',
                 normalize=True)
# Read the target image we just saved.
target_depth = pyredner.imread('results/test_g_buffer/target_depth.exr')
target_depth = target_depth[:, :, 0]
target_normal = pyredner.imread('results/test_g_buffer/target_normal.exr')
if pyredner.get_use_gpu():
    target_depth = target_depth.cuda()
    target_normal = target_normal.cuda()
Example #6
    if smooth_scheme != 'None':  # and t > 20:
        pyredner.smooth(vertices, indices, smooth_lmd, smooth_scheme, bound)
        pyredner.smooth(vertices, indices, smooth_lmd, smooth_scheme, bound)

    if t == 200:
        ver_optimizer=torch.optim.Adam([vertices], lr=0.05)

    print("{:.^16}total_loss = {:.6f}".format(t, total_loss))
    print((normals - pyredner.compute_vertex_normal(vertices, indices, 'max')).pow(2).sum())

pyredner.save_obj(obj, output_path + '/final.obj')
print(output_path + '/final.obj')

for i in range(len(cam_poses)):
    img, obj = model(cam_poses[i], cam_look_at, vertices, ambient_color, dir_light_intensity, dir_light_direction, normals)
    pyredner.imwrite(img.data.cpu(), output_path + '/view0{}.png'.format(i))

    plt.plot(losses[i], label='view0{}'.format(i))

plt.legend()
plt.ylabel("loss")
plt.xlabel("iterations")
plt.savefig(output_path + "/lossCurve.png", dpi=800)
xlim = plt.xlim()
ylim = plt.ylim()

for i in range(len(cam_poses)):
    from matplotlib import animation

    fig, (img_plot, diff_plot, loss_curve) = plt.subplots(1, 3, figsize=(20, 10))
    im = img_plot.imshow(imgs[i][0].clamp(0.0, 1.0), animated=True)
Example #7
envmap = pyredner.imread('sunsky.exr')
if pyredner.get_use_gpu():
    envmap = envmap.cuda()
envmap = pyredner.EnvironmentMap(envmap)

# Finally we construct our scene using all the variables we setup previously.
scene = pyredner.Scene(cam, shapes, materials, area_lights=[], envmap=envmap)
# Like the previous tutorial, we serialize and render the scene,
# save it as our target
scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 512,
    max_bounces = 1)
render = pyredner.RenderFunction.apply
img = render(0, *scene_args)
pyredner.imwrite(img.cpu(), 'results/pose_estimation/target.exr')
pyredner.imwrite(img.cpu(), 'results/pose_estimation/target.png')
target = pyredner.imread('results/pose_estimation/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda()

# Now we want to generate the initial guess.
# We want to rotate and translate the teapot. We do this by declaring
# PyTorch tensors of translation and rotation parameters,
# then apply them to all teapot vertices.
# The translation and rotation parameters have very different ranges, so we normalize them
# by multiplying the translation parameters by 100 to map them to the actual translation amounts.
translation_params = torch.tensor([0.1, -0.1, 0.1],
                                  device=pyredner.get_device(),
                                  requires_grad=True)
translation = translation_params * 100.0
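# Sketch of the assumed continuation (mirroring the bunny example later in this
# listing): turn the Euler angles into a rotation matrix, then rotate the vertices
# about their centroid and add the translation. `teapot_vertices` is a hypothetical
# stand-in for the loaded mesh vertices.
euler_angles = torch.tensor([0.1, -0.1, 0.1], requires_grad=True)
rotation_matrix = pyredner.gen_rotate_matrix(euler_angles).to(pyredner.get_device())
teapot_vertices = torch.rand(100, 3, device=pyredner.get_device())  # hypothetical
center = torch.mean(teapot_vertices, 0)
new_vertices = (teapot_vertices - center) @ torch.t(rotation_matrix) + center + translation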
Example #8
shape_light = pyredner.Shape(light_vertices, light_indices, None, None, 0)
shapes = [shape_triangle, shape_light]
light_intensity = torch.tensor([20.0,20.0,20.0])
light = pyredner.AreaLight(1, light_intensity)
area_lights = [light]
scene = pyredner.Scene(cam, shapes, materials, area_lights)
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 256,
    max_bounces = 1)

# Alias of the render function
render = pyredner.RenderFunction.apply
# Render our target
img = render(0, *args)
pyredner.imwrite(img.cpu(), 'results/test_single_triangle_clipped/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_single_triangle_clipped/target.png')
target = pyredner.imread('results/test_single_triangle_clipped/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda(device = pyredner.get_device())

# Perturb the scene, this is our initial guess
shape_triangle.vertices = torch.tensor(\
    [[-1.0,1.5,0.3], [0.9,1.2,-0.3], [0.0,-3.0,-6.5]],
    device = pyredner.get_device(),
    requires_grad=True)
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 256,
    max_bounces = 1)
# Render the initial guess
def deringing(coeffs, window):
    deringed_coeffs = torch.zeros_like(coeffs)
    deringed_coeffs[:, 0] += coeffs[:, 0]
    deringed_coeffs[:, 1:1 + 3] += \
        coeffs[:, 1:1 + 3] * math.pow(math.sin(math.pi * 1.0 / window) / (math.pi * 1.0 / window), 4.0)
    deringed_coeffs[:, 4:4 + 5] += \
        coeffs[:, 4:4 + 5] * math.pow(math.sin(math.pi * 2.0 / window) / (math.pi * 2.0 / window), 4.0)
    deringed_coeffs[:, 9:9 + 7] += \
        coeffs[:, 9:9 + 7] * math.pow(math.sin(math.pi * 3.0 / window) / (math.pi * 3.0 / window), 4.0)
    return deringed_coeffs


deringed_coeffs = deringing(coeffs, 6.0)
res = (128, 128)
# We call the utility function SH_reconstruct to rasterize the coefficients into an envmap
envmap = pyredner.SH_reconstruct(deringed_coeffs, res)
# Save the target envmap
pyredner.imwrite(envmap.cpu(),
                 'results/joint_material_envmap_sh/target_envmap.exr')
# Convert the PyTorch tensor into pyredner compatible envmap
envmap = pyredner.EnvironmentMap(envmap)
# Setup the scene
scene = pyredner.Scene(camera=cam,
                       shapes=shapes,
                       materials=materials,
                       envmap=envmap)
# Serialize the scene
scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 512,
    max_bounces = 1)
# Render the target
render = pyredner.RenderFunction.apply
img = render(0, *scene_args)
# - Incorporate box constraints in your optimizer
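# Illustrative sketch (not part of the original example): one way to add box
# constraints is to clamp the optimized parameters back into a valid range after
# every optimizer step. `diffuse_var` is a hypothetical parameter for illustration.
import torch
diffuse_var = torch.tensor([0.5, 0.5, 0.5], requires_grad=True)
optimizer = torch.optim.Adam([diffuse_var], lr=1e-2)
loss = (diffuse_var - 2.0).pow(2).sum()  # dummy objective for illustration
loss.backward()
optimizer.step()
with torch.no_grad():
    diffuse_var.clamp_(0.0, 1.0)  # keep the reflectance inside [0, 1]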

# In addition to Wavefront obj files, redner also supports loading from a Mitsuba
# scene file. Currently we only support a limited set of features. In particular
# we only support two kinds of materials: diffuse and roughplastic. Note that the
# "alpha" value in roughplastic is the square root of the roughness. See cbox.xml
# for what a Mitsuba scene file should look like.
# We can load a scene using pyredner.load_mitsuba() utility, and render it as usual.
scene = pyredner.load_mitsuba('cbox/cbox.xml')
scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 512,
    max_bounces = 5) # Set max_bounces = 5 for global illumination
render = pyredner.RenderFunction.apply
img = render(0, *scene_args)
pyredner.imwrite(img.cpu(), 'results/coarse_to_fine_estimation/target.exr')
pyredner.imwrite(img.cpu(), 'results/coarse_to_fine_estimation/target.png')
target = pyredner.imread('results/coarse_to_fine_estimation/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda()

# Now let's generate an initial guess by perturbing the reference.
# Let's set all the diffuse color to gray by manipulating material.diffuse_reflectance.
# We also store all the material variables to optimize in a list.
material_vars = []
for mi, m in enumerate(scene.materials):
    var = torch.tensor([0.5, 0.5, 0.5],
                       device = pyredner.get_device(),
                       requires_grad = True)
    material_vars.append(var)
    m.diffuse_reflectance = pyredner.Texture(var)
#    envmap = envmap.cuda()
#envmap = pyredner.EnvironmentMap(envmap)
#
## Finally we construct our scene using all the variables we setup previously.
#scene = pyredner.Scene(cam, shapes, materials, area_lights = [], envmap = envmap)

scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 512,
    max_bounces = 1)

render = pyredner.RenderFunction.apply
img = render(1, *scene_args)

#%
pyredner.imwrite(img.cpu(), 'results/' + folder_name + '/init_guess.exr')
pyredner.imwrite(img.cpu(), 'results/' + folder_name + '/init_guess.png')

#Loading it again
target = pyredner.imread('results/' + folder_name + '/init_guess.exr')
target_p = pyredner.imread('results/' + folder_name + '/init_guess.png')
if pyredner.get_use_gpu():
    target = target.cuda()

#%%
im = Image.open("results/uranus.jpg")
#imm= transforms.CenterCrop((256,256))(im)
#im= im.resize((256,256))

im = transforms.Resize((256, 256), interpolation=Image.BILINEAR)(im)
#
# An important thing to keep in mind is that alpha blending is only *correct* in a [linear color space](https://www.kinematicsoup.com/news/2016/6/15/gamma-and-linear-space-what-they-are-how-they-differ). Natural 8-bit images you download from the internet are usually gamma compressed. Redner's `imread` function automatically converts the image to linear space (assuming gamma=2.2), so when displaying them you want to convert them back to the gamma compressed space.
#"""

# Commented out IPython magic to ensure Python compatibility.
#import urllib
#filedata = urllib.request.urlretrieve('http://sipi.usc.edu/database/download.php?vol=misc&img=4.2.03', 'mandrill.tiff')
#background = pyredner.imread('mandrill.tiff')
background = pyredner.imread('Target_Images_Cropped/background_cropped.exr')
# Visualize background
from matplotlib.pyplot import imshow
# %matplotlib inline
# Redner's imread automatically gamma decompresses the image to linear space.
# You'll have to compress it back to sRGB space for display.
pyredner.imwrite(
    torch.pow(background.data, 1.0 / 2.2).cpu(),
    'results/' + folder_name + '/background.png')  # saves an exr image as png

#imshow(torch.pow(background, 1.0/2.2))
# Convert background to current device
background = background.to(pyredner.get_device())

objects = pyredner.load_obj('ReferenceOutputMeshes/cubeNVO.obj',
                            return_objects=True)
#camera = pyredner.automatic_camera_placement(objects, resolution=(512, 512))

#"""Next, we define a `model` function that takes the objects, camera, and pose parameters, and output an image."""

# Obtain the teapot vertices we want to apply the transformation on.

#material_map2, mesh_list2, light_map2 = pyredner.load_obj('ReferenceOutputMeshes/cubeNVO.obj')
Example #13
import torch
import pyredner

vertices, indices, uvs, normals = pyredner.generate_sphere(64, 128)
m = pyredner.Material(diffuse_reflectance=torch.tensor(
    (0.5, 0.5, 0.5), device=pyredner.get_device()))
obj = pyredner.Object(vertices=vertices,
                      indices=indices,
                      uvs=uvs,
                      normals=normals,
                      material=m)
cam = pyredner.automatic_camera_placement([obj], resolution=(480, 640))
scene = pyredner.Scene(objects=[obj], camera=cam)

img = pyredner.render_g_buffer(
    scene, channels=[pyredner.channels.uv, pyredner.channels.shading_normal])
uv_img = torch.cat([img[:, :, :2],
                    torch.zeros(480, 640, 1, device=pyredner.get_device())],
                   dim=2)
normal_img = img[:, :, 2:]
pyredner.imwrite(uv_img, 'results/test_sphere/uv.png')
pyredner.imwrite(normal_img, 'results/test_sphere/normal.png')
Example #14
envmap = pyredner.imread('sunsky.exr')
if pyredner.get_use_gpu():
    envmap = envmap.cuda(device=pyredner.get_device())
envmap = pyredner.EnvironmentMap(envmap)
scene = pyredner.Scene(cam, shapes, materials, [], envmap)
scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 256,
    max_bounces = 1,
    channels = [redner.channels.radiance, redner.channels.vertex_color])
render = pyredner.RenderFunction.apply
img = render(0, *scene_args)
img_radiance = img[:, :, :3]
img_vertex_color = img[:, :, 3:]
pyredner.imwrite(img_radiance.cpu(), 'results/test_vertex_color/target.exr')
pyredner.imwrite(img_radiance.cpu(), 'results/test_vertex_color/target.png')
pyredner.imwrite(img_vertex_color.cpu(),
                 'results/test_vertex_color/target_color.png')
target_radiance = pyredner.imread('results/test_vertex_color/target.exr')
if pyredner.get_use_gpu():
    target_radiance = target_radiance.cuda()

# Initial guess. Set to 0.5 for all vertices.
shape_sphere.colors = \
    torch.zeros_like(vertices, device = pyredner.get_device()) + 0.5
shape_sphere.colors.requires_grad = True
# We render both the radiance and the vertex color here.
# The vertex color is only for visualization.
scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
Example #15
pyredner.set_use_gpu(torch.cuda.is_available())

scene = pyredner.load_mitsuba('scenes/bunny_box.xml')

scene.shapes[-1].vertices += torch.tensor([0, 0.01, 0],
                                          device=pyredner.get_device())

args=pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 1024,
    max_bounces = 6)
render = pyredner.RenderFunction.apply
# Render our target. The first argument is the seed for RNG in the renderer.
img = render(0, *args)
pyredner.imwrite(img.cpu(), 'results/test_bunny_box/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_bunny_box/target.png')
target = pyredner.imread('results/test_bunny_box/target.exr')
target = target.cuda(pyredner.get_device())

bunny_vertices = scene.shapes[-1].vertices.clone()
bunny_translation = torch.tensor([0.1, 0.4, 0.1],
                                 device=pyredner.get_device(),
                                 requires_grad=True)
bunny_rotation = torch.tensor([-0.2, 0.1, -0.1],
                              device=pyredner.get_device(),
                              requires_grad=True)
bunny_rotation_matrix = pyredner.gen_rotate_matrix(bunny_rotation)

scene.shapes[-1].vertices = \
    (bunny_vertices-torch.mean(bunny_vertices, 0))@torch.t(bunny_rotation_matrix) + \
Example #16
pyredner.set_use_gpu(torch.cuda.is_available())

# Load the scene from a Mitsuba scene file
scene = pyredner.load_mitsuba('scenes/living-room-3/scene.xml')
print('scene loaded')

max_bounces = 6
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 512,
    max_bounces = max_bounces)

render = pyredner.RenderFunction.apply
# Render our target. The first argument is the seed for RNG in the renderer.
img = render(0, *args)
pyredner.imwrite(img.cpu(), 'results/test_living_room/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_living_room/target.png')
target = pyredner.imread('results/test_living_room/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda(device=pyredner.get_device())

scene.camera.look_at = torch.tensor([-0.556408, 0.951295, -3.98066],
                                    requires_grad=True)
scene.camera.position = torch.tensor([0.00419251, 0.973707, -4.80844],
                                     requires_grad=True)
scene.camera.up = torch.tensor([-0.00920347, 0.999741, 0.020835],
                               requires_grad=True)

args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 512,
Example #17
# Here we specify the number of samples we use per pixel and the number of bounces
# for indirect illumination (one bounce means only direct illumination).
scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 16,
    max_bounces = 1)

# Render the scene as our target image.
# To render the scene, we use our custom PyTorch function in pyredner/render_pytorch.py
# First setup the alias of the render function
render = pyredner.RenderFunction.apply
# Render. The first argument is the seed for RNG in the renderer.
img = render(0, *scene_args)
# Save the images.
# The output image is in the GPU memory if you are using GPU.
pyredner.imwrite(img.cpu(), 'results/test_single_triangle/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_single_triangle/target.png')
# Read the target image we just saved.
target = pyredner.imread('results/test_single_triangle/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda()

# Perturb the scene, this is our initial guess.
shape_triangle.vertices = torch.tensor(\
    [[-2.0,1.5,0.3], [0.9,1.2,-0.3], [-0.4,-1.4,0.2]],
    device = pyredner.get_device(),
    requires_grad = True) # Set requires_grad to True since we want to optimize this
# We need to serialize the scene again to get the new arguments.
scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 16,
Example #18
pyredner.set_use_gpu(torch.cuda.is_available())

# Load the scene from a Mitsuba scene file
scene = pyredner.load_mitsuba('scenes/living-room-3/scene.xml')
print('scene loaded')

max_bounces = 6
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 512,
    max_bounces = max_bounces)

render = pyredner.RenderFunction.apply
# Render our target. The first argument is the seed for RNG in the renderer.
img = render(0, *args)
pyredner.imwrite(img.cpu(), 'results/test_living_room/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_living_room/target.png')
target = pyredner.imread('results/test_living_room/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda(device = pyredner.get_device())

scene.camera.look_at = torch.tensor([-0.556408, 0.951295, -3.98066], requires_grad=True)
scene.camera.position = torch.tensor([0.00419251, 0.973707, -4.80844], requires_grad=True)
scene.camera.up = torch.tensor([-0.00920347, 0.999741, 0.020835], requires_grad=True)

args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 512,
    max_bounces = max_bounces)

img = render(1, *args)
Example #19
import torch
import pyredner
import math
import os
import sys

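# Usage sketch (the script name is hypothetical):
#   python img_diff.py rendered.exr target.exr diff.exr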
img1 = sys.argv[1]
img2 = sys.argv[2]
out = sys.argv[3]

img1 = pyredner.imread(img1)
img2 = pyredner.imread(img2)

out_img = 3.0 * torch.abs(img1 - img2)  # amplify the per-pixel difference
out_img = 1.0 - out_img  # invert so identical pixels show up as white
pyredner.imwrite(out_img, out)
Example #20
envmap = pyredner.imread('sunsky.exr')
if pyredner.get_use_gpu():
    envmap = envmap.cuda()
envmap = pyredner.EnvironmentMap(envmap)

# Finally we construct our scene using all the variables we setup previously.
scene = pyredner.Scene(cam, shapes, materials, area_lights = [], envmap = envmap)
# Like the previous tutorial, we serialize and render the scene, 
# save it as our target
scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 512,
    max_bounces = 1)
render = pyredner.RenderFunction.apply
img = render(0, *scene_args)
pyredner.imwrite(img.cpu(), 'results/pose_estimation/target.exr')
pyredner.imwrite(img.cpu(), 'results/pose_estimation/target.png')
target = pyredner.imread('results/pose_estimation/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda()

# Now we want to generate the initial guess.
# We want to rotate and translate the teapot. We do this by declaring
# PyTorch tensors of translation and rotation parameters,
# then apply them to all teapot vertices.
# The translation and rotation parameters have very different ranges, so we normalize them
# by multiplying the translation parameters by 100 to map them to the actual translation amounts.
translation_params = torch.tensor([0.1, -0.1, 0.1],
    device = pyredner.get_device(), requires_grad=True)
translation = translation_params * 100.0
euler_angles = torch.tensor([0.1, -0.1, 0.1], requires_grad=True)
Example #21
#cam_pos = torch.tensor([-80.2697, -55.7891, 373.9277])
#cam_look_at = torch.tensor([-0.2697, -5.7891, 54.7918])
#img = model(cam_pos, cam_look_at, torch.zeros(199, device=pyredner.get_device()), torch.zeros(199, device=pyredner.get_device()), torch.ones(3), torch.zeros(3))
#pyredner.imwrite(img.cpu(), 'img.png')
data_path = "generated/dataset2/"
c_p, cam_look_at, dir_light_intensity, dir_light_direction = np.load(
    data_path + "env_data.npy", allow_pickle=True)
cam_poses = torch.tensor(c_p, requires_grad=True)

#target = pyredner.imread('generated/img03.png').to(pyredner.get_device())
target = []
for i in range(len(cam_poses)):
    target.append(
        pyredner.imread(data_path + 'target_img{:0>2d}.png'.format(i)).to(
            pyredner.get_device()))
    pyredner.imwrite(target[i].cpu(), 'process/target_img{:0>2d}.png'.format(i))

#cam_pos = torch.tensor([-0.2697, -5.7891, 373.9277], requires_grad=True)
#cam_look_at = torch.tensor([-0.2697, -5.7891, 54.7918], requires_grad=True)
#shape_coeffs = torch.zeros(199, device=pyredner.get_device(), requires_grad=True)
color_coeffs = torch.zeros(199,
                           device=pyredner.get_device(),
                           requires_grad=True)
ambient_color = torch.zeros(3,
                            device=pyredner.get_device(),
                            requires_grad=True)
#dir_light_intensity = torch.ones(3, device=pyredner.get_device(), requires_grad=True)
#dir_light_direction = torch.tensor([0.0, 0.0, -1.0], device=pyredner.get_device(), requires_grad=True)
vertices = (shape_mean +
            shape_basis @ torch.zeros(199, device=pyredner.get_device())).view(
                -1, 3)
Example #22
                      uv_indices=uv_indices)  # , colors=colors)
pyredner.save_obj(obj, output_path + '/final.obj')
# pyredner.imwrite(texels.data.cpu(), output_path + '/texels.png')

import matplotlib.pyplot as plt

plt.figure()
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6))
plt.suptitle(description + '\n' + str(vars(args))[1:-1].replace("'", ""),
             fontsize=13,
             color='blue')

plt.sca(ax1)

for i in range(num_views):
    pyredner.imwrite(imgs[i].data.cpu(),
                     output_path + '/final_views/view{:0>2d}.png'.format(i))
    plt.plot(all_losses[:, i],
             label='view{:0>2d}:{:.6f}'.format(i, all_losses[-1, i]))

plt.legend()
plt.ylabel("losses")
plt.xlabel("iterations")

plt.ylim(ymin=0.)
xlim = plt.xlim()
ylim = plt.ylim()

plt.sca(ax2)
plt.plot(total_losses, label="total_loss:{:.6f}".format(total_losses[-1]))
plt.plot(img_losses, label="img_loss:{:.6f}".format(img_losses[-1]))
plt.plot(smooth_losses, label="smooth_loss:{:.6f}".format(smooth_losses[-1]))
Example #23
# Setup the scene. We don't need lights.
scene = pyredner.Scene(cam, shapes, materials, [])
# We output the shape id, so that we can use it later
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 16,
    # Set max bounces to 0, we don't need lighting.
    max_bounces = 0,
    # Use the diffuse color as the output
    channels = [redner.channels.diffuse_reflectance])

# Render the scene as our target image.
render = pyredner.RenderFunction.apply
img = render(0, *args)
pyredner.imwrite(img.cpu(), 'results/two_d_mesh/target.exr')
pyredner.imwrite(img.cpu(), 'results/two_d_mesh/target.png')
target = pyredner.imread('results/two_d_mesh/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda(device=pyredner.get_device())

# Perturb the scene, this is our initial guess
quad_vertices_2d = torch.tensor(\
    [[-0.5, 0.3], [0.3, 0.4], [-0.7, -0.2], [0.4, -0.3]],
    device = pyredner.get_device(),
    requires_grad = True)
tri_vertices_2d = torch.tensor(\
    [[-0.5, 0.4], [0.4, 0.6], [-0.0, -0.3]],
    device = pyredner.get_device(),
    requires_grad = True)
# Need to redo the concatenation
Example #24
shape_light = pyredner.Shape(\
    vertices = torch.tensor([[-1.0, -1.0, -7.0],
                             [ 1.0, -1.0, -7.0],
                             [-1.0,  1.0, -7.0],
                             [ 1.0,  1.0, -7.0]], device = pyredner.get_device()),
    indices = torch.tensor([[0, 1, 2],[1, 3, 2]],
        dtype = torch.int32, device = pyredner.get_device()),
    uvs = None,
    normals = None,
    material_id = 0)

shapes = [shape_triangle, shape_light]
light = pyredner.AreaLight(shape_id = 1, 
                           intensity = torch.tensor([20.0,20.0,20.0]))
area_lights = [light]

scene = pyredner.Scene(cam, shapes, materials, area_lights)

scene_state_dict = scene.state_dict()
scene = pyredner.Scene.load_state_dict(scene_state_dict)

scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 16,
    max_bounces = 1)

render = pyredner.RenderFunction.apply
img = render(0, *scene_args)
pyredner.imwrite(img.cpu(), 'results/test_serialize/img.exr')
Example #25
# The last material is the teapot material, set it to a specular material
scene.materials[-1].diffuse_reflectance = \
    torch.tensor([0.15, 0.2, 0.15], device = pyredner.get_device())
scene.materials[-1].specular_reflectance = \
    torch.tensor([0.8, 0.8, 0.8], device = pyredner.get_device())
scene.materials[-1].roughness = \
    torch.tensor([0.0001], device = pyredner.get_device())
args=pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 512,
    max_bounces = 2)

render = pyredner.RenderFunction.apply
# Render our target. The first argument is the seed for RNG in the renderer.
img = render(0, *args)
pyredner.imwrite(img.cpu(), 'results/test_teapot_specular/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_teapot_specular/target.png')
target = pyredner.imread('results/test_teapot_specular/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda()

# Perturb the scene, this is our initial guess
# We perturb the last shape, which is the SIGGRAPH logo
ref_pos = scene.shapes[-1].vertices
translation = torch.tensor([20.0, 0.0, 2.0],
                           device=pyredner.get_device(),
                           requires_grad=True)
scene.shapes[-1].vertices = ref_pos + translation
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 512,
Example #26
shapes = []
for mtl_name, mesh in mesh_list:
    assert (mesh.normal_indices is None)
    shapes.append(pyredner.Shape(\
        vertices = mesh.vertices,
        indices = mesh.indices,
        material_id = material_id_map[mtl_name],
        uvs = mesh.uvs,
        normals = mesh.normals,
        uv_indices = mesh.uv_indices))

envmap = pyredner.imread('sunsky.exr')
if pyredner.get_use_gpu():
    envmap = envmap.cuda()
envmap = pyredner.EnvironmentMap(envmap)

# Finally we construct our scene using all the variables we setup previously.
scene = pyredner.Scene(cam, shapes, materials, area_lights=[], envmap=envmap)
# Like the previous tutorial, we serialize and render the scene,
# save it as our target
scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 512,
    max_bounces = 1)
render = pyredner.RenderFunction.apply
img = render(0, *scene_args)
pyredner.imwrite(img.cpu(), 'results/test_compute_uvs/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_compute_uvs/target.png')
target = pyredner.imread('results/test_compute_uvs/target.exr')
Example #27
scene = pyredner.Scene(cam, shapes, materials, area_lights=[], envmap=None)
# Serialize the scene.
scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 4, # Still need some samples for anti-aliasing
    max_bounces = 0,
    channels = [redner.channels.generic_texture])

# The g-buffer is the 16-channel texture
g_buffer = render(0, *scene_args)
print(g_buffer.shape)
img = g_buffer[:, :, 0:3]
print(img.shape)

# Save the images
pyredner.imwrite(img.cpu(), 'results/test_multichannels/target_test1.exr')
pyredner.imwrite(img.cpu(), 'results/test_multichannels/target_test1.png')

### TEST 2: 16-channel texture rasterization

generic_texture = torch.zeros(\
    128, 128, 16, device = pyredner.get_device())
generic_texture[:, :, 9:12] = tex_tensor

materials = [pyredner.Material(generic_texture=generic_texture)]

# Construct the scene.
# Don't setup any light sources, only use primary visibility.
scene = pyredner.Scene(cam, shapes, materials, area_lights=[], envmap=None)
# Serialize the scene.
scene_args = pyredner.RenderFunction.serialize_scene(\
                       envmap=envmap)

# Like the previous tutorial, we serialize and render the scene,
# save it as our target
scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 16, #512
    max_bounces = 1)
#
render = pyredner.RenderFunction.apply
img = render(0, *scene_args)  # gives an exr image

#%
#pyredner.imwrite(img.cpu(), 'results/'+folder_name+'/target.exr')
pyredner.imwrite(img.cpu(), 'results/' + folder_name +
                 '/init_load_top.png')  # saves an exr image as png
#%%
##Loading it again
#target_p = pyredner.imread('results/'+folder_name+'/target.png')

#target = pyredner.imread('results/'+folder_name+'/target.exr')
target = pyredner.imread('Target_Images_Cropped/cube_front_cropped.exr')
#
if pyredner.get_use_gpu():
    target = target.cuda()
pyredner.imwrite(target.cpu(), 'results/' + folder_name +
                 '/target.png')  #saves an exr image as png
#%

#im = Image.open("results/circle_1.png")
##imm= transforms.CenterCrop((256,256))(im)
Example #29
    normals = normals,
    material_id = 0)
shapes = [shape_sphere]

envmap = pyredner.imread('sunsky.exr')
if pyredner.get_use_gpu():
    envmap = envmap.cuda(device=pyredner.get_device())
envmap = pyredner.EnvironmentMap(envmap)
scene = pyredner.Scene(cam, shapes, materials, [], envmap)
scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 256,
    max_bounces = 1)
render = pyredner.RenderFunction.apply
img = render(0, *scene_args)
pyredner.imwrite(img.cpu(), 'results/test_envmap/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_envmap/target.png')
target = pyredner.imread('results/test_envmap/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda()

envmap_texels = torch.full([32, 64, 3], 0.5,
                           device=pyredner.get_device(),
                           requires_grad=True)
envmap = pyredner.EnvironmentMap(torch.abs(envmap_texels))
scene = pyredner.Scene(cam, shapes, materials, [], envmap)
scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 256,
    max_bounces = 1)
img = render(1, *scene_args)
Example #30
shapes = [shape_floor, shape_blocker, shape_light]
light_intensity = torch.tensor([1000.0, 1000.0, 1000.0])
# The first argument is the shape id of the light
light = pyredner.AreaLight(2, light_intensity)
area_lights = [light]
scene = pyredner.Scene(cam, shapes, materials, area_lights)
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 256,
    max_bounces = 1)

# Alias of the render function
render = pyredner.RenderFunction.apply
# Render our target
img = render(0, *args)
pyredner.imwrite(img.cpu(), 'results/test_shadow_receiver/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_shadow_receiver/target.png')
target = pyredner.imread('results/test_shadow_receiver/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda(device = pyredner.get_device())

# Perturb the scene, this is our initial guess
shape_floor.vertices = torch.tensor(\
    [[-2.0,-0.2,-2.0],[-2.0,-0.2,2.0],[2.0,-0.2,-2.0],[2.0,-0.2,2.0]],
    device = pyredner.get_device(),
    requires_grad = True)
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 256,
    max_bounces = 1)
# Render the initial guess
Example #31
shapes = [shape_floor, shape_blocker, shape_light]
light_intensity = torch.tensor([5000.0, 5000.0, 5000.0])
# The first argument is the shape id of the light
light = pyredner.AreaLight(2, light_intensity)
area_lights = [light]
scene = pyredner.Scene(cam, shapes, materials, area_lights)
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 256,
    max_bounces = 1)

# Alias of the render function
render = pyredner.RenderFunction.apply
# Render our target
img = render(0, *args)
pyredner.imwrite(img.cpu(), 'results/test_shadow_camera/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_shadow_camera/target.png')
target = pyredner.imread('results/test_shadow_camera/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda(device = pyredner.get_device())

# Perturb the scene, this is our initial guess
position = torch.tensor([-2.0, 7.0, 2.0], requires_grad = True)
scene.camera = pyredner.Camera(position = position,
                               look_at = look_at,
                               up = up,
                               fov = fov,
                               clip_near = clip_near,
                               resolution = resolution)
args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
radius = float(sys.argv[5])
lightLocs = None
camera0 = pyredner.automatic_camera_placement(target_objects, resolution)
#camLocs = fibonacci_sphere(num_cameras, False)
camLocs = [torch.tensor([-0.1, 0.1, 0.1])]
target_scenes = generate_scenes(camLocs, target_objects, None, lightLocs)

max_bounces_targets = 4
max_bounces_optim = 4

# Render Targets
targets = pyredner.render_pathtracing(scene = target_scenes, num_samples = (512, 0), max_bounces=max_bounces_targets)

for ind, img in enumerate(targets):
  img = img.data.cpu()
  pyredner.imwrite(img, path + "targets/target_" + str(ind) + ".png")
  #target_data = pyredner.imread( path + "targets/target_" + str(ind) + ".png")
  #targets[ind] = target_data


target_texture = pyredner.render_albedo(scene = target_scenes, num_samples = (512, 0))

for ind, img in enumerate(target_texture):
  mask = img.clone()
  mask = mask.sum(2)/3
  mask[mask < 0.8] = 0.0
  mask = torch.stack([mask, mask, mask], dim=2)
  img = img.data.cpu()
  pyredner.imwrite(img, path + "targets/texture_" + str(ind) + ".png")

def tex_model(optim_scenes, num_samples=(64, 64), max_bounces=1):
radius = 2.0
lightLocs = None
camera0 = pyredner.automatic_camera_placement(target_objects, resolution)
camLocs = fibonacci_sphere(num_cameras, False)
target_scenes = generate_scenes(camLocs, target_objects, None, lightLocs)

# Render targets.
pyredner.render_pytorch.print_timing = False
targets = pyredner.render_pathtracing(scene=target_scenes,
                                      num_samples=(128, 0),
                                      max_bounces=1)

# Write out targets.
for ind, img in enumerate(targets):
    img = img.data.cpu()
    pyredner.imwrite(img, output_path + "/targets/target_" + str(ind) + ".png")

# Loss function definition.
num_gaussian_levels = 0


def loss_function(renders, targets):
    renders = renders.permute(0, 3, 1, 2)
    targets = targets.permute(0, 3, 1, 2)

    loss = 0
    loss = torch.sum((renders - targets)**2.0)

    for i in range(num_gaussian_levels):
        targets = gaussian_func(targets)
        renders = gaussian_func(renders)
Example #34
    normals = normals,
    material_id = 0)
shapes = [shape_sphere]

envmap = pyredner.imread('sunsky.exr')
if pyredner.get_use_gpu():
    envmap = envmap.cuda(device = pyredner.get_device())
envmap = pyredner.EnvironmentMap(envmap)
scene = pyredner.Scene(cam, shapes, materials, [], envmap)
scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 256,
    max_bounces = 1)
render = pyredner.RenderFunction.apply
img = render(0, *scene_args)
pyredner.imwrite(img.cpu(), 'results/test_envmap/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_envmap/target.png')
target = pyredner.imread('results/test_envmap/target.exr')
if pyredner.get_use_gpu():
    target = target.cuda()

envmap_texels = torch.full([32, 64, 3], 0.5,
    device = pyredner.get_device(),
    requires_grad = True)
envmap = pyredner.EnvironmentMap(torch.abs(envmap_texels))
scene = pyredner.Scene(cam, shapes, materials, [], envmap)
scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 256,
    max_bounces = 1)
img = render(1, *scene_args)
Example #35
shape_light = pyredner.Shape(\
    vertices = torch.tensor([[-1.0, -1.0, -7.0],
                             [ 1.0, -1.0, -7.0],
                             [-1.0,  1.0, -7.0],
                             [ 1.0,  1.0, -7.0]], device = pyredner.get_device()),
    indices = torch.tensor([[0, 1, 2],[1, 3, 2]],
        dtype = torch.int32, device = pyredner.get_device()),
    uvs = None,
    normals = None,
    material_id = 0)

shapes = [shape_triangle, shape_light]
light = pyredner.AreaLight(shape_id=1,
                           intensity=torch.tensor([20.0, 20.0, 20.0]))
area_lights = [light]

scene = pyredner.Scene(cam, shapes, materials, area_lights)

scene_state_dict = scene.state_dict()
scene = pyredner.Scene.load_state_dict(scene_state_dict)

scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 16,
    max_bounces = 1)

render = pyredner.RenderFunction.apply
img = render(0, *scene_args)
pyredner.imwrite(img.cpu(), 'results/test_serialize/img.exr')
Example #36
def deringing(coeffs, window):
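    # Attenuate bands 1-3 of the spherical harmonics coefficients with a
    # sinc^4 window to suppress ringing in the reconstructed environment map.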
    deringed_coeffs = torch.zeros_like(coeffs)
    deringed_coeffs[:, 0] += coeffs[:, 0]
    deringed_coeffs[:, 1:1 + 3] += \
        coeffs[:, 1:1 + 3] * math.pow(math.sin(math.pi * 1.0 / window) / (math.pi * 1.0 / window), 4.0)
    deringed_coeffs[:, 4:4 + 5] += \
        coeffs[:, 4:4 + 5] * math.pow(math.sin(math.pi * 2.0 / window) / (math.pi * 2.0 / window), 4.0)
    deringed_coeffs[:, 9:9 + 7] += \
        coeffs[:, 9:9 + 7] * math.pow(math.sin(math.pi * 3.0 / window) / (math.pi * 3.0 / window), 4.0)
    return deringed_coeffs
deringed_coeffs = deringing(coeffs, 6.0)
res = (256, 128)
# We call the utility function SH_reconstruct to rasterize the coefficients into an envmap
envmap = pyredner.SH_reconstruct(deringed_coeffs, res)
# Save the target envmap
pyredner.imwrite(envmap.cpu(), 'results/joint_material_envmap_sh/target_envmap.exr')
# Convert the PyTorch tensor into pyredner compatible envmap
envmap = pyredner.EnvironmentMap(envmap)
# Setup the scene
scene = pyredner.Scene(cam, shapes, materials, [], envmap)
# Serialize the scene
scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 512,
    max_bounces = 1)
# Render the target
render = pyredner.RenderFunction.apply
img = render(0, *scene_args)
# Save the target image
pyredner.imwrite(img.cpu(), 'results/joint_material_envmap_sh/target.exr')
pyredner.imwrite(img.cpu(), 'results/joint_material_envmap_sh/target.png')
Example #37
# Here we use an environment light,
# which is a texture representing infinitely far away light sources in
# spherical coordinates.
#envmap = pyredner.imread('sunsky.exr')
#if pyredner.get_use_gpu():
#    envmap = envmap.cuda()
#envmap = pyredner.EnvironmentMap(envmap)
#
## Finally we construct our scene using all the variables we setup previously.
#scene = pyredner.Scene(cam, shapes, materials, area_lights = [], envmap = envmap)

scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 512,
    max_bounces = 1)

render = pyredner.RenderFunction.apply
img = render(0, *scene_args)

#%%
pyredner.imwrite(img.cpu(), 'results/pose_estimation/target.exr')
pyredner.imwrite(img.cpu(), 'results/pose_estimation/target.png')

#Loading it again
target = pyredner.imread('results/pose_estimation/target.exr')

if pyredner.get_use_gpu():
    target = target.cuda()

#%%
Example #38
    shapes = []
    shapes.append(target_obj1[0])

    numShapes = len(shapes)
    shapes.extend(lights)

    area_lights = []
    for i in range(numShapes, len(shapes)):
        area_lights.append(pyredner.AreaLight(shape_id = numShapes, intensity = torch.tensor([light_intensity, light_intensity, light_intensity])))
        area_lights.append(pyredner.AreaLight(shape_id = numShapes, intensity = torch.tensor([light_intensity*10, light_intensity*10, light_intensity*10])))

    scene = pyredner.Scene(cam, objects = [shapes[0], shapes[1]],area_lights = [area_lights[0]], envmap = None)
    scene_intense = pyredner.Scene(cam, objects = [shapes[0], shapes[1]], area_lights = [area_lights[1]], envmap = None)

    target = pyredner.render_pathtracing(scene = [scene], num_samples=(512, 0), max_bounces=1)[0]
    pyredner.imwrite(target.cpu(), 'results/shadow_art/high_res/' + step + '/' + str(obj) + '_0.png')

    area_lights = []
    for i in range(numShapes, len(shapes)):
        area_lights.append(pyredner.AreaLight(shape_id = numShapes, intensity = torch.tensor([light_intensity, light_intensity, light_intensity])))
        area_lights.append(pyredner.AreaLight(shape_id = numShapes, intensity = torch.tensor([light_intensity*10, light_intensity*10, light_intensity*10])))

    shape0_vertices = shapes[0].vertices.clone()
    shapes[0].vertices = \
        (shape0_vertices)

    scene_3 = pyredner.Scene(cam3, objects=[shapes[0], shapes[2]], area_lights = [area_lights[2]], envmap = None)

    target2 = pyredner.render_pathtracing(scene = [scene_3], num_samples=(512, 0), max_bounces=1)[0]
    pyredner.imwrite(target2.cpu(), 'results/shadow_art/high_res/' + step + '/' + str(obj) + '_1.png')