Exemplo n.º 1
0
def main():
    """Convert an image from one file format to another.

    Expects exactly two command-line arguments: the source filename
    and the destination filename. Raises AssertionError otherwise
    (behavior preserved from the original).
    """
    # sys.argv[1:] is already a list; the original copied it with an
    # identity comprehension for no benefit.
    filenames = sys.argv[1:]
    assert len(filenames) == 2
    fromfile, tofile = filenames
    print("Converting '%s' to '%s'" % (fromfile, tofile))
    # NOTE(review): imread/imwrite are project helpers not visible here.
    image = imread(fromfile)
    imwrite(image, tofile)
Exemplo n.º 2
0
 def image_obj(self):
     """Return the object image, or a zero image when none is configured.

     If ``filename_obj`` is set, the file is loaded via the project's
     ``imread`` helper and, for a nonzero ``resolution_limit``, scaled
     down with ``limit_size``. Otherwise an all-zero array shaped like
     ``image_holo`` is returned.
     """
     print("Loading object image")
     if not self.filename_obj:
         # No object image configured: zero stand-in matching the
         # hologram's shape and dtype.
         return np.zeros_like(self.image_holo)
     loaded = imread(self.filename_obj, True)
     if self.resolution_limit != 0:
         loaded = limit_size(loaded, self.resolution_limit)
     return loaded
Exemplo n.º 3
0
 def image_obj(self):
     """Load and return the object image for this dataset.

     Falls back to a zero array the size of ``image_holo`` when no
     ``filename_obj`` is set; otherwise loads the file and optionally
     caps its resolution at ``resolution_limit``.
     """
     print("Loading object image")
     has_file = bool(self.filename_obj)
     if has_file:
         result = imread(self.filename_obj, True)
         if self.resolution_limit != 0:
             # Honor the configured resolution cap.
             result = limit_size(result, self.resolution_limit)
     else:
         result = np.zeros_like(self.image_holo)
     return result
Exemplo n.º 4
0
 def image_ref(self):
     """Return the reference image, or a zero image when none is set.

     With a ``filename_ref`` configured, the file is loaded through the
     project's ``imread`` helper and downscaled via ``limit_size`` when
     ``resolution_limit`` is nonzero. Without one, an all-zero array
     shaped like ``image_holo`` is returned.
     """
     print("Loading reference image")
     if not self.filename_ref:
         # No reference available: zero placeholder matching the
         # hologram's shape and dtype.
         return np.zeros_like(self.image_holo)
     loaded = imread(self.filename_ref, True)
     if self.resolution_limit != 0:
         loaded = limit_size(loaded, self.resolution_limit)
     return loaded
Exemplo n.º 5
0
 def image_ref(self):
     """Load and return the reference image for this dataset.

     When ``filename_ref`` is empty a zero array the size of
     ``image_holo`` is used instead; otherwise the file is read and,
     if ``resolution_limit`` is nonzero, scaled down to honor it.
     """
     print("Loading reference image")
     has_file = bool(self.filename_ref)
     if has_file:
         result = imread(self.filename_ref, True)
         if self.resolution_limit != 0:
             # Apply the configured resolution cap.
             result = limit_size(result, self.resolution_limit)
     else:
         result = np.zeros_like(self.image_holo)
     return result
Exemplo n.º 6
0
def main():
    """Subtract a sequence of images from the first one.

    Command-line arguments are image filenames. The second and later
    images are subtracted in order from the first, and the result is
    written to a file whose name concatenates the basenames of all the
    subtracted inputs.

    Returns 1 on bad usage (fewer than two filenames), None on success.
    """
    filenames = sys.argv[1:]
    if len(filenames) < 2:
        # User-facing message kept in Spanish to match the tool's UI.
        print("Se requieren al menos dos ficheros para operar la sustracción.")
        return 1

    files = [(filename, imread(filename)) for filename in filenames]
    name, left = files.pop(0)

    for right_name, right in files:
        left = subtract(left, right)
        # Use only the basename of each subtracted file in the output name.
        filename_tail = path.split(right_name)[1]
        name += "-%s" % filename_tail

    # Typo fixed: "Writting" -> "Writing".
    print("Writing to %s" % name)
    imwrite(left, name)
Exemplo n.º 7
0
def main():
    """Subtract a sequence of images from the first one.

    Command-line arguments are image filenames. The second and later
    images are subtracted in order from the first, and the result is
    written to a file whose name concatenates the basenames of all the
    subtracted inputs.

    Returns 1 on bad usage (fewer than two filenames), None on success.
    """
    filenames = sys.argv[1:]
    if len(filenames) < 2:
        # User-facing message kept in Spanish to match the tool's UI.
        print("Se requieren al menos dos ficheros para operar la sustracción.")
        return 1

    files = [(filename, imread(filename)) for filename in filenames]
    name, left = files.pop(0)

    for right_name, right in files:
        left = subtract(left, right)
        # Use only the basename of each subtracted file in the output name.
        filename_tail = path.split(right_name)[1]
        name += "-%s" % filename_tail

    # Typo fixed: "Writting" -> "Writing".
    print("Writing to %s" % name)
    imwrite(left, name)
Exemplo n.º 8
0
def main():
    """Tune tonemapping parameters for each input image via Amoeba search.

    Images are taken from the command line (loaded with the project's
    ``imread``); with no arguments the classic "lena" test image is used.
    NOTE(review): ``scipy.misc.lena`` was removed from modern SciPy, so
    the fallback only works on old SciPy versions — confirm the pinned
    dependency before relying on it.
    """
    # TODO: the best parameters for removing background perturbations
    # could be selected automatically.
    images = [(filename, imread(filename, False)) for filename in sys.argv[1:]]
    if not images:
        lena = misc.lena()
        images = [("lena", lena)]

    def sigmoid(x):
        # Logistic squashing into (0, 1); np.abs is redundant for real
        # inputs but preserved as harmless.
        result = np.abs(1 / (1 + np.exp(-x)))
        return result

    for filename, image in images:
        print("Original %s:" % filename)

        def processor(t_sigma, t_level, equalize_level):
            # Map the unconstrained search parameters into usable ranges.
            t_sigma = sigmoid(t_sigma) * 20 + 1       # blur radius in [1, 21)
            t_level = sigmoid(t_level)                # tonemap strength in (0, 1)
            equalize_level = sigmoid(equalize_level)  # equalize blend in (0, 1)
            channels = []
            for ndim in range(3):
                channel = image[:, :, ndim]
                local_context = gaussian_filter(channel, t_sigma)
                tonemapped = channel - local_context * t_level
                # BUGFIX: the original did ``tonemapped /= 1 - t_level``
                # after ``astype(int)``; in-place true division on an
                # integer array raises TypeError under NumPy (result
                # cannot be cast back to int). Divide out of place so the
                # result is float, as intended.
                tonemapped = tonemapped.astype(int) / (1 - t_level)
                # Blend the equalized channel with the raw tonemapped one.
                equalized = equalize(tonemapped) * equalize_level
                equalized += tonemapped * (1 - equalize_level)
                channels.append(equalized)
            # Restack (C, H, W) channels into an (H, W, C) image.
            final = np.array(channels).swapaxes(0, 1).swapaxes(1, 2)
            final = normalize(final).astype(np.uint8)
            return final

        amoeba = Amoeba(processor)
        amoeba.iterate(distance=1)
        print(amoeba.points)
Exemplo n.º 9
0
def main():
    """Tune tonemapping parameters for each input image via Amoeba search.

    Images are taken from the command line (loaded with the project's
    ``imread``); with no arguments the classic "lena" test image is used.
    NOTE(review): ``scipy.misc.lena`` was removed from modern SciPy, so
    the fallback only works on old SciPy versions — confirm the pinned
    dependency before relying on it.
    """
    # TODO: the best parameters for removing background perturbations
    # could be selected automatically.
    images = [(filename, imread(filename, False)) for filename in sys.argv[1:]]
    if not images:
        lena = misc.lena()
        images = [("lena", lena)]

    def sigmoid(x):
        # Logistic squashing into (0, 1); np.abs is redundant for real
        # inputs but preserved as harmless.
        result = np.abs(1 / (1 + np.exp(-x)))
        return result

    for filename, image in images:
        print("Original %s:" % filename)

        def processor(t_sigma, t_level, equalize_level):
            # Map the unconstrained search parameters into usable ranges.
            t_sigma = sigmoid(t_sigma) * 20 + 1       # blur radius in [1, 21)
            t_level = sigmoid(t_level)                # tonemap strength in (0, 1)
            equalize_level = sigmoid(equalize_level)  # equalize blend in (0, 1)
            channels = []
            for ndim in range(3):
                channel = image[:, :, ndim]
                local_context = gaussian_filter(channel, t_sigma)
                tonemapped = channel - local_context * t_level
                # BUGFIX: the original did ``tonemapped /= 1 - t_level``
                # after ``astype(int)``; in-place true division on an
                # integer array raises TypeError under NumPy (result
                # cannot be cast back to int). Divide out of place so the
                # result is float, as intended.
                tonemapped = tonemapped.astype(int) / (1 - t_level)
                # Blend the equalized channel with the raw tonemapped one.
                equalized = equalize(tonemapped) * equalize_level
                equalized += tonemapped * (1 - equalize_level)
                channels.append(equalized)
            # Restack (C, H, W) channels into an (H, W, C) image.
            final = np.array(channels).swapaxes(0, 1).swapaxes(1, 2)
            final = normalize(final).astype(np.uint8)
            return final

        amoeba = Amoeba(processor)
        amoeba.iterate(distance=1)
        print(amoeba.points)
Exemplo n.º 10
0
    max_bounces = 6)
# Fragment (the line above closes a serialize_scene(...) call begun before
# this excerpt): optimizes the rigid pose (translation + rotation) of a
# bunny mesh so its render matches a pre-rendered target image.
render = render_pytorch.RenderFunction.apply
# img = render(0, *args)
# image.imwrite(img.data.numpy(), 'test/results/bunny_box/target.exr')

# Rest-pose vertices; the pose parameters below are the optimization
# variables (requires_grad=True).
bunny_vertices = shapes[-1].vertices.clone()
bunny_translation = Variable(torch.from_numpy(\
    np.array([0.1,0.4,0.1],dtype=np.float32)), requires_grad=True)
bunny_rotation = Variable(torch.from_numpy(\
    np.array([-0.2,0.1,-0.1],dtype=np.float32)), requires_grad=True)
#bunny_translation = Variable(torch.from_numpy(\
#    np.array([0.0485, -0.1651, -0.0795],dtype=np.float32)), requires_grad=True)
#bunny_rotation = Variable(torch.from_numpy(\
#    np.array([-0.2,0.1,-0.1],dtype=np.float32)), requires_grad=True)
# Reference image the optimizer drives the render towards.
target = Variable(
    torch.from_numpy(image.imread('test/results/bunny_box/target.exr')))

optimizer = torch.optim.Adam([bunny_translation, bunny_rotation], lr=1e-2)
for t in range(200):
    print('iteration:', t)
    optimizer.zero_grad()
    # Forward pass: render the image
    bunny_rotation_matrix = transform.torch_rotate_matrix(bunny_rotation)

    # Rotate the mesh about its centroid, then translate it.
    shapes[-1].vertices = \
        (bunny_vertices-torch.mean(bunny_vertices, 0))@torch.t(bunny_rotation_matrix) + \
        torch.mean(bunny_vertices, 0) + bunny_translation
    args=render_pytorch.RenderFunction.serialize_scene(\
        cam, materials, shapes, lights, resolution,
        num_samples = 4,
        max_bounces = 6)
Exemplo n.º 11
0
 def image_holo(self):
     """Return the hologram image loaded from ``filename_holo``.

     When a nonzero ``resolution_limit`` is configured, the loaded
     image is downscaled with ``limit_size`` to honor it.
     """
     print("Loading hologram image")
     holo = imread(self.filename_holo, True)
     if self.resolution_limit == 0:
         return holo
     return limit_size(holo, self.resolution_limit)
Exemplo n.º 12
0
shapes = [shape_triangle, shape_light]
# Fragment: single-triangle inverse-rendering test — renders a ground-truth
# target, perturbs the triangle's vertices, then optimizes them back
# (the optimization loop body is truncated at this excerpt's end).
light_intensity=torch.from_numpy(\
    np.array([20,20,20],dtype=np.float32))
# NOTE: this rebinding shadows the `light` module used on the same line.
light = light.Light(1, light_intensity)
lights = [light]
args=render_pytorch.RenderFunction.serialize_scene(\
    cam,materials,shapes,lights,resolution,256,1)

# To apply our Function, we use Function.apply method. We alias this as 'render'.
render = render_pytorch.RenderFunction.apply
img = render(0, *args)
# Save the ground-truth render in both HDR (.exr) and LDR (.png) form.
image.imwrite(img.data.numpy(), 'test/results/test_single_triangle/target.exr')
image.imwrite(img.data.numpy(), 'test/results/test_single_triangle/target.png')
target = Variable(
    torch.from_numpy(
        image.imread('test/results/test_single_triangle/target.exr')))
# Start the optimization from deliberately wrong vertex positions.
shape_triangle.vertices = Variable(torch.from_numpy(\
    np.array([[-2.0,1.5,0.3], [0.9,1.2,-0.3], [-0.4,-1.4,0.2]],dtype=np.float32)),
    requires_grad=True)
args=render_pytorch.RenderFunction.serialize_scene(\
    cam,materials,shapes,lights,resolution,16,1)
img = render(1, *args)
image.imwrite(img.data.numpy(), 'test/results/test_single_triangle/init.png')
diff = torch.abs(target - img)
image.imwrite(diff.data.numpy(),
              'test/results/test_single_triangle/init_diff.png')

optimizer = torch.optim.Adam([shape_triangle.vertices], lr=5e-2)
for t in range(200):
    optimizer.zero_grad()
    # Forward pass: render the image
Exemplo n.º 13
0
# Fragment: scene setup for a texture test — camera, a checkerboard-textured
# plane, and an area light (the final light_intensity statement is cut off
# at the fragment boundary).
up = Variable(torch.from_numpy(np.array([0, 1, 0], dtype=np.float32)))
fov = Variable(torch.from_numpy(np.array([45.0], dtype=np.float32)))
clip_near = Variable(torch.from_numpy(np.array([0.01], dtype=np.float32)))
clip_far = Variable(torch.from_numpy(np.array([10000.0], dtype=np.float32)))
cam = camera.Camera(position=position,
                    look_at=look_at,
                    up=up,
                    cam_to_world=None,
                    fov=fov,
                    clip_near=clip_near,
                    clip_far=clip_far,
                    resolution=resolution)
mat_grey=material.Material(\
    diffuse_reflectance=torch.from_numpy(np.array([0.5,0.5,0.5],dtype=np.float32)))
# Texture-mapped material whose reflectance comes from a checkerboard EXR.
mat_checker_board=material.Material(\
    diffuse_reflectance=torch.from_numpy(image.imread('test/results/test_texture/checker_board.exr')))
materials = [mat_grey, mat_checker_board]
# Unit quad (two triangles) with UVs covering [0,1]^2.
vertices=Variable(torch.from_numpy(\
    np.array([[-1.0,-1.0,0.0], [-1.0,1.0,0.0], [1.0,-1.0,0.0], [1.0,1.0,0.0]],dtype=np.float32)))
indices = torch.from_numpy(np.array([[0, 1, 2], [1, 3, 2]], dtype=np.int32))
uvs = torch.from_numpy(
    np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]],
             dtype=np.float32))
shape_plane = shape.Shape(vertices, indices, uvs, None, 1)
# Area-light quad placed at z = -7.
light_vertices=Variable(torch.from_numpy(\
    np.array([[-1,-1,-7],[1,-1,-7],[-1,1,-7],[1,1,-7]],dtype=np.float32)))
light_indices=torch.from_numpy(\
    np.array([[0,1,2],[1,3,2]],dtype=np.int32))
shape_light = shape.Shape(light_vertices, light_indices, None, None, 0)
shapes = [shape_plane, shape_light]
# NOTE: statement truncated at the fragment boundary.
light_intensity=torch.from_numpy(\
Exemplo n.º 14
0
# Fragment: Adam loop optimizing a light translation so the glossy-test
# render matches 'target.exr'; afterwards the per-iteration frames are
# encoded to a video (the call([...]) list is cut off below).
optimizer = torch.optim.Adam([light_translation], lr=5e-2)
for t in range(200):
    print('iteration:', t)
    shape_light.vertices = light_vertices + light_translation
    args = render_pytorch.RenderFunction.serialize_scene(
        cam, materials, shapes, lights, resolution, 4, 1)
    # To apply our Function, we use Function.apply method. We alias this as 'render'.
    render = render_pytorch.RenderFunction.apply

    optimizer.zero_grad()
    # Forward pass: render the image
    img = render(t, *args)
    image.imwrite(img.data.numpy(),
                  'test/results/test_glossy/iter_{}.png'.format(t))
    # NOTE(review): the target is re-read from disk every iteration;
    # hoisting this load out of the loop would be cheaper — confirm
    # nothing rewrites the file mid-run before changing.
    target = Variable(
        torch.from_numpy(image.imread('test/results/test_glossy/target.exr')))
    loss = (img - target).pow(2).sum()
    print('loss:', loss.item())

    loss.backward()
    print('grad:', light_translation.grad)

    optimizer.step()
    print('light_translation:', light_translation)
    print('shape_light.vertices:', shape_light.vertices)

from subprocess import call

call([
    "ffmpeg", "-framerate", "24", "-i", "test/results/test_glossy/iter_%d.png",
    "-vb", "20M", "test/results/test_glossy/out.mp4"
Exemplo n.º 15
0
def parse_material(node, two_sided=False):
    """Parse a Mitsuba-style XML material node into a Material.

    Handles ``diffuse`` nodes — reflectance/specular given either as a
    ``texture`` child (loaded from file, with optional uscale/vscale)
    or an ``rgb`` child — plus a scalar ``roughness``, and ``twosided``
    wrapper nodes, which delegate to their first child with
    ``two_sided=True``.

    Returns a ``(node_id, material.Material)`` tuple; ``node_id`` is
    taken from the node's ``id`` attribute when present, else None.
    NOTE(review): unsupported material types fall through and return
    None implicitly, which would make the ``twosided`` branch's
    ``ret[1]`` raise — confirm inputs are always supported types.
    """
    node_id = None
    if 'id' in node.attrib:
        node_id = node.attrib['id']
    if node.attrib['type'] == 'diffuse':
        # Defaults: mid-grey diffuse, no specular, fully rough, unit UV scale.
        diffuse_reflectance = Variable(torch.from_numpy(
            np.array([0.5, 0.5, 0.5], dtype=np.float32)))
        diffuse_uv_scale = Variable(torch.from_numpy(
            np.array([1.0, 1.0], dtype=np.float32)))
        specular_reflectance = Variable(torch.from_numpy(
            np.array([0.0, 0.0, 0.0], dtype=np.float32)))
        specular_uv_scale = Variable(torch.from_numpy(
            np.array([1.0, 1.0], dtype=np.float32)))
        roughness = Variable(torch.from_numpy(
            np.array([1.0], dtype=np.float32)))
        for child in node:
            if child.attrib['name'] == 'reflectance':
                if child.tag == 'texture':
                    for grandchild in child:
                        if grandchild.attrib['name'] == 'filename':
                            diffuse_reflectance = Variable(torch.from_numpy(
                                image.imread(grandchild.attrib['value'])))
                        elif grandchild.attrib['name'] == 'uscale':
                            diffuse_uv_scale.data[0] = float(
                                grandchild.attrib['value'])
                        elif grandchild.attrib['name'] == 'vscale':
                            diffuse_uv_scale.data[1] = float(
                                grandchild.attrib['value'])
                elif child.tag == 'rgb':
                    diffuse_reflectance = Variable(torch.from_numpy(
                        parse_vector(child.attrib['value'])))
            elif child.attrib['name'] == 'specular':
                if child.tag == 'texture':
                    for grandchild in child:
                        if grandchild.attrib['name'] == 'filename':
                            specular_reflectance = Variable(torch.from_numpy(
                                image.imread(grandchild.attrib['value'])))
                        elif grandchild.attrib['name'] == 'uscale':
                            specular_uv_scale.data[0] = float(
                                grandchild.attrib['value'])
                        elif grandchild.attrib['name'] == 'vscale':
                            specular_uv_scale.data[1] = float(
                                grandchild.attrib['value'])
                elif child.tag == 'rgb':
                    specular_reflectance = Variable(torch.from_numpy(
                        parse_vector(child.attrib['value'])))
            elif child.attrib['name'] == 'roughness':
                # BUGFIX: torch.from_numpy() only accepts NumPy arrays; the
                # original passed a bare Python float, raising TypeError as
                # soon as a scene specified roughness. Wrap the value in a
                # 1-element float32 array, matching the default above.
                roughness = Variable(torch.from_numpy(
                    np.array([float(child.attrib['value'])],
                             dtype=np.float32)))
        return (node_id,
                material.Material(diffuse_reflectance,
                                  diffuse_uv_scale=diffuse_uv_scale,
                                  specular_reflectance=specular_reflectance,
                                  specular_uv_scale=specular_uv_scale,
                                  roughness=roughness,
                                  two_sided=two_sided))
    elif node.attrib['type'] == 'twosided':
        # Unwrap and re-parse the inner material as two-sided.
        ret = parse_material(node[0], True)
        return (node_id, ret[1])
Exemplo n.º 16
0
# Fragment: shadow-glossy test — writes a difference image between the
# target and a previously produced final render, then assembles a video.
# Everything after exit() is dead leftover setup, truncated at the boundary.
shape_light = shape.Shape(light_vertices, light_indices, None, None, 2)
shapes = [shape_floor, shape_light, shape_blocker]
light_intensity=torch.from_numpy(\
    np.array([0.5,0.5,0.5],dtype=np.float32))
# NOTE: this rebinding shadows the `light` module used on the same line.
light = light.Light(1, light_intensity)
lights = [light]

render = render_pytorch.RenderFunction.apply
# args=render_pytorch.RenderFunction.serialize_scene(\
#     cam,materials,shapes,lights,resolution,256,1)
# img = render(0, *args)
# image.imwrite(img.data.numpy(), 'test/results/test_shadow_glossy/target.exr')
# image.imwrite(img.data.numpy(), 'test/results/test_shadow_glossy/target.png')
target = Variable(
    torch.from_numpy(
        image.imread('test/results/test_shadow_glossy/target.exr')))
img = image.imread('test/results/test_shadow_glossy/final.exr')
# Absolute per-pixel difference between target and final render.
image.imwrite(np.abs(target.data.numpy() - img),
              'test/results/test_shadow_glossy/final_diff.png')

from subprocess import call
call([
    "ffmpeg", "-framerate", "24", "-i",
    "test/results/test_shadow_glossy/iter_%d.png", "-vb", "20M",
    "test/results/test_shadow_glossy/out.mp4"
])
exit()
# Dead code below: unreachable after exit().
shape_blocker.vertices=Variable(torch.from_numpy(\
    np.array([[-0.6,0.9,0.4],[-0.8,3.3,0.7],[0.2,1.1,0.6],[0.3,3.2,0.4]],dtype=np.float32)),
    requires_grad=True)
args=render_pytorch.RenderFunction.serialize_scene(\
Exemplo n.º 17
0
# Fragment: teapot-reflectance test setup — a translated camera and
# perturbed material parameters (all requires_grad=True) form the
# starting point of an optimization against 'target.exr'.
cam_position = cam.position
cam_translation = Variable(torch.from_numpy(\
    np.array([-0.1,0.1,-0.1],dtype=np.float32)), requires_grad=True)
materials[-1].diffuse_reflectance = \
    Variable(torch.from_numpy(np.array([0.5, 0.5, 0.5], dtype=np.float32)),
        requires_grad = True)
materials[-1].specular_reflectance = \
    Variable(torch.from_numpy(np.array([0.5, 0.5, 0.5], dtype=np.float32)),
        requires_grad = True)
materials[-1].roughness = \
    Variable(torch.from_numpy(np.array([0.2], dtype=np.float32)),
        requires_grad = True)
target = Variable(
    torch.from_numpy(
        image.imread('test/results/teapot_reflectance/target.exr')))
image.imwrite(target.data.numpy(),
              'test/results/teapot_reflectance/target.png')
# Rebuild the camera at the translated position; all other camera
# parameters are carried over unchanged.
cam = camera.Camera(position=cam_position + cam_translation,
                    look_at=cam.look_at,
                    up=cam.up,
                    cam_to_world=None,
                    fov=cam.fov,
                    clip_near=cam.clip_near,
                    clip_far=cam.clip_far,
                    resolution=resolution,
                    fisheye=False)
# 256 samples per pixel, 2 bounces.
args = render_pytorch.RenderFunction.serialize_scene(cam, materials, shapes,
                                                     lights, resolution, 256,
                                                     2)
img = render(1, *args)
Exemplo n.º 18
0
# Fragment: shadow-camera test — renders a ground-truth image, then
# re-renders from a perturbed (grad-enabled) camera position to use as the
# optimization starting point.
shapes = [shape_floor, shape_blocker, shape_light]
light_intensity=torch.from_numpy(\
    np.array([5000,5000,5000],dtype=np.float32))
# NOTE: this rebinding shadows the `light` module used on the same line.
light = light.Light(2, light_intensity)
lights = [light]

args = render_pytorch.RenderFunction.serialize_scene(cam, materials, shapes,
                                                     lights, resolution, 256,
                                                     1)
render = render_pytorch.RenderFunction.apply
img = render(0, *args)
# Ground-truth render saved in HDR (.exr) and LDR (.png) form.
image.imwrite(img.data.numpy(), 'test/results/test_shadow_camera/target.exr')
image.imwrite(img.data.numpy(), 'test/results/test_shadow_camera/target.png')
target = Variable(
    torch.from_numpy(
        image.imread('test/results/test_shadow_camera/target.exr')))
# Perturbed camera position — the variable being optimized.
position = Variable(torch.from_numpy(np.array([-2, 7, 2], dtype=np.float32)),
                    requires_grad=True)
cam = camera.Camera(position=position,
                    look_at=look_at,
                    up=up,
                    cam_to_world=None,
                    fov=fov,
                    clip_near=clip_near,
                    clip_far=clip_far,
                    resolution=resolution)
args = render_pytorch.RenderFunction.serialize_scene(cam, materials, shapes,
                                                     lights, resolution, 256,
                                                     1)
img = render(1, *args)
image.imwrite(img.data.numpy(), 'test/results/test_shadow_camera/init.png')
Exemplo n.º 19
0
# Fragment: teapot-specular test — gives the last material a mirror-like
# finish, loads the precomputed target, and displaces the teapot as the
# optimization starting point.
materials[-1].diffuse_reflectance = \
    Variable(torch.from_numpy(np.array([0.15, 0.2, 0.15], dtype=np.float32)))
materials[-1].specular_reflectance = \
    Variable(torch.from_numpy(np.array([0.8, 0.8, 0.8], dtype=np.float32)))
# Near-zero roughness -> almost perfect mirror.
materials[-1].roughness = \
    Variable(torch.from_numpy(np.array([0.0001], dtype=np.float32)))

args=render_pytorch.RenderFunction.serialize_scene(\
    cam, materials, shapes, lights, resolution,
    num_samples = 256,
    max_bounces = 2)
render = render_pytorch.RenderFunction.apply
# img = render(0, *args)
# image.imwrite(img.data.numpy(), 'test/results/teapot_specular/target.exr')
target = Variable(
    torch.from_numpy(image.imread('test/results/teapot_specular/target.exr')))
image.imwrite(target.data.numpy(), 'test/results/teapot_specular/target.png')
# Displace the teapot; `translation` is the grad-enabled variable.
ref_pos = shapes[-1].vertices
translation = Variable(torch.from_numpy(
    np.array([20.0, 0.0, 2.0], dtype=np.float32)),
                       requires_grad=True)
shapes[-1].vertices = ref_pos + translation
args=render_pytorch.RenderFunction.serialize_scene(\
    cam, materials, shapes, lights, resolution,
    num_samples = 256,
    max_bounces = 2)
# img = render(1, *args)
# image.imwrite(img.data.numpy(), 'test/results/teapot_specular/init.png')
# diff = torch.abs(target - img)
# image.imwrite(diff.data.numpy(), 'test/results/teapot_specular/init_diff.png')
Exemplo n.º 20
0
# Fragment: room_0 scene — jointly optimizes per-material diffuse
# reflectance (sigmoid-parameterized) and light intensities against a
# target render; the loop's serialize_scene call is truncated below.
for i in range(len(materials)):
    materials[i].diffuse_reflectance = torch.sigmoid(
        diffuse_reflectance_bases[i])
for i in range(len(lights)):
    # abs() keeps intensities non-negative; 500 scales the base variables.
    lights[i].intensity = torch.abs(500 * lgt_intensity_bases[i])

args=render_pytorch.RenderFunction.serialize_scene(\
    cam, materials, shapes, lights, resolution,
    num_samples = 625,
    max_bounces = 1)
img = render(1, *args)
image.imwrite(img.data.numpy(), 'test/results/room_0/init.exr')
image.imwrite(img.data.numpy(), 'test/results/room_0/init.png')
target = Variable(
    torch.from_numpy(image.imread('test/results/room_0/target.exr')))

# Separate optimizers for material and light variables.
mat_optimizer = torch.optim.Adam(mat_variables, lr=5e-3)
lgt_optimizer = torch.optim.Adam(lgt_variables, lr=5e-3)
for t in range(2000):
    print('iteration:', t)
    mat_optimizer.zero_grad()
    lgt_optimizer.zero_grad()

    # Re-derive materials/lights from the optimization variables each step.
    for i in range(len(materials)):
        materials[i].diffuse_reflectance = torch.sigmoid(
            diffuse_reflectance_bases[i])
    for i in range(len(lights)):
        lights[i].intensity = torch.abs(500 * lgt_intensity_bases[i])
    args=render_pytorch.RenderFunction.serialize_scene(\
        cam, materials, shapes, lights, resolution,
Exemplo n.º 21
0
        diffuse_reflectance_bases[i])
    # (Fragment truncated above: the line above closes a torch.sigmoid(...)
    # call inside a per-material loop.)
    materials[i].specular_reflectance = torch.sigmoid(
        specular_reflectance_bases[i])
    materials[i].roughness = torch.sigmoid(roughness_bases[i])
for i in range(len(lights)):
    # abs() keeps light intensities non-negative.
    lights[i].intensity = torch.abs(lgt_intensity_bases[i] * 10.0)
#args=render_pytorch.RenderFunction.serialize_scene(\
#                cam, materials, shapes, lights, cam.resolution,
#                num_samples = 16384,
#                max_bounces = 16)
#img = render(0, *args)
#image.imwrite(img.data.numpy(), 'test/results/perception_lab/init.exr')
#exit()

print('load target')
org_target = image.imread(
    'test/scenes/perception_lab/off_monitor_30_final.hdr')
#target = image.imread('test/results/perception_lab/target.exr')
# Coarse-to-fine schedule; with num_scales == 1 only the final 512x512
# resolution (the else branch below) is ever used.
num_scales = 1
for scale in range(num_scales):
    # linearly scale from 32x32 to 512x512
    downscale_factor = 512.0 / ((512.0 / num_scales) * scale + 32.0)
    print('downscale_factor', downscale_factor)
    res = round(512 / downscale_factor)
    print('scaling target')
    if scale < num_scales - 1:
        # Bilinear (order=1) downsampling of the HDR target; the last
        # zoom factor of 1.0 leaves the channel axis untouched.
        target = scipy.ndimage.interpolation.zoom(
            org_target, (1.0 / downscale_factor, 1.0 / downscale_factor, 1.0),
            order=1)
    else:
        # Final scale: use the target at native resolution.
        downscale_factor = 1
        res = 512
Exemplo n.º 22
0
# Fragment: test_shadow scene — renders a ground truth, perturbs the
# blocker quad's vertices, and sets up an Adam loop to recover them
# (the loop body is truncated at this excerpt's end).
shape_light = shape.Shape(light_vertices, light_indices, None, None, 1)
shapes = [shape_floor, shape_blocker, shape_light]
light_intensity=torch.from_numpy(\
    np.array([1000,1000,1000],dtype=np.float32))
# NOTE: this rebinding shadows the `light` module used on the same line.
light = light.Light(2, light_intensity)
lights = [light]

args = render_pytorch.RenderFunction.serialize_scene(cam, materials, shapes,
                                                     lights, resolution, 256,
                                                     1)
render = render_pytorch.RenderFunction.apply
img = render(0, *args)
# Ground-truth render saved in HDR (.exr) and LDR (.png) form.
image.imwrite(img.data.numpy(), 'test/results/test_shadow/target.exr')
image.imwrite(img.data.numpy(), 'test/results/test_shadow/target.png')
target = Variable(
    torch.from_numpy(image.imread('test/results/test_shadow/target.exr')))
# Perturbed blocker geometry — the variable being optimized.
shape_blocker.vertices=Variable(torch.from_numpy(\
    np.array([[-0.2,3.5,-0.8],[-0.8,3.0,0.3],[0.4,2.8,-0.8],[0.3,3.2,1.0]],dtype=np.float32)),
    requires_grad=True)
args = render_pytorch.RenderFunction.serialize_scene(cam, materials, shapes,
                                                     lights, resolution, 256,
                                                     1)
img = render(1, *args)
image.imwrite(img.data.numpy(), 'test/results/test_shadow/init.png')
diff = torch.abs(target - img)
image.imwrite(diff.data.numpy(), 'test/results/test_shadow/init_diff.png')

optimizer = torch.optim.Adam([shape_blocker.vertices], lr=1e-2)
for t in range(200):
    print('iteration:', t)
    # To apply our Function, we use Function.apply method. We alias this as 'render'.
Exemplo n.º 23
0
 def image_holo(self):
     """Load the hologram image from ``filename_holo``.

     A nonzero ``resolution_limit`` caps the returned image's size
     via the project's ``limit_size`` helper.
     """
     print("Loading hologram image")
     result = imread(self.filename_holo, True)
     limit = self.resolution_limit
     if limit != 0:
         result = limit_size(result, limit)
     return result
Exemplo n.º 24
0
    np.array([10000,10000,10000],dtype=np.float32))
# Fragment (truncated above): global-illumination test — optimizes a
# light's rotation so the render matches 'results/test_gi/target.exr'.
# NOTE: this rebinding shadows the `light` module used on the same line.
light = light.Light(3, light_intensity)
lights = [light]

optimizer = torch.optim.Adam([light_rotation], lr=1e-2)
for t in range(100):
    print('iteration:', t)
    print('light_rotation', light_rotation)
    # Rotate the light quad, then translate it into place.
    light_rotation_matrix = transform.torch_rotate_matrix(light_rotation)
    shape_light.vertices = light_vertices @ torch.t(
        light_rotation_matrix) + light_translation
    args = render_pytorch.RenderFunction.serialize_scene(
        cam, materials, shapes, lights, resolution, 4, 32)

    # To apply our Function, we use Function.apply method. We alias this as 'render'.
    render = render_pytorch.RenderFunction.apply

    optimizer.zero_grad()
    # Forward pass: render the image
    img = render(t, *args)
    image.imwrite(img.data.numpy(), 'results/test_gi/iter_{}.png'.format(t))
    target = Variable(
        torch.from_numpy(image.imread('results/test_gi/target.exr')))
    loss = (img - target).pow(2).sum()
    # NOTE(review): loss.data[0] is the pre-0.4 PyTorch idiom; modern
    # torch would need loss.item() — confirm the project's torch version.
    print('loss:', loss.data[0])

    loss.backward()
    print('grad:', light_rotation.grad)

    optimizer.step()
Exemplo n.º 25
0
# Fragment: two-triangles test — writes the target PNG, perturbs both
# triangles' vertices, and optimizes them back toward the target render
# (the loop body is truncated at the end of this excerpt).
image.imwrite(img.data.numpy(), 'test/results/test_two_triangles/target.png')
# Perturbed vertex positions — the variables being optimized.
shape_tri0.vertices = Variable(torch.from_numpy(\
    np.array([[-1.3,1.5,0.1], [1.5,0.7,-0.2], [-0.8,-1.1,0.2]],dtype=np.float32)),
    requires_grad=True)
shape_tri1.vertices = Variable(torch.from_numpy(\
    np.array([[-0.5,1.2,1.2], [0.3,1.7,1.0], [0.5,-1.8,1.3]],dtype=np.float32)),
    requires_grad=True)
args=render_pytorch.RenderFunction.serialize_scene(\
    cam,materials,shapes,lights,resolution,256,1)
img = render(1, *args)
image.imwrite(img.data.numpy(), 'test/results/test_two_triangles/init.png')
# Re-serialize at lower quality (4 samples) for the optimization loop.
args=render_pytorch.RenderFunction.serialize_scene(\
    cam,materials,shapes,lights,resolution,4,1)
target = Variable(
    torch.from_numpy(
        image.imread('test/results/test_two_triangles/target.exr')))
diff = torch.abs(target - img)
image.imwrite(diff.data.numpy(),
              'test/results/test_two_triangles/init_diff.png')

optimizer = torch.optim.Adam([shape_tri0.vertices, shape_tri1.vertices],
                             lr=1e-2)
for t in range(200):
    # To apply our Function, we use Function.apply method. We alias this as 'render'.
    render = render_pytorch.RenderFunction.apply

    optimizer.zero_grad()
    # Forward pass: render the image
    img = render(t + 1, *args)
    image.imwrite(img.data.numpy(),
                  'test/results/test_two_triangles/iter_{}.png'.format(t))