Ejemplo n.º 1
0
def test_construct(variant_scalar_rgb):
    """Sensor construction: from to_world, from origin+direction, override
    priority when both are given, and the expected failure modes."""
    from mitsuba.core.xml import load_dict
    from mitsuba.core import ScalarTransform4f

    look_along_y = ScalarTransform4f.look_at(
        origin=[0, 0, 0], target=[0, 1, 0], up=[0, 0, 1])
    look_along_y_matrix = [[-1, 0, 0, 0],
                           [0, 0, 1, 0],
                           [0, 1, 0, 0],
                           [0, 0, 0, 1]]

    # Construction from an explicit to_world transform.
    sensor = make_sensor(to_world=look_along_y)
    assert not sensor.bbox().valid()  # Degenerate bounding box
    assert ek.allclose(sensor.world_transform().eval(0.).matrix,
                       look_along_y_matrix)

    # Construction from origin + direction; the frame is derived internally.
    sensor = make_sensor(origin=[0, 0, 0], direction=[0, 1, 0])
    assert not sensor.bbox().valid()  # Degenerate bounding box
    assert ek.allclose(
        sensor.world_transform().eval(0.).matrix,
        [[0, 0, 1, 0],
         [1, 0, 0, 0],
         [0, 1, 0, 0],
         [0, 0, 0, 1]]
    )

    # When both are supplied, to_world takes precedence over origin/direction:
    # the resulting matrix matches the look_at transform, not [1,0,0]/[4,1,0].
    sensor = make_sensor(to_world=look_along_y,
                         origin=[1, 0, 0],
                         direction=[4, 1, 0])
    assert not sensor.bbox().valid()  # Degenerate bounding box
    assert ek.allclose(sensor.world_transform().eval(0.).matrix,
                       look_along_y_matrix)

    # Supplying only one of direction/origin must raise.
    with pytest.raises(RuntimeError):
        make_sensor(direction=[0, 1, 0])

    with pytest.raises(RuntimeError):
        make_sensor(origin=[0, 1, 0])

    # An unsupported film size must raise as well.
    with pytest.raises(RuntimeError):
        make_sensor(pixels=2)
Ejemplo n.º 2
0
    def _kernel_dict(self, sensor_id, spp):
        """Assemble the Mitsuba kernel dictionary for this perspective sensor.

        ``sensor_id`` becomes the plugin id; ``spp`` sets the sampler's
        sample count.
        """
        from mitsuba.core import ScalarTransform4f

        # Convert all length quantities to kernel units up front.
        length_units = uck.get("length")
        to_world = ScalarTransform4f.look_at(
            origin=self.origin.m_as(length_units),
            target=self.target.m_as(length_units),
            up=self.up,
        )

        return {
            "type": "perspective",
            "id": sensor_id,
            "far_clip": self.far_clip.m_as(length_units),
            "fov": self.fov.m_as(ureg.deg),
            "to_world": to_world,
            "sampler": {
                "type": "independent",
                "sample_count": spp,
            },
            "film": {
                "type": "hdrfilm",
                "width": self.film_resolution[0],
                "height": self.film_resolution[1],
                "pixel_format": "luminance",
                "component_format": "float32",
                "rfilter": {"type": "box"},
            },
        }
Ejemplo n.º 3
0
def create_camera(o,
                  d,
                  fov=34,
                  fov_axis="x",
                  s_open=1.5,
                  s_close=5,
                  aperture=0.1,
                  focus_dist=15):
    """Instantiate a thin-lens camera placed at ``o`` and aimed along ``d``.

    ``s_open``/``s_close`` are the shutter interval; ``aperture`` and
    ``focus_dist`` control depth of field.  Returns the loaded sensor object.
    """
    from mitsuba.core.xml import load_dict
    from mitsuba.core import ScalarTransform4f, ScalarVector3f

    # The look_at target sits one direction-vector away from the origin.
    target = [o[i] + d[i] for i in range(3)]

    return load_dict({
        "type": "thinlens",
        "near_clip": 1.0,
        "far_clip": 35.0,
        "focus_distance": focus_dist,
        "aperture_radius": aperture,
        "fov": fov,
        "fov_axis": fov_axis,
        "shutter_open": s_open,
        "shutter_close": s_close,
        "to_world": ScalarTransform4f.look_at(origin=o, target=target,
                                              up=[0, 1, 0]),
        "film": {
            "type": "hdrfilm",
            "width": 512,
            "height": 256,
        },
    })
Ejemplo n.º 4
0
    def shapes(self, ctx: KernelDictContext) -> t.Dict:
        """
        Return shape plugin specifications.

        Parameters
        ----------
        ctx : :class:`.KernelDictContext`
            A context data structure containing parameters relevant for kernel
            dictionary generation.

        Returns
        -------
        dict
            A dictionary suitable for merge with a :class:`.KernelDict`,
            holding one oriented disk per leaf in the cloud.
        """
        from mitsuba.core import ScalarTransform4f, coordinate_system

        kernel_length = uck.get("length")

        # Either reference the shared BSDF by id or inline its definition.
        if ctx.ref:
            bsdf = {"type": "ref", "id": f"bsdf_{self.id}"}
        else:
            bsdf = self.bsdfs(ctx=ctx)[f"bsdf_{self.id}"]

        leaves = zip(
            self.leaf_positions.m_as(kernel_length),
            self.leaf_orientations,
            self.leaf_radii.m_as(kernel_length),
        )

        shapes_dict = {}
        for i_leaf, (position, normal, radius) in enumerate(leaves):
            # Orient the unit disk to face along the leaf normal, then scale
            # it to the leaf radius.
            _, up = coordinate_system(normal)
            frame = ScalarTransform4f.look_at(
                origin=position, target=position + normal, up=up
            )
            shapes_dict[f"{self.id}_leaf_{i_leaf}"] = {
                "type": "disk",
                "bsdf": bsdf,
                "to_world": frame * ScalarTransform4f.scale(radius),
            }

        return shapes_dict
Ejemplo n.º 5
0
def camera(origin,
           lookat,
           up,
           fov,
           ext=None,
           near=0.01,
           far=1000.0,
           w=256,
           h=256,
           nsamples=4):
    """Build a Mitsuba 2 perspective-sensor XML snippet.

    Parameters
    ----------
    origin, lookat, up : sequence of 3 floats (or batched torch.Tensor)
        Camera frame; batched tensors are unwrapped to their first entry.
    fov : float
        Field of view in degrees.
    ext : optional
        Kept for backward compatibility; see the note below.
    near, far : float
        Clip plane distances.
    w, h : int
        Film resolution.
    nsamples : int
        Sampler sample count.

    Returns
    -------
    str
        An XML ``<sensor>`` element ready for ``load_string``.
    """
    # BUG FIX: the original built a ScalarTransform4f here (from `ext`, or
    # via look_at) and then unconditionally overwrote it with the XML string
    # below -- that computation was dead code and has been removed.  Only the
    # tensor-unwrapping side effect is kept, because the XML formatting below
    # reads the unwrapped components.  `ext` therefore no longer has any
    # effect other than skipping the unwrapping, matching the original's
    # observable behavior.
    if ext is None and isinstance(origin, torch.Tensor):
        origin = origin[0]
        lookat = lookat[0]
        up = up[0]

    film = """<film type="hdrfilm">
            <integer name="width" value="%i"/>
            <integer name="height" value="%i"/>

            <rfilter type="gaussian"/>
        </film>""" % (w, h)
    sampler = """<sampler type="independent">
    <integer name="sample_count" value="%i"/>
        </sampler>""" % (nsamples)

    transform = """<transform version="2.0.0" name="to_world">\n
            <lookat origin="%f, %f, %f" target="%f, %f, %f" up="%f, %f, %f"/>\n
        </transform>""" % (origin[0], origin[1], origin[2], lookat[0],
                           lookat[1], lookat[2], up[0], up[1], up[2])

    xmlstr = """<sensor version="2.0.0" type="perspective">\n
        %s\n
        <float name="near_clip" value="%f"/>\n
        <float name="far_clip" value="%f"/>\n
        <float name="fov" value="%f"/>\n
        %s\n
        %s\n
    </sensor>""" % (transform, near, far, fov, film, sampler)
    return xmlstr
Ejemplo n.º 6
0
def genXML(lx, ly, material, scale):
    """Load a simple test scene: one sphere with the given material, lit by a
    point emitter positioned from the scaled (lx, ly) coordinates."""
    # Camera slightly off-axis above the origin, looking down at the sphere.
    sensor = {
        "type": "perspective",
        "near_clip": 0.1,
        "far_clip": 1000.0,
        "to_world": ScalarTransform4f.look_at(origin=[0.0, 0.001, 1],
                                              target=[0, 0, 0],
                                              up=[0, 0, 1]),
        "myfilm": {
            "type": "hdrfilm",
            "rfilter": {"type": "box"},
            "width": 256,
            "height": 256,
        },
        "mysampler": {
            "type": "independent",
            "sample_count": 4,
        },
    }

    return xml.load_dict({
        "type": "scene",
        "myintegrator": {"type": "path"},
        "mysensor": sensor,
        "myemitter": {
            "type": "point",
            "intensity": 1.0,
            "position": [lx * scale, ly * scale, 1.1],
        },
        "myshape": {
            "type": "sphere",
            "radius": 0.2,
            "mybsdf": material,
        },
    })
Ejemplo n.º 7
0
    def _kernel_dict(self, sensor_id, spp):
        """Assemble the kernel dictionary for a ``distantflux`` sensor aimed
        along ``self.direction``; ``spp`` sets the sampler's sample count."""
        from mitsuba.core import ScalarTransform4f, ScalarVector3f, coordinate_system

        # Derive an arbitrary vector orthogonal to the viewing direction to
        # serve as the "up" axis of the sensor frame.
        _, up = coordinate_system(self.direction)

        result = {
            "type": "distantflux",
            "id": sensor_id,
            "to_world": ScalarTransform4f.look_at(
                origin=[0, 0, 0],
                target=ScalarVector3f(self.direction),
                up=up,
            ),
            "sampler": {
                "type": "independent",
                "sample_count": spp,
            },
            "film": {
                "type": "hdrfilm",
                "width": self.film_resolution[0],
                "height": self.film_resolution[1],
                "pixel_format": "luminance",
                "component_format": "float32",
                "rfilter": {"type": "box"},
            },
        }

        # Targeting information is optional and added only when configured.
        if self.target is not None:
            result["target"] = self.target.kernel_item()

        return result
Ejemplo n.º 8
0
def scene_dict(sensor_to_world=None):
    """Build the test scene: a rough-conductor rectangle lit by three
    directional RGB emitters, observed through an ``hdistant`` sensor.

    ``sensor_to_world`` overrides the sensor transform; by default the sensor
    sits at the origin looking along +Z with Y up.
    """
    if sensor_to_world is None:
        sensor_to_world = ScalarTransform4f.look_at(
            origin=[0, 0, 0],
            target=[0, 0, 1],
            up=[0, 1, 0],
        )

    def _directional(direction, rgb):
        # One directional emitter with a flat RGB irradiance spectrum.
        return {
            "type": "directional",
            "direction": direction,
            "irradiance": {"type": "rgb", "value": rgb},
        }

    return {
        "type": "scene",
        "shape": {
            "type": "rectangle",
            "bsdf": {"type": "roughconductor"},
        },
        # One emitter per color channel, directions from module-level globals.
        "illumination_r": _directional(direction_r, [1, 0, 0]),
        "illumination_g": _directional(direction_g, [0, 1, 0]),
        "illumination_b": _directional(direction_b, [0, 0, 1]),
        "hdistant": {
            "type": "hdistant",
            "to_world": sensor_to_world,
            "sampler": {
                "type": "independent",
                "sample_count": 3200,
            },
            "film": {
                "type": "hdrfilm",
                "width": film_resolution,
                "height": film_resolution,
                "pixel_format": "rgb",
                "component_format": "float32",
                "rfilter": {"type": "box"},
            },
        },
        "integrator": {"type": "path"},
    }
Ejemplo n.º 9
0
        #        "height": 240,
        #        "pixel_format": "luminance",
        #        "component_format": "float32",
        #    }
        #},
        "integrator": {
            "type": "path"
        },
    }


# Render the scene once per sensor orientation: a default frame and one with
# a tilted "up" vector that rotates the sensor about its viewing axis.
# NOTE(review): `name` is not used in the visible lines -- presumably consumed
# further down (e.g. for plot titles or output filenames); confirm.
for name, sensor_to_world in {
        "default":
        ScalarTransform4f.look_at(
            origin=[0, 0, 0],
            target=[0, 0, 1],
            up=[0, 1, 0],
        ),
        "rotated":
        ScalarTransform4f.look_at(
            origin=[0, 0, 0],
            target=[0, 0, 1],
            up=[1, 1, 0],
        ),
}.items():
    # Rebuild and reload the scene with the chosen sensor transform.
    scene = load_dict(scene_dict(sensor_to_world=sensor_to_world))
    sensor = scene.sensors()[0]
    scene.integrator().render(scene, sensor)

    # Plot recorded leaving radiance
    img = np.array(sensor.film().bitmap()).squeeze()
Ejemplo n.º 10
0
def test7_dict_scene(variant_scalar_rgb):
    """A scene built from a dict must be identical to the equivalent XML
    scene, and multi-sensor/emitter/shape dicts must collect everything."""
    from mitsuba.core import xml, ScalarTransform4f

    camera_to_world = ScalarTransform4f.look_at(origin=[1, 1, 1],
                                                target=[0, 0, 0],
                                                up=[0, 0, 1])

    s1 = xml.load_dict({
        "type": "scene",
        "myintegrator": {"type": "path"},
        "mysensor": {
            "type": "perspective",
            "near_clip": 1.0,
            "far_clip": 1000.0,
            "to_world": camera_to_world,
            "myfilm": {
                "type": "hdrfilm",
                "rfilter": {"type": "box"},
                "width": 1024,
                "height": 768,
            },
            "mysampler": {
                "type": "independent",
                "sample_count": 4,
            },
        },
        "myemitter": {"type": "constant"},
        "myshape": {"type": "sphere"},
    })

    s2 = xml.load_string("""
        <scene version='2.0.0'>
            <emitter type="constant"/>

            <integrator type='path'/>

            <sensor type="perspective">
                <float name="near_clip" value="1"/>
                <float name="far_clip" value="1000"/>

                <film type="hdrfilm">
                    <rfilter type="box"/>
                    <integer name="width" value="1024"/>
                    <integer name="height" value="768"/>
                </film>

                <sampler type="independent">
                    <integer name="sample_count" value="4"/>
                </sampler>

                <transform name="to_world">
                    <lookat origin="1, 1, 1"
                            target="0, 0, 0"
                            up    ="0, 0, 1"/>
                </transform>

            </sensor>

            <shape type="sphere"/>
    </scene>
    """)

    # Both construction paths must produce identical scene descriptions.
    assert str(s1) == str(s2)

    # Multiple plugins of each category must all be registered.
    scene = xml.load_dict({
        "type": "scene",
        "mysensor0": {"type": "perspective"},
        "mysensor1": {"type": "perspective"},
        "emitter0": {"type": "point"},
        "emitter1": {"type": "directional"},
        "emitter3": {"type": "constant"},
        "shape0": {"type": "sphere"},
        "shape1": {"type": "rectangle"},
        "shape2": {"type": "disk"},
        "shape3": {"type": "cylinder"},
    })

    assert len(scene.sensors()) == 2
    assert len(scene.emitters()) == 3
    assert len(scene.shapes()) == 4
Ejemplo n.º 11
0
def render(scene_dict,
           info,
           in_q,
           out_q,
           dones_sh,
           texture_sh,
           render_sh,
           uv_sh,
           loop_forever=True):
    """Worker loop: render `scene_dict` from random viewpoints, writing the
    RGBA render and a UV map for each texture index received on `in_q` into
    the shared tensors `render_sh` / `uv_sh`, flagging completion in
    `dones_sh`.

    `info` supplies 'scale_range', 'image_size' and 'samples'.  With
    `loop_forever=False` the worker processes a single item and returns.
    NOTE(review): `out_q` is unused in the visible body -- presumably part of
    a wider worker protocol; confirm before removing.
    """
    a = time()
    scene = load_dict(scene_dict)
    print(f"Scene loading time: {time() - a:.2f}s")
    # Differentiable parameter view of the first shape's BSDF; used below to
    # swap the diffuse texture per work item.
    bsdf_params = traverse(scene.shapes()[0].bsdf())
    integrator = scene.integrator()

    # Numpy versions of the shared tensors
    texture_sh_np = texture_sh.numpy()
    render_sh_np = render_sh.numpy()
    uv_sh_np = uv_sh.numpy()
    while True:
        # New random camera position on a sphere for every work item.
        camera_pos = sphere_sample(info['scale_range'])
        camera = load_dict({
            "type":
            "perspective",
            "to_world":
            ScalarTransform4f.look_at(origin=camera_pos,
                                      target=[0, 0, 0],
                                      up=[0, 0, 1]),
            "myfilm": {
                "type": "hdrfilm",
                "width": info['image_size'],
                "height": info['image_size']
            },
            "mysampler": {
                "type": "independent",
                "sample_count": info['samples']
            }
        })
        # Blocks until a texture index arrives; the texture is installed as
        # the BSDF's diffuse reflectance before rendering.
        ind = in_q.get()
        tex = texture_sh_np[ind]
        bsdf_params['diffuse_reflectance.data'] = tex
        integrator.render(scene, camera)
        film = camera.film()
        # split() yields named channel groups: [0] color, [1]/[2] the U and V
        # AOVs (as named by the integrator configured in scene_dict).
        im = film.bitmap(raw=False).split()
        im_arr = np.array(im[0][1].convert(Bitmap.PixelFormat.RGBA,
                                           Struct.Type.Float32,
                                           srgb_gamma=False))
        u = np.array(im[1][1].convert(Bitmap.PixelFormat.Y,
                                      Struct.Type.Float32,
                                      srgb_gamma=False))
        v = np.array(im[2][1].convert(Bitmap.PixelFormat.Y,
                                      Struct.Type.Float32,
                                      srgb_gamma=False))
        # Pack (u, v, 0, mask) where the mask marks pixels with any UV data.
        uv_arr = np.concatenate(
            [u, v, np.zeros_like(u), (u + v > 0).astype(np.float32)], axis=2)
        render_sh_np[ind, ...] = im_arr
        uv_sh_np[ind, ...] = uv_arr
        # Signal completion for this index via the shared flag tensor.
        dones_sh[ind] = True
        if not loop_forever:
            print("Done")
            return
Ejemplo n.º 12
0
# Zoomed view: move the camera close in, overriding the defaults that are
# presumably assigned earlier in the script -- confirm against full source.
if config.zoom:
    origin = [10, 10, 75]
    target = [0, 0, 60]

scene_dict = {
    "type": "scene",

    "sensor":{
        "type": "perspective",
        "fov": 39.3077,
        "near_clip": 0.1,
        "far_clip": 1000,
        "fov_axis": "smaller",

        "to_world": ScalarTransform4f.look_at(origin=origin,
                                            target=target,
                                            up=up),

        "sampler": {
            "type": "independent",
            "sample_count": 16,
            "seed": config.seed
        },

        "film": {
            "type": "hdrfilm",
            "width": config.film_width,
            "height": config.film_height,
            "rfilter": {
                "type": "gaussian"
            }