Example 1
    def parse_transform(self, node):
        from mitsuba.core import Transform4f
        import numpy as np

        trao = Transform4f()
        for child in node:
            # Default components: identity offsets for translate/rotate,
            # identity factors for scale.
            if child.tag in ('translate', 'rotate'):
                components = [0.0] * 3
            elif child.tag == 'scale':
                components = [1.0] * 3

            if 'x' in child.attrib:
                components[0] = float(self.replace_default(child.attrib['x']))
            if 'y' in child.attrib:
                components[1] = float(self.replace_default(child.attrib['y']))
            if 'z' in child.attrib:
                components[2] = float(self.replace_default(child.attrib['z']))
            if 'value' in child.attrib and child.tag in ('scale', 'translate', 'rotate'):
                value = float(self.replace_default(child.attrib['value']))
                components = [value] * 3

            # Compose on the left: children are applied to points in document order.
            if child.tag == 'translate':
                trao = Transform4f.translate(components) * trao
            elif child.tag == 'scale':
                trao = Transform4f.scale(components) * trao
            elif child.tag == 'rotate':
                angle = float(self.replace_default(child.attrib['angle']))
                trao = Transform4f.rotate(components, angle) * trao
            elif child.tag == 'matrix':
                mat = child.attrib['value']
                local_trao = np.array([float(val) for val in mat.split()])
                local_trao = np.reshape(local_trao, (4, 4))
                trao = Transform4f(local_trao.tolist()) * trao

        return trao
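
A detail worth calling out in the parser above: each child transform is left-multiplied onto the accumulator, so children act on points in document order (the first child is applied first). A minimal standalone sketch of that behaviour, assuming Mitsuba 2 with the scalar_rgb variant enabled and a hand-written XML fragment:

import xml.etree.ElementTree as ET
import mitsuba
mitsuba.set_variant('scalar_rgb')
from mitsuba.core import Transform4f

node = ET.fromstring("""
<transform name="to_world">
    <scale value="2"/>
    <translate x="1"/>
</transform>""")

trao = Transform4f()
for child in node:
    if child.tag == 'scale':
        trao = Transform4f.scale([float(child.attrib['value'])] * 3) * trao
    elif child.tag == 'translate':
        trao = Transform4f.translate([float(child.attrib.get('x', 0.0)),
                                      float(child.attrib.get('y', 0.0)),
                                      float(child.attrib.get('z', 0.0))]) * trao

# The point is scaled first (first child), then translated (second child):
# (1, 1, 1) -> (2, 2, 2) -> (3, 2, 2).
print(trao.transform_point([1.0, 1.0, 1.0]))
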
def test04_surface_area(variant_scalar_rgb):
    from mitsuba.core import xml, Transform4f

    # Uniformly scaled rectangle
    rect = xml.load_dict({
        "type": "rectangle",
        "to_world": Transform4f([[2, 0, 0, 0], [0, 2, 0, 0],
                                 [0, 0, 1, 0], [0, 0, 0, 1]])
    })
    assert ek.allclose(rect.surface_area(), 2.0 * 2.0 * 2.0 * 2.0)

    # Rectangle sheared along the Z-axis
    rect = xml.load_dict({
        "type": "rectangle",
        "to_world": Transform4f([[1, 0, 0, 0], [0, 1, 0, 0],
                                 [1, 0, 1, 0], [0, 0, 0, 1]])
    })
    assert ek.allclose(rect.surface_area(), 2.0 * 2.0 * ek.sqrt(2.0))

    # Rectangle sheared along the X-axis (shouldn't affect surface_area)
    rect = xml.load_dict({
        "type": "rectangle",
        "to_world": Transform4f([[1, 1, 0, 0], [0, 1, 0, 0],
                                 [0, 0, 1, 0], [0, 0, 0, 1]])
    })
    assert ek.allclose(rect.surface_area(), 2.0 * 2.0)
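
As an aside (not part of the test itself), the sqrt(2) factor in the sheared case can be reproduced by hand: the base rectangle spans [-1, 1]^2 (area 4), its transformed edge directions are the first two columns of the linear part of the transform, and the surface area is 4 times the norm of their cross product.

import numpy as np

# Linear part of the Z-shear used above: x' = x, y' = y, z' = x + z.
M = np.array([[1.0, 0.0, 0.0],
              [0.0, 1.0, 0.0],
              [1.0, 0.0, 1.0]])
dp_du, dp_dv = M[:, 0], M[:, 1]
area = 4.0 * np.linalg.norm(np.cross(dp_du, dp_dv))
print(area)  # 4 * sqrt(2), i.e. 2.0 * 2.0 * sqrt(2.0)
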
def test03_ray_intersect(variant_scalar_rgb):
    from mitsuba.core import xml, Ray3f, Transform4f

    # Scalar
    scene = xml.load_dict({
        "type": "scene",
        "foo": {
            "type": "rectangle",
            "to_world": Transform4f.scale((2.0, 0.5, 1.0))
        }
    })

    n = 15
    coords = ek.linspace(Float, -1, 1, n)
    rays = [
        Ray3f(o=[a, a, 5], d=[0, 0, -1], time=0.0, wavelengths=[])
        for a in coords
    ]
    si_scalar = []
    valid_count = 0
    for i in range(n):
        its_found = scene.ray_test(rays[i])
        si = scene.ray_intersect(rays[i])

        assert its_found == (abs(coords[i]) <= 0.5)
        assert si.is_valid() == its_found
        si_scalar.append(si)
        valid_count += its_found

    assert valid_count == 7

    try:
        mitsuba.set_variant('packet_rgb')
        from mitsuba.core import xml, Ray3f as Ray3fX
    except ImportError:
        pytest.skip("packet_rgb mode not enabled")

    # Packet
    scene_p = xml.load_dict({
        "type": "scene",
        "foo": {
            "type": "rectangle",
            "to_world": Transform4f.scale((2.0, 0.5, 1.0))
        }
    })

    packet = Ray3fX.zero(n)
    for i in range(n):
        packet[i] = rays[i]

    si_p = scene_p.ray_intersect(packet)
    its_found_p = scene_p.ray_test(packet)

    assert ek.all(si_p.is_valid() == its_found_p)

    for i in range(n):
        assert ek.allclose(si_p.t[i], si_scalar[i].t)
def test03_matmul(variant_scalar_rgb):
    from mitsuba.core import Transform4f, Matrix4f

    A = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12],
                  [13, 14, 15, 16]])
    B = np.array([[1, -2, 3, -4], [5, -6, 7, -8], [9, -10, 11, -12],
                  [13, -14, 15, -16]])
    At = Transform4f(A)
    Bt = Transform4f(B)
    assert ek.allclose(np.dot(A, B), (At * Bt).matrix)
    assert ek.allclose(np.dot(B, A), (Bt * At).matrix)
def test02_inverse(variant_scalar_rgb):
    from mitsuba.core import Transform4f, Matrix4f

    p = [1, 2, 3]

    def check_inverse(tr, expected):
        inv_trafo = tr.inverse()

        assert ek.allclose(inv_trafo.matrix, np.array(expected)), \
               '\n' + str(inv_trafo.matrix) + '\n' + str(expected)

        assert ek.allclose(inv_trafo.transform_point(tr.transform_point(p)),
                           p,
                           rtol=1e-4)

    # --- Scale
    trafo = Transform4f.scale([1.0, 10.0, 500.0])
    assert ek.all(trafo.matrix == np.array([[1, 0, 0, 0], [0, 10, 0, 0],
                                            [0, 0, 500, 0], [0, 0, 0, 1]]))
    assert ek.all(trafo.transform_point(p) == np.array([1, 20, 1500]))
    check_inverse(trafo, [[1, 0, 0, 0], [0, 1 / 10.0, 0, 0],
                          [0, 0, 1 / 500.0, 0], [0, 0, 0, 1]])
    # --- Translation
    trafo = Transform4f.translate([1, 0, 1.5])
    assert ek.all(trafo.matrix == np.array([
        [1, 0, 0, 1],
        [0, 1, 0, 0],
        [0, 0, 1, 1.5],
        [0, 0, 0, 1],
    ]))
    assert ek.all(trafo.transform_point(p) == np.array([2, 2, 4.5]))
    check_inverse(trafo,
                  [[1, 0, 0, -1], [0, 1, 0, 0], [0, 0, 1, -1.5], [0, 0, 0, 1]])
    # --- Perspective (x_fov, near clip, far clip)
    trafo = Transform4f.perspective(34.022, 1, 1000)
    # Spot-checked against a known transform from Mitsuba1.
    expected = np.array([[3.26860189, 0, 0, 0], [0, 3.26860189, 0, 0],
                         [0, 0, 1.001001, -1.001001], [0, 0, 1, 0]])
    assert ek.allclose(trafo.matrix, expected)
    check_inverse(trafo, la.inv(expected))

    for i in range(10):
        mtx = np.random.random((4, 4))
        inv_ref = la.inv(mtx)
        trafo = Transform4f(mtx)
        inv_val = trafo.inverse_transpose.T
        assert ek.all(trafo.matrix == mtx)
        assert la.norm(inv_ref - inv_val, 'fro') / la.norm(inv_val,
                                                           'fro') < 5e-4

        p = np.random.random((3, ))
        res = trafo.inverse().transform_point(trafo.transform_point(p))
        assert la.norm(res - p, 2) < 5e-3
def test08_animated_transforms(variant_scalar_rgb):
    """An AnimatedTransform can be built from a given Transform."""
    from mitsuba.core import Properties as Prop, Transform4f, AnimatedTransform

    p = Prop()
    p["trafo"] = Transform4f.translate([1, 2, 3])

    atrafo = AnimatedTransform()
    atrafo.append(0, Transform4f.translate([-1, -1, -2]))
    atrafo.append(1, Transform4f.translate([4, 3, 2]))
    p["atrafo"] = atrafo

    assert type(p["trafo"]) is Transform4f
    assert type(p["atrafo"]) is AnimatedTransform
Example 7
def test09_xml_decompose_transform(variant_scalar_rgb):
    from mitsuba.python.xml import dict_to_xml
    from mitsuba.core.xml import load_dict, load_file
    from mitsuba.core import Transform4f, Vector3f, Thread
    fr = Thread.thread().file_resolver()
    mts_root = str(fr[len(fr) - 1])
    filepath = os.path.join(mts_root, 'resources/data/scenes/dict09/dict.xml')
    fr.append(os.path.dirname(filepath))
    scene_dict = {
        'type': 'scene',
        'cam': {
            'type': 'perspective',
            'fov_axis': 'x',
            'fov': 35,
            'to_world': Transform4f.look_at(Vector3f(15, 42.3, 25),
                                            Vector3f(1.0, 0.0, 0.5),
                                            Vector3f(1.0, 0.0, 0.0))
        }
    }
    dict_to_xml(scene_dict, filepath)
    s1 = load_file(filepath)
    s2 = load_dict(scene_dict)
    vects = [Vector3f(0, 0, 1), Vector3f(0, 1, 0), Vector3f(1, 0, 0)]
    tr1 = s1.sensors()[0].world_transform().eval(0)
    tr2 = s2.sensors()[0].world_transform().eval(0)
    for vec in vects:
        assert tr1.transform_point(vec) == tr2.transform_point(vec)
    rmtree(os.path.split(filepath)[0])
def test03_ray_intersect_transform(variant_scalar_rgb):
    from mitsuba.core import xml, Ray3f, Transform4f

    for r in [1, 3]:
        s = xml.load_dict({
            "type": "sphere",
            "radius": r,
            "to_world": Transform4f.translate([0, 1, 0]) *
                        Transform4f.rotate([0, 1, 0], 30.0)
        })

        # grid size
        n = 21
        inv_n = 1.0 / n

        for x in range(n):
            for y in range(n):
                x_coord = r * (2 * (x * inv_n) - 1)
                y_coord = r * (2 * (y * inv_n) - 1)

                ray = Ray3f(o=[x_coord, y_coord + 1, -8],
                            d=[0.0, 0.0, 1.0],
                            time=0.0,
                            wavelengths=[])
                si_found = s.ray_test(ray)

                assert si_found == (x_coord ** 2 + y_coord ** 2 <= r * r) \
                    or ek.abs(x_coord ** 2 + y_coord ** 2 - r * r) < 1e-8

                if si_found:
                    si = s.ray_intersect(ray)
                    ray_u = Ray3f(ray)
                    ray_v = Ray3f(ray)
                    eps = 1e-4
                    ray_u.o += si.dp_du * eps
                    ray_v.o += si.dp_dv * eps
                    si_u = s.ray_intersect(ray_u)
                    si_v = s.ray_intersect(ray_v)
                    if si_u.is_valid():
                        du = (si_u.uv - si.uv) / eps
                        assert ek.allclose(du, [1, 0], atol=2e-2)
                    if si_v.is_valid():
                        dv = (si_v.uv - si.uv) / eps
                        assert ek.allclose(dv, [0, 1], atol=2e-2)
Example 9
def test03_ray_intersect(variant_scalar_rgb):
    from mitsuba.core import xml, Ray3f, Transform4f

    for r in [1, 2, 4]:
        for l in [1, 5]:
            s = xml.load_dict({
                "type": "scene",
                "foo": {
                    "type": "cylinder",
                    "to_world": Transform4f.scale((r, r, l))
                }
            })

            # grid size
            n = 10
            for x in ek.linspace(Float, -1, 1, n):
                for z in ek.linspace(Float, -1, 1, n):
                    x = 1.1 * r * x
                    z = 1.1 * l * z

                    ray = Ray3f(o=[x, -10, z],
                                d=[0, 1, 0],
                                time=0.0,
                                wavelengths=[])
                    si_found = s.ray_test(ray)
                    si = s.ray_intersect(ray)

                    assert si_found == si.is_valid()
                    assert si_found == ek.allclose(si.p[0]**2 + si.p[1]**2,
                                                   r**2)

                    if si_found:
                        ray = Ray3f(o=[x, -10, z],
                                    d=[0, 1, 0],
                                    time=0.0,
                                    wavelengths=[])

                        si = s.ray_intersect(ray)
                        ray_u = Ray3f(ray)
                        ray_v = Ray3f(ray)
                        eps = 1e-4
                        ray_u.o += si.dp_du * eps
                        ray_v.o += si.dp_dv * eps
                        si_u = s.ray_intersect(ray_u)
                        si_v = s.ray_intersect(ray_v)

                        dn = si.shape.normal_derivative(si, True, True)

                        if si_u.is_valid():
                            dp_du = (si_u.p - si.p) / eps
                            dn_du = (si_u.n - si.n) / eps
                            assert ek.allclose(dp_du, si.dp_du, atol=2e-2)
                            assert ek.allclose(dn_du, dn[0], atol=2e-2)
                        if si_v.is_valid():
                            dp_dv = (si_v.p - si.p) / eps
                            dn_dv = (si_v.n - si.n) / eps
                            assert ek.allclose(dp_dv, si.dp_dv, atol=2e-2)
                            assert ek.allclose(dn_dv, dn[1], atol=2e-2)
Example 10
    def transform_matrix(self, matrix):
        '''
        Apply the coordinate shift and convert to a Mitsuba Transform4f
        '''
        from mitsuba.core import Transform4f

        if len(matrix) == 4:
            mat = self.axis_mat @ matrix
        else:  # 3x3 matrix: promote it to 4x4 first
            mat = matrix.to_4x4()
        return Transform4f([list(x) for x in mat])
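
Transform4f also accepts a nested (row-major) list directly, so the same conversion works for any 4x4 array; a minimal sketch without the exporter context, assuming the scalar_rgb variant:

import numpy as np
import mitsuba
mitsuba.set_variant('scalar_rgb')
from mitsuba.core import Transform4f

mat = np.eye(4)
mat[:3, 3] = [1.0, 2.0, 3.0]                   # pure translation by (1, 2, 3)
trafo = Transform4f([list(row) for row in mat])
print(trafo.transform_point([0.0, 0.0, 0.0]))  # expected: (1, 2, 3)
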
Example 11
def testProjectionMatrix():
    from mitsuba.core import Transform4f

    far = 3
    near = 1
    fov = 60.0
    w = 2
    h = 2
    # 'lin' is the module under test here (not shown in this excerpt).
    ours = lin.perspective(fov, near, far)
    mts = Transform4f.perspective(fov, near, far)

    # Compare entry-wise; a signed sum of differences could hide errors that cancel.
    assert ek.allclose(ours, mts.matrix, atol=1e-4)
Example 12
def test_sample_direction(variant_scalar_spectral, spectrum_key, it_pos,
                          wavelength_sample, cutoff_angle, lookat):
    # Check the correctness of the sample_direction() method

    import math
    from mitsuba.core import sample_shifted, Transform4f
    from mitsuba.render import SurfaceInteraction3f

    cutoff_angle_rad = math.radians(cutoff_angle)
    beam_width_rad = cutoff_angle_rad * 0.75
    inv_transition_width = 1 / (cutoff_angle_rad - beam_width_rad)
    emitter, spectrum = create_emitter_and_spectrum(lookat, cutoff_angle,
                                                    spectrum_key)
    eval_t = 0.3
    trafo = Transform4f(emitter.world_transform().eval(eval_t))

    # Create a surface interaction
    it = SurfaceInteraction3f.zero()
    it.p = it_pos
    it.time = eval_t

    # Sample a wavelength from spectrum
    wav, spec = spectrum.sample(it, sample_shifted(wavelength_sample))
    it.wavelengths = wav

    # Direction from the position to the point emitter
    d = -it.p + lookat["pos"]
    dist = ek.norm(d)
    d /= dist

    # Calculate angle between lookat direction and ray direction
    angle = ek.acos((trafo.inverse().transform_vector(-d))[2])
    angle = ek.select(
        ek.abs(angle - beam_width_rad) < 1e-3, beam_width_rad, angle)
    angle = ek.select(
        ek.abs(angle - cutoff_angle_rad) < 1e-3, cutoff_angle_rad, angle)

    # Sample a direction from the emitter
    ds, res = emitter.sample_direction(it, [0, 0])

    # Evaluate the spectrum
    spec = spectrum.eval(it)
    spec = ek.select(
        angle <= beam_width_rad, spec,
        spec * ((cutoff_angle_rad - angle) * inv_transition_width))
    spec = ek.select(angle <= cutoff_angle_rad, spec, 0)

    assert ds.time == it.time
    assert ds.pdf == 1.0
    assert ds.delta
    assert ek.allclose(ds.d, d)
    assert ek.allclose(res, spec / (dist**2))
Example 13
def test02_sample_ray(variant_packet_spectral, origin, direction, aperture_rad,
                      focus_dist):
    # Check the correctness of the sample_ray() method

    from mitsuba.core import sample_shifted, sample_rgb_spectrum, warp, Vector3f, Transform4f

    cam = create_camera(origin,
                        direction,
                        aperture=aperture_rad,
                        focus_dist=focus_dist)

    time = 0.5
    wav_sample = [0.5, 0.33, 0.1]
    pos_sample = [[0.2, 0.1, 0.2], [0.6, 0.9, 0.2]]
    aperture_sample = [0.5, 0.5]

    ray, spec_weight = cam.sample_ray(time, wav_sample, pos_sample,
                                      aperture_sample)

    # Importance sample wavelength and weight
    wav, spec = sample_rgb_spectrum(sample_shifted(wav_sample))

    assert ek.allclose(ray.wavelengths, wav)
    assert ek.allclose(spec_weight, spec)
    assert ek.allclose(ray.time, time)
    assert ek.allclose(ray.o, origin)

    # Check that a [0.5, 0.5] position_sample and a [0.5, 0.5] aperture_sample
    # generate a ray that points in the camera direction

    ray, _ = cam.sample_ray(0, 0, [0.5, 0.5], [0.5, 0.5])
    assert ek.allclose(ray.d, direction, atol=1e-7)

    # ----------------------------------------
    # Check correctness of aperture sampling

    pos_sample = [0.5, 0.5]
    aperture_sample = [[0.9, 0.4, 0.2], [0.6, 0.9, 0.7]]
    ray, _ = cam.sample_ray(time, wav_sample, pos_sample, aperture_sample)

    ray_centered, _ = cam.sample_ray(time, wav_sample, pos_sample, [0.5, 0.5])

    trafo = Transform4f(cam.world_transform().eval(time).matrix.numpy())
    tmp = warp.square_to_uniform_disk_concentric(aperture_sample)
    aperture_v = trafo.transform_vector(aperture_rad *
                                        Vector3f(tmp.x, tmp.y, 0))

    # NOTE: here we assume near_clip = 1.0

    assert ek.allclose(ray.o, ray_centered.o + aperture_v)
    assert ek.allclose(ray.d,
                       ek.normalize(ray_centered.d * focus_dist - aperture_v))
def test02_bbox(variant_scalar_rgb):
    from mitsuba.core import xml, Vector3f, Transform4f

    sy = 2.5
    for sx in [1, 2, 4]:
        for translate in [
                Vector3f([1.3, -3.0, 5]),
                Vector3f([-10000, 3.0, 31])
        ]:
            s = xml.load_dict({
                "type": "rectangle",
                "to_world": Transform4f.translate(translate) *
                            Transform4f.scale((sx, sy, 1.0))
            })
            b = s.bbox()

            assert ek.allclose(s.surface_area(), sx * sy * 4)

            assert b.valid()
            assert ek.allclose(b.center(), translate)
            assert ek.allclose(b.min, translate - [sx, sy, 0.0])
            assert ek.allclose(b.max, translate + [sx, sy, 0.0])
Example 15
def test02_bbox(variant_scalar_rgb):
    from mitsuba.core import xml, Vector3f, Transform4f

    for l in [1, 5]:
        for r in [1, 2, 4]:
            s = xml.load_dict({
                "type": "cylinder",
                "to_world": Transform4f.scale((r, r, l))
            })
            b = s.bbox()

            assert ek.allclose(s.surface_area(), 2 * ek.pi * r * l)
            assert b.valid()
            assert ek.allclose(b.min, -Vector3f([r, r, 0.0]))
            assert ek.allclose(b.max, Vector3f([r, r, l]))
Example 16
def test_sample_ray(variant_packet_spectral, spectrum_key, wavelength_sample,
                    pos_sample, cutoff_angle, lookat):
    # Check the correctness of the sample_ray() method

    import math
    from mitsuba.core import warp, sample_shifted, Transform4f
    from mitsuba.render import SurfaceInteraction3f

    cutoff_angle_rad = math.radians(cutoff_angle)
    cos_cutoff_angle_rad = math.cos(cutoff_angle_rad)
    beam_width_rad = cutoff_angle_rad * 0.75
    inv_transition_width = 1 / (cutoff_angle_rad - beam_width_rad)
    emitter, spectrum = create_emitter_and_spectrum(lookat, cutoff_angle,
                                                    spectrum_key)
    eval_t = 0.3
    trafo = Transform4f(emitter.world_transform().eval(eval_t))

    # Sample a local direction and calculate local angle
    dir_sample = pos_sample  # not being used anyway
    local_dir = warp.square_to_uniform_cone(pos_sample, cos_cutoff_angle_rad)
    angle = ek.acos(local_dir[2])
    angle = ek.select(
        ek.abs(angle - beam_width_rad) < 1e-3, beam_width_rad, angle)
    angle = ek.select(
        ek.abs(angle - cutoff_angle_rad) < 1e-3, cutoff_angle_rad, angle)

    # Sample a ray (position, direction, wavelengths) from the emitter
    ray, res = emitter.sample_ray(eval_t, wavelength_sample, pos_sample,
                                  dir_sample)

    # Sample wavelengths on the spectrum
    it = SurfaceInteraction3f.zero()
    wav, spec = spectrum.sample(it, sample_shifted(wavelength_sample))
    it.wavelengths = wav
    spec = spectrum.eval(it)
    spec = ek.select(
        angle <= beam_width_rad, spec,
        spec * ((cutoff_angle_rad - angle) * inv_transition_width))
    spec = ek.select(angle <= cutoff_angle_rad, spec, 0)

    assert ek.allclose(
        res, spec / warp.square_to_uniform_cone_pdf(
            trafo.inverse().transform_vector(ray.d), cos_cutoff_angle_rad))
    assert ek.allclose(ray.time, eval_t)
    assert ek.all(local_dir.z >= cos_cutoff_angle_rad)
    assert ek.allclose(ray.wavelengths, wav)
    assert ek.allclose(ray.d, trafo.transform_vector(local_dir))
    assert ek.allclose(ray.o, lookat["pos"])
Example 17
    def get_diff_param(scene):
        # Create a differentiable hyperparameter
        diff_param = Float(0.0)
        ek.set_requires_gradient(diff_param)

        # Update vertices so that they depend on diff_param
        params = traverse(scene)
        t = Transform4f.translate(Vector3f(1.0) * diff_param)
        vertex_positions = params['light_shape.vertex_positions']
        vertex_positions_t = t.transform_point(vertex_positions)
        params['light_shape.vertex_positions'] = vertex_positions_t

        # Update the scene
        params.update()

        return diff_param
def test04_transform_point(variant_scalar_rgb):
    from mitsuba.core import Transform4f

    A = np.eye(4)
    A[3, 3] = 2
    assert ek.allclose(Transform4f(A).transform_point([2, 4, 6]), [1, 2, 3])

    try:
        mitsuba.set_variant("packet_rgb")
        from mitsuba.core import Transform4f as Transform4fX
    except ImportError:
        return

    assert ek.allclose(
        Transform4fX(A).transform_point([[2, 4], [4, 6], [6, 8]]),
        [[1, 2], [2, 3], [3, 4]])
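
The scalar assertion above is just the homogeneous divide: with A[3, 3] = 2 the transformed point has w = 2, so transform_point returns (2, 4, 6) / 2. A quick numpy check (my own sketch, not part of the test):

import numpy as np

A = np.eye(4)
A[3, 3] = 2
p = A @ np.array([2.0, 4.0, 6.0, 1.0])   # homogeneous point (x, y, z, 1)
print(p[:3] / p[3])                      # expected: [1. 2. 3.]
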
def test01_basics(variant_scalar_rgb):
    from mitsuba.core import Transform4f, Matrix4f

    assert (ek.allclose(Transform4f().matrix,
                        Transform4f(Matrix4f.identity()).matrix))
    assert (ek.allclose(Transform4f().inverse_transpose,
                        Transform4f(Matrix4f.identity()).inverse_transpose))

    assert (repr(
        Transform4f(
            Matrix4f([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12],
                      [13, 14, 15, 16]]))
    ) == "[[1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16]]"
            )

    m1 = [[0.295806, 0.19926196, 0.46541812, 0.31066751],
          [0.19926196, 0.4046916, 0.86071449, 0.14933838],
          [0.46541812, 0.86071449, 0.75046024, 1.90353475],
          [0.31066751, 0.14933838, 0.90353475, 0.30514665]]

    m2 = np.array(m1)
    m3 = Matrix4f(m1)
    m4 = Matrix4f(np.array(m1))

    assert (ek.allclose(m1, m2))
    # assert(ek.allclose(m1, m3)) # TODO not handled yet (row-major vs column-major)
    # assert(ek.allclose(m1, m4)) # TODO not handled yet (row-major vs column-major)
    assert (ek.allclose(m2, m3))
    assert (ek.allclose(m2, m4))
    assert (ek.allclose(m3, m4))

    trafo = Transform4f(m1)

    assert (ek.allclose(trafo.matrix, Transform4f(m2).matrix))
    assert (ek.allclose(trafo.matrix, Transform4f(m3).matrix))
    assert (ek.allclose(trafo.matrix, Transform4f(m4).matrix))

    assert la.norm(trafo.matrix - m2) < 1e-5
    assert la.norm(trafo.inverse_transpose - la.inv(m2).T) < 1e-5
def test06_transform_normal(variant_scalar_rgb):
    from mitsuba.core import Transform4f

    A = np.eye(4)
    A[3, 3] = 2
    A[1, 2] = .5
    A[1, 1] = .5
    assert ek.allclose(Transform4f(A).transform_normal([2, 4, 6]), [2, 8, 2])

    try:
        mitsuba.set_variant("packet_rgb")
        from mitsuba.core import Transform4f as Transform4fX
    except ImportError:
        return

    assert ek.allclose(
        Transform4fX(A).transform_normal([[2, 4], [4, 6], [6, 8]]),
        [[2, 4], [8, 12], [2, 2]])
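
The expected normal in the scalar assertion can be reproduced by hand: normals transform with the inverse transpose of the linear 3x3 part. A short numpy check (my own sketch, not part of the test):

import numpy as np

# Linear 3x3 part of A above: y' = 0.5*y + 0.5*z.
M = np.array([[1.0, 0.0, 0.0],
              [0.0, 0.5, 0.5],
              [0.0, 0.0, 1.0]])
n = np.array([2.0, 4.0, 6.0])
print(np.linalg.inv(M).T @ n)            # expected: [2. 8. 2.]
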
Example 21
def test01_create(variant_scalar_rgb, origin, direction, s_open, s_time):
    from mitsuba.core import BoundingBox3f, Vector3f, Transform4f

    camera = create_camera(origin,
                           direction,
                           s_open=s_open,
                           s_close=s_open + s_time)

    assert ek.allclose(camera.near_clip(), 1)
    assert ek.allclose(camera.far_clip(), 35)
    assert ek.allclose(camera.focus_distance(), 15)
    assert ek.allclose(camera.shutter_open(), s_open)
    assert ek.allclose(camera.shutter_open_time(), s_time)
    assert not camera.needs_aperture_sample()
    assert camera.bbox() == BoundingBox3f(origin, origin)
    assert ek.allclose(
        camera.world_transform().eval(0).matrix,
        Transform4f.look_at(origin,
                            Vector3f(origin) + direction, [0, 1, 0]).matrix)
Example 22
def test03_sample_ray_diff(variant_packet_spectral, origin, direction,
                           aperture_rad, focus_dist):
    # Check the correctness of the sample_ray_differential() method

    from mitsuba.core import sample_shifted, sample_rgb_spectrum, warp, Vector3f, Transform4f

    cam = create_camera(origin,
                        direction,
                        aperture=aperture_rad,
                        focus_dist=focus_dist)

    time = 0.5
    wav_sample = [0.5, 0.33, 0.1]
    pos_sample = [[0.2, 0.1, 0.2], [0.6, 0.9, 0.2]]
    aperture_sample = [0.5, 0.5]

    ray, spec_weight = cam.sample_ray_differential(time, wav_sample,
                                                   pos_sample, aperture_sample)

    # Importance sample wavelength and weight
    wav, spec = sample_rgb_spectrum(sample_shifted(wav_sample))

    assert ek.allclose(ray.wavelengths, wav)
    assert ek.allclose(spec_weight, spec)
    assert ek.allclose(ray.time, time)
    assert ek.allclose(ray.o, origin)

    # ----------------------------------------
    # Check that the derivatives are orthogonal

    assert ek.allclose(ek.dot(ray.d_x - ray.d, ray.d_y - ray.d), 0, atol=1e-7)

    # Check that a [0.5, 0.5] position_sample and a [0.5, 0.5] aperture_sample
    # generate a ray that points in the camera direction

    ray_center, _ = cam.sample_ray_differential(0, 0, [0.5, 0.5], [0.5, 0.5])
    assert ek.allclose(ray_center.d, direction, atol=1e-7)

    # ----------------------------------------
    # Check correctness of the ray derivatives

    aperture_sample = [[0.9, 0.4, 0.2], [0.6, 0.9, 0.7]]
    ray_center, _ = cam.sample_ray_differential(0, 0, [0.5, 0.5],
                                                aperture_sample)

    # Deltas in screen space
    dx = 1.0 / cam.film().crop_size().x
    dy = 1.0 / cam.film().crop_size().y

    # Sample the rays by offsetting the position_sample with the deltas (aperture centered)
    ray_dx, _ = cam.sample_ray_differential(0, 0, [0.5 + dx, 0.5],
                                            aperture_sample)
    ray_dy, _ = cam.sample_ray_differential(0, 0, [0.5, 0.5 + dy],
                                            aperture_sample)

    assert ek.allclose(ray_dx.d, ray_center.d_x)
    assert ek.allclose(ray_dy.d, ray_center.d_y)

    # --------------------------------------
    # Check correctness of aperture sampling

    pos_sample = [0.5, 0.5]
    aperture_sample = [[0.9, 0.4, 0.2], [0.6, 0.9, 0.7]]
    ray, _ = cam.sample_ray(time, wav_sample, pos_sample, aperture_sample)

    ray_centered, _ = cam.sample_ray(time, wav_sample, pos_sample, [0.5, 0.5])

    trafo = Transform4f(cam.world_transform().eval(time).matrix.numpy())
    tmp = warp.square_to_uniform_disk_concentric(aperture_sample)
    aperture_v = trafo.transform_vector(aperture_rad *
                                        Vector3f(tmp.x, tmp.y, 0))

    # NOTE: here we assume near_clip = 1.0

    assert ek.allclose(ray.o, ray_centered.o + aperture_v)
    assert ek.allclose(ray.d,
                       ek.normalize(ray_centered.d * focus_dist - aperture_v))
Example 23
def test03_ray_intersect(variant_scalar_rgb):
    from mitsuba.core import xml, Ray3f, Vector3f, Transform4f
    from mitsuba.render import HitComputeFlags

    for r in [1, 3, 5]:
        for translate in [
                Vector3f([0.0, 0.0, 0.0]),
                Vector3f([1.0, -5.0, 0.0])
        ]:
            s = xml.load_dict({
                "type": "scene",
                "foo": {
                    "type": "disk",
                    "to_world": Transform4f.translate(translate) *
                                Transform4f.scale((r, r, 1.0))
                }
            })

            # grid size
            n = 10
            for x in ek.linspace(Float, -1, 1, n):
                for y in ek.linspace(Float, -1, 1, n):
                    x = 1.1 * r * (x - translate[0])
                    y = 1.1 * r * (y - translate[1])

                    ray = Ray3f(o=[x, y, -10],
                                d=[0, 0, 1],
                                time=0.0,
                                wavelengths=[])
                    si_found = s.ray_test(ray)

                    assert si_found == (x**2 + y**2 <= r * r)

                    if si_found:
                        ray = Ray3f(o=[x, y, -10],
                                    d=[0, 0, 1],
                                    time=0.0,
                                    wavelengths=[])

                        si = s.ray_intersect(
                            ray, HitComputeFlags.All | HitComputeFlags.dNSdUV)
                        ray_u = Ray3f(ray)
                        ray_v = Ray3f(ray)
                        eps = 1e-4
                        ray_u.o += si.dp_du * eps
                        ray_v.o += si.dp_dv * eps
                        si_u = s.ray_intersect(ray_u)
                        si_v = s.ray_intersect(ray_v)

                        if si_u.is_valid():
                            dp_du = (si_u.p - si.p) / eps
                            dn_du = (si_u.n - si.n) / eps
                            assert ek.allclose(dp_du, si.dp_du, atol=2e-2)
                            assert ek.allclose(dn_du, si.dn_du, atol=2e-2)
                        if si_v.is_valid():
                            dp_dv = (si_v.p - si.p) / eps
                            dn_dv = (si_v.n - si.n) / eps
                            assert ek.allclose(dp_dv, si.dp_dv, atol=2e-2)
                            assert ek.allclose(dn_dv, si.dn_dv, atol=2e-2)
Example 24
def rotate_vector(v, axis, angle):
    return Transform4f.rotate(axis, angle).transform_vector(v)
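
A small usage sketch for the helper above (my own example; assumes Mitsuba 2 with the scalar_rgb variant, and note that Transform4f.rotate takes the angle in degrees):

import mitsuba
mitsuba.set_variant('scalar_rgb')
from mitsuba.core import Transform4f

def rotate_vector(v, axis, angle):
    return Transform4f.rotate(axis, angle).transform_vector(v)

# Rotating the x-axis 90 degrees around z gives (approximately) the y-axis.
print(rotate_vector([1.0, 0.0, 0.0], [0.0, 0.0, 1.0], 90.0))
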
Example 25
def test15_differentiable_surface_interaction_params_forward(
        variant_gpu_autodiff_rgb):
    from mitsuba.core import xml, Float, Ray3f, Vector2f, Vector3f, UInt32, Transform4f
    from mitsuba.python.util import traverse

    # Convert flat array into a vector of arrays (will be included in next enoki release)
    def ravel(buf, dim=3):
        idx = dim * UInt32.arange(ek.slices(buf) // dim)
        if dim == 2:
            return Vector2f(ek.gather(buf, idx), ek.gather(buf, idx + 1))
        elif dim == 3:
            return Vector3f(ek.gather(buf, idx), ek.gather(buf, idx + 1),
                            ek.gather(buf, idx + 2))

    # Return contiguous flattened array (will be included in next enoki release)
    def unravel(source, target, dim=3):
        idx = UInt32.arange(ek.slices(source))
        for i in range(dim):
            ek.scatter(target, source[i], dim * idx + i)

    scene = xml.load_string('''
        <scene version="2.0.0">
            <shape type="obj" id="rect">
                <string name="filename" value="resources/data/common/meshes/rectangle.obj"/>
            </shape>
        </scene>
    ''')

    params = traverse(scene)
    shape_param_key = 'rect.vertex_positions_buf'
    positions_buf = params[shape_param_key]
    positions_initial = ravel(positions_buf)

    # Create differential parameter to be optimized
    diff_vector = Vector3f(0.0)
    ek.set_requires_gradient(diff_vector)

    # Apply the transformation to mesh vertex position and update scene
    def apply_transformation(trafo_fn):
        trafo = trafo_fn(diff_vector)
        new_positions = trafo.transform_point(positions_initial)
        unravel(new_positions, params[shape_param_key])
        params.set_dirty(shape_param_key)
        params.update()

    # ---------------------------------------
    # Test translation

    ray = Ray3f(Vector3f(-0.2, -0.3, -10.0), Vector3f(0.0, 0.0, 1.0), 0, [])
    pi = scene.ray_intersect_preliminary(ray)

    # If the vertices are shifted along the z-axis, so is si.t
    apply_transformation(lambda v: Transform4f.translate(v))
    si = pi.compute_surface_interaction(ray)
    ek.forward(diff_vector.z)
    assert ek.allclose(ek.gradient(si.t), 1)

    # If the vertices are shifted along the z-axis, so is si.p
    apply_transformation(lambda v: Transform4f.translate(v))
    si = pi.compute_surface_interaction(ray)
    ek.forward(diff_vector.z)
    assert ek.allclose(ek.gradient(si.p), [0.0, 0.0, 1.0])

    # If the vertices are shifted along the x-axis, so is si.uv (times 0.5)
    apply_transformation(lambda v: Transform4f.translate(v))
    si = pi.compute_surface_interaction(ray)
    ek.forward(diff_vector.x)
    assert ek.allclose(ek.gradient(si.uv), [-0.5, 0.0])

    # If the vertices are shifted along the y-axis, so is si.uv (times 0.5)
    apply_transformation(lambda v: Transform4f.translate(v))
    si = pi.compute_surface_interaction(ray)
    ek.forward(diff_vector.y)
    assert ek.allclose(ek.gradient(si.uv), [0.0, -0.5])

    # ---------------------------------------
    # Test rotation

    ray = Ray3f(Vector3f(-0.99999, -0.99999, -10.0), Vector3f(0.0, 0.0, 1.0),
                0, [])
    pi = scene.ray_intersect_preliminary(ray)

    # If the vertices are rotated around the center, so is si.uv (times 0.5)
    apply_transformation(lambda v: Transform4f.rotate([0, 0, 1], v.x))
    si = pi.compute_surface_interaction(ray)
    ek.forward(diff_vector.x)
    du = 0.5 * ek.sin(2 * ek.pi / 360.0)
    assert ek.allclose(ek.gradient(si.uv), [-du, du], atol=1e-6)