Example #1
    def __init__(self, props):
        super(MyPointEmitter, self).__init__(props)
        # Emitter.__init__(self, props)
        # Bug - have to query it once
        self.m_intensity = props['intensity']  # assumes a texture is returned
        # self.m_intensity = Texture.D65(1)
        print(self.m_intensity)
        self.m_needs_sample_3 = False
        self.m_flags = +EmitterFlags.DeltaPosition

        # should ideally be inherited from the C++ parent class
        self.m_world_transform = AnimatedTransform(props["to_world"])
Example #2
            def animated_transform(self, motion):
                # Two keyframes with identical matrices collapse to a single static transform
                if len(motion) == 2 and motion[0][1] == motion[1][1]:
                    del motion[1]

                if len(motion) > 1:
                    transform = AnimatedTransform()

                    for (t, m) in motion:
                        transform.appendTransform(t, self.transform_matrix(m))

                else:
                    transform = self.transform_matrix(motion[0][1])

                return transform
Example #3
def create_animated_sensors(trajectory: List[Transform],
                            shutter_time: float = 1.,
                            width: int = 1920,
                            height: int = 1440,
                            fov: float = 45.,
                            num_samples: int = 256) -> List[Sensor]:
    """
    Create an animated sensor (Applies motion blur to the rendered image)
    :param trajectory: The trajectory containing all Transforms
    :param shutter_time: The shutter time to be used to set the Mitsuba setShutterOpenTime
    :param width: Width of the generated image
    :param height: Height of the generated image
    :param fov: The sensor field of view
    :param num_samples: Number of samples per pixel (controls the noise in the resulting image)
    :return: A list of Mitsuba animated sensors
    """
    animated_sensors = []
    for transform_idx in range(len(trajectory)):
        atrafo = AnimatedTransform()
        atrafo.appendTransform(0, trajectory[transform_idx])
        atrafo.appendTransform(
            1, trajectory[min(transform_idx + 1,
                              len(trajectory) - 1)])
        atrafo.sortAndSimplify()
        sensor = create_sensor_from_transform(atrafo, width, height, fov,
                                              num_samples)
        sensor.setShutterOpen(0)
        sensor.setShutterOpenTime(shutter_time)
        animated_sensors.append(sensor)

    return animated_sensors
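A minimal usage sketch, assuming the Mitsuba 0.x Python bindings targeted above and the create_sensor_from_transform helper referenced in the loop; the trajectory values are arbitrary:

from mitsuba.core import Transform, Point, Vector

# Three keyframes of a camera sliding along the z-axis while looking at the origin.
trajectory = [
    Transform.lookAt(Point(0, 1, -5 + i), Point(0, 0, 0), Vector(0, 1, 0))
    for i in range(3)
]

sensors = create_animated_sensors(trajectory, shutter_time=0.5,
                                  width=640, height=480, fov=35.,
                                  num_samples=64)
# Each sensor blends its own keyframe with the next one over the shutter interval.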
Example #5
            def animated_lookAt(self, motion):
                if len(motion) == 2 and motion[0][1] == motion[1][1]:
                    del motion[1]

                if len(motion) > 1:
                    transform = AnimatedTransform()

                    for (t, (origin, target, up, scale)) in motion:
                        transform.appendTransform(t, self.transform_lookAt(origin, target, up, scale))

                else:
                    (origin, target, up, scale) = motion[0][1]
                    transform = self.transform_lookAt(origin, target, up, scale)

                return transform
Example #6
            def animated_lookAt(self, motion):
                if len(motion) == 2 and motion[0][1] == motion[1][1]:
                    del motion[1]

                if len(motion) > 1:
                    transform = AnimatedTransform()

                    for (t, (origin, target, up, scale)) in motion:
                        transform.appendTransform(
                            t, self.transform_lookAt(origin, target, up,
                                                     scale))

                else:
                    (origin, target, up, scale) = motion[0][1]
                    transform = self.transform_lookAt(origin, target, up,
                                                      scale)

                return transform
Example #7
def test08_animated_transforms(variant_scalar_rgb):
    """An AnimatedTransform can be built from a given Transform."""
    from mitsuba.core import Properties as Prop, Transform4f, AnimatedTransform

    p = Prop()
    p["trafo"] = Transform4f.translate([1, 2, 3])

    atrafo = AnimatedTransform()
    atrafo.append(0, Transform4f.translate([-1, -1, -2]))
    atrafo.append(1, Transform4f.translate([4, 3, 2]))
    p["atrafo"] = atrafo

    assert type(p["trafo"]) is Transform4f
    assert type(p["atrafo"]) is AnimatedTransform
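A follow-up sketch in the same style, assuming the keyframe interpolation behaviour of AnimatedTransform.eval (the translation is interpolated between the appended keyframes):

def test_animated_transform_eval_sketch(variant_scalar_rgb):
    """Sketch: an AnimatedTransform built from keyframes can be evaluated at intermediate times."""
    import enoki as ek
    from mitsuba.core import AnimatedTransform, Point3f, Transform4f

    atrafo = AnimatedTransform()
    atrafo.append(0, Transform4f.translate([0, 0, 0]))
    atrafo.append(1, Transform4f.translate([2, 0, 0]))

    # Halfway through the animation the origin should land at the midpoint
    # of the two keyframe translations.
    mid = atrafo.eval(0.5)
    assert ek.allclose(mid * Point3f(0, 0, 0), [1, 0, 0])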
Example #8
# Imports assumed for a Mitsuba 2 Python emitter plugin; a variant (e.g.
# 'scalar_spectral') must be set via mitsuba.set_variant() before they resolve,
# and the exact module layout may differ between Mitsuba 2 versions.
import enoki as ek
from math import pi as Pi
from mitsuba.core import AnimatedTransform, Point3f, Ray3f, warp
from mitsuba.render import (Emitter, EmitterFlags, SurfaceInteraction3f,
                            DirectionSample3f)


class MyPointEmitter(Emitter):
    """docstring for MyPointEmitter"""
    def __init__(self, props):
        super(MyPointEmitter, self).__init__(props)
        # Emitter.__init__(self, props)
        # Bug - have to query it once
        self.m_intensity = props['intensity']  # assumes a texture is returned
        # self.m_intensity = Texture.D65(1)
        print(self.m_intensity)
        self.m_needs_sample_3 = False
        self.m_flags = +EmitterFlags.DeltaPosition

        # should ideally be inherited from the C++ parent class
        self.m_world_transform = AnimatedTransform(props["to_world"])

    def sample_ray(
            self,
            time,
            sample1,  # wavelength
            sample2,  # pos
            sample3,  # dir
            active):
        wavelengths, spec_weight = self.m_intensity.sample(
            SurfaceInteraction3f(), ek.arange(sample1), active)
        trafo = self.m_world_transform.eval(time)
        ray = Ray3f(trafo * Point3f(0), warp.square_to_uniform_sphere(sample3),
                    time, wavelengths)

        # print(spec_weight.class_().name())
        return (ray, spec_weight * 4.0 * Pi)

    def sample_direction(self, ref, sample, active):
        trafo = self.m_world_transform.eval(ref.time, active)

        ds = DirectionSample3f()
        ds.p = trafo * Point3f(0)  # emitter position: translation part of the transform
        ds.n = 0
        ds.uv = 0
        ds.time = ref.time
        ds.pdf = 1
        ds.delta = True
        ds.d = ds.p - ref.p
        ds.dist = ek.norm(ds.d)
        inv_dist = ek.rcp(ds.dist)
        ds.d *= inv_dist

        si = SurfaceInteraction3f()
        si.wavelengths = ref.wavelengths

        spec = self.m_intensity.eval(si, active) * (inv_dist * inv_dist)
        return (ds, spec)

    def pdf_direction(self, ref, ds, active):
        return 0

    def eval(self, si, active):
        return 0

    def bbox(self):
        return self.m_world_transform.translation_bounds()

    def to_string(self):
        mystr = "MyPointLight\n"
        mystr += "  world_transform = " + str(self.m_world_transform) + "\n"
        mystr += "  intensity = " + str(self.m_intensity) + "\n"
        return mystr
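A hypothetical loading sketch for the class above. It assumes a Mitsuba 2 build that exposes mitsuba.render.register_emitter; the plugin name and the scene snippet are illustrative assumptions, not taken from the example:

# Assumes a variant was already selected via mitsuba.set_variant(...) before the
# imports above, and that this build exposes mitsuba.render.register_emitter.
from mitsuba.core.xml import load_string
from mitsuba.render import register_emitter

# Expose the Python class to the scene loader under a plugin name.
register_emitter("mypointemitter", lambda props: MyPointEmitter(props))

# Declare it the same way a built-in point emitter would be declared.
emitter = load_string("""
    <emitter version="2.0.0" type="mypointemitter">
        <spectrum name="intensity" value="1.0"/>
        <transform name="to_world">
            <translate x="0" y="1" z="0"/>
        </transform>
    </emitter>
""")
print(emitter)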