def test02_eval_grad(variant_scalar_rgb):
    # Tests evaluating the texture gradient under different rotation
    from mitsuba.render import SurfaceInteraction3f
    from mitsuba.core.xml import load_string
    from mitsuba.core import Vector2f
    import numpy as np
    import enoki as ek
    delta = 1e-4
    si = SurfaceInteraction3f()
    for u01 in np.random.rand(10, 1):
        angle = 360.0*u01[0]
        bitmap = load_string("""
        <texture type="bitmap" version="2.0.0">
            <string name="filename" value="resources/data/common/textures/noise_8x8.png"/>
            <transform name="to_uv">
                <rotate angle="%f"/>
            </transform>
        </texture>""" % angle).expand()[0]
        for uv in np.random.rand(10, 2):
            si.uv = Vector2f(uv)
            f = bitmap.eval_1(si)
            si.uv = uv + Vector2f(delta, 0)
            fu = bitmap.eval_1(si)
            si.uv = uv + Vector2f(0, delta)
            fv = bitmap.eval_1(si)
            gradient_finite_difference = Vector2f((fu - f)/delta, (fv - f)/delta)
            gradient_analytic = bitmap.eval_1_grad(si)
            assert ek.allclose(0, ek.abs(gradient_finite_difference / gradient_analytic - 1.0), atol=1e-4)

def test02_unit_frame(variant_scalar_rgb):
    import enoki as ek
    from mitsuba.core import math, Frame3f, Vector2f, Vector3f

    for theta in [30 * math.Pi / 180, 95 * math.Pi / 180]:
        phi = 73 * math.Pi / 180
        sin_theta, cos_theta = ek.sin(theta), ek.cos(theta)
        sin_phi, cos_phi = ek.sin(phi), ek.cos(phi)

        v = Vector3f(
            cos_phi * sin_theta,
            sin_phi * sin_theta,
            cos_theta
        )
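        # Build an orthonormal frame around an arbitrary unit axis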
        f = Frame3f(Vector3f(1.0, 2.0, 3.0) / ek.sqrt(14))

        v2 = f.to_local(v)
        v3 = f.to_world(v2)

        assert ek.allclose(v3, v)

        assert ek.allclose(Frame3f.cos_theta(v), cos_theta)
        assert ek.allclose(Frame3f.sin_theta(v), sin_theta)
        assert ek.allclose(Frame3f.cos_phi(v), cos_phi)
        assert ek.allclose(Frame3f.sin_phi(v), sin_phi)
        assert ek.allclose(Frame3f.cos_theta_2(v), cos_theta * cos_theta)
        assert ek.allclose(Frame3f.sin_theta_2(v), sin_theta * sin_theta)
        assert ek.allclose(Frame3f.cos_phi_2(v), cos_phi * cos_phi)
        assert ek.allclose(Frame3f.sin_phi_2(v), sin_phi * sin_phi)
        assert ek.allclose(Vector2f(Frame3f.sincos_phi(v)), [sin_phi, cos_phi])
        assert ek.allclose(Vector2f(Frame3f.sincos_phi_2(v)), [sin_phi * sin_phi, cos_phi * cos_phi])
def generate_masks(owners, scene):
    # Generate rays that are used for both the binary mask and the distance mask
    from mitsuba.core import Float, UInt32, UInt64, Vector2f
    from mitsuba.core.xml import load_dict
    import enoki as ek
    sensor = scene.sensors()[0]
    film = sensor.film()
    sampler = sensor.sampler()
    film_size = film.crop_size()
    spp = 3

    # Seed the sampler
    total_sample_count = ek.hprod(film_size) * spp

    if sampler.wavefront_size() != total_sample_count:
        sampler.seed(UInt64.arange(total_sample_count))

    # Enumerate discrete sample & pixel indices, and uniformly sample
    # positions within each pixel.
    pos = ek.arange(UInt32, total_sample_count)
    pos //= spp
    scale = Vector2f(1.0 / film_size[0], 1.0 / film_size[1])
    pos = Vector2f(Float(pos % int(film_size[0])),
                   Float(pos // int(film_size[0])))

    pos += sampler.next_2d()

    # Sample rays starting from the camera sensor
    rays, weights = sensor.sample_ray_differential(time=0,
                                                   sample1=sampler.next_1d(),
                                                   sample2=pos * scale,
                                                   sample3=0)

    # The masks dictionary contains one mask per parameter owner
    masks = dict()
    # For each parameter owner, we generate a mask
    for key in owners:
        scene_dict = dict()
        scene_dict["type"] = "scene"
        scene_dict["camera"] = scene.sensors()[0]
        for item in owners[key]:
            scene_dict[item.id()] = item
        # dummy_scene contains only the camera and the objects associated with this owner
        dummy_scene = load_dict(scene_dict)
        surface_interaction = dummy_scene.ray_intersect(rays)

        # Binary mask: 1 where the ray hits geometry belonging to this owner, 0 otherwise
        result = surface_interaction.t + 1.0
        result[ek.isfinite(surface_interaction.t)] = 1
        result[~surface_interaction.is_valid()] = 0

        masks[key] = result
    return masks
def ravel(buf, dim=3):
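    """Gather a flat buffer laid out as [x0, y0, (z0,) x1, y1, ...] into a Vector2f/Vector3f."""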
    idx = dim * UInt32.arange(ek.slices(buf) // dim)
    if dim == 2:
        return Vector2f(ek.gather(buf, idx), ek.gather(buf, idx + 1))
    elif dim == 3:
        return Vector3f(ek.gather(buf, idx), ek.gather(buf, idx + 1),
                        ek.gather(buf, idx + 2))
def check_vectorization(func_str, wrapper=(lambda f: lambda x: f(x)), resolution=2):
    """
    Helper routine which compares evaluations of the vectorized and
    non-vectorized version of a warping routine
    """

    import importlib
    import pytest
    import mitsuba
    import enoki as ek
    mitsuba.set_variant('scalar_rgb')
    func     = wrapper(getattr(importlib.import_module('mitsuba.core').warp, func_str))
    pdf_func = wrapper(getattr(importlib.import_module('mitsuba.core').warp, func_str + "_pdf"))

    try:
        mitsuba.set_variant('packet_rgb')
    except ImportError:
        pytest.skip("packet_rgb mode not enabled")

    func_vec     = wrapper(getattr(importlib.import_module('mitsuba.core').warp, func_str))
    pdf_func_vec = wrapper(getattr(importlib.import_module('mitsuba.core').warp, func_str + "_pdf"))
    from mitsuba.core import Float, Vector2f

    # Generate resolution^2 test points on a 2D grid
    t = ek.linspace(Float, 1e-3, 1, resolution)
    x, y = ek.meshgrid(t, t)
    samples = Vector2f(x, y)

    # Run the sampling routine
    result = func_vec(samples)

    # Evaluate the PDF
    pdf = pdf_func_vec(result)

    # Check against the scalar version
    for i in range(resolution):
        assert ek.allclose(result.numpy()[i, :], func(samples.numpy()[i, :]), atol=1e-4)
        assert ek.allclose(pdf.numpy()[i], pdf_func(result.numpy()[i, :]), atol=1e-6)
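
# Usage sketch (hypothetical call, assuming the standard warp routines): 'func_str' names a
# routine in mitsuba.core.warp, so e.g. check_vectorization("square_to_uniform_disk") compares
# the scalar and packet versions of warp.square_to_uniform_disk and warp.square_to_uniform_disk_pdf.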
    def tabulate_pdf(self):
        """
        Numerically integrate the provided probability density function over
        each cell to generate an array resembling the histogram computed by
        ``tabulate_histogram()``. The function uses the trapezoid rule over
        intervals discretized into ``self.ires`` separate function evaluations.
        """

        from mitsuba.core import Float, Vector2f, ScalarVector2f

        extents = self.bounds.extents()
        endpoint = self.bounds.max - extents / ScalarVector2f(self.res)

        # Compute a set of nodes where the PDF should be evaluated
        x, y = ek.meshgrid(
            ek.linspace(Float, self.bounds.min.x, endpoint.x, self.res.x),
            ek.linspace(Float, self.bounds.min.y, endpoint.y, self.res.y))

        endpoint = extents / ScalarVector2f(self.res)
        eps = 1e-4
        nx = ek.linspace(Float, eps, endpoint.x * (1 - eps), self.ires)
        ny = ek.linspace(Float, eps, endpoint.y * (1 - eps), self.ires)
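        # Trapezoid-rule weights: interior nodes get full weight, the two endpoints half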
        wx = [1 / (self.ires - 1)] * self.ires
        wy = [1 / (self.ires - 1)] * self.ires
        wx[0] = wx[-1] = wx[0] * .5
        wy[0] = wy[-1] = wy[0] * .5

        integral = 0

        self.histogram_start = time.time()
        for yi, dy in enumerate(ny):
            for xi, dx in enumerate(nx):
                xy = self.domain.map_forward(Vector2f(x + dx, y + dy))
                pdf = self.pdf_func(xy)
                integral = ek.fmadd(pdf, wx[xi] * wy[yi], integral)
        self.histogram_end = time.time()

        self.pdf = integral * (ek.hprod(extents / ScalarVector2f(self.res)) *
                               self.sample_count)

        # A few sanity checks
        pdf_min = ek.hmin(self.pdf) / self.sample_count
        if not pdf_min >= 0:
            self._log('Failure: Encountered a cell with a '
                      'negative PDF value: %f' % pdf_min)
            self.fail = True

        self.pdf_sum = ek.hsum(self.pdf) / self.sample_count
        if self.pdf_sum > 1.1:
            self._log('Failure: PDF integrates to a value greater '
                      'than 1.0: %f' % self.pdf_sum)
            self.fail = True
def test06_legendre_pd_diff(variant_scalar_rgb):
    import enoki as ek
    from mitsuba.core import math, Vector2f
    assert ek.allclose(
        math.legendre_pd_diff(1, 1),
        Vector2f(math.legendre_pd(1 + 1, 1)) -
        Vector2f(math.legendre_pd(1 - 1, 1)))
    assert ek.allclose(
        math.legendre_pd_diff(2, 1),
        Vector2f(math.legendre_pd(2 + 1, 1)) -
        Vector2f(math.legendre_pd(2 - 1, 1)))
    assert ek.allclose(
        math.legendre_pd_diff(3, 0),
        Vector2f(math.legendre_pd(3 + 1, 0)) -
        Vector2f(math.legendre_pd(3 - 1, 0)))
    assert ek.allclose(
        math.legendre_pd_diff(4, 0.1),
        Vector2f(math.legendre_pd(4 + 1, 0.1)) -
        Vector2f(math.legendre_pd(4 - 1, 0.1)))
# Instead of calling the scene's integrator, we build our own small integrator
# This integrator simply computes the depth values per pixel
sensor = scene.sensors()[0]
film = sensor.film()
sampler = sensor.sampler()
film_size = film.crop_size()
spp = 32

# Enumerate discrete sample & pixel indices, and uniformly sample
# positions within each pixel.
pos = ek.arange(UInt64, ek.hprod(film_size) * spp)
if not sampler.ready():
    sampler.seed(pos)

pos //= spp
scale = Vector2f(1.0 / film_size[0], 1.0 / film_size[1])
pos = Vector2f(Float(pos % int(film_size[0])), Float(pos // int(film_size[0])))

pos += sampler.next_2d()

# Sample rays starting from the camera sensor
rays, weights = sensor.sample_ray_differential(time=0,
                                               sample1=sampler.next_1d(),
                                               sample2=pos * scale,
                                               sample3=0)

# Intersect rays with the scene geometry
surface_interaction = scene.ray_intersect(rays)

# Given intersection, compute the final pixel values as the depth t
# of the sampled surface interaction
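
# The snippet ends here. A minimal sketch of that final step (an assumption; the original
# continuation is not shown): keep the intersection distance t, and store zero for misses.
result = surface_interaction.t
result[~surface_interaction.is_valid()] = 0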
def _render_helper(scene, spp=None, sensor_index=0):
    """
    Internally used function: render the specified Mitsuba scene and return a
    floating point array containing RGB values and AOVs, if applicable
    """
    from mitsuba.core import (Float, UInt32, UInt64, Vector2f,
                              is_monochromatic, is_rgb, is_polarized)
    from mitsuba.render import ImageBlock

    sensor = scene.sensors()[sensor_index]
    film = sensor.film()
    sampler = sensor.sampler()
    film_size = film.crop_size()
    if spp is None:
        spp = sampler.sample_count()

    total_sample_count = ek.hprod(film_size) * spp

    if sampler.wavefront_size() != total_sample_count:
        sampler.seed(ek.arange(UInt64, total_sample_count))

    pos = ek.arange(UInt32, total_sample_count)
    pos //= spp
    scale = Vector2f(1.0 / film_size[0], 1.0 / film_size[1])
    pos = Vector2f(Float(pos % int(film_size[0])),
                   Float(pos // int(film_size[0])))

    pos += sampler.next_2d()

    rays, weights = sensor.sample_ray_differential(
        time=0,
        sample1=sampler.next_1d(),
        sample2=pos * scale,
        sample3=0
    )

    spec, mask, aovs = scene.integrator().sample(scene, sampler, rays)
    spec *= weights
    del mask

    if is_polarized:
        from mitsuba.core import depolarize
        spec = depolarize(spec)

    if is_monochromatic:
        rgb = [spec[0]]
    elif is_rgb:
        rgb = spec
    else:
        from mitsuba.core import spectrum_to_xyz, xyz_to_srgb
        xyz = spectrum_to_xyz(spec, rays.wavelengths)
        rgb = xyz_to_srgb(xyz)
        del xyz

    aovs.insert(0, Float(1.0))
    for i in range(len(rgb)):
        aovs.insert(i + 1, rgb[i])
    del rgb, spec, weights, rays

    block = ImageBlock(
        size=film.crop_size(),
        channel_count=len(aovs),
        filter=film.reconstruction_filter(),
        warn_negative=False,
        warn_invalid=False,
        border=False
    )

    block.clear()
    block.put(pos, aovs)

    del pos
    del aovs

    data = block.data()

    ch = block.channel_count()
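    # The ImageBlock buffer stores 'ch' interleaved channels per pixel: channel 0 accumulates
    # the reconstruction-filter weight (the constant 1.0 inserted above), channels 1..ch-1 the
    # filter-weighted values. The indices below de-interleave weights from values.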
    i = UInt32.arange(ek.hprod(block.size()) * (ch - 1))

    weight_idx = i // (ch - 1) * ch
    values_idx = (i * ch) // (ch - 1) + 1

    weight = ek.gather(data, weight_idx)
    values = ek.gather(data, values_idx)

    return values / (weight + 1e-8)
def render(scene, spp, sample_per_pass, bdata):
    """
    Generate basic objects and rays, then launch the render job.

    Args:
        scene: Target scene object
        spp: Samples per pixel
        sample_per_pass: Number of samples handled in one vectorized
                         GPU pass
        bdata: BSSRDF data object (see data_pipeline.py)
    """

    # Generate basic objects for rendering
    sensor = scene.sensors()[0]
    film = sensor.film()
    sampler = sensor.sampler()
    film_size = film.crop_size()
    blocks = utils_render.gen_blocks(
        film.crop_size(),
        film.reconstruction_filter(),
        channel_count=5,
        border=False,
        aovs=config.aovs,
        invalid_sample=config.visualize_invalid_sample)

    bssrdf = None
    if (config.enable_bssrdf):
        bssrdf = BSSRDF(config.model_name)

    np.random.seed(seed=config.seed)

    # Total number of samples over the entire rendering
    total_sample_count = int(ek.hprod(film_size) * spp)

    if total_sample_count % sample_per_pass != 0:
        sys.exit("total_sample_count is not a multiple of sample_per_pass")
    n_ite = int(total_sample_count / sample_per_pass)

    # Seed the sampler
    if sampler.wavefront_size() != sample_per_pass:
        sampler.seed(0, sample_per_pass)

    # pos = ek.arange(UInt32, total_sample_count)
    pos = np.arange(total_sample_count, dtype=np.uint32)

    pos //= spp
    scale = Vector2f(1.0 / film_size[0], 1.0 / film_size[1])

    pos_x = (pos % int(film_size[0])).astype(np.float64)
    pos_x += np.random.rand(total_sample_count)
    pos_y = (pos // int(film_size[0])).astype(np.float64)
    pos_y += np.random.rand(total_sample_count)

    cnt = 0

    heightmap_pybind = bdata.get_heightmap_pybind()

    for ite in range(n_ite):
        # Get position for this iteration
        pos_ite = Vector2f(
            pos_x[ite * sample_per_pass:ite * sample_per_pass +
                  sample_per_pass],
            pos_y[ite * sample_per_pass:ite * sample_per_pass +
                  sample_per_pass])

        # Get rays for path tracing
        rays, weights = sensor.sample_ray_differential(
            time=0,
            sample1=sampler.next_1d(),
            sample2=pos_ite * scale,
            sample3=0)

        results = render_sample(scene, sampler, rays, bdata, heightmap_pybind,
                                bssrdf)

        utils_render.postprocess_render(
            results,
            weights,
            blocks,
            pos_ite,
            aovs=config.aovs,
            invalid_sample=config.visualize_invalid_sample)
        sampler.advance()
        cnt += sample_per_pass
        print(f"done {cnt} / {total_sample_count}")

    utils_render.imaging(blocks,
                         film_size,
                         aovs=config.aovs,
                         invalid_sample=config.visualize_invalid_sample)
    def tabulate_histogram(self):
        """
        Invoke the provided sampling strategy many times and generate a
        histogram in the parameter domain. If ``sample_func`` returns a tuple
        ``(positions, weights)`` instead of just positions, the samples are
        considered to be weighted.
        """

        # Generate a table of uniform variates
        from mitsuba.core import Float, Vector2f, Vector2u, Float32, \
            UInt64, PCG32

        rng = PCG32(initseq=ek.arange(UInt64, self.sample_count))
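        # Passing ek.arange(...) as 'initseq' gives every sample its own independent PCG32 stream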

        samples_in = getattr(mitsuba.core, 'Vector%if' % self.sample_dim)()
        for i in range(self.sample_dim):
            samples_in[i] = rng.next_float32() if Float is Float32 \
                else rng.next_float64()

        self.pdf_start = time.time()

        # Invoke sampling strategy
        samples_out = self.sample_func(samples_in)

        if type(samples_out) is tuple:
            weights_out = samples_out[1]
            samples_out = samples_out[0]
        else:
            weights_out = Float(1.0)

        # Map samples into the parameter domain
        xy = self.domain.map_backward(samples_out)

        # Sanity check
        eps = self.bounds.extents() * 1e-4
        in_domain = ek.all((xy >= self.bounds.min - eps)
                           & (xy <= self.bounds.max + eps))
        if not ek.all(in_domain):
            self._log('Encountered samples outside of the specified '
                      'domain: %s' % str(ek.compress(xy, ~in_domain)))
            self.fail = True

        # Normalize position values
        xy = (xy - self.bounds.min) / self.bounds.extents()
        xy = Vector2u(
            ek.clamp(xy * Vector2f(self.res), 0, Vector2f(self.res - 1)))

        # Compute a histogram of the positions in the parameter domain
        self.histogram = ek.zero(Float, ek.hprod(self.res))

        ek.scatter_add(target=self.histogram,
                       index=xy.x + xy.y * self.res.x,
                       source=weights_out)

        self.pdf_end = time.time()

        histogram_min = ek.hmin(self.histogram)
        if not histogram_min >= 0:
            self._log('Encountered a cell with negative sample '
                      'weights: %f' % histogram_min)
            self.fail = True

        self.histogram_sum = ek.hsum(self.histogram) / self.sample_count
        if self.histogram_sum > 1.1:
            self._log('Sample weights add up to a value greater '
                      'than 1.0: %f' % self.histogram_sum)
            self.fail = True
    def map_backward(self, p):
        # Map a 3D point back to the 2D parameter domain: (azimuth, -z)
        from mitsuba.core import Vector2f
        return Vector2f(ek.atan2(y=p.y, x=p.x), -p.z)
    def map_backward(self, p):
        # Map back to a 1D parameter domain: keep x, pad the second component with zeros
        from mitsuba.core import Vector2f, Float
        return Vector2f(p.x, ek.zero(Float, len(p.x)))