Example #1
def array_from_dlpack(t, capsule):
    # Unpack the DLPack capsule into a dictionary describing the tensor
    descr = enoki.detail.from_dlpack(capsule)

    device_type = descr['device_type']
    data = descr['data']
    dtype = descr['dtype']
    shape = descr['shape']
    ndim = len(shape)
    strides = descr['strides']

    if strides is None:
        # DLPack permits omitting strides for compact row-major tensors;
        # reconstruct them from the shape in that case
        tmp = 1
        strides = [0] * ndim
        for i in reversed(range(ndim)):
            strides[i] = tmp
            tmp *= shape[i]

    if t.IsCUDA and device_type != 2:
        raise enoki.Exception("Cannot create an Enoki GPU array from a "
                              "DLPack CPU tensor!")
    elif not t.IsCUDA and device_type != 1:
        raise enoki.Exception("Cannot create an Enoki CPU array from a "
                              "DLPack GPU tensor!")

    if dtype != t.Type:
        raise enoki.Exception("Incompatible type!")

    shape_target = list(reversed(enoki.shape(t())))
    if len(shape_target) != ndim:
        raise enoki.Exception("Incompatible dimension!")
    for i in range(ndim):
        if shape_target[i] != shape[i] and shape_target[i] != 0:
            raise enoki.Exception("Incompatible shape!")

    # Descend to the innermost (1D) array type of the target
    value = t
    while issubclass(value.Value, enoki.ArrayBase):
        value = value.Value

    # Take ownership of the capsule and map the raw buffer into Enoki
    descr['consume'](capsule)
    data = value.map_(data, enoki.hprod(shape), descr['release'])

    def load(t, i, offset):
        size = shape[-1 - i]
        stride = strides[-1 - i]

        if i == ndim - 1:
            if type(offset) is int and stride == 1:
                return data
            else:
                i = enoki.arange(enoki.int32_array_t(t), size)
                return t.gather_(data, offset + stride * i, True, False)
        else:
            result = t()
            for j in range(size):
                result[j] = load(t.Value, i + 1, offset + stride * j)
            return result

    return load(t, 0, 0)
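
For context, a minimal usage sketch (hedged: it assumes PyTorch and the CUDA backend of Enoki are available, and that enoki.cuda.Float32 is the desired target type; torch.utils.dlpack.to_dlpack produces the DLPack capsule):

import torch
from torch.utils.dlpack import to_dlpack
import enoki

t = torch.arange(4, dtype=torch.float32, device='cuda')  # DLPack-capable tensor
capsule = to_dlpack(t)                                   # zero-copy DLPack capsule
arr = array_from_dlpack(enoki.cuda.Float32, capsule)     # map it as an Enoki array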
Example #2
    def tabulate_pdf(self):
        """
        Numerically integrate the provided probability density function over
        each cell to generate an array resembling the histogram computed by
        ``tabulate_histogram()``. The function uses the trapezoid rule over
        intervals discretized into ``self.ires`` separate function evaluations.
        """

        from mitsuba.core import Float, Vector2f, ScalarVector2f

        extents = self.bounds.extents()
        endpoint = self.bounds.max - extents / ScalarVector2f(self.res)

        # Compute a set of nodes where the PDF should be evaluated
        x, y = ek.meshgrid(
            ek.linspace(Float, self.bounds.min.x, endpoint.x, self.res.x),
            ek.linspace(Float, self.bounds.min.y, endpoint.y, self.res.y))

        endpoint = extents / ScalarVector2f(self.res)
        eps = 1e-4
        nx = ek.linspace(Float, eps, endpoint.x * (1 - eps), self.ires)
        ny = ek.linspace(Float, eps, endpoint.y * (1 - eps), self.ires)
        wx = [1 / (self.ires - 1)] * self.ires
        wy = [1 / (self.ires - 1)] * self.ires
        wx[0] = wx[-1] = wx[0] * .5
        wy[0] = wy[-1] = wy[0] * .5

        integral = 0

        self.pdf_start = time.time()
        for yi, dy in enumerate(ny):
            for xi, dx in enumerate(nx):
                xy = self.domain.map_forward(Vector2f(x + dx, y + dy))
                pdf = self.pdf_func(xy)
                integral = ek.fmadd(pdf, wx[xi] * wy[yi], integral)
        self.pdf_end = time.time()

        self.pdf = integral * (ek.hprod(extents / ScalarVector2f(self.res)) *
                               self.sample_count)

        # A few sanity checks
        pdf_min = ek.hmin(self.pdf) / self.sample_count
        if not pdf_min >= 0:
            self._log('Failure: Encountered a cell with a '
                      'negative PDF value: %f' % pdf_min)
            self.fail = True

        self.pdf_sum = ek.hsum(self.pdf) / self.sample_count
        if self.pdf_sum > 1.1:
            self._log('Failure: PDF integrates to a value greater '
                      'than 1.0: %f' % self.pdf_sum)
            self.fail = True
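
The weight construction above implements the composite trapezoid rule: interior nodes receive weight 1/(self.ires - 1) and the two endpoints half of that. A standalone sanity check of this scheme (plain NumPy, independent of Mitsuba):

import numpy as np

ires = 8
x = np.linspace(0.0, 1.0, ires)
w = np.full(ires, 1.0 / (ires - 1))
w[0] = w[-1] = 0.5 / (ires - 1)

# The trapezoid rule is exact for affine integrands: the integral of x
# over [0, 1] is exactly 0.5
assert np.isclose(np.dot(w, x), 0.5)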
Example #3
def generate_masks(owners, scene):
    # Generate rays that are used for both the binary mask and the distance mask
    sensor = scene.sensors()[0]
    film = sensor.film()
    sampler = sensor.sampler()
    film_size = film.crop_size()
    spp = 3

    # Seed the sampler
    total_sample_count = ek.hprod(film_size) * spp

    if sampler.wavefront_size() != total_sample_count:
        sampler.seed(UInt64.arange(total_sample_count))

    # Enumerate discrete sample & pixel indices, and uniformly sample
    # positions within each pixel.
    pos = ek.arange(UInt32, total_sample_count)
    pos //= spp
    scale = Vector2f(1.0 / film_size[0], 1.0 / film_size[1])
    pos = Vector2f(Float(pos % int(film_size[0])),
                   Float(pos // int(film_size[0])))

    pos += sampler.next_2d()

    # Sample rays starting from the camera sensor
    rays, weights = sensor.sample_ray_differential(time=0,
                                                   sample1=sampler.next_1d(),
                                                   sample2=pos * scale,
                                                   sample3=0)

    # The masks dictionary contains one mask per parameter owner
    masks = dict()
    # For each parameter owner, we generate a mask
    for key in owners:
        scene_dict = dict()
        scene_dict["type"] = "scene"
        scene_dict["camera"] = scene.sensors()[0]
        for item in owners[key]:
            scene_dict[item.id()] = item
        # dummy_scene contains only the camera and the objects associated with this owner
        dummy_scene = load_dict(scene_dict)
        surface_interaction = dummy_scene.ray_intersect(rays)

        result = surface_interaction.t + 1.0
        # Note: 'is' would perform a Python identity test (always False for
        # array values); an elementwise comparison is required here
        result[surface_interaction.t != Infinity] = 1
        result[~surface_interaction.is_valid()] = 0

        masks[key] = result
    return masks
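
The masked assignments above can also be written branchlessly; a hedged sketch using ek.select (Float is assumed to come from the active Mitsuba variant):

# Binary mask: 1 for pixels whose ray hit an object, 0 otherwise
binary = ek.select(surface_interaction.is_valid(), Float(1.0), Float(0.0))
# Distance mask: offset hit distance where valid, 0 elsewhere
distance = ek.select(surface_interaction.is_valid(),
                     surface_interaction.t + 1.0, Float(0.0))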
Example #4
# Add the scene directory to the FileResolver's search path
Thread.thread().file_resolver().append(os.path.dirname(filename))

# Load the scene
scene = load_file(filename)

# Instead of calling the scene's integrator, we build our own small integrator
# This integrator simply computes the depth values per pixel
sensor = scene.sensors()[0]
film = sensor.film()
sampler = sensor.sampler()
film_size = film.crop_size()
spp = 32

# Enumerate discrete sample & pixel indices, and uniformly sample
# positions within each pixel.
pos = ek.arange(UInt64, ek.hprod(film_size) * spp)
if not sampler.ready():
    sampler.seed(pos)

pos //= spp
scale = Vector2f(1.0 / film_size[0], 1.0 / film_size[1])
pos = Vector2f(Float(pos % int(film_size[0])), Float(pos // int(film_size[0])))

pos += sampler.next_2d()

# Sample rays starting from the camera sensor
rays, weights = sensor.sample_ray_differential(time=0,
                                               sample1=sampler.next_1d(),
                                               sample2=pos * scale,
                                               sample3=0)
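
The snippet stops after ray generation; the per-pixel depth values themselves would be obtained by intersecting these rays, along the lines of the following sketch (mirroring the masked-assignment pattern used in the other examples on this page):

# Intersect the rays against the scene geometry
surface_interaction = scene.ray_intersect(rays)
result = surface_interaction.t
# Rays that missed the scene get a depth value of zero
result[~surface_interaction.is_valid()] = 0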
Example #5
def _render_helper(scene, spp=None, sensor_index=0):
    """
    Internally used function: render the specified Mitsuba scene and return a
    floating point array containing RGB values and AOVs, if applicable
    """
    from mitsuba.core import (Float, UInt32, UInt64, Vector2f,
                              is_monochromatic, is_rgb, is_polarized)
    from mitsuba.render import ImageBlock

    sensor = scene.sensors()[sensor_index]
    film = sensor.film()
    sampler = sensor.sampler()
    film_size = film.crop_size()
    if spp is None:
        spp = sampler.sample_count()

    total_sample_count = ek.hprod(film_size) * spp

    if sampler.wavefront_size() != total_sample_count:
        sampler.seed(ek.arange(UInt64, total_sample_count))

    pos = ek.arange(UInt32, total_sample_count)
    pos //= spp
    scale = Vector2f(1.0 / film_size[0], 1.0 / film_size[1])
    pos = Vector2f(Float(pos % int(film_size[0])),
                   Float(pos // int(film_size[0])))

    pos += sampler.next_2d()

    rays, weights = sensor.sample_ray_differential(
        time=0,
        sample1=sampler.next_1d(),
        sample2=pos * scale,
        sample3=0
    )

    spec, mask, aovs = scene.integrator().sample(scene, sampler, rays)
    spec *= weights
    del mask

    if is_polarized:
        from mitsuba.core import depolarize
        spec = depolarize(spec)

    if is_monochromatic:
        rgb = [spec[0]]
    elif is_rgb:
        rgb = spec
    else:
        from mitsuba.core import spectrum_to_xyz, xyz_to_srgb
        xyz = spectrum_to_xyz(spec, rays.wavelengths)
        rgb = xyz_to_srgb(xyz)
        del xyz

    aovs.insert(0, Float(1.0))
    for i in range(len(rgb)):
        aovs.insert(i + 1, rgb[i])
    del rgb, spec, weights, rays

    block = ImageBlock(
        size=film.crop_size(),
        channel_count=len(aovs),
        filter=film.reconstruction_filter(),
        warn_negative=False,
        warn_invalid=False,
        border=False
    )

    block.clear()
    block.put(pos, aovs)

    del pos
    del aovs

    data = block.data()

    ch = block.channel_count()
    i = UInt32.arange(ek.hprod(block.size()) * (ch - 1))

    weight_idx = i // (ch - 1) * ch
    values_idx = (i * ch) // (ch - 1) + 1

    weight = ek.gather(data, weight_idx)
    values = ek.gather(data, values_idx)

    return values / (weight + 1e-8)
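
The helper returns one flat array with the channels of all pixels interleaved; a hedged sketch of turning it into an image (assuming NumPy interoperability of the returned array type):

import numpy as np

values = _render_helper(scene)
film_size = scene.sensors()[0].film().crop_size()
# Reshape the flat, channel-interleaved result into (height, width, channels)
img = np.array(values).reshape(film_size[1], film_size[0], -1)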
Example #6
# Add the scene directory to the FileResolver's search path
Thread.thread().file_resolver().append(os.path.dirname(filename))

# Load the scene
scene = load_file(filename)

# Instead of calling the scene's integrator, we build our own small integrator
# This integrator simply computes the depth values per pixel
sensor = scene.sensors()[0]
film = sensor.film()
sampler = sensor.sampler()
film_size = film.crop_size()
spp = 32

# Seed the sampler
total_sample_count = ek.hprod(film_size) * spp

if sampler.wavefront_size() != total_sample_count:
    sampler.seed(UInt64.arange(total_sample_count))

# Enumerate discrete sample & pixel indices, and uniformly sample
# positions within each pixel.
pos = ek.arange(UInt32, total_sample_count)
pos //= spp
scale = Vector2f(1.0 / film_size[0], 1.0 / film_size[1])
pos = Vector2f(Float(pos % int(film_size[0])), Float(pos // int(film_size[0])))

pos += sampler.next_2d()

# Sample rays starting from the camera sensor
rays, weights = sensor.sample_ray_differential(time=0,
                                               sample1=sampler.next_1d(),
                                               sample2=pos * scale,
                                               sample3=0)
Example #7
def test_render(variants_all, scene_fname):
    from mitsuba.core import Bitmap, Struct, Thread, set_thread_count

    scene_dir = dirname(scene_fname)

    if os.path.split(scene_dir)[1] in EXCLUDE_FOLDERS:
        pytest.skip(f"Skip rendering scene {scene_fname}")

    Thread.thread().file_resolver().prepend(scene_dir)

    ref_fname, ref_var_fname = get_ref_fname(scene_fname)
    if not (exists(ref_fname) and exists(ref_var_fname)):
        pytest.skip("Non-existent reference data.")

    ref_bmp = read_rgb_bmp_to_xyz(ref_fname)
    ref_img = np.array(ref_bmp, copy=False)

    ref_var_bmp = read_rgb_bmp_to_xyz(ref_var_fname)
    ref_var_img = np.array(ref_var_bmp, copy=False)

    significance_level = 0.01

    # Compute spp budget
    sample_budget = int(2e6)
    pixel_count = ek.hprod(ref_bmp.size())
    spp = sample_budget // pixel_count

    # Load and render
    scene = mitsuba.core.xml.load_file(scene_fname, spp=spp)
    scene.integrator().render(scene, scene.sensors()[0])

    # Compute variance image
    bmp = scene.sensors()[0].film().bitmap(raw=False)
    img, var_img = bitmap_extract(bmp)

    # Compute Z-test p-value
    p_value = z_test(img, spp, ref_img, ref_var_img)

    # Apply the Sidak correction term, since we'll be conducting multiple independent
    # hypothesis tests. This accounts for the fact that the probability of a failure
    # increases quickly when several hypothesis tests are run in sequence.
    alpha = 1.0 - (1.0 - significance_level) ** (1.0 / pixel_count)

    success = (p_value > alpha)

    if (np.count_nonzero(success) / 3) >= (0.9975 * pixel_count):
        print('Accepted the null hypothesis (min(p-value) = %f, significance level = %f)' %
              (np.min(p_value), alpha))
    else:
        print('Rejected the null hypothesis (min(p-value) = %f, significance level = %f)' %
              (np.min(p_value), alpha))

        output_dir = join(scene_dir, 'error_output')

        if not exists(output_dir):
            os.makedirs(output_dir)

        output_prefix = join(output_dir, splitext(
            basename(scene_fname))[0] + '_' + mitsuba.variant())

        img_rgb_bmp = xyz_to_rgb_bmp(img)

        fname = output_prefix + '_img.exr'
        img_rgb_bmp.write(fname)
        print('Saved rendered image to: ' + fname)

        var_fname = output_prefix + '_var.exr'
        xyz_to_rgb_bmp(var_img).write(var_fname)
        print('Saved variance image to: ' + var_fname)

        err_fname = output_prefix + '_error.exr'
        err_img = 0.02 * np.array(img_rgb_bmp)
        err_img[~success] = 1.0
        err_bmp = Bitmap(err_img).write(err_fname)
        print('Saved error image to: ' + err_fname)

        pvalue_fname = output_prefix + '_pvalue.exr'
        xyz_to_rgb_bmp(p_value).write(pvalue_fname)
        print('Saved p-value image to: ' + pvalue_fname)

        assert False
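
z_test is assumed to be a helper from the surrounding test suite; a minimal sketch of what such a per-pixel Z-test might compute under a central-limit-theorem approximation (names and the variance floor are hypothetical):

import numpy as np
from scipy.stats import norm

def z_test_sketch(img, spp, ref_img, ref_var_img):
    # Standard error of the mean of 'spp' samples, floored to avoid
    # division by zero in constant image regions
    sigma = np.sqrt(np.maximum(ref_var_img, 1e-12) / spp)
    z = np.abs(img - ref_img) / sigma
    # Two-sided p-value: probability of a deviation at least this large
    return 2.0 * norm.sf(z)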
Example #8
def render(scene, spp, sample_per_pass, bdata):
    """
    Generate basic objects and rays.
    Throw render job

    Args:
        scene: Target scene object
        spp: Sample per pixel
        sample_per_pass: a number of sample per pixel in one vectored
                         calculation with GPU
        bdata: BSSRDF Data object. Refer data_pipeline.py
    """

    # Generate basic objects for rendering
    sensor = scene.sensors()[0]
    film = sensor.film()
    sampler = sensor.sampler()
    film_size = film.crop_size()
    blocks = utils_render.gen_blocks(
        film.crop_size(),
        film.reconstruction_filter(),
        channel_count=5,
        border=False,
        aovs=config.aovs,
        invalid_sample=config.visualize_invalid_sample)

    bssrdf = None
    if config.enable_bssrdf:
        bssrdf = BSSRDF(config.model_name)

    np.random.seed(seed=config.seed)

    # The number of samples in one gpu rendering
    total_sample_count = int(ek.hprod(film_size) * spp)

    if total_sample_count % sample_per_pass != 0:
        sys.exit("total_sample_count is not a multiple of sample_per_pass")
    n_ite = total_sample_count // sample_per_pass

    # Seed the sampler
    if sampler.wavefront_size() != sample_per_pass:
        sampler.seed(0, sample_per_pass)

    # pos = ek.arange(UInt32, total_sample_count)
    pos = np.arange(total_sample_count, dtype=np.uint32)

    pos //= spp
    scale = Vector2f(1.0 / film_size[0], 1.0 / film_size[1])

    pos_x = (pos % int(film_size[0])).astype(np.float64)
    pos_x += np.random.rand(total_sample_count)
    pos_y = (pos // int(film_size[0])).astype(np.float64)
    pos_y += np.random.rand(total_sample_count)

    cnt = 0

    heightmap_pybind = bdata.get_heightmap_pybind()

    for ite in range(n_ite):
        # Get position for this iteration
        start = ite * sample_per_pass
        pos_ite = Vector2f(pos_x[start:start + sample_per_pass],
                           pos_y[start:start + sample_per_pass])

        # Get rays for path tracing
        rays, weights = sensor.sample_ray_differential(
            time=0,
            sample1=sampler.next_1d(),
            sample2=pos_ite * scale,
            sample3=0)

        results = render_sample(scene, sampler, rays, bdata, heightmap_pybind,
                                bssrdf)

        utils_render.postprocess_render(
            results,
            weights,
            blocks,
            pos_ite,
            aovs=config.aovs,
            invalid_sample=config.visualize_invalid_sample)
        sampler.advance()
        cnt += sample_per_pass
        print(f"done {cnt} / {total_sample_count}")

    utils_render.imaging(blocks,
                         film_size,
                         aovs=config.aovs,
                         invalid_sample=config.visualize_invalid_sample)
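
The pass decomposition used above, shown in isolation with made-up sizes (all numbers hypothetical):

film_size = (128, 128)
spp = 64
sample_per_pass = 128 * 128 * 4                 # samples evaluated per GPU pass

total_sample_count = film_size[0] * film_size[1] * spp
assert total_sample_count % sample_per_pass == 0
n_ite = total_sample_count // sample_per_pass   # 16 passes in this example

for ite in range(n_ite):
    start = ite * sample_per_pass
    chunk = slice(start, start + sample_per_pass)
    # ... render the samples selected by 'chunk' in this pass ...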
Example #9
    def tabulate_histogram(self):
        """
        Invoke the provided sampling strategy many times and generate a
        histogram in the parameter domain. If ``sample_func`` returns a tuple
        ``(positions, weights)`` instead of just positions, the samples are
        considered to be weighted.
        """

        # Generate a table of uniform variates
        from mitsuba.core import Float, Vector2f, Vector2u, Float32, \
            UInt64, PCG32

        rng = PCG32(initseq=ek.arange(UInt64, self.sample_count))

        samples_in = getattr(mitsuba.core, 'Vector%if' % self.sample_dim)()
        for i in range(self.sample_dim):
            samples_in[i] = rng.next_float32() if Float is Float32 \
                else rng.next_float64()

        self.histogram_start = time.time()

        # Invoke sampling strategy
        samples_out = self.sample_func(samples_in)

        if type(samples_out) is tuple:
            weights_out = samples_out[1]
            samples_out = samples_out[0]
        else:
            weights_out = Float(1.0)

        # Map samples into the parameter domain
        xy = self.domain.map_backward(samples_out)

        # Sanity check
        eps = self.bounds.extents() * 1e-4
        in_domain = ek.all((xy >= self.bounds.min - eps)
                           & (xy <= self.bounds.max + eps))
        if not ek.all(in_domain):
            self._log('Encountered samples outside of the specified '
                      'domain: %s' % str(ek.compress(xy, ~in_domain)))
            self.fail = True

        # Normalize position values
        xy = (xy - self.bounds.min) / self.bounds.extents()
        xy = Vector2u(
            ek.clamp(xy * Vector2f(self.res), 0, Vector2f(self.res - 1)))

        # Compute a histogram of the positions in the parameter domain
        self.histogram = ek.zero(Float, ek.hprod(self.res))

        ek.scatter_add(target=self.histogram,
                       index=xy.x + xy.y * self.res.x,
                       source=weights_out)

        self.histogram_end = time.time()

        histogram_min = ek.hmin(self.histogram)
        if not histogram_min >= 0:
            self._log('Encountered a cell with negative sample '
                      'weights: %f' % histogram_min)
            self.fail = True

        self.histogram_sum = ek.hsum(self.histogram) / self.sample_count
        if self.histogram_sum > 1.1:
            self._log('Sample weights add up to a value greater '
                      'than 1.0: %f' % self.histogram_sum)
            self.fail = True
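
In Mitsuba's chi-square test harness, the histogram produced here is subsequently compared against the table from tabulate_pdf with a statistical test. A hedged sketch of the shape of that comparison using SciPy (the real harness pools sparsely populated cells and uses its own test implementation):

import numpy as np
from scipy.stats import chisquare

obs = np.array(histogram)        # observed sample counts per cell
exp = np.array(pdf)              # expected counts from the integrated PDF
exp *= obs.sum() / exp.sum()     # chisquare() requires matching totals

stat, p_value = chisquare(f_obs=obs, f_exp=exp)
print('chi^2 = %f, p-value = %f' % (stat, p_value))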