def test03_put_values_basic(variant_scalar_rgb):
    from mitsuba.core import srgb_to_xyz
    from mitsuba.core.xml import load_string
    from mitsuba.render import ImageBlock

    # Recall that we must pass a reconstruction filter to use the `put` methods.
    rfilter = load_string("""<rfilter version="2.0.0" type="box">
            <float name="radius" value="0.4"/>
        </rfilter>""")
    im = ImageBlock([10, 8], 5, filter=rfilter)
    im.clear()

    # From a spectrum & alpha value
    border = im.border_size()
    ref = np.zeros(shape=(im.height() + 2 * border, im.width() + 2 * border,
                          3 + 1 + 1))
    for i in range(border, im.height() + border):
        for j in range(border, im.width() + border):
            spectrum = np.random.uniform(size=(3, ))
            ref[i, j, :3] = srgb_to_xyz(spectrum)
            ref[i, j, 3] = 1  # Alpha
            ref[i, j, 4] = 1  # Weight
            # To avoid the effects of the reconstruction filter (simpler test),
            # we'll just add one sample right in the center of each pixel.
            im.put([j + 0.5, i + 0.5], [], spectrum, alpha=1.0)

    check_value(im, ref, atol=1e-6)
def test04_put_packets_basic(variant_scalar_rgb):
    from mitsuba.core import srgb_to_xyz
    try:
        mitsuba.set_variant("packet_rgb")
        from mitsuba.core.xml import load_string
        from mitsuba.render import ImageBlock
    except ImportError:
        pytest.skip("packet_rgb mode not enabled")

    # Recall that we must pass a reconstruction filter to use the `put` methods.
    rfilter = load_string("""<rfilter version="2.0.0" type="box">
            <float name="radius" value="0.4"/>
        </rfilter>""")
    im = ImageBlock([10, 8], 5, filter=rfilter)
    im.clear()

    n = 29
    positions = np.random.uniform(size=(n, 2))
    positions[:, 0] = np.floor(positions[:, 0] * (im.width() - 1)).astype(int)
    positions[:, 1] = np.floor(positions[:, 1] * (im.height() - 1)).astype(int)
    # Repeat some of the coordinates to make sure that we test the case where
    # the same pixel receives several values
    positions[-3:, :] = positions[:3, :]

    spectra = np.arange(n * 3).reshape((n, 3))
    alphas = np.ones(shape=(n, ))

    border = im.border_size()
    ref = np.zeros(shape=(im.height() + 2 * border, im.width() + 2 * border,
                          3 + 1 + 1))
    for i in range(n):
        (x, y) = positions[i, :] + border
        ref[int(y), int(x), :3] += srgb_to_xyz(spectra[i, :])
        ref[int(y), int(x), 3] += 1  # Alpha
        ref[int(y), int(x), 4] += 1  # Weight

    # Vectorized `put`
    im.put(positions + 0.5, [], spectra, alphas)

    check_value(im, ref, atol=1e-6)
def test02_put_image_block(variant_scalar_rgb):
    from mitsuba.core.xml import load_string
    from mitsuba.render import ImageBlock

    rfilter = load_string("""<rfilter version="2.0.0" type="box"/>""")

    # TODO: test with varying `offset` values
    im = ImageBlock([10, 5], 4, filter=rfilter)
    im.clear()
    check_value(im, 0)

    im2 = ImageBlock(im.size(), 4, filter=rfilter)
    im2.clear()
    ref = 3.14 * np.arange(im.height() * im.width() * im.channel_count()) \
                   .reshape(im.height(), im.width(), im.channel_count())

    for x in range(im.height()):
        for y in range(im.width()):
            im2.put([y + 0.5, x + 0.5], ref[x, y, :])
    check_value(im2, ref)

    for i in range(5):
        im.put(im2)
        check_value(im, (i + 1) * ref)
def gen_blocks(crop_size, filter, channel_count=5, border=False,
               aovs=False, invalid_sample=False):
    """Generate image blocks for aovs"""
    blocks = {}

    block = ImageBlock(crop_size, channel_count=channel_count,
                       filter=filter, border=border)
    block.clear()
    blocks["result"] = block

    if not aovs and not invalid_sample:
        return block

    if aovs:
        block_scatter = ImageBlock(crop_size, channel_count=channel_count,
                                   filter=filter, border=border)
        block_scatter.clear()
        blocks["scatter"] = block_scatter

        block_nonscatter = ImageBlock(crop_size, channel_count=channel_count,
                                      filter=filter, border=border)
        block_nonscatter.clear()
        blocks["non_scatter"] = block_nonscatter

    if invalid_sample:
        block_invalid = ImageBlock(crop_size, channel_count=channel_count,
                                   filter=filter, border=border)
        block_invalid.clear()
        blocks["invalid"] = block_invalid

    return blocks
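# Usage sketch (added for illustration; the box rfilter XML and the 32x32 crop
# size are assumptions, mirroring the tests above). `gen_blocks` returns a
# single cleared block by default, and a dict of named blocks when `aovs`
# and/or `invalid_sample` are requested.
def example_gen_blocks_usage():
    from mitsuba.core.xml import load_string

    rfilter = load_string("""<rfilter version="2.0.0" type="box"/>""")

    # Default: a single "result" ImageBlock
    block = gen_blocks([32, 32], rfilter)

    # With AOVs and invalid-sample tracking: a dict of cleared ImageBlocks
    blocks = gen_blocks([32, 32], rfilter, aovs=True, invalid_sample=True)
    return block, blocks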
    sample3=0)

# Intersect rays with the scene geometry
surface_interaction = scene.ray_intersect(rays)

# Given intersection, compute the final pixel values as the depth t
# of the sampled surface interaction
result = surface_interaction.t

# Set to zero if no intersection was found
result[~surface_interaction.is_valid()] = 0

block = ImageBlock(
    film.crop_size(),
    channel_count=5,
    filter=film.reconstruction_filter(),
    border=False
)
block.clear()
# ImageBlock expects RGB values (Array of size (n, 3))
block.put(position_sample, rays.wavelengths, Vector3f(result, result, result), 1)

# Write out the result from the ImageBlock
# Internally, ImageBlock stores values in XYZAW format
# (color XYZ, alpha value A and weight W)
xyzaw_np = block.data().reshape([film_size[1], film_size[0], 5])

# We then create a Bitmap from these values and save it out as an EXR file
bmp = Bitmap(xyzaw_np, Bitmap.PixelFormat.XYZAW)
bmp = bmp.convert(Bitmap.PixelFormat.Y, Struct.Type.Float32, srgb_gamma=False)
bmp.write('depth.exr')
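# Optional variation (a sketch, not part of the original snippet): the same
# XYZAW data can also be written out as an 8-bit sRGB PNG preview by asking
# the Bitmap conversion for an RGB pixel format with gamma applied. Depth
# values outside [0, 1] will be clipped, so this is only a quick preview.
bmp_preview = Bitmap(xyzaw_np, Bitmap.PixelFormat.XYZAW)
bmp_preview.convert(Bitmap.PixelFormat.RGB, Struct.Type.UInt8,
                    srgb_gamma=True).write('depth_preview.png')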
def _render_helper(scene, spp=None, sensor_index=0):
    """
    Internally used function: render the specified Mitsuba scene and return a
    floating point array containing RGB values and AOVs, if applicable
    """
    from mitsuba.core import (Float, UInt32, UInt64, Vector2f,
                              is_monochromatic, is_rgb, is_polarized)
    from mitsuba.render import ImageBlock

    sensor = scene.sensors()[sensor_index]
    film = sensor.film()
    sampler = sensor.sampler()
    film_size = film.crop_size()
    if spp is None:
        spp = sampler.sample_count()

    total_sample_count = ek.hprod(film_size) * spp

    if sampler.wavefront_size() != total_sample_count:
        sampler.seed(ek.arange(UInt64, total_sample_count))

    pos = ek.arange(UInt32, total_sample_count)
    pos //= spp
    scale = Vector2f(1.0 / film_size[0], 1.0 / film_size[1])
    pos = Vector2f(Float(pos % int(film_size[0])),
                   Float(pos // int(film_size[0])))

    pos += sampler.next_2d()

    rays, weights = sensor.sample_ray_differential(
        time=0,
        sample1=sampler.next_1d(),
        sample2=pos * scale,
        sample3=0
    )

    spec, mask, aovs = scene.integrator().sample(scene, sampler, rays)
    spec *= weights
    del mask

    if is_polarized:
        from mitsuba.core import depolarize
        spec = depolarize(spec)

    if is_monochromatic:
        rgb = [spec[0]]
    elif is_rgb:
        rgb = spec
    else:
        from mitsuba.core import spectrum_to_xyz, xyz_to_srgb
        xyz = spectrum_to_xyz(spec, rays.wavelengths)
        rgb = xyz_to_srgb(xyz)
        del xyz

    aovs.insert(0, Float(1.0))
    for i in range(len(rgb)):
        aovs.insert(i + 1, rgb[i])
    del rgb, spec, weights, rays

    block = ImageBlock(
        size=film.crop_size(),
        channel_count=len(aovs),
        filter=film.reconstruction_filter(),
        warn_negative=False,
        warn_invalid=False,
        border=False
    )

    block.clear()
    block.put(pos, aovs)

    del pos
    del aovs

    data = block.data()

    ch = block.channel_count()
    i = UInt32.arange(ek.hprod(block.size()) * (ch - 1))

    weight_idx = i // (ch - 1) * ch
    values_idx = (i * ch) // (ch - 1) + 1

    weight = ek.gather(data, weight_idx)
    values = ek.gather(data, values_idx)

    return values / (weight + 1e-8)
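# Usage sketch (illustration only): assumes `scene` is an already-loaded scene
# and that this helper sits alongside Mitsuba 2's autodiff utilities, where
# `write_bitmap(filename, data, resolution)` can save the flat per-pixel
# channel data returned above.
def example_render_to_file(scene, filename='render.exr', spp=4):
    from mitsuba.python.autodiff import write_bitmap

    # Render and write RGB values (plus AOVs, weight already divided out).
    values = _render_helper(scene, spp=spp)
    film_size = scene.sensors()[0].film().crop_size()
    write_bitmap(filename, values, film_size)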
def test03_develop(variant_scalar_rgb, file_format, tmpdir):
    """Create a test image. Develop it to a few file formats, each time
    reading it back and checking that contents are unchanged."""
    from mitsuba.core.xml import load_string
    from mitsuba.core import Bitmap, Struct, ReconstructionFilter, float_dtype
    from mitsuba.render import ImageBlock
    import numpy as np

    np.random.seed(12345 + ord(file_format[0]))

    # Note: depending on the file format, the alpha channel may be automatically removed.
    film = load_string("""<film version="2.0.0" type="hdrfilm">
            <integer name="width" value="41"/>
            <integer name="height" value="37"/>
            <string name="file_format" value="{}"/>
            <string name="pixel_format" value="rgba"/>
            <string name="component_format" value="float32"/>
            <rfilter type="box"/>
        </film>""".format(file_format))
    # Regardless of the output file format, values are stored as XYZAW (5 channels).
    contents = np.random.uniform(size=(film.size()[1], film.size()[0], 5))

    # RGBE will only reconstruct images well when all channels have a similar
    # scale (the exponent is shared between channels).
    if file_format == "rgbe":
        contents = 1 + 0.1 * contents
    # Use unit weights.
    contents[:, :, 4] = 1.0

    block = ImageBlock(film.size(), 5, film.reconstruction_filter())
    block.clear()
    for x in range(film.size()[1]):
        for y in range(film.size()[0]):
            block.put([y + 0.5, x + 0.5], contents[x, y, :])

    film.prepare(['X', 'Y', 'Z', 'A', 'W'])
    film.put(block)

    with pytest.raises(RuntimeError):
        # Should raise when the destination file hasn't been specified.
        film.develop()

    filename = str(tmpdir.join('test_image.' + file_format))
    film.set_destination_file(filename)
    film.develop()

    # Read back and check contents
    other = Bitmap(filename).convert(Bitmap.PixelFormat.XYZAW,
                                     Struct.Type.Float32, srgb_gamma=False)
    img = np.array(other, copy=False)

    if False:
        import matplotlib.pyplot as plt
        plt.figure()
        plt.subplot(1, 3, 1)
        plt.imshow(contents[:, :, :3])
        plt.subplot(1, 3, 2)
        plt.imshow(img[:, :, :3])
        plt.subplot(1, 3, 3)
        plt.imshow(np.sum(np.abs(img[:, :, :3] - contents[:, :, :3]), axis=2),
                   cmap='coolwarm')
        plt.colorbar()
        plt.show()

    if file_format == "exr":
        assert ek.allclose(img, contents, atol=1e-5)
    else:
        if file_format == "rgbe":
            assert ek.allclose(img[:, :, :3], contents[:, :, :3], atol=1e-2), \
                '\n{}\nvs\n{}\n'.format(img[:4, :4, :3], contents[:4, :4, :3])
        else:
            assert ek.allclose(img[:, :, :3], contents[:, :, :3], atol=1e-5)
        # Alpha channel was ignored, alpha and weights should default to 1.0.
        assert ek.allclose(img[:, :, 3:5], 1.0, atol=1e-6)
def test05_put_with_filter(variant_scalar_rgb):
    """The previous tests used a very simple box filter, parametrized so that
    it essentially had no effect. In this test, we use a more realistic
    Gaussian reconstruction filter, with non-zero radius."""
    from mitsuba.core import srgb_to_xyz
    from mitsuba.core.xml import load_string
    from mitsuba.render import ImageBlock

    try:
        mitsuba.set_variant("packet_rgb")
        from mitsuba.core.xml import load_string as load_string_packet
        from mitsuba.render import ImageBlock as ImageBlockP
    except ImportError:
        pytest.skip("packet_rgb mode not enabled")

    rfilter = load_string("""<rfilter version="2.0.0" type="gaussian">
            <float name="stddev" value="0.5"/>
        </rfilter>""")
    rfilter_p = load_string_packet("""<rfilter version="2.0.0" type="gaussian">
            <float name="stddev" value="0.5"/>
        </rfilter>""")

    size = [12, 12]
    im = ImageBlockP(size, 5, filter=rfilter_p)
    im.clear()
    im2 = ImageBlock(size, 5, filter=rfilter)
    im2.clear()

    positions = np.array([
        [5, 6], [0, 1], [5, 6], [1, 11], [11, 11],
        [0, 1], [2, 5], [4, 1], [0, 11], [5, 4]
    ], dtype=float)
    n = positions.shape[0]
    positions += np.random.uniform(size=positions.shape, low=0, high=0.95)

    spectra = np.arange(n * 3).reshape((n, 3))
    alphas = np.ones(shape=(n, ))

    radius = int(math.ceil(rfilter.radius()))
    border = im.border_size()
    ref = np.zeros(shape=(im.height() + 2 * border, im.width() + 2 * border,
                          3 + 1 + 1))

    for i in range(n):
        # -- Scalar `put`
        im2.put(positions[i, :], [], spectra[i, :], alpha=1.0)

        # Fractional part of the position
        offset = positions[i, :] - positions[i, :].astype(int)

        # -- Reference
        # Reconstruction window around the pixel position
        pos = positions[i, :] - 0.5 + border
        lo = np.ceil(pos - radius).astype(int)
        hi = np.floor(pos + radius).astype(int)

        for dy in range(lo[1], hi[1] + 1):
            for dx in range(lo[0], hi[0] + 1):
                r_pos = np.array([dx, dy])
                w_pos = r_pos - pos

                if (np.any(r_pos < 0) or np.any(r_pos >= ref.shape[:2])):
                    continue

                weight = rfilter.eval_discretized(w_pos[0]) \
                    * rfilter.eval_discretized(w_pos[1])

                xyz = srgb_to_xyz(spectra[i, :])
                ref[r_pos[1], r_pos[0], :3] += weight * xyz
                ref[r_pos[1], r_pos[0], 3] += weight * 1  # Alpha
                ref[r_pos[1], r_pos[0], 4] += weight * 1  # Weight

    # -- Vectorized `put`
    im.put(positions, [], spectra, alphas)

    check_value(im, ref, atol=1e-6)
    check_value(im2, ref, atol=1e-6)