def integrator_sample(scene, sampler, rays, medium, active=True):
    si = scene.ray_intersect(rays)
    active = si.is_valid() & active

    # Visible emitters
    emitter_vis = si.emitter(scene, active)
    result = ek.select(active,
                       Emitter.eval_vec(emitter_vis, si, active),
                       Vector3f(0.0))

    ctx = BSDFContext()
    bsdf = si.bsdf(rays)

    # Emitter sampling
    sample_emitter = active & has_flag(BSDF.flags_vec(bsdf), BSDFFlags.Smooth)
    ds, emitter_val = scene.sample_emitter_direction(
        si, sampler.next_2d(sample_emitter), True, sample_emitter)
    active_e = sample_emitter & ek.neq(ds.pdf, 0.0)
    wo = si.to_local(ds.d)
    bsdf_val = BSDF.eval_vec(bsdf, ctx, si, wo, active_e)
    bsdf_pdf = BSDF.pdf_vec(bsdf, ctx, si, wo, active_e)
    mis = ek.select(ds.delta, Float(1), mis_weight(ds.pdf, bsdf_pdf))
    result += ek.select(active_e, emitter_val * bsdf_val * mis, Vector3f(0))

    # BSDF sampling
    active_b = active
    bs, bsdf_val = BSDF.sample_vec(bsdf, ctx, si, sampler.next_1d(active),
                                   sampler.next_2d(active), active_b)
    si_bsdf = scene.ray_intersect(si.spawn_ray(si.to_world(bs.wo)), active_b)
    emitter = si_bsdf.emitter(scene, active_b)
    active_b &= ek.neq(emitter, 0)
    emitter_val = Emitter.eval_vec(emitter, si_bsdf, active_b)
    delta = has_flag(bs.sampled_type, BSDFFlags.Delta)
    ds = DirectionSample3f(si_bsdf, si)
    ds.object = emitter
    emitter_pdf = ek.select(delta, Float(0),
                            scene.pdf_emitter_direction(si, ds, active_b))
    result += ek.select(active_b,
                        bsdf_val * emitter_val * mis_weight(bs.pdf, emitter_pdf),
                        Vector3f(0))

    return result, si.is_valid(), ek.select(si.is_valid(), si.t, Float(0.0))
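# A minimal driver sketch for the integrator above (hypothetical; assumes a
# loaded `scene` and a batch of camera rays `rays` generated as in the
# depth-integrator snippet further below):
sampler = scene.sensors()[0].sampler()
spec, valid, depth = integrator_sample(scene, sampler, rays, medium=None)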
def test04_discr_basic(variant_packet_rgb):
    # Validate discrete distribution cdf/pmf against hand-computed reference
    from mitsuba.core import DiscreteDistribution, Float

    x = DiscreteDistribution([1, 3, 2])
    assert len(x) == 3
    assert x.sum() == 6
    assert ek.allclose(x.normalization(), 1.0 / 6.0)
    assert x.pmf() == [1, 3, 2]
    assert x.cdf() == [1, 4, 6]
    assert x.eval_pmf([1, 2, 0]) == [3, 2, 1]

    assert ek.allclose(
        x.eval_pmf_normalized([1, 2, 0]),
        Float([3, 2, 1]) / 6.0)
    assert ek.allclose(
        x.eval_cdf_normalized([1, 2, 0]),
        Float([4, 6, 1]) / 6.0)

    assert repr(x) == 'DiscreteDistribution[\n size = 3,' \
        '\n sum = 6,\n pmf = [1, 3, 2]\n]'

    x.pmf()[:] = [1, 1, 1]
    x.update()
    assert x.cdf() == [1, 2, 3]
    assert x.sum() == 3
    assert ek.allclose(x.normalization(), 1.0 / 3.0)
def check_uniform_wavefront_sampler(sampler, res=16, atol=0.5):
    from mitsuba.core import Float, UInt32, UInt64, Vector2u

    sample_count = sampler.sample_count()
    sampler.set_samples_per_wavefront(sample_count)
    sampler.seed(0, sample_count)

    hist_1d = ek.zero(UInt32, res)
    hist_2d = ek.zero(UInt32, res * res)

    # Clamp to res - 1: an index equal to `res` would fall outside the histogram
    v_1d = ek.clamp(sampler.next_1d() * res, 0, res - 1)
    ek.scatter_add(
        target=hist_1d,
        index=UInt32(v_1d),
        source=UInt32(1)
    )

    v_2d = Vector2u(ek.clamp(sampler.next_2d() * res, 0, res - 1))
    ek.scatter_add(
        target=hist_2d,
        index=UInt32(v_2d.x * res + v_2d.y),
        source=UInt32(1)
    )

    assert ek.allclose(Float(hist_1d), float(sample_count) / res, atol=atol)
    assert ek.allclose(Float(hist_2d), float(sample_count) / (res * res), atol=atol)
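# A minimal usage sketch (the sampler plugin and sample count are assumptions;
# a stratified-style sampler is used so the per-bin counts are nearly exact):
from mitsuba.core.xml import load_string

sampler = load_string("""<sampler version="2.0.0" type="stratified">
    <integer name="sample_count" value="1024"/>
</sampler>""")
check_uniform_wavefront_sampler(sampler)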
def generate_masks(owners, scene):
    # Generate rays that are used for both the binary mask and the distance mask
    sensor = scene.sensors()[0]
    film = sensor.film()
    sampler = sensor.sampler()
    film_size = film.crop_size()
    spp = 3

    # Seed the sampler
    total_sample_count = ek.hprod(film_size) * spp
    if sampler.wavefront_size() != total_sample_count:
        sampler.seed(UInt64.arange(total_sample_count))

    # Enumerate discrete sample & pixel indices, and uniformly sample
    # positions within each pixel.
    pos = ek.arange(UInt32, total_sample_count)
    pos //= spp
    scale = Vector2f(1.0 / film_size[0], 1.0 / film_size[1])
    pos = Vector2f(Float(pos % int(film_size[0])),
                   Float(pos // int(film_size[0])))
    pos += sampler.next_2d()

    # Sample rays starting from the camera sensor
    rays, weights = sensor.sample_ray_differential(time=0,
                                                   sample1=sampler.next_1d(),
                                                   sample2=pos * scale,
                                                   sample3=0)

    # The masks dictionary contains one mask per parameter owner
    masks = dict()

    # For each parameter owner, generate a mask
    for key in owners:
        scene_dict = dict()
        scene_dict["type"] = "scene"
        scene_dict["camera"] = scene.sensors()[0]
        for item in owners[key]:
            scene_dict[item.id()] = item

        # dummy_scene contains only the camera and the objects associated
        # with this owner
        dummy_scene = load_dict(scene_dict)
        surface_interaction = dummy_scene.ray_intersect(rays)

        # Binary visibility: 1 where the ray hit this owner's geometry, else 0
        result = surface_interaction.t + 1.0
        result[ek.isfinite(surface_interaction.t)] = 1
        result[~surface_interaction.is_valid()] = 0
        masks[key] = result

    return masks
def backward(ctx, grad_output):
    try:
        ek.set_gradient(ctx.output, ek.detach(Float(grad_output)))
        Float.backward()
        result = tuple(ek.gradient(i).torch() if i is not None
                       else None
                       for i in ctx.inputs)
        del ctx.output
        del ctx.inputs
        ek.cuda_malloc_trim()
        return result
    except Exception as e:
        print("render_torch(): critical exception during "
              "backward pass: %s" % str(e))
        raise e
def set_learning_rate(self, lr):
    """Set the learning rate."""
    from mitsuba.core import Float

    # Ensure that the JIT compiler does *not* merge 'lr' into the PTX code
    # (this would trigger a recompile every time it is changed)
    self.lr = lr
    self.lr_v = ek.detach(Float(lr, literal=False))
def step(self):
    """Take a gradient step"""
    self.t += 1
    from mitsuba.core import Float
    lr_t = ek.detach(Float(self.lr * ek.sqrt(1 - self.beta_2**self.t) /
                           (1 - self.beta_1**self.t), literal=False))

    for k, p in self.params.items():
        g_p = ek.gradient(p)
        size = ek.slices(g_p)
        if size == 0:
            continue
        elif size != ek.slices(self.state[k][0]):
            # Reset state if data size has changed
            self._reset(k)

        m_tp, v_tp = self.state[k]
        m_t = self.beta_1 * m_tp + (1 - self.beta_1) * g_p
        v_t = self.beta_2 * v_tp + (1 - self.beta_2) * ek.sqr(g_p)
        self.state[k] = (m_t, v_t)

        u = ek.detach(p) - lr_t * m_t / (ek.sqrt(v_t) + self.epsilon)
        u = type(p)(u)
        ek.set_requires_gradient(u)
        self.params[k] = u
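# For reference, the update implemented above is standard Adam with the bias
# correction folded into the step size:
#
#     lr_t = lr * sqrt(1 - beta_2^t) / (1 - beta_1^t)
#     m_t  = beta_1 * m_{t-1} + (1 - beta_1) * g_t
#     v_t  = beta_2 * v_{t-1} + (1 - beta_2) * g_t^2
#     p   <-  p - lr_t * m_t / (sqrt(v_t) + epsilon)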
def test18_irrcont_simple_function(variant_packet_rgb):
    # Reference from Mathematica
    from mitsuba.core import IrregularContinuousDistribution, Float

    d = IrregularContinuousDistribution([1, 1.5, 1.8, 5], [1, 3, 0, 1])
    assert ek.allclose(d.integral(), 3.05)
    assert ek.allclose(
        d.eval_pdf([0, 1, 2, 3, 4, 5, 6]),
        [0, 1, 0.0625, 0.375, 0.6875, 1, 0])
    assert ek.allclose(
        d.eval_cdf([0, 1, 2, 3, 4, 5, 6]),
        [0, 0, 1.45625, 1.675, 2.20625, 3.05, 3.05])

    assert ek.allclose(
        d.sample(ek.linspace(Float, 0, 1, 11)),
        [1., 1.21368, 1.35622, 1.47111, 1.58552, 2.49282,
         3.35949, 3.8938, 4.31714, 4.67889, 5.])

    assert ek.allclose(
        d.sample_pdf(ek.linspace(Float, 0, 1, 11)),
        ([1., 1.21368, 1.35622, 1.47111, 1.58552, 2.49282,
          3.35949, 3.8938, 4.31714, 4.67889, 5.],
         Float([1., 1.85472, 2.42487, 2.88444, 2.14476, 0.216506,
                0.48734, 0.654313, 0.786607, 0.899653, 1.]) * d.normalization()))
def sample_functor(sample, *args):
    n = ek.slices(sample)
    plugin = instantiate(args)
    mi, ctx = make_context(n)
    wo, pdf = plugin.sample(ctx, mi, [sample[0], sample[1]])
    w = Float.full(1.0, ek.slices(pdf))
    w[ek.eq(pdf, 0)] = 0
    return wo, w
def sample_functor(sample, *args):
    n = ek.slices(sample)
    plugin = instantiate(args)
    (si, ctx) = make_context(n)
    bs, weight = plugin.sample(ctx, si, sample[0], [sample[1], sample[2]])

    w = Float.full(1.0, ek.slices(weight))
    w[ek.all(ek.eq(weight, 0))] = 0
    return bs.wo, w
def check_zero_scatter(sampler, si, bs, channel, active):
    """Check whether a ray passes through the medium without scattering"""

    sigma_t = index_spectrum(bs.sigma_t, channel)

    # Ray passes through medium w/o scattering?
    is_zero_scatter = (sampler.next_1d(active) >
                       Float(1) - ek.exp(-sigma_t * si.t)) & active
    return is_zero_scatter
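# For reference: along a segment of length t, the Beer-Lambert transmittance
# is Tr(t) = exp(-sigma_t * t), so a uniform sample u in [0, 1) indicates
# "no scattering event" whenever u > 1 - Tr(t), which is exactly the test above.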
def test05_discr_sample(variant_packet_rgb):
    # Validate discrete distribution sampling against hand-computed reference
    from mitsuba.core import DiscreteDistribution, Float
    eps = 1e-7

    x = DiscreteDistribution([1, 3, 2])
    assert x.sample([-1, 0, 1, 2]) == [0, 0, 2, 2]
    assert x.sample([1 / 6.0 - eps, 1 / 6.0 + eps]) == [0, 1]
    assert x.sample([4 / 6.0 - eps, 4 / 6.0 + eps]) == [1, 2]

    assert ek.allclose(
        x.sample_pmf([-1, 0, 1, 2]),
        ([0, 0, 2, 2], Float([1, 1, 2, 2]) / 6))

    assert ek.allclose(
        x.sample_pmf([1 / 6.0 - eps, 1 / 6.0 + eps]),
        ([0, 1], Float([1, 3]) / 6))

    assert ek.allclose(
        x.sample_pmf([4 / 6.0 - eps, 4 / 6.0 + eps]),
        ([1, 2], Float([3, 2]) / 6))

    assert ek.allclose(
        x.sample_reuse([0, 1 / 12.0, 1 / 6.0 - eps, 1 / 6.0 + eps]),
        ([0, 0, 0, 1], Float([0, .5, 1, 0])),
        atol=3 * eps)

    assert ek.allclose(
        x.sample_reuse_pmf([0, 1 / 12.0, 1 / 6.0 - eps, 1 / 6.0 + eps]),
        ([0, 0, 0, 1], Float([0, .5, 1, 0]), Float([1, 1, 1, 3]) / 6),
        atol=3 * eps)
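# For reference: with pmf [1, 3, 2] the unnormalized cdf is [1, 4, 6], and
# sample(u) inverts it, returning the smallest index i with u * sum < cdf[i]
# (clamped to the valid index range). E.g. u = 1/6 - eps yields index 0 and
# u = 1/6 + eps yields index 1, matching the assertions above.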
def __init__(self, props):
    BSDF.__init__(self, props)

    # Path to the BTF zip file
    self.m_filename = props["filename"]

    # Whether to apply inverse gamma correction
    if props.has_property("apply_inv_gamma"):
        self.m_apply_inv_gamma = props["apply_inv_gamma"]
    else:
        self.m_apply_inv_gamma = True

    # Reflectance
    if props.has_property("reflectance"):
        self.m_reflectance = Float(props["reflectance"])
    else:
        self.m_reflectance = Float(1.0)

    # Power parameter
    if props.has_property("power_parameter"):
        self.m_power_parameter = Float(props["power_parameter"])
    else:
        self.m_power_parameter = Float(4.0)

    # Wrap mode
    if props.has_property("wrap_mode"):
        self.m_wrap_mode = str(props["wrap_mode"])
    else:
        self.m_wrap_mode = "repeat"

    # UV map transformation
    self.m_transform = Transform3f(props["to_uv"].extract())

    # The loaded BTF
    self.btf = BtfInterpolator(self.m_filename, p=self.m_power_parameter)

    self.m_flags = BSDFFlags.DiffuseReflection | BSDFFlags.FrontSide
    self.m_components = [self.m_flags]
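# A hypothetical registration sketch, following the pattern of Mitsuba 2's
# custom-Python-plugin examples (the class name `BTF` and plugin name "btf"
# are assumptions here); this makes the BSDF available to the scene loaders:
from mitsuba.render import register_bsdf
register_bsdf("btf", lambda props: BTF(props))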
def estimate(self, in_pos, im, props, sigma_n, active):
    """
    Estimate the outgoing position and absorption with the VAE.
    Note that the arguments are PyTorch tensors (except sigma_n), while the
    return values are Mitsuba types (Vector3f, Float)

    Args:
        in_pos: Incident position in local mesh coordinates (tensor)
        im: Height map around the incident position, scaled to a multiple of
            sigma_n. This map can be generated by clip_scaled_map() from
            data_handler (tensor)
        props: Medium property vector containing, in this order:
               - effective albedo
               - g
               - eta
               - incident angle (xyz)
               - max height
               (tensor)
        sigma_n: Standard deviation of the range of medium scattering. In
                 this method, this value is used as a scale factor for the
                 coordinates in the VAE
        active: Boolean mask indicating whether the VAE is applied to a ray

    Return:
        recon_pos: Estimated outgoing position (Vector3f)
        recon_abs: Estimated absorption probability (Spectrum)
    """

    n_sample, _, _, _ = im.shape
    pos = Vector3f(in_pos)
    abs_prob = ek.zero(Float, n_sample)

    self.model.eval()
    with torch.no_grad():
        # Feature conversion
        feature = self.model.feature_conversion(
            im.to(self.device, dtype=torch.float),
            props.to(self.device, dtype=torch.float))

        # Sample latent variable from a normal distribution
        z = torch.randn(n_sample, 4).to(self.device, dtype=torch.float)

        # Decode and get the reconstructed position and absorption
        recon_pos, recon_abs = self.model.decode(feature, z)

        # Convert from tensor to Vector3f and Spectrum
        recon_pos = Vector3f(recon_pos)
        abs_prob = Spectrum(recon_abs.view(1, -1).squeeze())

    # Reconstruct the real-scale position in mesh local coordinates
    pos += ek.select(active, sigma_n * recon_pos, 0)

    return pos, abs_prob
def get_diff_param(scene):
    # Create a differentiable hyperparameter
    diff_param = Float(0.0)
    ek.set_requires_gradient(diff_param)

    # Update vertices so that they depend on diff_param
    params = traverse(scene)
    t = Transform4f.translate(Vector3f(1.0) * diff_param)
    vertex_positions = params['light_shape.vertex_positions']
    vertex_positions_t = t.transform_point(vertex_positions)
    params['light_shape.vertex_positions'] = vertex_positions_t

    # Update the scene
    params.update()

    return diff_param
def test06_discr_bruteforce(variant_packet_rgb):
    # Brute force validation of discrete distribution sampling
    from mitsuba.core import DiscreteDistribution, Float, PCG32, UInt64

    rng = PCG32(initseq=UInt64.arange(50))

    for size in range(2, 20):
        for i in range(2, 50):
            density = Float(rng.next_uint32_bounded(i)[0:size])
            if ek.hsum(density) == 0:
                continue
            ddistr = DiscreteDistribution(density)

            x = ek.linspace(Float, 0, 1, 20)
            y = ddistr.sample(x)
            z = ek.gather(ddistr.cdf(), y - 1, y > 0)
            x *= ddistr.sum()

            # Did we sample the right interval?
            assert ek.all((x > z) | (ek.eq(x, 0) & (x >= z)))
# This integrator simply computes the depth values per pixel
sensor = scene.sensors()[0]
film = sensor.film()
sampler = sensor.sampler()
film_size = film.crop_size()
spp = 32

# Enumerate discrete sample & pixel indices, and uniformly sample
# positions within each pixel.
pos = ek.arange(UInt64, ek.hprod(film_size) * spp)

if not sampler.ready():
    sampler.seed(pos)

pos //= spp
scale = Vector2f(1.0 / film_size[0], 1.0 / film_size[1])
pos = Vector2f(Float(pos % int(film_size[0])),
               Float(pos // int(film_size[0])))
pos += sampler.next_2d()

# Sample rays starting from the camera sensor
rays, weights = sensor.sample_ray_differential(time=0,
                                               sample1=sampler.next_1d(),
                                               sample2=pos * scale,
                                               sample3=0)

# Intersect rays with the scene geometry
surface_interaction = scene.ray_intersect(rays)

# Given intersection, compute the final pixel values as the depth t
# of the sampled surface interaction
result = surface_interaction.t
print("[WARNING] NaNs in the vertex displacements. ({iteration:d})".format(iteration=i)) exit(-1) vertex_pos_optim = ravel(params[vertex_pos_key]) vertex_np = np.array(vertex_pos_optim) ind = np.linspace(0, vertex_np.shape[0]-1, num=vertex_np.shape[0]) ind = np.reshape(ind,(-1,1)) vertex_np = np.concatenate((vertex_np, ind), axis = 1) vertex_np = vertex_np[vertex_np[:,1].argsort()] vertex_np = vertex_np[vertex_np[:,0].argsort(kind='mergesort')] Z = np.reshape(vertex_np[:,2], (256, 256)).T Z_smooth = gaussian_filter(Z, sigma = smooth_sigma) vertex_np[:,2] = np.reshape(Z_smooth, (-1,)) vertex_np = vertex_np[vertex_np[:,3].argsort()] vertex_np = vertex_np[:, 0:3] params_opt['displacements'] = Float(vertex_np[:,2]) # Plot the loss curve fig, ax = plt.subplots() ax.plot(loss_list, '-b', label = 'Squared error between image_render and image_ref') ax.plot(diff_render_init, '-r', label = 'Squared error between image_render and image_init') leg = ax.legend() plt.title('loss') plt.xlabel('iteration') plt.savefig(output_path + 'loss.png') # Plot the difference between the vertex positions fig, ax = plt.subplots() ax.plot(diff_vertex_ref, '-b', label = 'Squared error between vertex_render and vertex_ref') ax.plot(diff_vertex_init, '-r', label = 'Squared error between vertex_render and vertex_init') leg = ax.legend()
# Load the Cornell Box
Thread.thread().file_resolver().append('cbox')
scene = load_file(
    'C:/MyFile/code/ray tracing/misuba2/test/gpu_autodiff/cbox/cbox.xml')

# Find differentiable scene parameters
params = traverse(scene)

# Keep track of derivatives with respect to one parameter
param_0 = params['red.reflectance.value']
ek.set_requires_gradient(param_0)

# Differentiable simulation
image = render(scene, spp=4)

# Assign the gradient [1, 1, 1] to the 'red.reflectance.value' input
ek.set_gradient(param_0, [1, 1, 1], backward=False)

# Forward-propagate previously assigned gradients
Float.forward()

# The gradients have been propagated to the output image
image_grad = ek.gradient(image)

# .. write them to a PNG file
crop_size = scene.sensors()[0].film().crop_size()
fname = 'C:/MyFile/code/ray tracing/misuba2/test/gpu_autodiff/output/out.png'
write_bitmap(fname, image_grad, crop_size)
print('Wrote forward differentiation image to: {}'.format(fname))
def tabulate_histogram(self):
    """
    Invoke the provided sampling strategy many times and generate a
    histogram in the parameter domain. If ``sample_func`` returns a tuple
    ``(positions, weights)`` instead of just positions, the samples are
    considered to be weighted.
    """

    # Generate a table of uniform variates
    from mitsuba.core import Float, Vector2f, Vector2u, Float32, \
        UInt64, PCG32

    rng = PCG32(initseq=ek.arange(UInt64, self.sample_count))

    samples_in = getattr(mitsuba.core, 'Vector%if' % self.sample_dim)()
    for i in range(self.sample_dim):
        samples_in[i] = rng.next_float32() if Float is Float32 \
            else rng.next_float64()

    self.pdf_start = time.time()

    # Invoke sampling strategy
    samples_out = self.sample_func(samples_in)

    if type(samples_out) is tuple:
        weights_out = samples_out[1]
        samples_out = samples_out[0]
    else:
        weights_out = Float(1.0)

    # Map samples into the parameter domain
    xy = self.domain.map_backward(samples_out)

    # Sanity check
    eps = self.bounds.extents() * 1e-4
    in_domain = ek.all((xy >= self.bounds.min - eps) &
                       (xy <= self.bounds.max + eps))
    if not ek.all(in_domain):
        self._log('Encountered samples outside of the specified '
                  'domain: %s' % str(ek.compress(xy, ~in_domain)))
        self.fail = True

    # Normalize position values
    xy = (xy - self.bounds.min) / self.bounds.extents()
    xy = Vector2u(ek.clamp(xy * Vector2f(self.res), 0,
                           Vector2f(self.res - 1)))

    # Compute a histogram of the positions in the parameter domain
    self.histogram = ek.zero(Float, ek.hprod(self.res))

    ek.scatter_add(
        target=self.histogram,
        index=xy.x + xy.y * self.res.x,
        source=weights_out
    )

    self.pdf_end = time.time()

    histogram_min = ek.hmin(self.histogram)
    if not histogram_min >= 0:
        self._log('Encountered a cell with negative sample '
                  'weights: %f' % histogram_min)
        self.fail = True

    self.histogram_sum = ek.hsum(self.histogram) / self.sample_count
    if self.histogram_sum > 1.1:
        self._log('Sample weights add up to a value greater '
                  'than 1.0: %f' % self.histogram_sum)
        self.fail = True
def render_sample(scene, sampler, rays, bdata, heightmap_pybind, bssrdf=None):
    """
    Sample RTE
    TODO: Support multi channel sampling

    Args:
        scene: Target scene object
        sampler: Sampler object for random number
        rays: Given rays for sampling
        bdata: BSSRDF Data object
        heightmap_pybind: Object for getting height map around incident position.
                          Refer src/librender/python/heightmap.cpp

    Returns:
        result: Sampling RTE result
        valid_rays: Mask data whether rays are valid or not
        scatter: Scatter components of Sampling RTE result
        non_scatter: Non scatter components of Sampling RTE result
        invalid_sample: Sampling RTE result with invalid sampled data by VAEBSSRDF
    """

    eta = Float(1.0)
    emission_weight = Float(1.0)
    throughput = Spectrum(1.0)
    result = Spectrum(0.0)
    scatter = Spectrum(0.0)
    non_scatter = Spectrum(0.0)
    invalid_sample = Spectrum(0.0)
    active = True
    is_bssrdf = False

    ##### First interaction #####
    si = scene.ray_intersect(rays, active)
    active = si.is_valid() & active
    valid_rays = si.is_valid()

    emitter = si.emitter(scene, active)

    depth = 0

    # Set channel
    # At and after evaluating the BSSRDF, a ray considers only this one channel
    n_channels = 3
    channel = UInt32(
        ek.min(sampler.next_1d(active) * n_channels, n_channels - 1))

    d_out_local = Vector3f().zero()
    d_out_pdf = Float(0)

    sss = Mask(False)

    while (True):
        depth += 1
        if config.aovs and depth == 2:
            sss = is_bssrdf

        ##### Interaction with emitters #####
        emission_val = emission_weight * throughput * Emitter.eval_vec(
            emitter, si, active)

        result += ek.select(active, emission_val, Spectrum(0.0))
        invalid_sample += ek.select(active, emission_val, Spectrum(0.0))
        scatter += ek.select(active & sss, emission_val, Spectrum(0.0))
        non_scatter += ek.select(active & ~sss, emission_val, Spectrum(0.0))

        active = active & si.is_valid()

        # Process russian roulette
        if depth > config.rr_depth:
            q = ek.min(ek.hmax(throughput) * ek.sqr(eta), 0.95)
            active = active & (sampler.next_1d(active) < q)
            throughput *= ek.rcp(q)

        # Stop if the number of bounces exceeds the given limit, or if all
        # rays are invalid (the latter check is only done when the maximum
        # depth is infinite)
        if depth >= config.max_depth:
            break

        ##### Emitter sampling #####
        bsdf = si.bsdf(rays)
        ctx = BSDFContext()

        active_e = active & has_flag(BSDF.flags_vec(bsdf), BSDFFlags.Smooth)
        ds, emitter_val = scene.sample_emitter_direction(
            si, sampler.next_2d(active_e), True, active_e)
        active_e &= ek.neq(ds.pdf, 0.0)

        # Query the BSDF for that emitter-sampled direction
        wo = si.to_local(ds.d)
        bsdf_val = BSDF.eval_vec(bsdf, ctx, si, wo, active_e)

        # Determine density of sampling that same direction using BSDF sampling
        bsdf_pdf = BSDF.pdf_vec(bsdf, ctx, si, wo, active_e)

        mis = ek.select(ds.delta, Float(1), mis_weight(ds.pdf, bsdf_pdf))

        emission_val = mis * throughput * bsdf_val * emitter_val

        result += ek.select(active, emission_val, Spectrum(0.0))
        invalid_sample += ek.select(active, emission_val, Spectrum(0.0))
        scatter += ek.select(active & sss, emission_val, Spectrum(0.0))
        non_scatter += ek.select(active & ~sss, emission_val, Spectrum(0.0))

        ##### BSDF sampling #####
        bs, bsdf_val = BSDF.sample_vec(bsdf, ctx, si, sampler.next_1d(active),
                                       sampler.next_2d(active), active)

        ##### BSSRDF replacing #####
        if (config.enable_bssrdf):
            # Replace bsdf samples by ones of BSSRDF
            bs.wo = ek.select(is_bssrdf, d_out_local, bs.wo)
            bs.pdf = ek.select(is_bssrdf, d_out_pdf, bs.pdf)
            bs.sampled_component = ek.select(is_bssrdf, UInt32(1),
                                             bs.sampled_component)
            bs.sampled_type = ek.select(is_bssrdf,
                                        UInt32(+BSDFFlags.DeltaTransmission),
                                        bs.sampled_type)
        ############################

        throughput *= ek.select(is_bssrdf, Float(1.0), bsdf_val)
        active &= ek.any(ek.neq(throughput, 0))
        eta *= bs.eta

        # Intersect the BSDF ray against the scene geometry
        rays = RayDifferential3f(si.spawn_ray(si.to_world(bs.wo)))
        si_bsdf = scene.ray_intersect(rays, active)

        ##### Checking BSSRDF #####
        if (config.enable_bssrdf):
            # Check whether the BSDF at this interaction is a BSSRDF
            is_bssrdf = (active &
                         has_flag(BSDF.flags_vec(bsdf), BSDFFlags.BSSRDF) &
                         (Frame3f.cos_theta(bs.wo) < Float(0.0)) &
                         (Frame3f.cos_theta(si.wi) > Float(0.0)))

            # Decide whether we should use 0-scattering or multiple scattering
            is_zero_scatter = utils_render.check_zero_scatter(
                sampler, si_bsdf, bs, channel, is_bssrdf)
            is_bssrdf = is_bssrdf & ~is_zero_scatter

            throughput *= ek.select(is_bssrdf, ek.sqr(bs.eta), Float(1.0))
        ###########################

        ###### Process for BSSRDF ######
        if (config.enable_bssrdf and not ek.none(is_bssrdf)):
            # Get projected samples from BSSRDF
            projected_si, project_suc, abs_prob = bssrdf.sample_bssrdf(
                scene, bsdf, bs, si, bdata, heightmap_pybind, channel,
                is_bssrdf)

            if config.visualize_invalid_sample and (depth <= 1):
                active = active & (~is_bssrdf | project_suc)
                invalid_sample += ek.select((is_bssrdf & (~project_suc)),
                                            Spectrum([100, 0, 0]),
                                            Spectrum(0.0))

            # Sample outgoing direction from projected position
            d_out_local, d_out_pdf = utils_render.resample_wo(
                sampler, is_bssrdf)

            # Apply absorption probability
            throughput *= ek.select(is_bssrdf, Spectrum(1) - abs_prob,
                                    Spectrum(1))

            # Replace interactions by sampled ones from BSSRDF
            si_bsdf = SurfaceInteraction3f().masked_si(si_bsdf, projected_si,
                                                       is_bssrdf)
        ################################

        # Determine probability of having sampled that same
        # direction using emitter sampling
        emitter = si_bsdf.emitter(scene, active)
        ds = DirectionSample3f(si_bsdf, si)
        ds.object = emitter

        delta = has_flag(bs.sampled_type, BSDFFlags.Delta)
        emitter_pdf = ek.select(delta, Float(0.0),
                                scene.pdf_emitter_direction(si, ds))
        emission_weight = mis_weight(bs.pdf, emitter_pdf)

        si = si_bsdf

    return result, valid_rays, scatter, non_scatter, invalid_sample
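# For reference, the Russian roulette step above continues a path with
# probability q = min(max(throughput) * eta^2, 0.95) and compensates by
# scaling the throughput by 1/q, which keeps the estimator unbiased while
# terminating low-contribution paths early.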
def _render_helper(scene, spp=None, sensor_index=0):
    """
    Internally used function: render the specified Mitsuba scene and return a
    floating point array containing RGB values and AOVs, if applicable
    """
    from mitsuba.core import (Float, UInt32, UInt64, Vector2f,
                              is_monochromatic, is_rgb, is_polarized)
    from mitsuba.render import ImageBlock

    sensor = scene.sensors()[sensor_index]
    film = sensor.film()
    sampler = sensor.sampler()
    film_size = film.crop_size()
    if spp is None:
        spp = sampler.sample_count()

    total_sample_count = ek.hprod(film_size) * spp

    if sampler.wavefront_size() != total_sample_count:
        sampler.seed(ek.arange(UInt64, total_sample_count))

    pos = ek.arange(UInt32, total_sample_count)
    pos //= spp
    scale = Vector2f(1.0 / film_size[0], 1.0 / film_size[1])
    pos = Vector2f(Float(pos % int(film_size[0])),
                   Float(pos // int(film_size[0])))

    pos += sampler.next_2d()

    rays, weights = sensor.sample_ray_differential(
        time=0,
        sample1=sampler.next_1d(),
        sample2=pos * scale,
        sample3=0
    )

    spec, mask, aovs = scene.integrator().sample(scene, sampler, rays)
    spec *= weights
    del mask

    if is_polarized:
        from mitsuba.core import depolarize
        spec = depolarize(spec)

    if is_monochromatic:
        rgb = [spec[0]]
    elif is_rgb:
        rgb = spec
    else:
        from mitsuba.core import spectrum_to_xyz, xyz_to_srgb
        xyz = spectrum_to_xyz(spec, rays.wavelengths)
        rgb = xyz_to_srgb(xyz)
        del xyz

    aovs.insert(0, Float(1.0))
    for i in range(len(rgb)):
        aovs.insert(i + 1, rgb[i])
    del rgb, spec, weights, rays

    block = ImageBlock(
        size=film.crop_size(),
        channel_count=len(aovs),
        filter=film.reconstruction_filter(),
        warn_negative=False,
        warn_invalid=False,
        border=False
    )

    block.clear()
    block.put(pos, aovs)

    del pos
    del aovs

    data = block.data()

    ch = block.channel_count()
    i = UInt32.arange(ek.hprod(block.size()) * (ch - 1))

    weight_idx = i // (ch - 1) * ch
    values_idx = (i * ch) // (ch - 1) + 1

    weight = ek.gather(data, weight_idx)
    values = ek.gather(data, values_idx)

    return values / (weight + 1e-8)
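# A minimal usage sketch (hypothetical; assumes a loaded `scene`): render and
# reshape the flat per-pixel channel array into an image. The final division
# by the weight channel above normalizes the reconstruction-filter splats.
import numpy as np

values = _render_helper(scene, spp=4)
film_size = scene.sensors()[0].film().crop_size()
img = np.array(values).reshape(film_size[1], film_size[0], -1)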
scene = load_file(xmlfilename)
width, height = scene.sensors()[0].film().crop_size()

# Load a reference image (no derivatives used yet)
bitmap_ref = Bitmap(os.path.join('outputs', render_config['ref'])).convert(
    Bitmap.PixelFormat.RGB, Struct.Type.Float32, srgb_gamma=False)
image_ref = np.array(bitmap_ref).flatten()

# Find differentiable scene parameters
params = traverse(scene)
print(params)
opt_param_name = 'textured_lightsource.emitter.radiance.data'

# Make a backup copy
param_res = params['textured_lightsource.emitter.radiance.resolution']
param_ref = Float(params[opt_param_name])

# Discard all parameters except the one we want to differentiate
params.keep([opt_param_name])

# texture_bitmap = Bitmap('img/checker.jpg').convert(Bitmap.PixelFormat.RGB, Struct.Type.Float32, srgb_gamma=False)
# texture_float = Float(texture_bitmap)
# params['textured_lightsource.emitter.radiance.data'] = texture_bitmap
params['textured_lightsource.emitter.radiance.data'] = ek.full(
    Float, 0.0, len(param_ref))

opt = Adam(params, lr=render_config['learning_rate'])

time_a = time.time()

outpath = os.path.join('outputs/invert_infloasion/', outimg_dir)
from mitsuba.core import Float, Thread
from mitsuba.core.xml import load_file
from mitsuba.python.util import traverse
from mitsuba.python.autodiff import render, write_bitmap, Adam
import time

# Load example scene
Thread.thread().file_resolver().append('bunny')
scene = load_file('bunny/bunny.xml')

# Find differentiable scene parameters
params = traverse(scene)

# Make a backup copy
param_res = params['my_envmap.resolution']
param_ref = Float(params['my_envmap.data'])

# Discard all parameters except for one we want to differentiate
params.keep(['my_envmap.data'])

# Render a reference image (no derivatives used yet)
image_ref = render(scene, spp=16)
crop_size = scene.sensors()[0].film().crop_size()
write_bitmap('out_ref.png', image_ref, crop_size)

# Change to a uniform white lighting environment
params['my_envmap.data'] = ek.full(Float, 1.0, len(param_ref))
params.update()

# Construct an Adam optimizer that will adjust the parameters 'params'
opt = Adam(params, lr=.02)
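# A minimal continuation sketch, patterned after the Mitsuba 2 autodiff
# tutorials (the iteration count and spp are assumptions):
for it in range(100):
    # Perform a differentiable rendering of the scene
    image = render(scene, optimizer=opt, unbiased=True, spp=4)

    # Objective: MSE between 'image' and 'image_ref'
    ob_val = ek.hsum(ek.sqr(image - image_ref)) / len(image)

    # Back-propagate errors to input parameters and take a gradient step
    ek.backward(ob_val)
    opt.step()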
def mis_weight(pdf_a, pdf_b):
    # Power heuristic (beta = 2) for multiple importance sampling
    pdf_a *= pdf_a
    pdf_b *= pdf_b
    return ek.select(pdf_a > 0.0, pdf_a / (pdf_a + pdf_b), Float(0.0))
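# For reference, this implements Veach's power heuristic with beta = 2:
#
#     w_a(p_a, p_b) = p_a^2 / (p_a^2 + p_b^2)
#
# which down-weights samples that the other strategy could have generated
# with a much higher density.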
def sample(self, scene, sampler, ray, medium=None, active=True):
    result = Vector3f(0.0)

    si = scene.ray_intersect(ray, active)
    active = si.is_valid() & active

    # emitter = si.emitter(scene)
    # result = ek.select(active, Emitter.eval_vec(emitter, si, active), Vector3f(0.0))

    # z_axis = np.array([0, 0, 1])
    # vertex = si.p.numpy()
    # v_count = vertex.shape[0]
    # vertex = np.expand_dims(vertex, axis=1)
    # vertex = np.repeat(vertex, self.light.vertex_count(), axis=1)
    # light_vertices = self.light.vertex_positions_buffer().numpy().reshape(self.light.vertex_count(), 3)
    # light_vertices = np.expand_dims(light_vertices, axis=0)
    # light_vertices = np.repeat(light_vertices, v_count, axis=0)
    # sph_polygons = light_vertices - vertex
    # sph_polygons = sph_polygons / np.linalg.norm(sph_polygons, axis=2, keepdims=True)
    # z_axis = np.repeat(np.expand_dims(z_axis, axis=0), v_count, axis=0)
    # result_np = np.zeros(v_count, dtype=np.double)
    # for idx in range(self.light.vertex_count()):
    #     idx1 = (idx + 1) % self.light.vertex_count()
    #     idx2 = (idx) % self.light.vertex_count()
    #     dp = np.sum(sph_polygons[:, idx1, :] * sph_polygons[:, idx2, :], axis=1)
    #     acos = np.arccos(dp)
    #     cp = np.cross(sph_polygons[:, idx1, :], sph_polygons[:, idx2, :])
    #     cp = cp / np.linalg.norm(cp, axis=1, keepdims=True)
    #     dp = np.sum(cp * z_axis, axis=1)
    #     result_np += acos * dp
    # result_np *= 0.5 * 1.0 / math.pi
    # result_np = np.repeat(result_np.reshape((v_count, 1)), 3, axis=1)
    # fin = self.light_radiance * Vector3f(result_np)
    # fin[fin < 0] = 0
    # result += ek.select(active, fin, Vector3f(0.0))

    ctx = BSDFContext()
    bsdf = si.bsdf(ray)

    # bs, bsdf_val = BSDF.sample_vec(bsdf, ctx, si, sampler.next_1d(active), sampler.next_2d(active), active)
    # pdf = bs.pdf
    # wo = si.to_world(bs.wo)

    ds, emitter_val = scene.sample_emitter_direction(
        si, sampler.next_2d(active), True, active)
    pdf = ds.pdf
    active_e = active & ek.neq(ds.pdf, 0.0)
    wo = ds.d
    bsdf_val = BSDF.eval_vec(bsdf, ctx, si, wo, active_e)
    emitter_val[emitter_val > 1.0] = 1.0
    bsdf_val *= emitter_val

    # _, wi_theta, wi_phi = sph_convert(si.to_world(si.wi))
    _, wo_theta, wo_phi = sph_convert(wo)

    # Guard against division by a zero-valued pdf
    safe_pdf = ek.select(pdf > 0, pdf, Float(0.01))

    y_0_0_ = y_0_0(bsdf_val, wo_theta, wo_phi) / safe_pdf
    y_1_n1_ = y_1_n1(bsdf_val, wo_theta, wo_phi) / safe_pdf
    y_1_0_ = y_1_0(bsdf_val, wo_theta, wo_phi) / safe_pdf
    y_1_p1_ = y_1_p1(bsdf_val, wo_theta, wo_phi) / safe_pdf
    y_2_n2_ = y_2_n2(bsdf_val, wo_theta, wo_phi) / safe_pdf
    y_2_n1_ = y_2_n1(bsdf_val, wo_theta, wo_phi) / safe_pdf
    y_2_0_ = y_2_0(bsdf_val, wo_theta, wo_phi) / safe_pdf
    y_2_p1_ = y_2_p1(bsdf_val, wo_theta, wo_phi) / safe_pdf
    y_2_p2_ = y_2_p2(bsdf_val, wo_theta, wo_phi) / safe_pdf

    return result, si.is_valid(), [
        Float(y_0_0_[0]), Float(y_0_0_[1]), Float(y_0_0_[2]),
        Float(y_1_n1_[0]), Float(y_1_n1_[1]), Float(y_1_n1_[2]),
        Float(y_1_0_[0]), Float(y_1_0_[1]), Float(y_1_0_[2]),
        Float(y_1_p1_[0]), Float(y_1_p1_[1]), Float(y_1_p1_[2]),
        Float(y_2_n2_[0]), Float(y_2_n2_[1]), Float(y_2_n2_[2]),
        Float(y_2_n1_[0]), Float(y_2_n1_[1]), Float(y_2_n1_[2]),
        Float(y_2_0_[0]), Float(y_2_0_[1]), Float(y_2_0_[2]),
        Float(y_2_p1_[0]), Float(y_2_p1_[1]), Float(y_2_p1_[2]),
        Float(y_2_p2_[0]), Float(y_2_p2_[1]), Float(y_2_p2_[2])
    ]
def postprocess_render(results, weights, blocks, pos, aovs=False,
                       invalid_sample=False):
    """Post-process the sampling results and splat them into image blocks"""

    result = results[0]
    valid_rays = results[1]

    result *= weights

    xyz = Color3f(srgb_to_xyz(result))
    aovs_result = [xyz[0], xyz[1], xyz[2],
                   ek.select(valid_rays, Float(1.0), Float(0.0)),
                   1.0]

    if not aovs and not invalid_sample:
        block = blocks
    else:
        block = blocks["result"]
    block.put(pos, aovs_result)

    if aovs:
        scatter = results[2]
        non_scatter = results[3]

        scatter *= weights
        non_scatter *= weights

        xyz_scatter = Color3f(srgb_to_xyz(scatter))
        xyz_nonscatter = Color3f(srgb_to_xyz(non_scatter))

        aovs_scatter = [xyz_scatter[0], xyz_scatter[1], xyz_scatter[2],
                        ek.select(valid_rays, Float(1.0), Float(0.0)),
                        1.0]
        aovs_nonscatter = [xyz_nonscatter[0], xyz_nonscatter[1],
                           xyz_nonscatter[2],
                           ek.select(valid_rays, Float(1.0), Float(0.0)),
                           1.0]

        block_scatter = blocks["scatter"]
        block_nonscatter = blocks["non_scatter"]

        block_scatter.put(pos, aovs_scatter)
        block_nonscatter.put(pos, aovs_nonscatter)

    if invalid_sample:
        invalid = results[4]
        invalid *= weights

        xyz_invalid = Color3f(srgb_to_xyz(invalid))
        aovs_invalid = [xyz_invalid[0], xyz_invalid[1], xyz_invalid[2],
                        ek.select(valid_rays, Float(1.0), Float(0.0)),
                        1.0]
        block_invalid = blocks["invalid"]
        block_invalid.put(pos, aovs_invalid)