def index_spectrum(spec, idx):
    # Start from channel 0, then overwrite lanes whose index selects channel 1 or 2
    m = spec[0]
    m[ek.eq(idx, 1)] = spec[1]
    m[ek.eq(idx, 2)] = spec[2]
    return m
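# A small NumPy analogue of the masked channel selection above (illustrative
# sketch only; enoki's masked assignment is replaced by boolean indexing, and
# the spectrum/index data is made up).
import numpy as np

spec = np.array([[0.1, 0.2, 0.3],   # channel 0, one value per lane
                 [0.4, 0.5, 0.6],   # channel 1
                 [0.7, 0.8, 0.9]])  # channel 2
idx = np.array([0, 1, 2])
m = spec[0].copy()
m[idx == 1] = spec[1][idx == 1]
m[idx == 2] = spec[2][idx == 2]
assert np.allclose(m, [0.1, 0.5, 0.9])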
def collatz(value: p.Int):
    counter = p.Int(0)
    # Record a symbolic loop over both state variables
    loop = p.Loop(value, counter)
    while loop.cond(ek.neq(value, 1)):
        is_even = ek.eq(value & 1, 0)
        value.assign(ek.select(is_even, value // 2, 3 * value + 1))
        counter += 1
    return counter
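# Plain-Python reference of the same computation, handy for spot-checking the
# recorded loop above on scalar inputs (sketch only, no enoki types involved).
def collatz_scalar(value: int) -> int:
    counter = 0
    while value != 1:
        value = value // 2 if value % 2 == 0 else 3 * value + 1
        counter += 1
    return counter

assert collatz_scalar(27) == 111  # the Collatz trajectory of 27 takes 111 steps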
def sample_functor(sample, *args):
    n = ek.slices(sample)
    plugin = instantiate(args)
    mi, ctx = make_context(n)
    wo, pdf = plugin.sample(ctx, mi, [sample[0], sample[1]])
    w = Float.full(1.0, ek.slices(pdf))
    w[ek.eq(pdf, 0)] = 0
    return wo, w
def test05_scalar(t):
    if not ek.is_array_v(t) or ek.array_size_v(t) == 0:
        return
    get_class(t.__module__)

    if ek.is_mask_v(t):
        assert ek.all_nested(t(True))
        assert ek.any_nested(t(True))
        assert ek.none_nested(t(False))
        assert ek.all_nested(t(False) ^ t(True))
        assert ek.all_nested(ek.eq(t(False), t(False)))
        assert ek.none_nested(ek.eq(t(True), t(False)))

    if ek.is_arithmetic_v(t):
        assert t(1) + t(1) == t(2)
        assert t(3) - t(1) == t(2)
        assert t(2) * t(2) == t(4)
        assert ek.min(t(2), t(3)) == t(2)
        assert ek.max(t(2), t(3)) == t(3)

        if ek.is_signed_v(t):
            assert t(2) * t(-2) == t(-4)
            assert ek.abs(t(-2)) == t(2)

        if ek.is_integral_v(t):
            assert t(6) // t(2) == t(3)
            assert t(7) % t(2) == t(1)
            assert t(7) >> 1 == t(3)
            assert t(7) << 1 == t(14)
            assert t(1) | t(2) == t(3)
            assert t(1) ^ t(3) == t(2)
            assert t(1) & t(3) == t(1)
        else:
            assert t(6) / t(2) == t(3)
            assert ek.sqrt(t(4)) == t(2)
            assert ek.fmadd(t(1), t(2), t(3)) == t(5)
            assert ek.fmsub(t(1), t(2), t(3)) == t(-1)
            assert ek.fnmadd(t(1), t(2), t(3)) == t(1)
            assert ek.fnmsub(t(1), t(2), t(3)) == t(-5)

        assert (t(1) & True) == t(1)
        assert (t(1) & False) == t(0)
        assert (t(1) | False) == t(1)

        assert ek.all_nested(t(3) > t(2))
        assert ek.all_nested(ek.eq(t(2), t(2)))
        assert ek.all_nested(ek.neq(t(3), t(2)))
        assert ek.all_nested(t(1) >= t(1))
        assert ek.all_nested(t(2) < t(3))
        assert ek.all_nested(t(1) <= t(1))
        assert ek.select(ek.eq(t(2), t(2)), t(4), t(5)) == t(4)
        assert ek.select(ek.eq(t(3), t(2)), t(4), t(5)) == t(5)

        t2 = t(2)
        assert ek.hsum(t2) == t.Value(2 * len(t2))
        assert ek.dot(t2, t2) == t.Value(4 * len(t2))
        assert ek.dot_async(t2, t2) == t(4 * len(t2))

        value = t(1)
        value[ek.eq(value, t(1))] = t(2)
        value[ek.eq(value, t(3))] = t(5)
        assert value == t(2)
def sample_functor(sample, *args):
    n = ek.slices(sample)
    plugin = instantiate(args)
    (si, ctx) = make_context(n)
    bs, weight = plugin.sample(ctx, si, sample[0], [sample[1], sample[2]])
    w = Float.full(1.0, ek.slices(weight))
    w[ek.all(ek.eq(weight, 0))] = 0
    return bs.wo, w
def sincos(x):
    Float = type(x)
    Int = ek.int_array_t(Float)

    xa = ek.abs(x)

    # Scale by 4/pi and round odd quadrant indices up to the next even one
    j = Int(xa * 1.2732395447351626862)
    j = (j + Int(1)) & ~Int(1)
    y = Float(j)

    # Determine the signs of the sine/cosine results from the quadrant index
    Shift = Float.Type.Size * 8 - 3
    sign_sin = ek.reinterpret_array(Float, j << Shift) ^ x
    sign_cos = ek.reinterpret_array(Float, (~(j - Int(2)) << Shift))

    # Extended-precision (Cody-Waite) range reduction modulo pi/4
    y = xa - y * 0.78515625 \
           - y * 2.4187564849853515625e-4 \
           - y * 3.77489497744594108e-8

    z = y * y
    z |= ek.eq(xa, ek.Infinity)

    s = poly2(z, -1.6666654611e-1,
              8.3321608736e-3,
              -1.9515295891e-4) * z

    c = poly2(z, 4.166664568298827e-2,
              -1.388731625493765e-3,
              2.443315711809948e-5) * z

    s = ek.fmadd(s, y, y)
    c = ek.fmadd(c, z, ek.fmadd(z, -0.5, 1))

    polymask = ek.eq(j & Int(2), ek.zero(Int))

    return (
        ek.mulsign(ek.select(polymask, s, c), sign_sin),
        ek.mulsign(ek.select(polymask, c, s), sign_cos)
    )
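# Plain-Python sketch of the same quadrant logic, with math.sin/math.cos
# standing in for the poly2() kernels, to make the reduction and sign handling
# above easier to follow (scalar only, no enoki types; illustrative sketch).
import math

def sincos_ref(x):
    xa = abs(x)
    j = int(xa * 1.2732395447351626862)   # quadrant index: floor(|x| * 4/pi)
    j = (j + 1) & ~1                       # round odd indices up to even
    y = xa - j * 0.7853981633974483        # reduce |x| modulo pi/4
    s, c = math.sin(y), math.cos(y)        # reference kernels
    swap = (j & 2) != 0                    # odd quadrant pair: swap sin/cos
    sin_val, cos_val = (c, s) if swap else (s, c)
    if j & 4:                              # sign flip for the sine term
        sin_val = -sin_val
    if not ((j - 2) & 4):                  # sign flip for the cosine term
        cos_val = -cos_val
    if x < 0:                              # sine is odd, cosine is even
        sin_val = -sin_val
    return sin_val, cos_val

assert abs(sincos_ref(5.0)[0] - math.sin(5.0)) < 1e-6
assert abs(sincos_ref(5.0)[1] - math.cos(5.0)) < 1e-6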
def test12_binary_search(cname):
    t = get_class(cname)

    import numpy as np
    data_np = np.float32(np.sort(np.random.normal(size=10000)))
    search_np = np.float32(np.random.normal(size=10000))

    data = t(data_np)
    search = t(search_np)

    index = ek.binary_search(
        0, len(data) - 1,
        lambda index: ek.gather(t, data, index) < search
    )

    value = ek.gather(t, data, index)
    cond = ek.eq(index, len(data) - 1) | (value >= search)
    assert ek.all(cond)
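# Rough NumPy restatement of the property checked above: the predicate-based
# search should behave like a lower bound clamped to the last valid index
# (np.searchsorted is only a stand-in here, not enoki's API).
import numpy as np

data = np.float32(np.sort(np.random.normal(size=10000)))
search = np.float32(np.random.normal(size=10000))
index = np.minimum(np.searchsorted(data, search), len(data) - 1)
value = data[index]
assert np.all((index == len(data) - 1) | (value >= search))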
def test06_discr_bruteforce(variant_packet_rgb):
    # Brute force validation of discrete distribution sampling
    from mitsuba.core import DiscreteDistribution, Float, PCG32, UInt64

    rng = PCG32(initseq=UInt64.arange(50))

    for size in range(2, 20):
        for i in range(2, 50):
            density = Float(rng.next_uint32_bounded(i)[0:size])
            if ek.hsum(density) == 0:
                continue
            ddistr = DiscreteDistribution(density)

            x = ek.linspace(Float, 0, 1, 20)
            y = ddistr.sample(x)
            z = ek.gather(ddistr.cdf(), y - 1, y > 0)
            x *= ddistr.sum()

            # Did we sample the right interval?
            assert ek.all((x > z) | (ek.eq(x, 0) & (x >= z)))
def sqrt_(a0):
    if not a0.IsFloat:
        raise Exception("sqrt(): requires floating point operands!")
    ar, sr = _check1(a0)
    if not a0.IsSpecial:
        # Default case: apply the square root component-wise
        for i in range(sr):
            ar[i] = _ek.sqrt(a0[i])
    elif a0.IsComplex:
        # Principal complex square root via |z| and the sign of the real part
        n = abs(a0)
        m = a0.real >= 0
        zero = _ek.eq(n, 0)
        t1 = _ek.sqrt(.5 * (n + abs(a0.real)))
        t2 = .5 * a0.imag / t1
        im = _ek.select(m, t2, _ek.copysign(t1, a0.imag))
        ar.real = _ek.select(m, t1, abs(t2))
        ar.imag = _ek.select(zero, 0, im)
    elif a0.IsQuaternion:
        ri = _ek.norm(a0.imag)
        cs = _ek.sqrt(a0.Complex(a0.real, ri))
        ar.imag = a0.imag * (_ek.rcp(ri) * cs.imag)
        ar.real = cs.real
    else:
        raise Exception("sqrt(): unsupported array type!")
    return ar
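# Scalar sanity check of the complex branch above using Python's cmath
# (illustrative only; the enoki version operates on array-valued components,
# and the zero-magnitude handling here is simplified).
import cmath, math

def csqrt_ref(re, im):
    n = math.hypot(re, im)
    t1 = math.sqrt(0.5 * (n + abs(re)))
    t2 = 0.5 * im / t1 if t1 != 0 else 0.0
    if re >= 0:
        return complex(t1, t2)
    return complex(abs(t2), math.copysign(t1, im))

for z in (3 + 4j, -3 + 4j, -3 - 4j, 0.5 - 2j):
    assert abs(csqrt_ref(z.real, z.imag) - cmath.sqrt(z)) < 1e-12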
def eq_(a0, a1):
    ar, sr = _check2_mask(a0, a1)
    for i in range(sr):
        ar[i] = _ek.eq(a0[i], a1[i])
    return ar
def run(self, significance_level=0.01, test_count=1, quiet=False):
    """
    Run the Chi^2 test

    Parameter ``significance_level`` (float):
        Denotes the desired significance level (e.g. 0.01 for a test at the
        1% significance level)

    Parameter ``test_count`` (int):
        Specifies the total number of statistical tests run by the user.
        This value will be used to adjust the provided significance level
        so that the combination of the entire set of tests has the provided
        significance level.

    Returns → bool:
        ``True`` upon success, ``False`` if the null hypothesis was rejected.
    """
    from mitsuba.core import UInt32, Float64
    from mitsuba.core.math import chi2
    from mitsuba.python.math import rlgamma

    if self.histogram is None:
        self.tabulate_histogram()

    if self.pdf is None:
        self.tabulate_pdf()

    index = UInt32([i[0] for i in sorted(enumerate(self.pdf),
                                         key=lambda x: x[1])])

    # Sort entries by expected frequency (increasing)
    pdf = Float64(ek.gather(self.pdf, index))
    histogram = Float64(ek.gather(self.histogram, index))

    # Compute chi^2 statistic and pool low-valued cells
    chi2val, dof, pooled_in, pooled_out = \
        chi2(histogram, pdf, 5)

    if dof < 1:
        self._log('Failure: The number of degrees of freedom is too low!')
        self.fail = True

    if ek.any(ek.eq(pdf, 0) & ek.neq(histogram, 0)):
        self._log('Failure: Found samples in a cell with expected '
                  'frequency 0. Rejecting the null hypothesis!')
        self.fail = True

    if pooled_in > 0:
        self._log('Pooled %i low-valued cells into %i cells to '
                  'ensure sufficiently high expected cell frequencies'
                  % (pooled_in, pooled_out))

    pdf_time = (self.pdf_end - self.pdf_start) * 1000
    histogram_time = (self.histogram_end - self.histogram_start) * 1000

    self._log('Histogram sum = %f (%.2f ms), PDF sum = %f (%.2f ms)' %
              (self.histogram_sum, histogram_time, self.pdf_sum, pdf_time))

    self._log('Chi^2 statistic = %f (d.o.f = %i)' % (chi2val, dof))

    # Probability of observing a test statistic at least as extreme as the
    # one here assuming that the distributions match
    self.p_value = 1 - rlgamma(dof / 2, chi2val / 2)

    # Apply the Šidák correction term, since we'll be conducting multiple
    # independent hypothesis tests. This accounts for the fact that the
    # probability of a failure increases quickly when several hypothesis
    # tests are run in sequence.
    significance_level = 1.0 - \
        (1.0 - significance_level) ** (1.0 / test_count)

    if self.fail:
        self._log('Not running the test for reasons listed above. Target '
                  'density and histogram were written to "chi2_data.py".')
        result = False
    elif self.p_value < significance_level \
            or not ek.isfinite(self.p_value):
        self._log('***** Rejected ***** the null hypothesis (p-value = %f,'
                  ' significance level = %f). Target density and histogram'
                  ' were written to "chi2_data.py".'
                  % (self.p_value, significance_level))
        result = False
    else:
        self._log('Accepted the null hypothesis (p-value = %f, '
                  'significance level = %f)' %
                  (self.p_value, significance_level))
        result = True

    if not quiet:
        print(self.messages)
        if not result:
            self._dump_tables()
    return result
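# Worked example of the Šidák adjustment used above: running 5 independent
# tests while keeping a 1% family-wise significance level tightens the
# per-test threshold to roughly 0.2% (the concrete numbers are illustrative).
significance_level, test_count = 0.01, 5
adjusted = 1.0 - (1.0 - significance_level) ** (1.0 / test_count)
print(adjusted)   # ~0.002008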