def _map(self, prior, samples, weights, out):
    """
    Emit computation of the estimated MAP parameter.

    Accumulates the weighted success count and weighted trial count over
    the samples, then applies the beta-prior MAP estimate
    (k + alpha - 1) / (n + alpha + beta - 2) and stores it, together
    with the trial count, into out.
    """

    # stack slots for the running weighted sums
    total_k = qy.stack_allocate(float, 0.0)
    total_w = qy.stack_allocate(float, 0.0)

    @qy.for_(samples.shape[0])
    def _(n):
        weight = weights.at(n).data.load()
        sample = samples.at(n).data.load().cast_to(float)

        (total_k.load() + sample * weight).store(total_k)
        # each sample represents _estimation_n Bernoulli trials
        (total_w.load() + weight * float(self._model._estimation_n)).store(total_w)

    # beta-prior hyperparameters, laid out as (alpha, beta)
    alpha = prior.data.gep(0, 0).load()
    beta = prior.data.gep(0, 1).load()
    numerator = total_k.load() + alpha - 1.0
    denominator = total_w.load() + alpha + beta - 2.0

    (numerator / denominator).store(out.data.gep(0, 0))

    qy.value_from_any(self._model._estimation_n).store(out.data.gep(0, 1))

    qy.return_()
def _(n):
    # compute per-component responsibilities for sample n
    sample = samples.at(n)

    qy.value_from_any(-numpy.inf).store(total)

    @qy.for_(K)
    def _(k):
        responsibility = r_KN.at(k, n).data

        # the component log likelihood is written into the responsibility slot
        self._sub_emitter.ll(
            StridedArray.from_typed_pointer(out.at(k).data.gep(0, 1)),
            StridedArray.from_typed_pointer(sample.data),
            responsibility,
            )

        log_add_double(total.load(), responsibility.load()).store(total)

    total_value = total.load()

    @qy.if_else(total_value == -numpy.inf)
    def _(then):
        if then:
            # every component reported zero likelihood; fall back to uniform
            @qy.for_(K)
            def _(k):
                qy.value_from_any(1.0 / K).store(r_KN.at(k, n).data)
        else:
            # normalize: responsibility_k = exp(ll_k - log total)
            @qy.for_(K)
            def _(k):
                responsibility = r_KN.at(k, n).data

                qy.exp(responsibility.load() - total_value).store(responsibility)
def _():
    # is_nan must be false for an ordinary float and true for NaN
    finite_flag = qy.value_from_any(-0.000124992188151).is_nan
    nan_flag = qy.value_from_any(numpy.nan).is_nan

    @qy.python(finite_flag, nan_flag)
    def _(finite_py, nan_py):
        assert_false(finite_py)
        assert_true(nan_py)
def _():
    # unary negation of emitted integer constants
    positive = qy.value_from_any(3)
    negative = qy.value_from_any(-5)

    @qy.python(-positive, -negative)
    def _(negated_py, flipped_py):
        assert_equal(negated_py, -3)
        assert_equal(flipped_py, 5)
def set(self, value):
    """
    Change the value of the variable.
    """

    coerced = qy.value_from_any(value)

    coerced.store(self._location)

    return self
def _(then):
    # NOTE(review): branch of an emitted if/else inside an iterative
    # log computation; exact semantics depend on the surrounding
    # (not visible) loop -- confirm against the enclosing function.
    if then:
        (-qy.log(x)).store(result)
        (x - 1.0).store(input_)
    else:
        qy.value_from_any(0.0).store(result)
        (x - 2.0).store(input_)
def _():
    # emitted integer modulo follows C truncation, not Python flooring
    three = qy.value_from_any(3)
    five = qy.value_from_any(5)
    minus_two = qy.value_from_any(-2)

    @qy.python(three % five, five % minus_two, minus_two % five)
    def _(first_py, second_py, third_py):
        assert_equal(first_py, 3)
        assert_equal(second_py, 2)
        assert_equal(third_py, -2)
def binomial_ml(samples_data, weights_data, out_data):
    # ML estimation emitted as MAP under a flat Beta(1, 1) prior
    prior_data = qy.stack_allocate(self._model.prior_dtype)

    qy.value_from_any(1.0).store(prior_data.gep(0, 0))
    qy.value_from_any(1.0).store(prior_data.gep(0, 1))

    self._map(
        StridedArray.from_raw(prior_data, (), ()),
        samples.using(samples_data),
        weights.using(weights_data),
        out.using(out_data),
        )
def _(then):
    # NOTE(review): emitted branch of an iterative log computation;
    # returns early once x + 1.0 == 1.0, i.e. x has underflowed to
    # (effectively) zero -- confirm against the enclosing loop.
    if then:
        (-qy.log(x)).store(result)

        @qy.if_(x + 1.0 == 1.0)
        def _():
            qy.return_(result.load())

        x.store(input_)
    else:
        qy.value_from_any(0.0).store(result)
        (x - 1.0).store(input_)
def _(i):
    # one random-restart iteration: draw a random one-sample assignment
    # per component, fit each component from it, score the mixture, and
    # remember the best-scoring assignment seen so far
    @qy.for_(K)
    def _(k):
        # randomly assign the component
        j = qy.random_int(N)
        component = StridedArray.from_typed_pointer(
            out.at(k).data.gep(0, 1))

        j.store(assigns.at(k).data)

        self._sub_emitter.map(
            prior.at(k),
            samples.at(j).envelop(),
            weights.at(j).envelop(),
            component,
            )

    # compute our total likelihood
    qy.value_from_any(0.0).store(total)

    @qy.for_(N)
    def _(n):
        sample = samples.at(n)
        mixture_ll = total.load()

        qy.value_from_any(-numpy.inf).store(total)

        @qy.for_(K)
        def _(k):
            # log-add this component's likelihood into the running total
            component_ll = total.load()

            self._sub_emitter.ll(
                StridedArray.from_typed_pointer(
                    out.at(k).data.gep(0, 1)),
                sample,
                total,
                )

            log_add_double(component_ll, total.load()).store(total)

        (mixture_ll + total.load()).store(total)

    # best observed so far?
    @qy.if_(total.load() >= best_ll.load())
    def _():
        total.load().store(best_ll)

        @qy.for_(K)
        def _(k):
            assigns.at(k).data.load().store(best_assigns.at(k).data)
def _():
    # convergence check: L1 distance between successive responsibility
    # matrices; break out of the enclosing (emitted) loop when tiny
    qy.value_from_any(0.0).store(total)

    @qy.for_(K)
    def _(k):
        @qy.for_(N)
        def _(n):
            delta = r_KN.at(k, n).data.load() - last_r_KN.at(k, n).data.load()

            (total.load() + abs(delta)).store(total)

    @qy.if_(total.load() < 1e-12)
    def _():
        qy.break_()
def _(i):
    # one random-restart iteration: draw a random one-sample assignment
    # per component, fit each component from it, score the mixture, and
    # remember the best-scoring assignment seen so far
    @qy.for_(K)
    def _(k):
        # randomly assign the component
        j = qy.random_int(N)
        component = StridedArray.from_typed_pointer(out.at(k).data.gep(0, 1))

        j.store(assigns.at(k).data)

        self._sub_emitter.map(
            prior.at(k),
            samples.at(j).envelop(),
            weights.at(j).envelop(),
            component,
            )

    # compute our total likelihood
    qy.value_from_any(0.0).store(total)

    @qy.for_(N)
    def _(n):
        sample = samples.at(n)
        mixture_ll = total.load()

        qy.value_from_any(-numpy.inf).store(total)

        @qy.for_(K)
        def _(k):
            # log-add this component's likelihood into the running total
            component_ll = total.load()

            self._sub_emitter.ll(
                StridedArray.from_typed_pointer(out.at(k).data.gep(0, 1)),
                sample,
                total,
                )

            log_add_double(component_ll, total.load()).store(total)

        (mixture_ll + total.load()).store(total)

    # best observed so far?
    @qy.if_(total.load() >= best_ll.load())
    def _():
        total.load().store(best_ll)

        @qy.for_(K)
        def _(k):
            assigns.at(k).data.load().store(best_assigns.at(k).data)
def _():
    # convergence check: L1 distance between successive responsibility
    # matrices; break out of the enclosing (emitted) loop when tiny
    qy.value_from_any(0.0).store(total)

    @qy.for_(K)
    def _(k):
        @qy.for_(N)
        def _(n):
            delta = r_KN.at(k, n).data.load() - last_r_KN.at(
                k, n).data.load()

            (total.load() + abs(delta)).store(total)

    @qy.if_(total.load() < 1e-12)
    def _():
        qy.break_()
def _average(self, weights, parameters, out):
    """
    Emit computation of the average parameter.

    Accumulates the weighted sum of the parameters into out; no
    normalization by the weight total is emitted, so this presumably
    assumes the weights sum to one -- TODO confirm against callers.
    """

    qy.value_from_any(0.0).store(out.data)

    @qy.for_(parameters.shape[0])
    def _(n):
        weight = weights.at(n).data.load()
        parameter = parameters.at(n).data.load()

        (out.data.load() + weight * parameter).store(out.data)

    qy.return_()
def __or__(self, other):
    """
    Return the result of a bitwise or.
    """

    rhs = qy.value_from_any(other).cast_to(self.type_)
    low = qy.get().builder.or_(self._value, rhs._value)

    return IntegerValue(low)
def __div__(self, other):
    """
    Return the result of a division.
    """

    rhs = qy.value_from_any(other).cast_to(self.type_)
    low = qy.get().builder.sdiv(self._value, rhs._value)

    return IntegerValue(low)
def __mul__(self, other):
    """
    Return the result of a multiplication.
    """

    rhs = qy.value_from_any(other).cast_to(self.type_)
    low = qy.get().builder.mul(self._value, rhs._value)

    return IntegerValue(low)
def __sub__(self, other):
    """
    Return the result of a subtraction.
    """

    rhs = qy.value_from_any(other).cast_to(self.type_)
    low = qy.get().builder.sub(self._value, rhs._value)

    return IntegerValue(low)
def _(k):
    # M-step for component k: fit its parameter from the responsibilities,
    # then set its mixing proportion to the mean responsibility
    component = out.at(k).data

    self._sub_emitter.map(
        prior.at(k),
        samples,
        r_KN.at(k),
        StridedArray.from_typed_pointer(component.gep(0, 1)),
        )

    qy.value_from_any(0.0).store(total)

    @qy.for_(N)
    def _(n):
        (total.load() + r_KN.at(k, n).data.load()).store(total)

    # mixing proportion = mean responsibility over the N samples
    (total.load() / float(N)).store(component.gep(0, 0))
def __add__(self, other):
    """
    Return the result of an addition.
    """

    rhs = qy.value_from_any(other).cast_to(self.type_)
    low = qy.get().builder.add(self._value, rhs._value)

    return IntegerValue(low)
def __rdivmod__(self, other):
    """
    Apply the "divmod" operator.
    """

    lhs = qy.value_from_any(other).cast_to(self.type_)

    # delegate to the forward divmod with operands reversed
    return divmod(lhs, self)
def __ror__(self, other):
    """
    Apply the "|" operator.
    """

    lhs = qy.value_from_any(other).cast_to(self.type_)

    # delegate to the forward | with operands reversed
    return lhs | self
def set_to(value):
    """
    Return a new variable, initialized.
    """

    initial = qy.value_from_any(value)
    variable = Variable(initial.type_)

    return variable.set(initial)
def _ll(self, parameter, sample, out):
    """
    Compute log likelihood under this distribution.

    Sums the per-dimension log likelihoods of every sub-distribution
    into out.
    """

    qy.value_from_any(0.0).store(out)

    for (i, (_, count)) in enumerate(self._model._distributions):
        @qy.for_(count)
        def _(j):
            previous_total = out.load()

            # the sub-emitter overwrites out with its own log likelihood,
            # so the running total is saved and re-added afterwards
            self._emitters[i].ll(
                StridedArray.from_typed_pointer(parameter.data.gep(0, i, j)),
                StridedArray.from_typed_pointer(sample.data.gep(0, i, j)),
                out,
                )

            (out.load() + previous_total).store(out)
def __mod__(self, other):
    """
    Return the remainder of a division.

    Note that this operation performs C-style, not Python-style, modulo.
    """

    rhs = qy.value_from_any(other).cast_to(self.type_)
    low = qy.get().builder.srem(self._value, rhs._value)

    return IntegerValue(low)
def _(n):
    # add sample n's mixture log likelihood to the running total
    sample = samples.at(n)
    mixture_ll = total.load()

    qy.value_from_any(-numpy.inf).store(total)

    @qy.for_(K)
    def _(k):
        # log-add this component's likelihood into the running total
        component_ll = total.load()

        self._sub_emitter.ll(
            StridedArray.from_typed_pointer(out.at(k).data.gep(0, 1)),
            sample,
            total,
            )

        log_add_double(component_ll, total.load()).store(total)

    (mixture_ll + total.load()).store(total)
def _ll(self, parameter, sample, out):
    """
    Compute log likelihood under this distribution.

    Sums the per-dimension log likelihoods of every sub-distribution
    into out.
    """

    qy.value_from_any(0.0).store(out)

    for (i, (_, count)) in enumerate(self._model._distributions):
        @qy.for_(count)
        def _(j):
            previous_total = out.load()

            # the sub-emitter overwrites out with its own log likelihood,
            # so the running total is saved and re-added afterwards
            self._emitters[i].ll(
                StridedArray.from_typed_pointer(parameter.data.gep(
                    0, i, j)),
                StridedArray.from_typed_pointer(sample.data.gep(0, i, j)),
                out,
                )

            (out.load() + previous_total).store(out)
def __add__(self, other):
    """
    Return the result of an addition.

    When NaN testing is enabled, asserts that the sum is a number.
    """

    other = qy.value_from_any(other).cast_to(self.type_)
    value = RealValue(qy.get().builder.fadd(self._value, other._value))

    if qy.get().test_for_nan:
        # report operands in evaluation order (self + other); the
        # original passed (other, self), printing them swapped
        qy.assert_(~value.is_nan, "result of %s + %s is not a number", self, other)

    return value
def _(n):
    # add sample n's mixture log likelihood to the running total
    sample = samples.at(n)
    mixture_ll = total.load()

    qy.value_from_any(-numpy.inf).store(total)

    @qy.for_(K)
    def _(k):
        # log-add this component's likelihood into the running total
        component_ll = total.load()

        self._sub_emitter.ll(
            StridedArray.from_typed_pointer(
                out.at(k).data.gep(0, 1)),
            sample,
            total,
            )

        log_add_double(component_ll, total.load()).store(total)

    (mixture_ll + total.load()).store(total)
def __ge__(self, other):
    """
    Return the result of a greater-than-or-equal comparison.
    """

    rhs = qy.value_from_any(other).cast_to(self.type_)
    bit = qy.get().builder.fcmp(llvm.FCMP_OGE, self._value, rhs._value)

    return qy.Value.from_low(bit)
def __lt__(self, other):
    """
    Return the result of a less-than comparison.
    """

    rhs = qy.value_from_any(other).cast_to(self.type_)
    bit = qy.get().builder.fcmp(llvm.FCMP_OLT, self._value, rhs._value)

    return qy.Value.from_low(bit)
def __eq__(self, other):
    """
    Return the result of an equality comparison.
    """

    # cast the operand as every sibling comparison does; LLVM icmp
    # requires both operands to have identical integer width
    other = qy.value_from_any(other).cast_to(self.type_)

    return \
        qy.Value.from_low(
            qy.get().builder.icmp(
                llvm.ICMP_EQ,
                self._value,
                other._value,
                ),
            )
def __gt__(self, other):
    """
    Return the result of a greater-than comparison.
    """

    rhs = qy.value_from_any(other).cast_to(self.type_)
    bit = qy.get().builder.icmp(llvm.ICMP_SGT, self._value, rhs._value)

    return qy.Value.from_low(bit)
def __le__(self, other):
    """
    Return the result of a less-than-or-equal comparison.
    """

    rhs = qy.value_from_any(other).cast_to(self.type_)
    bit = qy.get().builder.icmp(llvm.ICMP_SLE, self._value, rhs._value)

    return qy.Value.from_low(bit)
def _ml(self, samples, weights, out):
    """
    Emit computation of the estimated maximum-likelihood parameter.

    Stores the epsilon-smoothed ratio of weighted successes to weighted
    trials, together with the trial count, into out.
    """

    # stack slots for the running weighted sums
    total_k = qy.stack_allocate(float, 0.0)
    total_w = qy.stack_allocate(float, 0.0)

    @qy.for_(samples.shape[0])
    def _(n):
        weight = weights.at(n).data.load()
        sample = samples.at(n).data.load().cast_to(float)

        (total_k.load() + sample * weight).store(total_k)
        # each sample represents _estimation_n Bernoulli trials
        (total_w.load() + weight * float(self._model._estimation_n)).store(total_w)

    # epsilon smoothing avoids 0/0 when all weights vanish
    final_ratio = \
        (total_k.load() + self._model._epsilon) \
        / (total_w.load() + self._model._epsilon)

    final_ratio.store(out.data.gep(0, 0))

    qy.value_from_any(self._model._estimation_n).store(out.data.gep(0, 1))
    # NOTE(review): unlike _map, no qy.return_() is emitted here --
    # confirm the caller emits the function terminator
def _(n):
    # add sample n's proportion-weighted mixture log likelihood to total
    sample = samples.at(n)
    total_ll = total.load()

    qy.value_from_any(-numpy.inf).store(total)

    @qy.for_(K)
    def _(k):
        self._sub_emitter.ll(
            StridedArray.from_typed_pointer(
                out.at(k).data.gep(0, 1)),
            StridedArray.from_typed_pointer(sample.data),
            component_ll,
            )

        # weight the component likelihood by its (log) mixing proportion
        log_add_double(
            total.load(),
            qy.log(out.at(k).data.gep(0, 0).load()) + component_ll.load(),
            ) \
            .store(total)

    (total_ll + total.load()).store(total)
def _(n):
    # compute per-component responsibilities for sample n
    sample = samples.at(n)

    qy.value_from_any(-numpy.inf).store(total)

    @qy.for_(K)
    def _(k):
        responsibility = r_KN.at(k, n).data

        # the component log likelihood is written into the responsibility slot
        self._sub_emitter.ll(
            StridedArray.from_typed_pointer(
                out.at(k).data.gep(0, 1)),
            StridedArray.from_typed_pointer(sample.data),
            responsibility,
            )

        log_add_double(total.load(), responsibility.load()).store(total)

    total_value = total.load()

    @qy.if_else(total_value == -numpy.inf)
    def _(then):
        if then:
            # every component reported zero likelihood; fall back to uniform
            @qy.for_(K)
            def _(k):
                qy.value_from_any(1.0 / K).store(
                    r_KN.at(k, n).data)
        else:
            # normalize: responsibility_k = exp(ll_k - log total)
            @qy.for_(K)
            def _(k):
                responsibility = r_KN.at(k, n).data

                qy.exp(responsibility.load() - total_value).store(responsibility)
def _(k):
    # uniform fallback responsibility for component k of sample n
    qy.value_from_any(1.0 / K).store(
        r_KN.at(k, n).data)