Example #1
    def _():
        p4 = [
            1.474502166059939948905062e4 ,
            2.426813369486704502836312e6 ,
            1.214755574045093227939592e8 ,
            2.663432449630976949898078e9 ,
            2.940378956634553899906876e10,
            1.702665737765398868392998e11,
            4.926125793377430887588120e11,
            5.606251856223951465078242e11,
            ]
        q4 = [
            2.690530175870899333379843e3 ,
            6.393885654300092398984238e5 ,
            4.135599930241388052042842e7 ,
            1.120872109616147941376570e9 ,
            1.488613728678813811542398e10,
            1.016803586272438228077304e11,
            3.417476345507377132798597e11,
            4.463158187419713286462081e11,
            ]

        y    = x - 4.0
        xnum = 0.0
        xden = -1.0

        for (p, q) in zip(p4, q4):
            xnum = xnum * y + p
            xden = xden * y + q

        d4 = 1.791759469228055000094023e0

        qy.return_(d4 + y * (xnum / xden))
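This fragment is the 4 < x <= 12 branch of the Cody & Stoltz log-gamma routine shown in full in Example #33. Read that way, the two Horner loops evaluate a rational minimax approximation; a sketch of the quantity returned, under that assumption:

    \ln\Gamma(x) \approx D_4 + y \, \frac{P_4(y)}{Q_4(y)}, \qquad y = x - 4, \quad 4 < x \le 12,

where P_4 and Q_4 are the polynomials built from the p4 and q4 coefficient lists above (xden starts at -1.0, so Q_4 carries a leading -y^8 term).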
Example #2
    def _map(self, prior, samples, weights, out):
        """
        Emit computation of the estimated MAP parameter.
        """

        total_k = qy.stack_allocate(float, 0.0)
        total_w = qy.stack_allocate(float, 0.0)

        @qy.for_(samples.shape[0])
        def _(n):
            weight = weights.at(n).data.load()
            sample = samples.at(n).data.load().cast_to(float)

            (total_k.load() + sample * weight).store(total_k)
            (total_w.load() + weight * float(self._model._estimation_n)).store(total_w)

        alpha = prior.data.gep(0, 0).load()
        beta  = prior.data.gep(0, 1).load()

        numerator   = total_k.load() + alpha - 1.0
        denominator = total_w.load() + alpha + beta - 2.0

        (numerator / denominator).store(out.data.gep(0, 0))
        qy.value_from_any(self._model._estimation_n).store(out.data.gep(0, 1))

        qy.return_()
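The loop plus the final quotient is the closed-form MAP estimate of a binomial success probability under a Beta(alpha, beta) prior. Assuming self._model._estimation_n is the per-sample trial count n, the value stored in out is

    \hat{p}_{\mathrm{MAP}} = \frac{\sum_i w_i k_i + \alpha - 1}{\,n \sum_i w_i + \alpha + \beta - 2\,}.

A plain-NumPy reference for the same quantity, offered as a hypothetical sketch rather than part of the emitted kernel:

    import numpy

    def binomial_map_reference(samples, weights, alpha, beta, n):
        # samples: per-datum success counts k_i; weights: w_i; n: trials per datum
        samples = numpy.asarray(samples, float)
        weights = numpy.asarray(weights, float)
        total_k = numpy.sum(samples * weights)
        total_w = numpy.sum(weights * float(n))
        return (total_k + alpha - 1.0) / (total_w + alpha + beta - 2.0)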
Example #3
    def _map(self, prior, samples, weights, out):
        """
        Emit computation of the estimated MAP parameter.
        """

        total_k = qy.stack_allocate(float, 0.0)
        total_w = qy.stack_allocate(float, 0.0)

        @qy.for_(samples.shape[0])
        def _(n):
            weight = weights.at(n).data.load()
            sample = samples.at(n).data.load().cast_to(float)

            (total_k.load() + sample * weight).store(total_k)
            (total_w.load() +
             weight * float(self._model._estimation_n)).store(total_w)

        alpha = prior.data.gep(0, 0).load()
        beta = prior.data.gep(0, 1).load()

        numerator = total_k.load() + alpha - 1.0
        denominator = total_w.load() + alpha + beta - 2.0

        (numerator / denominator).store(out.data.gep(0, 0))
        qy.value_from_any(self._model._estimation_n).store(out.data.gep(0, 1))

        qy.return_()
Example #4
        def tuple_ll(parameter_data, sample_data, out_data):
            self._ll(
                parameter.using(parameter_data),
                sample.using(sample_data),
                out_data,
                )

            qy.return_()
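Examples #4 through #19 are variations on one trampoline pattern: the emitted entry point receives raw data pointers and rebinds prebuilt array descriptors onto them with .using(...) before delegating to the real emitter method. A hypothetical sketch of the view semantics assumed here (ArrayView is illustrative only, not the actual StridedArray implementation):

    class ArrayView(object):
        """Illustrative stand-in for a strided-array descriptor."""

        def __init__(self, data, shape, strides):
            self.data = data
            self.shape = shape
            self.strides = strides

        def using(self, data):
            # same shape and strides, new base pointer
            return ArrayView(data, self.shape, self.strides)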
Example #5
        def binomial_ml(samples_data, weights_data, out_data):
            self._ml(
                samples.using(samples_data),
                weights.using(weights_data),
                out.using(out_data),
            )

            qy.return_()
Example #6
        def finite_mixture_ll(parameter_data, sample_data, out_data):
            self._ll(
                parameter.using(parameter_data),
                sample.using(sample_data),
                out_data,
                )

            qy.return_()
Example #7
        def binomial_ml(samples_data, weights_data, out_data):
            self._ml(
                samples.using(samples_data),
                weights.using(weights_data),
                out.using(out_data),
                )

            qy.return_()
Example #8
        def mixed_binomial_ll(parameter_data, sample_data, out_data):
            self._ll(
                parameter.using(parameter_data),
                sample.using(sample_data),
                out_data,
            )

            qy.return_()
Example #9
        def tuple_ll(parameter_data, sample_data, out_data):
            self._ll(
                parameter.using(parameter_data),
                sample.using(sample_data),
                out_data,
            )

            qy.return_()
Example #10
        def finite_mixture_ll(parameter_data, sample_data, out_data):
            self._ll(
                parameter.using(parameter_data),
                sample.using(sample_data),
                out_data,
            )

            qy.return_()
Example #11
        def finite_mixture_given(parameter_data, samples_data, out_data):
            self._given(
                parameter.using(parameter_data),
                samples.using(samples_data),
                out.using(out_data),
            )

            qy.return_()
Example #12
        def mixed_binomial_ll(parameter_data, sample_data, out_data):
            self._ll(
                parameter.using(parameter_data),
                sample.using(sample_data),
                out_data,
                )

            qy.return_()
Example #13
        def finite_mixture_given(parameter_data, samples_data, out_data):
            self._given(
                parameter.using(parameter_data),
                samples.using(samples_data),
                out.using(out_data),
                )

            qy.return_()
Example #14
        def mixed_binomial_ml(prior_data, samples_data, weights_data, out_data):
            self._map(
                prior.using(prior_data),
                samples.using(samples_data),
                weights.using(weights_data),
                out.using(out_data),
                )

            qy.return_()
Example #15
        def binomial_ll_emitted(parameter_data, sample_data, out_data):
            binomial_log_pdf(
                sample_data.load(),
                parameter_data.gep(0, 0).load(),
                parameter_data.gep(0, 1).load(),
                ) \
                .store(out_data)

            qy.return_()
Example #17
        def mixed_binomial_ml(prior_data, samples_data, weights_data,
                              out_data):
            self._map(
                prior.using(prior_data),
                samples.using(samples_data),
                weights.using(weights_data),
                out.using(out_data),
            )

            qy.return_()
Example #18
    def _marginal(self, parameter, out):
        """
        Compute the marginal distribution.
        """

        self._sub_emitter.average(
            parameter.extract(0, 0),
            parameter.extract(0, 1),
            out,
        )

        qy.return_()
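Given the finite-mixture parameter layout used in Example #32, where field 0 of each component holds the mixture weight pi_k and field 1 the component parameter theta_k, this call presumably reduces to the mixture-weighted average

    \theta_{\mathrm{marginal}} = \sum_k \pi_k \, \theta_k,

which is what the _average routine of Example #23 computes.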
Example #19
    def _marginal(self, parameter, out):
        """
        Compute the marginal distribution.
        """

        self._sub_emitter.average(
            parameter.extract(0, 0),
            parameter.extract(0, 1),
            out,
            )

        qy.return_()
Example #20
    def _():
        result = qy.stack_allocate(float)
        input_ = qy.stack_allocate(float)

        p1 = [ 
            4.945235359296727046734888e0,
            2.018112620856775083915565e2,
            2.290838373831346393026739e3,
            1.131967205903380828685045e4,
            2.855724635671635335736389e4,
            3.848496228443793359990269e4,
            2.637748787624195437963534e4,
            7.225813979700288197698961e3,
            ]
        q1 = [
            6.748212550303777196073036e1,
            1.113332393857199323513008e3,
            7.738757056935398733233834e3,
            2.763987074403340708898585e4,
            5.499310206226157329794414e4,
            6.161122180066002127833352e4,
            3.635127591501940507276287e4,
            8.785536302431013170870835e3,
            ]

        @qy.if_else(x <= 0.5)
        def _(then):
            if then:
                (-qy.log(x)).store(result)

                @qy.if_(x + 1.0 == 1.0)
                def _():
                    qy.return_(result.load())

                x.store(input_)
            else:
                qy.value_from_any(0.0).store(result)

                (x - 1.0).store(input_)

        y    = input_.load()
        xnum = 0.0
        xden = 1.0

        for (p, q) in zip(p1, q1):
            xnum = xnum * y + p
            xden = xden * y + q

        d1 = -5.772156649015328605195174e-1

        qy.return_(result.load() + y * (d1 + y * (xnum / xden)))
Example #21
        def mixed_binomial_ml(samples_data, weights_data, out_data):
            prior_data = qy.stack_allocate(self._model.prior_dtype)

            qy.value_from_any(1.0).store(prior_data.gep(0, 0))
            qy.value_from_any(1.0).store(prior_data.gep(0, 1))

            self._map(
                StridedArray.from_raw(prior_data, (), ()),
                samples.using(samples_data),
                weights.using(weights_data),
                out.using(out_data),
            )

            qy.return_()
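Maximum likelihood is obtained here by invoking the MAP routine of Example #2 with a flat Beta(1, 1) prior: with alpha = beta = 1 the prior correction terms vanish and the estimate reduces to

    \hat{p}_{\mathrm{ML}} = \frac{\sum_i w_i k_i}{\,n \sum_i w_i\,}.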
Example #22
        def mixed_binomial_ml(samples_data, weights_data, out_data):
            prior_data = qy.stack_allocate(self._model.prior_dtype)

            qy.value_from_any(1.0).store(prior_data.gep(0, 0))
            qy.value_from_any(1.0).store(prior_data.gep(0, 1))

            self._map(
                StridedArray.from_raw(prior_data, (), ()),
                samples.using(samples_data),
                weights.using(weights_data),
                out.using(out_data),
                )

            qy.return_()
Example #23
    def _average(self, weights, parameters, out):
        """
        Emit computation of the average parameter.
        """

        qy.value_from_any(0.0).store(out.data)

        @qy.for_(parameters.shape[0])
        def _(n):
            weight    = weights.at(n).data.load()
            parameter = parameters.at(n).data.load()

            (out.data.load() + weight * parameter).store(out.data)

        qy.return_()
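The loop accumulates the weighted sum

    \theta_{\mathrm{avg}} = \sum_n w_n \, \theta_n,

so the result is a true average only if the weights already sum to one; the emitted code does not normalize them.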
Example #24
    def _average(self, weights, parameters, out):
        """
        Emit computation of the average parameter.
        """

        qy.value_from_any(0.0).store(out.data)

        @qy.for_(parameters.shape[0])
        def _(n):
            weight = weights.at(n).data.load()
            parameter = parameters.at(n).data.load()

            (out.data.load() + weight * parameter).store(out.data)

        qy.return_()
Example #25
        def binomial_log_pdf_ddd(k, p, n):
            from qy.math import ln_choose

            @qy.if_(k > n)
            def _():
                qy.return_(-numpy.inf)

            @qy.if_(p == 0.0)
            def _():
                qy.return_(qy.select(k == 0.0, 0.0, -numpy.inf))

            @qy.if_(p == 1.0)
            def _():
                qy.return_(qy.select(k == n, 0.0, -numpy.inf))

            qy.return_(ln_choose(n, k) + k * qy.log(p) + (n - k) * qy.log1p(-p))
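Outside the early-return guards (k > n, p = 0, p = 1), the returned value is the standard binomial log-mass

    \log p(k \mid n, p) = \ln\binom{n}{k} + k \ln p + (n - k)\ln(1 - p),

with qy.log1p(-p) used for the last term to preserve precision when p is small.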
Example #26
    def _():
        result = qy.stack_allocate(float)
        input_ = qy.stack_allocate(float)

        p2 = [
            4.974607845568932035012064e0,
            5.424138599891070494101986e2,
            1.550693864978364947665077e4,
            1.847932904445632425417223e5,
            1.088204769468828767498470e6,
            3.338152967987029735917223e6,
            5.106661678927352456275255e6,
            3.074109054850539556250927e6,
            ]
        q2 = [
            1.830328399370592604055942e2,
            7.765049321445005871323047e3,
            1.331903827966074194402448e5,
            1.136705821321969608938755e6,
            5.267964117437946917577538e6,
            1.346701454311101692290052e7,
            1.782736530353274213975932e7,
            9.533095591844353613395747e6,
            ]

        @qy.if_else(x <= a)
        def _(then):
            if then:
                (-qy.log(x)).store(result)

                (x - 1.0).store(input_)
            else:
                qy.value_from_any(0.0).store(result)

                (x - 2.0).store(input_)

        y    = input_.load()
        xnum = 0.0
        xden = 1.0

        for (p, q) in zip(p2, q2):
            xnum = xnum * y + p
            xden = xden * y + q

        d2 = 4.227843350984671393993777e-1

        qy.return_(result.load() + y * (d2 + y * (xnum / xden)))
Example #27
        def invoke_python(*inner_arguments):
            from qy import constant_pointer_to

            call_object = \
                Function.named(
                    "PyObject_CallObject",
                    object_ptr_type,
                    [object_ptr_type, object_ptr_type],
                    )

            argument_tuple = qy.py_tuple(*inner_arguments[1:])
            call_result    = call_object(inner_arguments[0], argument_tuple)

            qy.py_dec_ref(argument_tuple)
            qy.py_check_null(call_result)
            qy.py_dec_ref(call_result)
            qy.return_()
Example #28
        def binomial_log_pdf_ddd(k, p, n):
            from qy.math import ln_choose

            @qy.if_(k > n)
            def _():
                qy.return_(-numpy.inf)

            @qy.if_(p == 0.0)
            def _():
                qy.return_(qy.select(k == 0.0, 0.0, -numpy.inf))

            @qy.if_(p == 1.0)
            def _():
                qy.return_(qy.select(k == n, 0.0, -numpy.inf))

            qy.return_(
                ln_choose(n, k) + k * qy.log(p) + (n - k) * qy.log1p(-p))
Example #29
        def _(then):
            if then:
                qy.return_(0.0)
            else:
                k = qy.Variable(float)

                @qy.if_else(m * 2.0 > n)
                def _(then):
                    if then:
                        k.set(n - m)
                    else:
                        k.set(m)

                result =                         \
                      ln_factorial(n)            \
                    - ln_factorial(k.value)      \
                    - ln_factorial(n - k.value)

                qy.return_(result)
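The else branch evaluates the factorial form of the log binomial coefficient; the inner if_else picks k = min(m, n - m), which leaves the value unchanged because the expression is symmetric in k and n - k:

    \ln\binom{n}{m} = \ln n! - \ln k! - \ln(n - k)!, \qquad k = \min(m,\, n - m).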
Example #30
 def _():
     qy.return_(-numpy.inf)
Example #31
 def _():
     qy.return_(result.load())
Example #32
    def _map(self, prior, samples, weights, out, initializations):
        """
        Emit computation of the estimated MAP parameter via EM.
        """

        # mise en place
        K = self._model._K
        N = samples.shape[0]

        # generate some initial parameters
        self._map_initialize(prior, samples, weights, out, initializations)

        # run EM until convergence
        total        = qy.stack_allocate(float)
        component_ll = qy.stack_allocate(float)

        this_r_KN = StridedArray.heap_allocated(float, (K, N))
        last_r_KN = StridedArray.heap_allocated(float, (K, N))

        this_r_KN_data = Variable.set_to(this_r_KN.data)
        last_r_KN_data = Variable.set_to(last_r_KN.data)

        @qy.for_(self._model._iterations)
        def _(i):
            # compute responsibilities
            r_KN = this_r_KN.using(this_r_KN_data.value)

            @qy.for_(N)
            def _(n):
                sample = samples.at(n)

                qy.value_from_any(-numpy.inf).store(total)

                @qy.for_(K)
                def _(k):
                    responsibility = r_KN.at(k, n).data

                    self._sub_emitter.ll(
                        StridedArray.from_typed_pointer(out.at(k).data.gep(0, 1)),
                        StridedArray.from_typed_pointer(sample.data),
                        responsibility,
                        )

                    log_add_double(total.load(), responsibility.load()).store(total)

                total_value = total.load()

                @qy.if_else(total_value == -numpy.inf)
                def _(then):
                    if then:
                        @qy.for_(K)
                        def _(k):
                            qy.value_from_any(1.0 / K).store(r_KN.at(k, n).data)
                    else:
                        @qy.for_(K)
                        def _(k):
                            responsibility = r_KN.at(k, n).data

                            qy.exp(responsibility.load() - total_value).store(responsibility)

            # estimate new mixture and component parameters
            @qy.for_(K)
            def _(k):
                component = out.at(k).data

                self._sub_emitter.map(
                    prior.at(k),
                    samples,
                    r_KN.at(k),
                    StridedArray.from_typed_pointer(component.gep(0, 1)),
                    )

                qy.value_from_any(0.0).store(total)

                @qy.for_(N)
                def _(n):
                    (total.load() + r_KN.at(k, n).data.load()).store(total)

                (total.load() / float(N)).store(component.gep(0, 0))

            # check for termination
            last_r_KN = this_r_KN.using(last_r_KN_data.value)

            @qy.if_(i > 0)
            def _():
                qy.value_from_any(0.0).store(total)

                @qy.for_(K)
                def _(k):
                    @qy.for_(N)
                    def _(n):
                        delta = r_KN.at(k, n).data.load() - last_r_KN.at(k, n).data.load()

                        (total.load() + abs(delta)).store(total)

                @qy.if_(total.load() < 1e-12)
                def _():
                    qy.break_()

            total_delta = total.load()

            # swap the responsibility matrices
            temp_r_KN_data_value = this_r_KN_data.value

            this_r_KN_data.set(last_r_KN_data.value)
            last_r_KN_data.set(temp_r_KN_data_value)

            # compute the ll at this step
            @qy.for_(N)
            def _(n):
                sample = samples.at(n)

                total_ll = total.load()

                qy.value_from_any(-numpy.inf).store(total)

                @qy.for_(K)
                def _(k):
                    self._sub_emitter.ll(
                        StridedArray.from_typed_pointer(out.at(k).data.gep(0, 1)),
                        StridedArray.from_typed_pointer(sample.data),
                        component_ll,
                        )

                    log_add_double(
                        total.load(),
                        qy.log(out.at(k).data.gep(0, 0).load()) + component_ll.load(),
                        ) \
                        .store(total)

                (total_ll + total.load()).store(total)

            total_ll = total.load()

            # be informative
            qy.py_printf("after EM step %i: delta %s; ll %s\n", i, total_delta, total_ll)

        # clean up
        qy.heap_free(this_r_KN.data)
        qy.heap_free(last_r_KN.data)

        qy.return_()
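Written out, one emitted EM iteration is: an E-step that normalizes the per-component log-likelihoods with a log-sum-exp, falling back to uniform responsibilities when every component reports -inf,

    r_{kn} = \exp\!\big(\ell_{kn} - \operatorname{logsumexp}_j \ell_{jn}\big), \qquad \ell_{kn} = \log f_k(x_n),

an M-step that refits each component by weighted MAP and sets the mixture weights to

    \pi_k = \frac{1}{N} \sum_n r_{kn},

and a termination test on \sum_{k,n} |r_{kn} - r_{kn}^{\mathrm{prev}}| < 10^{-12}. Note that, as emitted, the responsibilities are normalized over the component likelihoods alone; the mixture weights \pi_k enter only the per-step log-likelihood that is printed.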
Example #33
def _ln_gamma(x):
    """
    Emit the log-gamma computation.

    This implementation is adapted from the same Cody & Stoltz netlib code on
    which everyone bases their implementation.
    """

    import math

    a = qy.value_from_any(0.6796875)

    @qy.if_((x <= 0.5) | ((x > a) & (x <= 1.5)))
    def _():
        result = qy.stack_allocate(float)
        input_ = qy.stack_allocate(float)

        p1 = [ 
            4.945235359296727046734888e0,
            2.018112620856775083915565e2,
            2.290838373831346393026739e3,
            1.131967205903380828685045e4,
            2.855724635671635335736389e4,
            3.848496228443793359990269e4,
            2.637748787624195437963534e4,
            7.225813979700288197698961e3,
            ]
        q1 = [
            6.748212550303777196073036e1,
            1.113332393857199323513008e3,
            7.738757056935398733233834e3,
            2.763987074403340708898585e4,
            5.499310206226157329794414e4,
            6.161122180066002127833352e4,
            3.635127591501940507276287e4,
            8.785536302431013170870835e3,
            ]

        @qy.if_else(x <= 0.5)
        def _(then):
            if then:
                (-qy.log(x)).store(result)

                @qy.if_(x + 1.0 == 1.0)
                def _():
                    qy.return_(result.load())

                x.store(input_)
            else:
                qy.value_from_any(0.0).store(result)

                (x - 1.0).store(input_)

        y    = input_.load()
        xnum = 0.0
        xden = 1.0

        for (p, q) in zip(p1, q1):
            xnum = xnum * y + p
            xden = xden * y + q

        d1 = -5.772156649015328605195174e-1

        qy.return_(result.load() + y * (d1 + y * (xnum / xden)))

    @qy.if_((x <= a) | ((x > 1.5) & (x <= 4)))
    def _():
        result = qy.stack_allocate(float)
        input_ = qy.stack_allocate(float)

        p2 = [
            4.974607845568932035012064e0,
            5.424138599891070494101986e2,
            1.550693864978364947665077e4,
            1.847932904445632425417223e5,
            1.088204769468828767498470e6,
            3.338152967987029735917223e6,
            5.106661678927352456275255e6,
            3.074109054850539556250927e6,
            ]
        q2 = [
            1.830328399370592604055942e2,
            7.765049321445005871323047e3,
            1.331903827966074194402448e5,
            1.136705821321969608938755e6,
            5.267964117437946917577538e6,
            1.346701454311101692290052e7,
            1.782736530353274213975932e7,
            9.533095591844353613395747e6,
            ]

        @qy.if_else(x <= a)
        def _(then):
            if then:
                (-qy.log(x)).store(result)

                (x - 1.0).store(input_)
            else:
                qy.value_from_any(0.0).store(result)

                (x - 2.0).store(input_)

        y    = input_.load()
        xnum = 0.0
        xden = 1.0

        for (p, q) in zip(p2, q2):
            xnum = xnum * y + p
            xden = xden * y + q

        d2 = 4.227843350984671393993777e-1

        qy.return_(result.load() + y * (d2 + y * (xnum / xden)))

    @qy.if_(x <= 12)
    def _():
        p4 = [
            1.474502166059939948905062e4 ,
            2.426813369486704502836312e6 ,
            1.214755574045093227939592e8 ,
            2.663432449630976949898078e9 ,
            2.940378956634553899906876e10,
            1.702665737765398868392998e11,
            4.926125793377430887588120e11,
            5.606251856223951465078242e11,
            ]
        q4 = [
            2.690530175870899333379843e3 ,
            6.393885654300092398984238e5 ,
            4.135599930241388052042842e7 ,
            1.120872109616147941376570e9 ,
            1.488613728678813811542398e10,
            1.016803586272438228077304e11,
            3.417476345507377132798597e11,
            4.463158187419713286462081e11,
            ]

        y    = x - 4.0
        xnum = 0.0
        xden = -1.0

        for (p, q) in zip(p4, q4):
            xnum = xnum * y + p
            xden = xden * y + q

        d4 = 1.791759469228055000094023e0

        qy.return_(d4 + y * (xnum / xden))

    # else
    cc = [
        -1.910444077728e-03           ,
        8.4171387781295e-04           ,
        -5.952379913043012e-04        ,
        7.93650793500350248e-04       ,
        -2.777777777777681622553e-03  ,
        8.333333333333333331554247e-02,
        ]

    y    = qy.log(x)
    r    = x * (y - 1.0) - y * 0.5 + 0.9189385332046727417803297
    s    = 1.0 / x
    z    = s * s
    xnum = 5.7083835261e-03

    for c in cc:
        xnum = xnum * z + c

    qy.return_(r + xnum * s)
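For reference, the final (x > 12) branch is the Stirling-series tail of the same netlib routine:

    \ln\Gamma(x) \approx x(\ln x - 1) - \tfrac{1}{2}\ln x + \tfrac{1}{2}\ln 2\pi + \frac{C(1/x^2)}{x},

where 0.9189385332\ldots = \tfrac{1}{2}\ln 2\pi and C is the correction polynomial built from the cc coefficients; its constant term, 8.3333\ldots \times 10^{-2} \approx 1/12, is the leading Stirling correction.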
Example #34
 def _():
     qy.return_(qy.select(k == n, 0.0, -numpy.inf))
Example #35
 def _(then):
     if then:
         qy.return_(-numpy.inf)
     else:
         qy.return_(a +
                    qy.log1p(qy.exp(qy.select(s, y_in, x_in) - a)))
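Assuming a holds the larger of the two log-inputs and the select picks the smaller, this is the usual overflow-safe log-add:

    \log\!\big(e^x + e^y\big) = a + \operatorname{log1p}\!\big(e^{\min(x, y) - a}\big), \qquad a = \max(x, y),

with the then branch returning -\infty outright so that two -\infty inputs never reach the subtraction inside the exponential.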
Example #36
    def _map(self, prior, samples, weights, out, initializations):
        """
        Emit computation of the estimated MAP parameter via EM.
        """

        # mise en place
        K = self._model._K
        N = samples.shape[0]

        # generate some initial parameters
        self._map_initialize(prior, samples, weights, out, initializations)

        # run EM until convergence
        total = qy.stack_allocate(float)
        component_ll = qy.stack_allocate(float)

        this_r_KN = StridedArray.heap_allocated(float, (K, N))
        last_r_KN = StridedArray.heap_allocated(float, (K, N))

        this_r_KN_data = Variable.set_to(this_r_KN.data)
        last_r_KN_data = Variable.set_to(last_r_KN.data)

        @qy.for_(self._model._iterations)
        def _(i):
            # compute responsibilities
            r_KN = this_r_KN.using(this_r_KN_data.value)

            @qy.for_(N)
            def _(n):
                sample = samples.at(n)

                qy.value_from_any(-numpy.inf).store(total)

                @qy.for_(K)
                def _(k):
                    responsibility = r_KN.at(k, n).data

                    self._sub_emitter.ll(
                        StridedArray.from_typed_pointer(
                            out.at(k).data.gep(0, 1)),
                        StridedArray.from_typed_pointer(sample.data),
                        responsibility,
                    )

                    log_add_double(total.load(),
                                   responsibility.load()).store(total)

                total_value = total.load()

                @qy.if_else(total_value == -numpy.inf)
                def _(then):
                    if then:

                        @qy.for_(K)
                        def _(k):
                            qy.value_from_any(1.0 / K).store(
                                r_KN.at(k, n).data)
                    else:

                        @qy.for_(K)
                        def _(k):
                            responsibility = r_KN.at(k, n).data

                            qy.exp(responsibility.load() -
                                   total_value).store(responsibility)

            # estimate new mixture and component parameters
            @qy.for_(K)
            def _(k):
                component = out.at(k).data

                self._sub_emitter.map(
                    prior.at(k),
                    samples,
                    r_KN.at(k),
                    StridedArray.from_typed_pointer(component.gep(0, 1)),
                )

                qy.value_from_any(0.0).store(total)

                @qy.for_(N)
                def _(n):
                    (total.load() + r_KN.at(k, n).data.load()).store(total)

                (total.load() / float(N)).store(component.gep(0, 0))

            # check for termination
            last_r_KN = this_r_KN.using(last_r_KN_data.value)

            @qy.if_(i > 0)
            def _():
                qy.value_from_any(0.0).store(total)

                @qy.for_(K)
                def _(k):
                    @qy.for_(N)
                    def _(n):
                        delta = r_KN.at(k, n).data.load() - last_r_KN.at(
                            k, n).data.load()

                        (total.load() + abs(delta)).store(total)

                @qy.if_(total.load() < 1e-12)
                def _():
                    qy.break_()

            total_delta = total.load()

            # swap the responsibility matrices
            temp_r_KN_data_value = this_r_KN_data.value

            this_r_KN_data.set(last_r_KN_data.value)
            last_r_KN_data.set(temp_r_KN_data_value)

            # compute the ll at this step
            @qy.for_(N)
            def _(n):
                sample = samples.at(n)

                total_ll = total.load()

                qy.value_from_any(-numpy.inf).store(total)

                @qy.for_(K)
                def _(k):
                    self._sub_emitter.ll(
                        StridedArray.from_typed_pointer(
                            out.at(k).data.gep(0, 1)),
                        StridedArray.from_typed_pointer(sample.data),
                        component_ll,
                    )

                    log_add_double(
                        total.load(),
                        qy.log(out.at(k).data.gep(0, 0).load()) + component_ll.load(),
                        ) \
                        .store(total)

                (total_ll + total.load()).store(total)

            total_ll = total.load()

            # be informative
            qy.py_printf("after EM step %i: delta %s; ll %s\n", i, total_delta,
                         total_ll)

        # clean up
        qy.heap_free(this_r_KN.data)
        qy.heap_free(last_r_KN.data)

        qy.return_()
Example #38
 def _(then):
     if then:
         qy.return_(-numpy.inf)
     else:
         qy.return_(a + qy.log1p(qy.exp(qy.select(s, y_in, x_in) - a)))