Example 1
    def _ll(self, parameter, sample, out):
        """
        Compute finite-mixture log-likelihood.
        """

        total = qy.stack_allocate(float, -numpy.inf, "total")
        component_ll = qy.stack_allocate(float)

        @qy.for_(self._model._K)
        def _(index):
            component = parameter.at(index)

            self._sub_emitter.ll(
                StridedArray.from_typed_pointer(component.data.gep(0, 1)),
                sample,
                component_ll,
            )

            log_add_double(
                total.load(),
                qy.log(component.data.gep(0, 0).load()) + component_ll.load(),
                ) \
                .store(total)

        total.load().store(out)
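
For reference, a minimal NumPy sketch of what the emitted loop computes: the mixture log-likelihood log Σ_k π_k p(x | θ_k), accumulated stably in log space. Here numpy.logaddexp plays the role of log_add_double, and the function and argument names are illustrative, not part of the emitter API.

    import numpy

    def mixture_ll(pis, component_lls):
        # log(sum_k pi_k * exp(ll_k)), accumulated pairwise in log space,
        # starting from -inf exactly as the emitted loop does
        total = -numpy.inf
        for pi, ll in zip(pis, component_lls):
            total = numpy.logaddexp(total, numpy.log(pi) + ll)
        return total

    # e.g. two components with weights 0.3 / 0.7 and per-component log-likelihoods
    print(mixture_ll([0.3, 0.7], [-1.2, -0.4]))
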
Example 2
    def _map(self, prior, samples, weights, out):
        """
        Emit computation of the estimated MAP parameter.
        """

        total_k = qy.stack_allocate(float, 0.0)
        total_w = qy.stack_allocate(float, 0.0)

        @qy.for_(samples.shape[0])
        def _(n):
            weight = weights.at(n).data.load()
            sample = samples.at(n).data.load().cast_to(float)

            (total_k.load() + sample * weight).store(total_k)
            (total_w.load() +
             weight * float(self._model._estimation_n)).store(total_w)

        alpha = prior.data.gep(0, 0).load()
        beta = prior.data.gep(0, 1).load()

        numerator = total_k.load() + alpha - 1.0
        denominator = total_w.load() + alpha + beta - 2.0

        (numerator / denominator).store(out.data.gep(0, 0))
        qy.value_from_any(self._model._estimation_n).store(out.data.gep(0, 1))

        qy.return_()
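
In closed form, the value being emitted is the mode of the Beta posterior: with a Beta(α, β) prior, per-sample success counts k_n, a fixed trial count N (self._model._estimation_n), and weights w_n, the estimate is (Σ_n w_n k_n + α − 1) / (Σ_n w_n N + α + β − 2). A plain-Python sketch under those assumptions; the function and argument names are illustrative.

    def binomial_map(counts, weights, n_trials, alpha, beta):
        # weighted sufficient statistics, as accumulated by the emitted loop
        total_k = sum(k * w for k, w in zip(counts, weights))
        total_w = sum(w * float(n_trials) for w in weights)

        # mode of the Beta posterior over the success probability
        return (total_k + alpha - 1.0) / (total_w + alpha + beta - 2.0)

    # e.g. three samples of 3, 4, and 5 successes out of 10 trials each
    print(binomial_map([3, 4, 5], [1.0, 1.0, 1.0], 10, alpha=2.0, beta=2.0))
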
Example 3
    def _ll(self, parameter, sample, out):
        """
        Compute finite-mixture log-likelihood.
        """

        total        = qy.stack_allocate(float, -numpy.inf, "total")
        component_ll = qy.stack_allocate(float)

        @qy.for_(self._model._K)
        def _(index):
            component = parameter.at(index)

            self._sub_emitter.ll(
                StridedArray.from_typed_pointer(component.data.gep(0, 1)),
                sample,
                component_ll,
                )

            log_add_double(
                total.load(),
                qy.log(component.data.gep(0, 0).load()) + component_ll.load(),
                ) \
                .store(total)

        total.load().store(out)
Example 4
    def _map(self, prior, samples, weights, out):
        """
        Emit computation of the estimated MAP parameter.
        """

        total_k = qy.stack_allocate(float, 0.0)
        total_w = qy.stack_allocate(float, 0.0)

        @qy.for_(samples.shape[0])
        def _(n):
            weight = weights.at(n).data.load()
            sample = samples.at(n).data.load().cast_to(float)

            (total_k.load() + sample * weight).store(total_k)
            (total_w.load() + weight * float(self._model._estimation_n)).store(total_w)

        alpha = prior.data.gep(0, 0).load()
        beta  = prior.data.gep(0, 1).load()

        numerator   = total_k.load() + alpha - 1.0
        denominator = total_w.load() + alpha + beta - 2.0

        (numerator / denominator).store(out.data.gep(0, 0))
        qy.value_from_any(self._model._estimation_n).store(out.data.gep(0, 1))

        qy.return_()
Example 5
    def _():
        result = qy.stack_allocate(float)
        input_ = qy.stack_allocate(float)

        p1 = [ 
            4.945235359296727046734888e0,
            2.018112620856775083915565e2,
            2.290838373831346393026739e3,
            1.131967205903380828685045e4,
            2.855724635671635335736389e4,
            3.848496228443793359990269e4,
            2.637748787624195437963534e4,
            7.225813979700288197698961e3,
            ]
        q1 = [
            6.748212550303777196073036e1,
            1.113332393857199323513008e3,
            7.738757056935398733233834e3,
            2.763987074403340708898585e4,
            5.499310206226157329794414e4,
            6.161122180066002127833352e4,
            3.635127591501940507276287e4,
            8.785536302431013170870835e3,
            ]

        @qy.if_else(x <= 0.5)
        def _(then):
            if then:
                (-qy.log(x)).store(result)

                @qy.if_(x + 1.0 == 1.0)
                def _():
                    qy.return_(result.load())

                x.store(input_)
            else:
                qy.value_from_any(0.0).store(result)

                (x - 1.0).store(input_)

        y    = input_.load()
        xnum = 0.0
        xden = 1.0

        for (p, q) in zip(p1, q1):
            xnum = xnum * y + p
            xden = xden * y + q

        d1 = -5.772156649015328605195174e-1

        qy.return_(result.load() + y * (d1 + y * (xnum / xden)))
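
This is one interval of a Cody-style rational approximation to log Γ(x): for x ≤ 0.5 the result starts from −log(x) and the correction is evaluated at y = x, otherwise at y = x − 1. Below is a standalone Python transcription of the same arithmetic, compared against math.lgamma; the coefficients are copied from the fragment, and x is assumed to lie in (0, 1.5].

    import math

    P1 = [4.945235359296727046734888e0, 2.018112620856775083915565e2,
          2.290838373831346393026739e3, 1.131967205903380828685045e4,
          2.855724635671635335736389e4, 3.848496228443793359990269e4,
          2.637748787624195437963534e4, 7.225813979700288197698961e3]
    Q1 = [6.748212550303777196073036e1, 1.113332393857199323513008e3,
          7.738757056935398733233834e3, 2.763987074403340708898585e4,
          5.499310206226157329794414e4, 6.161122180066002127833352e4,
          3.635127591501940507276287e4, 8.785536302431013170870835e3]
    D1 = -5.772156649015328605195174e-1

    def lgamma_low(x):
        # mirrors the two branches of the emitted fragment
        if x <= 0.5:
            result, y = -math.log(x), x
        else:
            result, y = 0.0, x - 1.0

        xnum, xden = 0.0, 1.0
        for p, q in zip(P1, Q1):
            xnum = xnum * y + p
            xden = xden * y + q

        return result + y * (D1 + y * (xnum / xden))

    for x in (0.25, 0.5, 1.0, 1.5):
        print(x, lgamma_low(x), math.lgamma(x))
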
Example 6
    def _():
        result = qy.stack_allocate(float)
        input_ = qy.stack_allocate(float)

        p2 = [
            4.974607845568932035012064e0,
            5.424138599891070494101986e2,
            1.550693864978364947665077e4,
            1.847932904445632425417223e5,
            1.088204769468828767498470e6,
            3.338152967987029735917223e6,
            5.106661678927352456275255e6,
            3.074109054850539556250927e6,
            ]
        q2 = [
            1.830328399370592604055942e2,
            7.765049321445005871323047e3,
            1.331903827966074194402448e5,
            1.136705821321969608938755e6,
            5.267964117437946917577538e6,
            1.346701454311101692290052e7,
            1.782736530353274213975932e7,
            9.533095591844353613395747e6,
            ]

        @qy.if_else(x <= a)
        def _(then):
            if then:
                (-qy.log(x)).store(result)

                (x - 1.0).store(input_)
            else:
                qy.value_from_any(0.0).store(result)

                (x - 2.0).store(input_)

        y    = input_.load()
        xnum = 0.0
        xden = 1.0

        for (p, q) in zip(p2, q2):
            xnum = xnum * y + p
            xden = xden * y + q

        d2 = 4.227843350984671393993777e-1

        qy.return_(result.load() + y * (d2 + y * (xnum / xden)))
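
Same scheme, next coefficient set. The branch that sets result = 0 and input = x − 2 is the standard rational approximation on roughly 1.5 < x ≤ 4; the threshold `a` is defined outside this fragment, so only that branch is exercised in the sketch below, checked against math.lgamma under that range assumption.

    import math

    P2 = [4.974607845568932035012064e0, 5.424138599891070494101986e2,
          1.550693864978364947665077e4, 1.847932904445632425417223e5,
          1.088204769468828767498470e6, 3.338152967987029735917223e6,
          5.106661678927352456275255e6, 3.074109054850539556250927e6]
    Q2 = [1.830328399370592604055942e2, 7.765049321445005871323047e3,
          1.331903827966074194402448e5, 1.136705821321969608938755e6,
          5.267964117437946917577538e6, 1.346701454311101692290052e7,
          1.782736530353274213975932e7, 9.533095591844353613395747e6]
    D2 = 4.227843350984671393993777e-1

    def lgamma_mid(x):
        # the "result = 0.0, input = x - 2.0" branch of the fragment
        y = x - 2.0
        xnum, xden = 0.0, 1.0
        for p, q in zip(P2, Q2):
            xnum = xnum * y + p
            xden = xden * y + q
        return y * (D2 + y * (xnum / xden))

    for x in (2.0, 2.5, 3.0, 4.0):
        print(x, lgamma_mid(x), math.lgamma(x))
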
Example 7
    def _given(self, parameter, samples, out):
        """
        Compute the conditional distribution.
        """

        # mise en place
        K = self._model._K
        N = samples.shape[0]

        # compute posterior mixture parameters
        total = qy.stack_allocate(float, -numpy.inf)

        @qy.for_(K)
        def _(k):
            prior_pi        = parameter.at(k).data.gep(0, 0)
            prior_parameter = parameter.at(k).data.gep(0, 1)
            posterior_pi    = out.at(k).data.gep(0, 0)

            qy.log(prior_pi.load()).store(posterior_pi)

            @qy.for_(N)
            def _(n):
                current_pi = posterior_pi.load()

                self._sub_emitter.ll(
                    StridedArray.from_typed_pointer(prior_parameter),
                    samples.at(n),
                    posterior_pi,
                    )

                (current_pi + posterior_pi.load()).store(posterior_pi)

            log_add_double(total.load(), posterior_pi.load()).store(total)

        total_value = total.load()

        @qy.for_(K)
        def _(k):
            posterior_pi  = out.at(k).data.gep(0, 0)
            normalized_pi = posterior_pi.load() - total_value

            qy.exp(normalized_pi).store(posterior_pi)

        # compute posterior component parameters
        @qy.for_(K)
        def _(k):
            prior_parameter     = parameter.at(k).data.gep(0, 1)
            posterior_parameter = out.at(k).data.gep(0, 1)

            self._sub_emitter.given(
                StridedArray.from_typed_pointer(prior_parameter),
                samples,
                StridedArray.from_typed_pointer(posterior_parameter),
                )
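
What the first two loops compute, in NumPy terms: posterior mixture weights π_k ∝ π_k ∏_n p(x_n | θ_k), accumulated in log space and normalized with a log-sum-exp before being exponentiated. A sketch under the assumption that per_sample_lls[k, n] holds log p(x_n | θ_k); the names are illustrative.

    import numpy

    def posterior_mixture_weights(prior_pis, per_sample_lls):
        # log pi_k + sum_n log p(x_n | theta_k), per component
        log_post = numpy.log(prior_pis) + per_sample_lls.sum(axis=1)

        # normalize with a log-sum-exp (the role of log_add_double), then exponentiate
        log_total = numpy.logaddexp.reduce(log_post)
        return numpy.exp(log_post - log_total)

    # e.g. two components observing three samples
    lls = numpy.log(numpy.array([[0.2, 0.5, 0.1],
                                 [0.4, 0.1, 0.3]]))
    print(posterior_mixture_weights(numpy.array([0.6, 0.4]), lls))
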
Example 8
    def _given(self, parameter, samples, out):
        """
        Compute the conditional distribution.
        """

        # mise en place
        K = self._model._K
        N = samples.shape[0]

        # compute posterior mixture parameters
        total = qy.stack_allocate(float, -numpy.inf)

        @qy.for_(K)
        def _(k):
            prior_pi = parameter.at(k).data.gep(0, 0)
            prior_parameter = parameter.at(k).data.gep(0, 1)
            posterior_pi = out.at(k).data.gep(0, 0)

            qy.log(prior_pi.load()).store(posterior_pi)

            @qy.for_(N)
            def _(n):
                current_pi = posterior_pi.load()

                self._sub_emitter.ll(
                    StridedArray.from_typed_pointer(prior_parameter),
                    samples.at(n),
                    posterior_pi,
                )

                (current_pi + posterior_pi.load()).store(posterior_pi)

            log_add_double(total.load(), posterior_pi.load()).store(total)

        total_value = total.load()

        @qy.for_(K)
        def _(k):
            posterior_pi = out.at(k).data.gep(0, 0)
            normalized_pi = posterior_pi.load() - total_value

            qy.exp(normalized_pi).store(posterior_pi)

        # compute posterior component parameters
        @qy.for_(K)
        def _(k):
            prior_parameter = parameter.at(k).data.gep(0, 1)
            posterior_parameter = out.at(k).data.gep(0, 1)

            self._sub_emitter.given(
                StridedArray.from_typed_pointer(prior_parameter),
                samples,
                StridedArray.from_typed_pointer(posterior_parameter),
            )
Example 9
        def binomial_ml(samples_data, weights_data, out_data):
            prior_data = qy.stack_allocate(self._model.prior_dtype)

            qy.value_from_any(1.0).store(prior_data.gep(0, 0))
            qy.value_from_any(1.0).store(prior_data.gep(0, 1))

            self._map(
                StridedArray.from_raw(prior_data, (), ()),
                samples.using(samples_data),
                weights.using(weights_data),
                out.using(out_data),
                )
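
This wrapper obtains a maximum-likelihood estimate by running the MAP routine with a flat Beta(1, 1) prior: with α = β = 1 the MAP formula reduces to the plain ratio of weighted successes to weighted trials. A tiny illustration of that algebra; beta_map is a hypothetical helper, not part of the emitter.

    def beta_map(total_k, total_n, alpha, beta):
        # generic Beta-prior MAP estimate of a binomial success probability
        return (total_k + alpha - 1.0) / (total_n + alpha + beta - 2.0)

    # a flat Beta(1, 1) prior makes the MAP estimate coincide with the ML ratio
    assert beta_map(12.0, 30.0, 1.0, 1.0) == 12.0 / 30.0
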
Example 10
        def binomial_ml(samples_data, weights_data, out_data):
            prior_data = qy.stack_allocate(self._model.prior_dtype)

            qy.value_from_any(1.0).store(prior_data.gep(0, 0))
            qy.value_from_any(1.0).store(prior_data.gep(0, 1))

            self._map(
                StridedArray.from_raw(prior_data, (), ()),
                samples.using(samples_data),
                weights.using(weights_data),
                out.using(out_data),
            )
Example 11
    def _ml(self, samples, weights, out):
        """
        Emit computation of the estimated maximum-likelihood parameter.
        """

        total_k = qy.stack_allocate(float, 0.0)
        total_w = qy.stack_allocate(float, 0.0)

        @qy.for_(samples.shape[0])
        def _(n):
            weight = weights.at(n).data.load()
            sample = samples.at(n).data.load().cast_to(float)

            (total_k.load() + sample * weight).store(total_k)
            (total_w.load() + weight * float(self._model._estimation_n)).store(total_w)

        final_ratio = \
              (total_k.load() + self._model._epsilon) \
            / (total_w.load() + self._model._epsilon)

        final_ratio.store(out.data.gep(0, 0))
        qy.value_from_any(self._model._estimation_n).store(out.data.gep(0, 1))
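
The epsilon term keeps the ratio well-defined when the weighted trial count is zero and barely perturbs it otherwise. A plain-Python sketch of the smoothed estimator, with an illustrative epsilon value; the real one lives on the model object.

    def binomial_ml(counts, weights, n_trials, epsilon=1e-6):
        # weighted success count and weighted trial count, then the smoothed ratio
        total_k = sum(k * w for k, w in zip(counts, weights))
        total_w = sum(w * float(n_trials) for w in weights)

        return (total_k + epsilon) / (total_w + epsilon)

    print(binomial_ml([3, 4, 5], [1.0, 1.0, 1.0], 10))   # ~0.4
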
Example 12
    def _ml(self, samples, weights, out):
        """
        Emit computation of the estimated maximum-likelihood parameter.
        """

        total_k = qy.stack_allocate(float, 0.0)
        total_w = qy.stack_allocate(float, 0.0)

        @qy.for_(samples.shape[0])
        def _(n):
            weight = weights.at(n).data.load()
            sample = samples.at(n).data.load().cast_to(float)

            (total_k.load() + sample * weight).store(total_k)
            (total_w.load() +
             weight * float(self._model._estimation_n)).store(total_w)

        final_ratio = \
              (total_k.load() + self._model._epsilon) \
            / (total_w.load() + self._model._epsilon)

        final_ratio.store(out.data.gep(0, 0))
        qy.value_from_any(self._model._estimation_n).store(out.data.gep(0, 1))
Example 13
    def _map(self, prior, samples, weights, out):
        """
        Emit computation of the estimated MAP parameter.
        """

        total_k = qy.stack_allocate(float, 0.0)
        total_n = qy.stack_allocate(float, 0.0)

        @qy.for_(samples.shape[0])
        def _(n):
            weight   = weights.at(n).data.load()
            sample_k = samples.at(n).data.gep(0, 0).load().cast_to(float)
            sample_n = samples.at(n).data.gep(0, 1).load().cast_to(float)

            (total_k.load() + sample_k * weight).store(total_k)
            (total_n.load() + sample_n * weight).store(total_n)

        alpha = prior.data.gep(0, 0).load()
        beta  = prior.data.gep(0, 1).load()

        numerator   = total_k.load() + alpha - 1.0
        denominator = total_n.load() + alpha + beta - 2.0

        (numerator / denominator).store(out.data)
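
Here each sample carries its own (k, n) pair, and the closed form above is the mode of the weighted Beta posterior. Below is a small numerical confirmation by grid search over the success probability; all names are illustrative, and the grid resolution bounds the agreement.

    import math

    def beta_binomial_map(ks, ns, ws, alpha, beta):
        # the closed form emitted above, on weighted (k, n) samples
        total_k = sum(k * w for k, w in zip(ks, ws))
        total_n = sum(n * w for n, w in zip(ns, ws))
        return (total_k + alpha - 1.0) / (total_n + alpha + beta - 2.0)

    def log_posterior(p, ks, ns, ws, alpha, beta):
        # unnormalized log density of the weighted Beta posterior over p
        total_k = sum(k * w for k, w in zip(ks, ws))
        total_n = sum(n * w for n, w in zip(ns, ws))
        return ((total_k + alpha - 1.0) * math.log(p)
                + (total_n - total_k + beta - 1.0) * math.log(1.0 - p))

    ks, ns, ws = [3, 7], [10, 12], [1.0, 2.0]
    closed_form = beta_binomial_map(ks, ns, ws, 2.0, 3.0)
    grid = [i / 1000.0 for i in range(1, 1000)]
    numerical = max(grid, key=lambda p: log_posterior(p, ks, ns, ws, 2.0, 3.0))
    print(closed_form, numerical)   # agree up to the grid spacing
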
Example 14
    def _map(self, prior, samples, weights, out):
        """
        Emit computation of the estimated MAP parameter.
        """

        total_k = qy.stack_allocate(float, 0.0)
        total_n = qy.stack_allocate(float, 0.0)

        @qy.for_(samples.shape[0])
        def _(n):
            weight = weights.at(n).data.load()
            sample_k = samples.at(n).data.gep(0, 0).load().cast_to(float)
            sample_n = samples.at(n).data.gep(0, 1).load().cast_to(float)

            (total_k.load() + sample_k * weight).store(total_k)
            (total_n.load() + sample_n * weight).store(total_n)

        alpha = prior.data.gep(0, 0).load()
        beta = prior.data.gep(0, 1).load()

        numerator = total_k.load() + alpha - 1.0
        denominator = total_n.load() + alpha + beta - 2.0

        (numerator / denominator).store(out.data)
Example 15
    def __init__(self, type_):
        """
        Initialize.
        """

        self._location = qy.stack_allocate(type_)
Example 16
    def _map(self, prior, samples, weights, out, initializations):
        """
        Emit computation of the estimated MAP parameter.
        """

        # mise en place
        K = self._model._K
        N = samples.shape[0]

        # generate some initial parameters
        self._map_initialize(prior, samples, weights, out, initializations)

        # run EM until convergence
        total = qy.stack_allocate(float)
        component_ll = qy.stack_allocate(float)

        this_r_KN = StridedArray.heap_allocated(float, (K, N))
        last_r_KN = StridedArray.heap_allocated(float, (K, N))

        this_r_KN_data = Variable.set_to(this_r_KN.data)
        last_r_KN_data = Variable.set_to(last_r_KN.data)

        @qy.for_(self._model._iterations)
        def _(i):
            # compute responsibilities
            r_KN = this_r_KN.using(this_r_KN_data.value)

            @qy.for_(N)
            def _(n):
                sample = samples.at(n)

                qy.value_from_any(-numpy.inf).store(total)

                @qy.for_(K)
                def _(k):
                    responsibility = r_KN.at(k, n).data

                    self._sub_emitter.ll(
                        StridedArray.from_typed_pointer(
                            out.at(k).data.gep(0, 1)),
                        StridedArray.from_typed_pointer(sample.data),
                        responsibility,
                    )

                    log_add_double(total.load(),
                                   responsibility.load()).store(total)

                total_value = total.load()

                @qy.if_else(total_value == -numpy.inf)
                def _(then):
                    if then:

                        @qy.for_(K)
                        def _(k):
                            qy.value_from_any(1.0 / K).store(
                                r_KN.at(k, n).data)
                    else:

                        @qy.for_(K)
                        def _(k):
                            responsibility = r_KN.at(k, n).data

                            qy.exp(responsibility.load() -
                                   total_value).store(responsibility)

            # estimate new mixture and component parameters
            @qy.for_(K)
            def _(k):
                component = out.at(k).data

                self._sub_emitter.map(
                    prior.at(k),
                    samples,
                    r_KN.at(k),
                    StridedArray.from_typed_pointer(component.gep(0, 1)),
                )

                qy.value_from_any(0.0).store(total)

                @qy.for_(N)
                def _(n):
                    (total.load() + r_KN.at(k, n).data.load()).store(total)

                (total.load() / float(N)).store(component.gep(0, 0))

            # check for termination
            last_r_KN = this_r_KN.using(last_r_KN_data.value)

            @qy.if_(i > 0)
            def _():
                qy.value_from_any(0.0).store(total)

                @qy.for_(K)
                def _(k):
                    @qy.for_(N)
                    def _(n):
                        delta = r_KN.at(k, n).data.load() - last_r_KN.at(
                            k, n).data.load()

                        (total.load() + abs(delta)).store(total)

                @qy.if_(total.load() < 1e-12)
                def _():
                    qy.break_()

            total_delta = total.load()

            # swap the responsibility matrices
            temp_r_KN_data_value = this_r_KN_data.value

            this_r_KN_data.set(last_r_KN_data.value)
            last_r_KN_data.set(temp_r_KN_data_value)

            # compute the ll at this step
            @qy.for_(N)
            def _(n):
                sample = samples.at(n)

                total_ll = total.load()

                qy.value_from_any(-numpy.inf).store(total)

                @qy.for_(K)
                def _(k):
                    self._sub_emitter.ll(
                        StridedArray.from_typed_pointer(
                            out.at(k).data.gep(0, 1)),
                        StridedArray.from_typed_pointer(sample.data),
                        component_ll,
                    )

                    log_add_double(
                        total.load(),
                        qy.log(out.at(k).data.gep(0, 0).load()) + component_ll.load(),
                        ) \
                        .store(total)

                (total_ll + total.load()).store(total)

            total_ll = total.load()

            # be informative
            qy.py_printf("after EM step %i: delta %s; ll %s\n", i, total_delta,
                         total_ll)

        # clean up
        qy.heap_free(this_r_KN.data)
        qy.heap_free(last_r_KN.data)

        qy.return_()
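
For orientation, a textbook EM step for a finite mixture in NumPy: responsibilities r_kn ∝ π_k p(x_n | θ_k), normalized per sample with a log-sum-exp, followed by the mixture-weight update π_k ← mean_n r_kn. The component-parameter update is delegated to a sub-estimator in the emitted code and is left abstract here; the array names are illustrative.

    import numpy

    def em_responsibilities(log_pis, component_lls):
        # component_lls[k, n] = log p(x_n | theta_k) under the current parameters
        log_r = log_pis[:, None] + component_lls

        # per-sample normalization in log space, then exponentiation
        log_norm = numpy.logaddexp.reduce(log_r, axis=0)
        r = numpy.exp(log_r - log_norm)

        # new mixture weights are the mean responsibility per component
        return r, r.mean(axis=1)

    # e.g. two components, four observations
    lls = numpy.log(numpy.array([[0.1, 0.4, 0.3, 0.2],
                                 [0.3, 0.2, 0.1, 0.4]]))
    r_KN, new_pis = em_responsibilities(numpy.log(numpy.array([0.5, 0.5])), lls)
    print(new_pis)
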
Example 17
    def _map_initialize(self, prior, samples, weights, out, initializations):
        """
        Emit parameter initialization for EM.
        """

        # generate a random initial component assignment
        K = self._model._K
        N = samples.shape[0]

        total = qy.stack_allocate(float)
        best_ll = qy.stack_allocate(float, -numpy.inf)

        assigns = StridedArray.heap_allocated(int, (K, ))
        best_assigns = StridedArray.heap_allocated(int, (K, ))

        @qy.for_(initializations)
        def _(i):
            @qy.for_(K)
            def _(k):
                # randomly assign the component
                j = qy.random_int(N)
                component = StridedArray.from_typed_pointer(
                    out.at(k).data.gep(0, 1))

                j.store(assigns.at(k).data)

                self._sub_emitter.map(
                    prior.at(k),
                    samples.at(j).envelop(),
                    weights.at(j).envelop(),
                    component,
                )

            # compute our total likelihood
            qy.value_from_any(0.0).store(total)

            @qy.for_(N)
            def _(n):
                sample = samples.at(n)

                mixture_ll = total.load()

                qy.value_from_any(-numpy.inf).store(total)

                @qy.for_(K)
                def _(k):
                    component_ll = total.load()

                    self._sub_emitter.ll(
                        StridedArray.from_typed_pointer(
                            out.at(k).data.gep(0, 1)),
                        sample,
                        total,
                    )

                    log_add_double(component_ll, total.load()).store(total)

                (mixture_ll + total.load()).store(total)

            # best observed so far?
            @qy.if_(total.load() >= best_ll.load())
            def _():
                total.load().store(best_ll)

                @qy.for_(K)
                def _(k):
                    assigns.at(k).data.load().store(best_assigns.at(k).data)

        # recompute the best observed assignment
        @qy.for_(K)
        def _(k):
            j = assigns.at(k).data.load()

            self._sub_emitter.ml(
                samples.at(j).envelop(),
                weights.at(j).envelop(),
                StridedArray.from_typed_pointer(out.at(k).data.gep(0, 1)),
            )

        qy.heap_free(assigns.data)
        qy.heap_free(best_assigns.data)

        # generate random initial component weights
        @qy.for_(K)
        def _(k):
            r = qy.random()

            r.store(out.at(k).data.gep(0, 0))

            (total.load() + r).store(total)

        @qy.for_(K)
        def _(k):
            p = out.at(k).data.gep(0, 0)

            (p.load() / total.load()).store(p)
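
The initializer's control flow, stripped of the code-generation machinery: run several random restarts, each assigning every component to a random sample and scoring the whole data set, keep the best-scoring assignment, then draw random mixture weights and normalize them. A compact sketch of that logic in ordinary Python; score is a stand-in for the emitted likelihood loop, and the names are illustrative.

    import random

    def best_random_assignment(n_samples, k_components, restarts, score):
        # keep the random component-to-sample assignment with the highest score
        best, best_score = None, float("-inf")
        for _ in range(restarts):
            assignment = [random.randrange(n_samples) for _ in range(k_components)]
            current = score(assignment)
            if current >= best_score:
                best, best_score = assignment, current
        return best

    def random_mixture_weights(k_components):
        # random initial weights, normalized to sum to one
        raw = [random.random() for _ in range(k_components)]
        total = sum(raw)
        return [r / total for r in raw]

    print(best_random_assignment(100, 3, 10, score=lambda a: -sum(a)))
    print(random_mixture_weights(3))
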
Example 18
    def _map_initialize(self, prior, samples, weights, out, initializations):
        """
        Emit parameter initialization for EM.
        """

        # generate a random initial component assignment
        K = self._model._K
        N = samples.shape[0]

        total   = qy.stack_allocate(float)
        best_ll = qy.stack_allocate(float, -numpy.inf)

        assigns      = StridedArray.heap_allocated(int, (K,))
        best_assigns = StridedArray.heap_allocated(int, (K,))

        @qy.for_(initializations)
        def _(i):
            @qy.for_(K)
            def _(k):
                # randomly assign the component
                j         = qy.random_int(N)
                component = StridedArray.from_typed_pointer(out.at(k).data.gep(0, 1))

                j.store(assigns.at(k).data)

                self._sub_emitter.map(
                    prior.at(k),
                    samples.at(j).envelop(),
                    weights.at(j).envelop(),
                    component,
                    )

            # compute our total likelihood
            qy.value_from_any(0.0).store(total)

            @qy.for_(N)
            def _(n):
                sample = samples.at(n)

                mixture_ll = total.load()

                qy.value_from_any(-numpy.inf).store(total)

                @qy.for_(K)
                def _(k):
                    component_ll = total.load()

                    self._sub_emitter.ll(
                        StridedArray.from_typed_pointer(out.at(k).data.gep(0, 1)),
                        sample,
                        total,
                        )

                    log_add_double(component_ll, total.load()).store(total)

                (mixture_ll + total.load()).store(total)

            # best observed so far?
            @qy.if_(total.load() >= best_ll.load())
            def _():
                total.load().store(best_ll)

                @qy.for_(K)
                def _(k):
                    assigns.at(k).data.load().store(best_assigns.at(k).data)

        # recompute the best observed assignment
        @qy.for_(K)
        def _(k):
            j = assigns.at(k).data.load()

            self._sub_emitter.ml(
                samples.at(j).envelop(),
                weights.at(j).envelop(),
                StridedArray.from_typed_pointer(out.at(k).data.gep(0, 1)),
                )

        qy.heap_free(assigns.data)
        qy.heap_free(best_assigns.data)

        # generate random initial component weights
        @qy.for_(K)
        def _(k):
            r = qy.random()

            r.store(out.at(k).data.gep(0, 0))

            (total.load() + r).store(total)

        @qy.for_(K)
        def _(k):
            p = out.at(k).data.gep(0, 0)

            (p.load() / total.load()).store(p)
Example 19
    def _map(self, prior, samples, weights, out, initializations):
        """
        Emit computation of the estimated MAP parameter.
        """

        # mise en place
        K = self._model._K
        N = samples.shape[0]

        # generate some initial parameters
        self._map_initialize(prior, samples, weights, out, initializations)

        # run EM until convergence
        total        = qy.stack_allocate(float)
        component_ll = qy.stack_allocate(float)

        this_r_KN = StridedArray.heap_allocated(float, (K, N))
        last_r_KN = StridedArray.heap_allocated(float, (K, N))

        this_r_KN_data = Variable.set_to(this_r_KN.data)
        last_r_KN_data = Variable.set_to(last_r_KN.data)

        @qy.for_(self._model._iterations)
        def _(i):
            # compute responsibilities
            r_KN = this_r_KN.using(this_r_KN_data.value)

            @qy.for_(N)
            def _(n):
                sample = samples.at(n)

                qy.value_from_any(-numpy.inf).store(total)

                @qy.for_(K)
                def _(k):
                    responsibility = r_KN.at(k, n).data

                    self._sub_emitter.ll(
                        StridedArray.from_typed_pointer(out.at(k).data.gep(0, 1)),
                        StridedArray.from_typed_pointer(sample.data),
                        responsibility,
                        )

                    log_add_double(total.load(), responsibility.load()).store(total)

                total_value = total.load()

                @qy.if_else(total_value == -numpy.inf)
                def _(then):
                    if then:
                        @qy.for_(K)
                        def _(k):
                            qy.value_from_any(1.0 / K).store(r_KN.at(k, n).data)
                    else:
                        @qy.for_(K)
                        def _(k):
                            responsibility = r_KN.at(k, n).data

                            qy.exp(responsibility.load() - total_value).store(responsibility)

            # estimate new mixture and component parameters
            @qy.for_(K)
            def _(k):
                component = out.at(k).data

                self._sub_emitter.map(
                    prior.at(k),
                    samples,
                    r_KN.at(k),
                    StridedArray.from_typed_pointer(component.gep(0, 1)),
                    )

                qy.value_from_any(0.0).store(total)

                @qy.for_(N)
                def _(n):
                    (total.load() + r_KN.at(k, n).data.load()).store(total)

                (total.load() / float(N)).store(component.gep(0, 0))

            # check for termination
            last_r_KN = this_r_KN.using(last_r_KN_data.value)

            @qy.if_(i > 0)
            def _():
                qy.value_from_any(0.0).store(total)

                @qy.for_(K)
                def _(k):
                    @qy.for_(N)
                    def _(n):
                        delta = r_KN.at(k, n).data.load() - last_r_KN.at(k, n).data.load()

                        (total.load() + abs(delta)).store(total)

                @qy.if_(total.load() < 1e-12)
                def _():
                    qy.break_()

            total_delta = total.load()

            # swap the responsibility matrices
            temp_r_KN_data_value = this_r_KN_data.value

            this_r_KN_data.set(last_r_KN_data.value)
            last_r_KN_data.set(temp_r_KN_data_value)

            # compute the ll at this step
            @qy.for_(N)
            def _(n):
                sample = samples.at(n)

                total_ll = total.load()

                qy.value_from_any(-numpy.inf).store(total)

                @qy.for_(K)
                def _(k):
                    self._sub_emitter.ll(
                        StridedArray.from_typed_pointer(out.at(k).data.gep(0, 1)),
                        StridedArray.from_typed_pointer(sample.data),
                        component_ll,
                        )

                    log_add_double(
                        total.load(),
                        qy.log(out.at(k).data.gep(0, 0).load()) + component_ll.load(),
                        ) \
                        .store(total)

                (total_ll + total.load()).store(total)

            total_ll = total.load()

            # be informative
            qy.py_printf("after EM step %i: delta %s; ll %s\n", i, total_delta, total_ll)

        # clean up
        qy.heap_free(this_r_KN.data)
        qy.heap_free(last_r_KN.data)

        qy.return_()