Example No. 1
    def log_likelihood(self, θ, data):
        """
        Calculate the log liklihood of the data for given theta parameters.
        Σ log(p(data|θ))
        We are going to iterate over trials. For each one, we take the trial
        data and calculate the predictive_y. This gives us many values
        (corresponding to particles). We deal with these appropriately for
        'chose B' and 'chose A' trials. Then calculate the log
        likelihood, which involves summing the ll over trials so that we end
        up with a log likelihood value for all the particles.
        """

        n_trials, _ = data.shape
        n_particles, _ = θ.shape

        # TODO safety check... if no data, return ll = 0

        p_chose_B = np.zeros((n_particles, n_trials))
        ll = np.zeros((n_particles, n_trials))
        responses = data.R.values

        for trial in range(n_trials):
            trial_data = data.take([trial])
            p_chose_B[:, trial] = self.predictive_y(θ, trial_data)
            ll[:, trial] = bernoulli.logpmf(responses[trial],
                                            p_chose_B[:, trial])

        ll = np.sum(ll, axis=1)  # sum over trials
        return ll
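The per-trial loop above can also be written without the loop, since
bernoulli.logpmf broadcasts. A minimal, self-contained sketch (shapes and
names are hypothetical, not taken from the original class):

import numpy as np
from scipy.stats import bernoulli

n_particles, n_trials = 50, 20
p_chose_B = np.random.uniform(0.01, 0.99, size=(n_particles, n_trials))
responses = np.random.randint(0, 2, size=n_trials)

# responses (n_trials,) broadcasts against p_chose_B (n_particles, n_trials)
ll = bernoulli.logpmf(responses, p_chose_B).sum(axis=1)  # one value per particle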
Example No. 2
    def test_log_pdf(self, dtype, prob_true, prob_true_is_samples, rv,
                     rv_is_samples, num_samples):

        rv_shape = rv.shape[1:] if rv_is_samples else rv.shape
        n_dim = 1 + len(rv.shape) if not rv_is_samples else len(rv.shape)
        prob_true_np = numpy_array_reshape(prob_true, prob_true_is_samples,
                                           n_dim)
        rv_np = numpy_array_reshape(rv, rv_is_samples, n_dim)
        rv_full_shape = (num_samples, ) + rv_shape
        rv_np = np.broadcast_to(rv_np, rv_full_shape)

        log_pdf_np = bernoulli.logpmf(k=rv_np, p=prob_true_np)

        var = Bernoulli.define_variable(0, shape=rv_shape, dtype=dtype).factor
        prob_true_mx = mx.nd.array(prob_true, dtype=dtype)
        if not prob_true_is_samples:
            prob_true_mx = add_sample_dimension(mx.nd, prob_true_mx)
        rv_mx = mx.nd.array(rv, dtype=dtype)
        if not rv_is_samples:
            rv_mx = add_sample_dimension(mx.nd, rv_mx)
        variables = {
            var.prob_true.uuid: prob_true_mx,
            var.random_variable.uuid: rv_mx
        }
        log_pdf_rt = var.log_pdf(F=mx.nd, variables=variables)

        assert np.issubdtype(log_pdf_rt.dtype, dtype)
        assert get_num_samples(mx.nd, log_pdf_rt) == num_samples
        assert np.allclose(log_pdf_np, log_pdf_rt.asnumpy())
Example No. 3
 def _py_log_prob(self, xs, zs):
     n_samples = zs.shape[0]
     lp = np.zeros(n_samples, dtype=np.float32)
     for s in range(n_samples):
         lp[s] = beta.logpdf(zs[s, :], a=1.0, b=1.0)
         for n in range(len(xs)):
             lp[s] += bernoulli.logpmf(xs[n], p=zs[s, :])
     return lp
Example No. 4
def test_bernoulli_logpdf(T=100, K=4, D=10):
    # Test single datapoint log pdf
    x = npr.rand(T, D) < 0.5
    logit_ps = npr.randn(K, D)
    ps = 1 / (1 + np.exp(-logit_ps))
    ll1 = bernoulli_logpdf(x[:, None, :], logit_ps)
    ll2 = np.sum(bernoulli.logpmf(x[:, None, :], ps[None, :, :]), axis=-1)
    assert np.allclose(ll1, ll2)
Example No. 5
def _test(model, xs, zs):
  val_true = beta.logpdf(zs['p'], 1.0, 1.0)
  val_true += np.sum([bernoulli.logpmf(x, zs['p'])
                      for x in list(six.itervalues(xs))[0]])
  val_ed = model.log_prob(xs, zs)
  assert np.allclose(val_ed.eval(), val_true)
  zs_tf = {key: tf.cast(value, dtype=tf.float32)
           for key, value in six.iteritems(zs)}
  val_ed = model.log_prob(xs, zs_tf)
  assert np.allclose(val_ed.eval(), val_true)
Example No. 6
    def compute(self, choice, t_ss, t_ll, r_ss, r_ll, r, tau):
        def discount(delay):
            return np.exp(-delay * r)

        v_ss = r_ss * discount(t_ss)
        v_ll = r_ll * discount(t_ll)

        # Probability to choose an option with late and large rewards.
        p_obs = inv_logit(tau * (v_ll - v_ss))
        return bernoulli.logpmf(choice, p_obs)
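A hypothetical usage sketch for the exponential-discounting model above,
assuming inv_logit is the standard logistic function (the original's
inv_logit is not shown here):

import numpy as np
from scipy.stats import bernoulli

def inv_logit(x):
    return 1.0 / (1.0 + np.exp(-x))

# 5 units now versus 10 units in 30 days; discount rate r, inverse temperature tau
r, tau = 0.02, 1.0
v_ss = 5 * np.exp(-0 * r)     # value of the smaller-sooner option
v_ll = 10 * np.exp(-30 * r)   # value of the larger-later option
p_obs = inv_logit(tau * (v_ll - v_ss))
print(bernoulli.logpmf(1, p_obs))  # log-likelihood of choosing larger-later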
Example No. 7
def _test(model, xs, zs):
  val_true = beta.logpdf(zs['p'], 1.0, 1.0)
  val_true += np.sum([bernoulli.logpmf(x, zs['p'])
                      for x in xs['x']])
  val_ed = model.log_prob(xs, zs)
  assert np.allclose(val_ed.eval(), val_true)
  zs_tf = {key: tf.cast(value, dtype=tf.float32)
           for key, value in six.iteritems(zs)}
  val_ed = model.log_prob(xs, zs_tf)
  assert np.allclose(val_ed.eval(), val_true)
Example No. 8
    def bPFweather(self, data, sys, par):
        a = np.zeros((self.nPart, sys.T))
        s = np.zeros((self.nPart, sys.T))
        p = np.zeros((self.nPart, sys.T))
        w = np.zeros((self.nPart, sys.T))
        xh = np.zeros((sys.T, 1))
        sh = np.zeros((sys.T, 1))
        llp = 0.0

        p[:, 0] = self.xo
        s[:, 0] = self.so

        for tt in range(0, sys.T):
            if tt != 0:

                # Resample (if needed by ESS criteria)
                if ((np.sum(w[:, tt - 1]**2))**(-1) <
                        (self.nPart * self.resampFactor)):

                    if self.resamplingType == "systematic":
                        nIdx = self.resampleSystematic(w[:, tt - 1], par)
                    elif self.resamplingType == "multinomial":
                        nIdx = self.resampleMultinomial(w[:, tt - 1], par)
                    else:
                        nIdx = self.resample(w[:, tt - 1], par)

                    nIdx = np.transpose(nIdx.astype(int))
                else:
                    nIdx = np.arange(0, self.nPart)

                # Propagate
                s[:, tt] = sys.h(p[nIdx, tt - 1], data.u[tt - 1],
                                 s[nIdx, tt - 1], tt - 1)
                p[:, tt] = sys.f(
                    p[nIdx, tt - 1], data.u[tt - 1], s[:, tt], data.y[tt - 1],
                    tt - 1) + sys.fn(p[nIdx, tt - 1], s[:, tt], data.y[tt - 1],
                                     tt - 1) * np.random.randn(1, self.nPart)
                a[:, tt] = nIdx

            # Calculate weights
            w[:, tt] = bernoulli.logpmf(
                data.y[tt], sys.g(p[:, tt], data.u[tt], s[:, tt], tt))
            wmax = np.max(w[:, tt])
            w[:, tt] = np.exp(w[:, tt] - wmax)

            # Estimate log-likelihood
            llp += wmax + np.log(np.sum(w[:, tt])) - np.log(self.nPart)

            # Estimate state
            w[:, tt] /= np.sum(w[:, tt])
            xh[tt] = np.sum(w[:, tt] * p[:, tt])
            sh[tt] = np.sum(w[:, tt] * s[:, tt])

        self.xhatf = xh
        self.shatf = sh
        self.ll = llp
        self.w = w
        self.a = a
        self.p = p
        self.s = s
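The weight update above subtracts wmax before exponentiating, which is the
log-sum-exp trick: it keeps np.exp from underflowing when the log-weights
are very negative. A small, self-contained check (values hypothetical):

import numpy as np
from scipy.special import logsumexp

log_w = np.array([-1000.0, -1001.0, -999.5])  # raw log-weights
n_part = log_w.size

wmax = np.max(log_w)
w = np.exp(log_w - wmax)                      # finite, no underflow
ll_increment = wmax + np.log(np.sum(w)) - np.log(n_part)

assert np.isclose(ll_increment, logsumexp(log_w) - np.log(n_part))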
Example No. 9
    def _py_log_prob(self, zs):
        # This example is written for pedagogy. We recommend
        # vectorizing operations in practice.
        n_minibatch = zs.shape[0]
        lp = np.zeros(n_minibatch, dtype=np.float32)
        for b in range(n_minibatch):
            lp[b] = beta.logpdf(zs[b, :], a=1.0, b=1.0)
            for n in range(len(self.data)):
                lp[b] += bernoulli.logpmf(self.data[n], p=zs[b, :])

        return lp
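The comment above recommends vectorizing; here is a hypothetical vectorized
equivalent of the two loops, assuming (as the scalar assignment to lp[b]
implies) that each zs[b, :] holds a single probability:

import numpy as np
from scipy.stats import bernoulli, beta

def py_log_prob_vectorized(data, zs):
    # zs: (n_minibatch, 1) probabilities; data: (N,) array of 0/1 outcomes
    lp = beta.logpdf(zs, a=1.0, b=1.0).sum(axis=-1)            # prior term
    lp += bernoulli.logpmf(data[None, :], p=zs).sum(axis=-1)   # likelihood term
    return lp.astype(np.float32)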
Example No. 10
    def compute(self, choice, t_ss, t_ll, r_ss, r_ll, beta, delta, tau):
        def discount(delay):
            return np.where(delay == 0, np.ones_like(beta * delta * delay),
                            beta * np.power(delta, delay))

        v_ss = r_ss * discount(t_ss)
        v_ll = r_ll * discount(t_ll)

        # Probability to choose an option with late and large rewards.
        p_obs = inv_logit(tau * (v_ll - v_ss))
        return bernoulli.logpmf(choice, p_obs)
Example No. 11
    def _py_log_prob(self, xs, zs):
        # This example is written for pedagogy. We recommend
        # vectorizing operations in practice.
        n_minibatch = zs.shape[0]
        lp = np.zeros(n_minibatch, dtype=np.float32)
        for b in range(n_minibatch):
            lp[b] = beta.logpdf(zs[b, :], a=1.0, b=1.0)
            for n in range(xs['x'].shape[0]):
                lp[b] += bernoulli.logpmf(xs['x'][n], p=zs[b, :])

        return lp
Example No. 12
    def _py_log_prob(self, xs, zs):
        # This example is written for pedagogy. We recommend
        # vectorizing operations in practice.
        n_samples = zs.shape[0]
        lp = np.zeros(n_samples, dtype=np.float32)
        for b in range(n_samples):
            lp[b] = beta.logpdf(zs[b, :], a=1.0, b=1.0)
            for n in range(xs['x'].shape[0]):
                lp[b] += bernoulli.logpmf(xs['x'][n], p=zs[b, :])

        return lp
Example No. 13
def _test(model, xs, zs):
    n_samples = zs.shape[0]
    val_true = np.zeros(n_samples, dtype=np.float32)
    for s in range(n_samples):
        p = np.squeeze(zs[s, :])
        val_true[s] = beta.logpdf(p, 1, 1)
        val_true[s] += np.sum([bernoulli.logpmf(x, p) for x in xs['x']])

    val_ed = model.log_prob(xs, zs)
    assert np.allclose(val_ed.eval(), val_true)
    zs_tf = tf.cast(zs, dtype=tf.float32)
    val_ed = model.log_prob(xs, zs_tf)
    assert np.allclose(val_ed.eval(), val_true)
Example No. 14
def _test(model, xs, zs):
    n_samples = zs.shape[0]
    val_true = np.zeros(n_samples, dtype=np.float32)
    for s in range(n_samples):
        p = np.squeeze(zs[s, :])
        val_true[s] = beta.logpdf(p, 1, 1)
        val_true[s] += np.sum([bernoulli.logpmf(x, p)
                               for x in xs['x']])

    val_ed = model.log_prob(xs, zs)
    assert np.allclose(val_ed.eval(), val_true)
    zs_tf = tf.cast(zs, dtype=tf.float32)
    val_ed = model.log_prob(xs, zs_tf)
    assert np.allclose(val_ed.eval(), val_true)
Example No. 15
    def log_marg_mask(self, mask_new):
        ## log prob for bernoulli distribution
        log_bern_new = np.sum(bernoulli.logpmf(mask_new, self.p_bern))

        # get common components and clustering components from new mask
        local_common_component = self.update_common_component(mask_new)
        assert self.common_component.K == 1, "new common component can only have one cluster component"
        local_clustering_components = self.update_clustering_components(
            mask_new, self.components.assignments)

        ## compute log prob for p(m) * p(X,z | m)
        log_marg_new = self.log_marg_for_specific_component(local_clustering_components) + \
                       local_common_component.log_marg() + log_bern_new

        return log_marg_new
Example No. 16
def gen_data(dtype, shape):
    support_list = {"float16": np.float16, "float32": np.float32}
    seed(0)
    m, k = shape
    x = bernoulli.rvs(0.5, size=(m, k)).astype(support_list[dtype])
    eps = 1e-3
    # generate probabilities in the range [eps, 1 - eps] to avoid a mismatch
    # between np.inf and the kernel's saturated value (-65500.0) when taking the log
    probs = uniform(eps,
                    1.0 - 2.0 * eps).rvs(size=(m,
                                               k)).astype(support_list[dtype])
    expect = bernoulli.logpmf(x, probs)
    output = np.full((m, k), 0.0, dtype)

    return expect, x, probs, output
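Why the eps clamp above matters: at p exactly 0 or 1, the log-PMF of the
opposite outcome is -inf, which cannot be compared against the finite
saturated value the kernel produces. A quick illustration:

from scipy.stats import bernoulli

print(bernoulli.logpmf(1, 0.0))    # -inf: the outcome has zero probability
print(bernoulli.logpmf(1, 1e-3))   # ~-6.9078: finite once p is clamped away from 0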
Example No. 17
def test_bernoulli():

    # Test that we can match a Bernoulli distribution from scipy

    p = 0.5
    dist = lk.Bernoulli()

    x = np.array([0, 1])

    p1 = bernoulli.logpmf(x, p)
    p2 = dist.loglike(x, p)

    assert np.allclose(p1, p2)

    p1 = bernoulli.cdf(x, p)
    p2 = dist.cdf(x, p)

    assert np.allclose(p1, p2)
Example No. 18
    def loglike(self, y, f):
        """
        Bernoulli log likelihood.

        Parameters
        ----------
            y: array_like
                array of 0, 1 valued integers of targets
            f: array_like
                latent function from the GLM prior (:math:`\mathbf{f} =
                \\boldsymbol\Phi \mathbf{w}`)

        Returns
        -------
            logp: array_like
                the log likelihood of each y given each f under this
                likelihood.
        """

        ll = bernoulli.logpmf(y, logistic(f))
        ll[np.isinf(ll)] = logtiny
        return ll
Example No. 19
    def loglike(self, y, f):
        """
        Bernoulli log likelihood.

        Parameters
        ----------
            y: array_like
                array of 0, 1 valued integers of targets
            f: array_like
                latent function from the GLM prior (:math:`\mathbf{f} =
                \\boldsymbol\Phi \mathbf{w}`)

        Returns
        -------
            logp: array_like
                the log likelihood of each y given each f under this
                likelihood.
        """

        ll = bernoulli.logpmf(y, logistic(f))
        ll[np.isinf(ll)] = logtiny
        return ll
Example No. 20
def test_hypergeom_logpmf():
    # symmetries test
    # f(k,N,K,n) = f(n-k,N,N-K,n) = f(K-k,N,K,N-n) = f(k,N,n,K)
    k = 5
    N = 50
    K = 10
    n = 5
    logpmf1 = hypergeom.logpmf(k, N, K, n)
    logpmf2 = hypergeom.logpmf(n - k, N, N - K, n)
    logpmf3 = hypergeom.logpmf(K - k, N, K, N - n)
    logpmf4 = hypergeom.logpmf(k, N, n, K)
    assert_almost_equal(logpmf1, logpmf2, decimal=12)
    assert_almost_equal(logpmf1, logpmf3, decimal=12)
    assert_almost_equal(logpmf1, logpmf4, decimal=12)

    # test related distribution
    # Bernoulli distribution if n = 1
    k = 1
    N = 10
    K = 7
    n = 1
    hypergeom_logpmf = hypergeom.logpmf(k, N, K, n)
    bernoulli_logpmf = bernoulli.logpmf(k, K / N)
    assert_almost_equal(hypergeom_logpmf, bernoulli_logpmf, decimal=12)
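With n = 1 the relationship tested above follows directly from the
hypergeometric PMF: a single draw succeeds with probability K/N, so

    f(k; N, K, 1) = \binom{K}{k}\binom{N-K}{1-k} \Big/ \binom{N}{1}
                  = (K/N)^k \, (1 - K/N)^{1-k}, \qquad k \in \{0, 1\},

which is exactly the Bernoulli PMF with p = K/N.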
Example No. 21
def test_hypergeom_logpmf():
    # symmetries test
    # f(k,N,K,n) = f(n-k,N,N-K,n) = f(K-k,N,K,N-n) = f(k,N,n,K)
    k = 5
    N = 50
    K = 10
    n = 5
    logpmf1 = hypergeom.logpmf(k, N, K, n)
    logpmf2 = hypergeom.logpmf(n - k, N, N - K, n)
    logpmf3 = hypergeom.logpmf(K - k, N, K, N - n)
    logpmf4 = hypergeom.logpmf(k, N, n, K)
    assert_almost_equal(logpmf1, logpmf2, decimal=12)
    assert_almost_equal(logpmf1, logpmf3, decimal=12)
    assert_almost_equal(logpmf1, logpmf4, decimal=12)

    # test related distribution
    # Bernoulli distribution if n = 1
    k = 1
    N = 10
    K = 7
    n = 1
    hypergeom_logpmf = hypergeom.logpmf(k, N, K, n)
    bernoulli_logpmf = bernoulli.logpmf(k, K / N)
    assert_almost_equal(hypergeom_logpmf, bernoulli_logpmf, decimal=12)
Example No. 22
 def _py_log_prob(self, xs, zs):
     log_prior = beta.logpdf(zs['p'], a=1.0, b=1.0)
     log_lik = np.sum(bernoulli.logpmf(xs['x'], p=zs['p']))
     return log_lik + log_prior
Example No. 23
 def logprob(self, data, G):
     Y = turn_into_iterable(data["Y"])
     Z = turn_into_iterable(data["Z"])
     parameter = [G._node[z]["theta"][0] for z in Z]
     return bernoulli.logpmf(Y, p=parameter)
Example No. 24
def calc_log_likelihood(designs, df, params):
    p = _prob_choose_delayed(designs, df, params)
    log_likelihood = bernoulli.logpmf(designs.R, p)
    return sum(log_likelihood)
Example No. 25
def logpmf(x, p):
    return np.sum(bernoulli.logpmf(x, p))
Example No. 26
 def logpdf(self, x):
     return bernoulli.logpmf(x, self.p)
Example No. 27
 def compute(self, choice, p_var, a_var, r_var, r_fix, alpha, beta, gamma):
     sv_var = np.power(r_var, alpha)
     sv_var = np.power(p_var, 1 + beta * a_var) * sv_var
     sv_fix = .5 * np.power(r_fix, alpha)
     p_obs = inv_logit(gamma * (sv_var - sv_fix))
     return bernoulli.logpmf(choice, p_obs)
Example No. 28
def loglike_bern_t(xtd, mu_td):
    return np.sum(bern.logpmf(xtd, mu_td))
Example No. 29
 def _py_log_prob(self, xs, zs):
   log_prior = beta.logpdf(zs['p'], a=1.0, b=1.0)
   log_lik = np.sum(bernoulli.logpmf(xs['x'], p=zs['p']))
   return log_lik + log_prior
Example No. 30
def func_logistic_log_lik(choice, stimulus, guess_rate, lapse_rate, threshold,
                          slope):
    f = inv_logit(slope * (stimulus - threshold))
    p = guess_rate + (1 - guess_rate - lapse_rate) * f
    return bernoulli.logpmf(choice, p)
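A hypothetical usage sketch for the guess/lapse-rate psychometric model
above, again assuming inv_logit is the standard logistic function:

import numpy as np
from scipy.stats import bernoulli

def inv_logit(x):
    return 1.0 / (1.0 + np.exp(-x))

def func_logistic_log_lik(choice, stimulus, guess_rate, lapse_rate, threshold, slope):
    f = inv_logit(slope * (stimulus - threshold))
    p = guess_rate + (1 - guess_rate - lapse_rate) * f
    return bernoulli.logpmf(choice, p)

# stimulus well above threshold: choosing 1 is very likely, so the
# log-likelihood of choice=1 is close to zero
print(func_logistic_log_lik(choice=1, stimulus=2.0, guess_rate=0.5,
                            lapse_rate=0.05, threshold=0.0, slope=3.0))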
Example No. 31
 def compute(self, choice, stimulus, guess_rate, lapse_rate, threshold,
             slope):
     p_obs = self._compute_prob(inv_logit, stimulus, threshold, slope,
                                guess_rate, lapse_rate)
     return bernoulli.logpmf(choice, p_obs)
Example No. 32
 def logpmf(self, Y, Z):
     Y = turn_into_iterable(Y)
     Z = turn_into_iterable(Z)
     parameter = np.array([self.G.node[z]['theta'][0] for z in Z])
     return bernoulli.logpmf(Y, p=parameter)
Example No. 33
def log_prob_z(d, zt, h_tmin, W, U, b, ut):
    f = W @ h_tmin + U @ ut + b
    mu = expit(f) 
    return np.sum(bern.logpmf(zt, mu))
Example No. 34
    def metropolis_update_mask(self, i_iter):

        assert self.common_component.K == 1, "common component can only have one cluster component"
        ## compute old p(mask | z,X) \propto p(X,z|mask) * p(mask | p_bern)
        ## p(X,z | mask) = p(X_m, z | \alpha, \beta_m) * p(X_mc | \beta_mc)
        log_bern_old = np.sum(bernoulli.logpmf(
            self.mask, self.p_bern))  # log prob for bernoulli distribution
        log_marg_old = self.log_marg() + self.common_component.log_marg(
        ) + log_bern_old

        ## random pick one mask from old mask
        idx = np.random.choice(range(self.D), 1)[0]
        mask_new = copy.deepcopy(self.mask)
        mask_new[idx] = 1 - mask_new[idx]

        ## robust step for new mask
        cluster_idx = 1
        cluster_D = np.where(mask_new == cluster_idx)[0].shape[0]
        common_idx = 0
        common_D = np.where(mask_new == common_idx)[0].shape[0]
        if cluster_D == 0 or common_D == 0:
            logging.info('use old !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
            mask_new = copy.deepcopy(self.mask)

        ## TODO, test to see if mask are same, do we get same log marg
        # mask_new = copy.deepcopy(self.mask)

        # if i_iter % 20 == 0:
        #     logging.info('mask old: {}'.format(self.mask))
        #     logging.info('mask new: {}'.format(mask_new))

        ## compute new p(mask | z,X) \propto p(X,z|mask) * p(mask | p_bern)
        ## p(X,z | mask) = p(X_m, z | \alpha, \beta_m) * p(X_mc | \beta_mc)
        log_bern_new = np.sum(bernoulli.logpmf(
            mask_new, self.p_bern))  # log prob for bernoulli distribution

        # get common components and clustering components from new mask
        local_common_component = self.update_common_component(mask_new)
        assert self.common_component.K == 1, "new common component can only have one cluster component"
        local_clustering_components = self.update_clustering_components(
            mask_new, self.components.assignments)

        log_marg_new = self.log_marg_for_specific_component(local_clustering_components) + \
                       local_common_component.log_marg() + log_bern_new

        # if i_iter % 20 == 0:
        #     print 'log_bern_old: {}'.format(log_bern_old)
        #     print 'log_bern_new: {}'.format(log_bern_new)
        #
        #     print 'common component: {}'.format(self.common_component.log_marg())
        #     print 'local common component: {}'.format(local_common_component.log_marg())
        #
        #     print 'cluster component: {}'.format(self.components.log_marg())
        #     print 'local cluster component: {}'.format(local_clustering_components.log_marg())
        #
        #     print 'cluster component all: {}'.format(self.log_marg())
        #     print 'local cluster component all: {}'.format(self.log_marg_for_specific_component(
        #         local_clustering_components))

        isNewLarger = True
        self.total_run += 1  ## count run
        if isNewLarger:
            ## metropolis search,
            prob = np.exp(log_marg_new - log_marg_old)
            prob = 1 if prob > 1 else prob
            bern_prob = np.random.binomial(1, prob, 1)[0]

            # if i_iter % 20 == 0:
            #     print 'prob: {}'.format(prob)
            #     print 'prob output: {}'.format(bern_prob)

            if log_marg_new > log_marg_old or bern_prob > 0:
                self.update_run += 1  ## count update
                self.acc_rate = self.update_run * 1. / self.total_run  ## update acceptance rate
                if i_iter % 20 == 0:
                    logging.info('update mask!!')
                self.mask = copy.deepcopy(mask_new)
                self.common_component = copy.deepcopy(local_common_component)
                self.components = copy.deepcopy(local_clustering_components)

                ## update p_bern for the mask if using BetaBern prior
                if self.bern_prior is not None:
                    self.p_bern = np.random.beta(
                        self.bern_prior.a + np.sum(mask_new),
                        self.bern_prior.b + self.D - np.sum(mask_new), 1)[0]
                    self.make_robust_p_bern()
                else:
                    ## self.p_bern is fixed
                    pass
        else:
            ## metropolis search,
            prob = np.exp(log_marg_old - log_marg_new)
            prob = 1 if prob > 1 else prob
            bern_prob = np.random.binomial(1, prob, 1)[0]

            # if i_iter % 20 == 0:
            #     print 'prob: {}'.format(prob)
            #     print 'prob output: {}'.format(bern_prob)

            if log_marg_old > log_marg_new or bern_prob > 0:
                if i_iter % 20 == 0:
                    logging.info('update mask!!')
                self.mask = copy.deepcopy(mask_new)
                self.common_component = copy.deepcopy(local_common_component)
                self.components = copy.deepcopy(local_clustering_components)

                ## update p_bern for the mask if using BetaBern prior
                if self.bern_prior is not None:
                    self.p_bern = np.random.beta(
                        self.bern_prior.a + np.sum(mask_new),
                        self.bern_prior.b + self.D - np.sum(mask_new), 1)[0]
                    self.make_robust_p_bern()

        if i_iter % 20 == 0:
            logging.info('log_marg_old: {}'.format(log_marg_old))
            logging.info('log_marg_new: {}'.format(log_marg_new))
            logging.info('p_bern_new: {}'.format(self.p_bern))
            logging.info('acceptance rate: {}'.format(self.acc_rate))
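The accept step above implements the standard Metropolis rule: accept with
probability min(1, exp(log_marg_new - log_marg_old)). A compact, hypothetical
helper equivalent to that branch logic:

import numpy as np

def metropolis_accept(log_new, log_old, rng=np.random):
    if log_new >= log_old:              # always accept an improvement
        return True
    prob = np.exp(log_new - log_old)    # strictly below 1 here, so no overflow
    return rng.binomial(1, prob) == 1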
Example No. 35
    def bPF(self, data, sys, par):
        a = np.zeros((self.nPart, sys.T))
        s = np.zeros((self.nPart, sys.T))
        p = np.zeros((self.nPart, sys.T))
        w = np.zeros((self.nPart, sys.T))
        xh = np.zeros((sys.T, 1))
        sh = np.zeros((sys.T, 1))
        llp = 0.0

        p[:, 0] = self.xo
        s[:, 0] = self.so

        for tt in range(0, sys.T):
            if tt != 0:

                # Resample (if needed by ESS criteria)
                if ((np.sum(w[:, tt - 1]**2))**(-1) <
                    (self.nPart * self.resampFactor)):

                    if self.resamplingType == "systematic":
                        nIdx = self.resampleSystematic(w[:, tt - 1], par)
                    elif self.resamplingType == "multinomial":
                        nIdx = self.resampleMultinomial(w[:, tt - 1], par)
                    else:
                        nIdx = self.resample(w[:, tt - 1], par)

                    nIdx = np.transpose(nIdx.astype(int))
                else:
                    nIdx = np.arange(0, self.nPart)

                # Propagate
                s[:, tt] = sys.h(p[nIdx, tt - 1], data.u[tt - 1],
                                 s[nIdx, tt - 1], tt - 1)
                p[:, tt] = sys.f(
                    p[nIdx, tt - 1], data.u[tt - 1], s[:, tt], data.y[tt - 1],
                    tt - 1) + sys.fn(p[nIdx, tt - 1], s[:, tt], data.y[tt - 1],
                                     tt - 1) * np.random.randn(1, self.nPart)
                a[:, tt] = nIdx

            # Calculate weights
            if (self.weightdist == "bernoulli"):
                w[:, tt] = bernoulli.logpmf(
                    data.y[tt], sys.g(p[:, tt], data.u[tt], s[:, tt], tt))
            elif (self.weightdist == "poisson"):
                w[:, tt] = poisson.logpmf(
                    data.y[tt], sys.g(p[:, tt], data.u[tt], s[:, tt], tt))
            else:
                w[:, tt] = loguninormpdf(
                    data.y[tt], sys.g(p[:, tt], data.u[tt], s[:, tt], tt),
                    sys.gn(p[:, tt], s[:, tt], tt))

            wmax = np.max(w[:, tt])
            w[:, tt] = np.exp(w[:, tt] - wmax)

            # Estimate log-likelihood
            llp += wmax + np.log(np.sum(w[:, tt])) - np.log(self.nPart)

            # Estimate state
            w[:, tt] /= np.sum(w[:, tt])
            xh[tt] = np.sum(w[:, tt] * p[:, tt])
            sh[tt] = np.sum(w[:, tt] * s[:, tt])

        self.xhatf = xh
        self.shatf = sh
        self.ll = llp
        self.w = w
        self.a = a
        self.p = p
        self.s = s