Example #1
    def log_likelihood_given_activation(self, psi):
        p   = logistic(psi)
        p   = np.clip(p, 1e-32, 1-1e-32)

        return self.log_normalizer(self.X, self.xi) \
               + self.X * np.log(p) \
               + self.xi * np.log(1-p)
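Every example in this collection calls a logistic helper imported from the library. For readers running a snippet standalone, a minimal sketch of what it is assumed to compute, namely the standard sigmoid:

import numpy as np

def logistic(psi):
    # Standard logistic (sigmoid) function: maps a real-valued activation psi
    # to a probability in (0, 1).
    return 1.0 / (1.0 + np.exp(-psi))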
Example #2
    def _resample_xi_slicesample(self, augmented_data_list):
        # Compute the activations
        Ss = np.vstack([d["S"] for d in augmented_data_list])
        psis = np.vstack(
            [self.activation.compute_psi(d) for d in augmented_data_list])

        # Resample xi using slice sampling
        # p(\xi | \psi, s) \propto p(\xi) * p(s | \xi, \psi)
        for n in range(self.N):
            Sn = Ss[:, n]
            psin = psis[:, n]
            pn = logistic(psin)
            pn = np.clip(pn, 1e-32, 1 - 1e-32)

            def _log_prob_xin(xin):
                lp = 0

                # Compute the prior of \xi_n ~ 1 + Gamma(alpha, beta)
                assert xin > 1
                lp += (self.alpha_xi -
                       1) * np.log(xin - 1) - self.beta_xi * (xin - 1)

                # Compute the likelihood of \xi_n, NB(S_{t,n} | xi_n, \psi_{t,n})
                lp += (gammaln(Sn + xin) - gammaln(xin)).sum()
                lp += (xin * np.log(1 - pn)).sum()

                return lp

            # Slice sample \xi_n
            self.xi[0, n], _ = slicesample(self.xi[0, n],
                                           _log_prob_xin,
                                           lb=1 + 1e-5,
                                           ub=100)
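The terms accumulated in _log_prob_xin come from the negative binomial likelihood in the (\xi, p) parameterization used throughout these snippets, with p = logistic(\psi). Terms that do not depend on \xi (the -\log(s!) and s\log p pieces) are dropped, which is harmless for slice sampling because only relative log probabilities matter. For reference, the assumed pmf is

    \log \mathrm{NB}(s \mid \xi, p) = \log\Gamma(s+\xi) - \log\Gamma(\xi) - \log(s!) + \xi\log(1-p) + s\log p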
Example #3
    def _resample_A(self, n_pre, n_post, stats):
        """
        Resample the presence or absence of a connection (synapse)
        :param n_pre:
        :param n_post:
        :param stats:
        :return:
        """
        mu_w                         = self.network.weights.Mu[n_pre, n_post, :]
        Sigma_w                      = self.network.weights.Sigma[n_pre, n_post, :, :]
        post_mu, post_cov, post_prec = stats
        rho                          = self.network.adjacency.P[n_pre, n_post]

        # Compute the log odds ratio
        logdet_prior_cov = np.linalg.slogdet(Sigma_w)[1]
        logdet_post_cov  = np.linalg.slogdet(post_cov)[1]
        logit_rho_post   = logit(rho) \
                           + 0.5 * (logdet_post_cov - logdet_prior_cov) \
                           + 0.5 * post_mu.dot(post_prec).dot(post_mu) \
                           - 0.5 * mu_w.dot(np.linalg.solve(Sigma_w, mu_w))

        rho_post = logistic(logit_rho_post)

        # Sample the binary indicator of an edge
        self.A[n_pre, n_post] = np.random.rand() < rho_post
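The log-odds update in _resample_A is the standard spike-and-slab comparison of the weight being present (Gaussian prior \mathcal{N}(\mu_w, \Sigma_w)) versus absent (weight pinned at zero). Integrating the weight out of the Gaussian likelihood gives, up to terms that cancel between the two cases,

    \mathrm{logit}(\rho_{post}) = \mathrm{logit}(\rho) + \tfrac{1}{2}\log\frac{|\Sigma_{post}|}{|\Sigma_w|} + \tfrac{1}{2}\mu_{post}^\top \Lambda_{post} \mu_{post} - \tfrac{1}{2}\mu_w^\top \Sigma_w^{-1} \mu_w

where \mu_{post}, \Sigma_{post}, and \Lambda_{post} are the posterior mean, covariance, and precision unpacked from stats.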
Example #4
    def _resample_xi_discrete(self, augmented_data_list, xi_max=20):
        from pybasicbayes.util.stats import sample_discrete_from_log
        from pyglm.internals.negbin import nb_likelihood_xi

        # Resample xi with uniform prior over discrete set
        # p(\xi | \psi, s) \propto p(\xi) * p(s | \xi, \psi)
        lp_xis = np.zeros((self.N, xi_max))
        for d in augmented_data_list:
            psi = self.activation.compute_psi(d)
            for n in range(self.N):
                Sn = d["S"][:, n].copy()
                psin = psi[:, n].copy()
                pn = logistic(psin)
                pn = np.clip(pn, 1e-32, 1 - 1e-32)

                xis = np.arange(1, xi_max + 1).astype(float)
                lp_xi = np.zeros(xi_max)
                nb_likelihood_xi(Sn, pn, xis, lp_xi)
                lp_xis[n] += lp_xi

                # lp_xi2 = (gammaln(Sn[:,None]+xis[None,:]) - gammaln(xis[None,:])).sum(0)
                # lp_xi2 += (xis[None,:] * np.log(1-pn)[:,None]).sum(0)
                #
                # assert np.allclose(lp_xi, lp_xi2)

        for n in range(self.N):
            self.xi[0, n] = xis[sample_discrete_from_log(lp_xis[n])]
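nb_likelihood_xi is a compiled helper from pyglm.internals.negbin. Judging from the vectorized check that is commented out at the end of the loop, a pure-NumPy equivalent would look like the sketch below (the function name is illustrative, not the library's):

import numpy as np
from scipy.special import gammaln

def nb_likelihood_xi_numpy(Sn, pn, xis, lp_xi):
    # Log likelihood of each candidate xi, summed over time bins and dropping
    # terms that do not depend on xi; fills lp_xi in place, mirroring the
    # commented-out check in the snippet above.
    lp_xi[:] = (gammaln(Sn[:, None] + xis[None, :]) - gammaln(xis[None, :])).sum(0)
    lp_xi += (xis[None, :] * np.log(1 - pn)[:, None]).sum(0)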
Example #5
    def _resample_xi_slicesample(self, augmented_data_list):
        # Compute the activations
        Ss   = np.vstack([d["S"] for d in augmented_data_list])
        psis = np.vstack([self.activation.compute_psi(d) for d in augmented_data_list])

        # Resample xi using slice sampling
        # p(\xi | \psi, s) \propto p(\xi) * p(s | \xi, \psi)
        for n in range(self.N):
            Sn   = Ss[:,n]
            psin = psis[:,n]
            pn   = logistic(psin)
            pn   = np.clip(pn, 1e-32, 1-1e-32)

            def _log_prob_xin(xin):
                lp = 0

                # Compute the prior of \xi_n ~ 1 + Gamma(alpha, beta)
                assert xin > 1
                lp += (self.alpha_xi-1) * np.log(xin-1) - self.beta_xi * (xin-1)

                # Compute the likelihood of \xi_n, NB(S_{t,n} | xi_n, \psi_{t,n})
                lp += (gammaln(Sn+xin) - gammaln(xin)).sum()
                lp += (xin * np.log(1-pn)).sum()

                return lp

            # Slice sample \xi_n
            self.xi[0,n], _ = slicesample(self.xi[0,n], _log_prob_xin, lb=1+1e-5, ub=100)
Example #6
    def _resample_xi_discrete(self, augmented_data_list, xi_max=20):
        from pybasicbayes.util.stats import sample_discrete_from_log
        from pyglm.internals.negbin import nb_likelihood_xi

        # Resample xi with uniform prior over discrete set
        # p(\xi | \psi, s) \propto p(\xi) * p(s | \xi, \psi)
        lp_xis = np.zeros((self.N, xi_max))
        for d in augmented_data_list:
            psi = self.activation.compute_psi(d)
            for n in range(self.N):
                Sn   = d["S"][:,n].copy()
                psin = psi[:,n].copy()
                pn   = logistic(psin)
                pn   = np.clip(pn, 1e-32, 1-1e-32)

                xis = np.arange(1, xi_max+1).astype(float)
                lp_xi = np.zeros(xi_max)
                nb_likelihood_xi(Sn, pn, xis, lp_xi)
                lp_xis[n] += lp_xi

                # lp_xi2 = (gammaln(Sn[:,None]+xis[None,:]) - gammaln(xis[None,:])).sum(0)
                # lp_xi2 += (xis[None,:] * np.log(1-pn)[:,None]).sum(0)
                #
                # assert np.allclose(lp_xi, lp_xi2)

        for n in range(self.N):
            self.xi[0,n] = xis[sample_discrete_from_log(lp_xis[n])]
Example #7
def geweke_test(N_samples=10000):
    # Sample from the prior
    z0 = mu_z + np.sqrt(sigma_z) * np.random.randn()
    x0 = np.random.rand() < logistic(z0 / T)

    # Collect samples from the joint
    xs = [x0]
    zs = [z0]

    for smpl in progprint_xrange(1, N_samples):
        x, z = resample_sweep(xs[smpl - 1], zs[smpl - 1])
        xs.append(x)
        zs.append(z)

    # Make Q-Q plots of the samples
    fig = plt.figure()
    z_ax = fig.add_subplot(121)
    z_dist = norm(mu_z, np.sqrt(sigma_z))
    probplot(np.array(zs), dist=z_dist, plot=z_ax)

    fig.add_subplot(122)
    _, bins, _ = plt.hist(zs, 20, density=True, alpha=0.2)
    bincenters = 0.5 * (bins[1:] + bins[:-1])
    plt.plot(bincenters, z_dist.pdf(bincenters), 'r--', linewidth=1)
    plt.show()
Example #8
    def _meanfieldupdate_A(self, n_pre, n_post, stats, E_net, stepsize=1.0):
        """
        Mean field update the presence or absence of a connection (synapse)
        :param n_pre:
        :param n_post:
        :param stats:
        :return:
        """
        # TODO: A joint factor for mu and Sigma could yield E_mu_dot_Sigma under the prior
        mf_post_mu, mf_post_cov, mf_post_prec = stats
        E_ln_rho, E_ln_notrho, E_mu, E_Sigma_inv, E_logdet_Sigma = E_net

        E_ln_rho       = E_ln_rho[n_pre, n_post]
        E_ln_notrho    = E_ln_notrho[n_pre, n_post]
        E_mu           = E_mu[n_pre, n_post,:]
        E_Sigma_inv    = E_Sigma_inv[n_pre, n_post,:,:]
        E_logdet_Sigma = E_logdet_Sigma[n_pre, n_post]

        # Compute the log odds ratio
        logdet_prior_cov = E_logdet_Sigma
        logdet_post_cov  = np.linalg.slogdet(mf_post_cov)[1]
        logit_rho_post   = E_ln_rho - E_ln_notrho \
                           + 0.5 * (logdet_post_cov - logdet_prior_cov) \
                           + 0.5 * mf_post_mu.dot(mf_post_prec).dot(mf_post_mu) \
                           - 0.5 * E_mu.dot(E_Sigma_inv.dot(E_mu))

        rho_post = logistic(logit_rho_post)

        # Mean field update the binary indicator of an edge
        self.mf_p[n_pre, n_post] = (1.0 - stepsize) * self.mf_p[n_pre, n_post] \
                                   + stepsize * rho_post
Example #9
def update(model):
    model.resample_model()
    z_inf = model.states_list[0].stateseq
    C_inf = model.C
    psi_inf = z_inf.dot(C_inf.T)
    p_inf = logistic(psi_inf)
    return model.log_likelihood(), p_inf
Example #10
    def log_likelihood_given_activation(self, S, psi):
        p = logistic(psi)
        p = np.clip(p, 1e-32, 1 - 1e-32)
        xi = self.xi

        return self.log_normalizer(S, xi) \
               + S * np.log(p) \
               + xi * np.log(1-p)
Example #11
    def log_likelihood_given_activation(self, S, psi):
        p = logistic(psi)
        p = np.clip(p, 1e-32, 1-1e-32)
        xi = self.xi

        return self.log_normalizer(S, xi) \
               + S * np.log(p) \
               + xi * np.log(1-p)
Example #12
    def _log_likelihood_given_activation(self, S, psi, obs_params=None):
        p = logistic(psi)
        p = np.clip(p, 1e-32, 1-1e-32)

        xi = obs_params if obs_params is not None else self.xi

        return self.log_normalizer(S, xi) \
               + S * np.log(p) \
               + xi * np.log(1-p)
Example #13
    def _log_likelihood_given_activation(self, S, psi, obs_params=None):
        p = logistic(psi)
        p = np.clip(p, 1e-32, 1 - 1e-32)

        xi = obs_params if obs_params is not None else self.xi

        return self.log_normalizer(S, xi) \
               + S * np.log(p) \
               + xi * np.log(1-p)
Example #14
    def get_vlb(self, augmented_data):
        # 1. E[ \ln p(s | \psi) ]
        # Compute this with Monte Carlo integration over \psi
        # Psis = self.activation.mf_sample_activation(augmented_data, N_samples=1)
        Psis = self.activation.mf_sample_marginal_activation(augmented_data, N_samples=10)
        ps = logistic(Psis)
        E_lnp = np.log(ps).mean(axis=0)
        E_ln_notp = np.log(1-ps).mean(axis=0)

        vlb = self.expected_log_likelihood(augmented_data,
                                           (E_lnp, E_ln_notp)).sum()
        return vlb
Example #15
    def get_vlb(self, augmented_data):
        # 1. E[ \ln p(s | \psi) ]
        # Compute this with Monte Carlo integration over \psi
        # Psis = self.activation.mf_sample_activation(augmented_data, N_samples=1)
        Psis = self.activation.mf_sample_marginal_activation(augmented_data,
                                                             N_samples=10)
        ps = logistic(Psis)
        E_lnp = np.log(ps).mean(axis=0)
        E_ln_notp = np.log(1 - ps).mean(axis=0)

        vlb = self.expected_log_likelihood(augmented_data,
                                           (E_lnp, E_ln_notp)).sum()
        return vlb
Example #16
    def rvs(self, X=None, size=[], psi=None):
        if psi is None:
            if X is None:
                assert isinstance(size, int)
                X = npr.randn(size, self.N*self.B)

            X = self._flatten_X(X)
            p = self.mean(X)
        else:
            p = logistic(psi)

        y = npr.rand(*p.shape) < p

        return y
Example #17
    def rvs(self, X=None, size=[], psi=None):
        if psi is None:
            if X is None:
                assert isinstance(size, int)
                X = npr.randn(size, self.N * self.B)

            X = self._flatten_X(X)
            p = self.mean(X)
        else:
            p = logistic(psi)

        y = npr.rand(*p.shape) < p

        return y
Example #18
    def compute_rate(self, augmented_data):
        """
        Compute the rate of the augmented data

        :param index:   Which dataset to compute the rate of
        :param ns:      Which neurons to compute the rate of
        :return:
        """
        F = augmented_data["F"]
        R = np.zeros((augmented_data["T"], self.N))
        for n in range(self.N):
            Xn = F.dot(self.weights[n,:])
            Xn += self.bias[n]
            R[:,n] = logistic(Xn)

        return R
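The per-neuron loop above can be collapsed into a single matrix product. A standalone sketch (the free-function form and name are illustrative; the method itself reads self.weights, of shape (N, N*B), and self.bias, of shape (N,)):

import numpy as np

def compute_rate_vectorized(F, weights, bias):
    # Equivalent to the loop above: F is the T x (N*B) design matrix,
    # weights is (N, N*B), bias is (N,). Returns a T x N array of rates.
    psi = F.dot(weights.T) + bias[None, :]
    return 1.0 / (1.0 + np.exp(-psi))   # logistic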
Example #19
    def compute_rate(self, augmented_data):
        """
        Compute the rate of the augmented data

        :param index:   Which dataset to compute the rate of
        :param ns:      Which neurons to compute the rate of
        :return:
        """
        F = augmented_data["F"]
        R = np.zeros((augmented_data["T"], self.N))
        for n in range(self.N):
            Xn = F.dot(self.weights[n, :])
            Xn += self.bias[n]
            R[:, n] = logistic(Xn)

        return R
Example #20
    def _grad_neg_log_posterior(self, x, n):
        """
        Helper function to compute the negative log likelihood
        """
        assert x.shape == (1 + self.N * self.B, )
        self.b[n] = x[0]
        self.weights[n, :] = x[1:]

        # Compute the gradient
        d_ll_d_x = np.zeros_like(x)

        for data in self.data_list:
            S = data["S"][:, n]
            F = data["F"]
            X = self.compute_activation(data, n=n)
            P = logistic(X)
            P = np.clip(P, 1e-32, 1 - 1e-32)

            # Compute each term in the gradient
            d_ll_d_p = S / P - self.xi / (1 - P)  # 1xT
            d_p_d_psi = dlogistic_dx(P)  # technically TxT diagonal
            d_psi_d_b = 1.0  # technically Tx1
            d_psi_d_w = F  # TxNB

            # Multiply em up!
            d_ll_d_x[0] += (d_ll_d_p * d_p_d_psi * d_psi_d_b).sum()
            d_ll_d_x[1:] += (d_ll_d_p * d_p_d_psi).dot(d_psi_d_w)

        # Compute gradient of the log prior
        d_lp_d_x = np.zeros_like(x)
        d_lp_d_x[1:] += -self.lmbda * np.sign(self.weights[n, :])

        d_lpost_d_x = d_ll_d_x + d_lp_d_x

        # Normalize by T
        d_lpost_d_x /= self.T

        # If self connections are disallowed, remove their gradient
        if not self.allow_self_connections:
            offset = 1 + n * self.B
            d_lpost_d_x[offset:offset + self.B] = 0

        return -d_lpost_d_x
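The gradient above relies on a dlogistic_dx helper that is not shown. Since it is applied to P = logistic(X), it is presumably the derivative of the logistic written in terms of its output; a minimal sketch under that assumption:

def dlogistic_dx(p):
    # Derivative of the logistic function in terms of its output p:
    # d/dx logistic(x) = logistic(x) * (1 - logistic(x)) = p * (1 - p).
    return p * (1.0 - p)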
Example #21
def sample_spiketrain(T, N, basis, A, W, b):
    L = basis.L
    psi = np.ones((T+L,N)) * b
    S = np.zeros((T+L,N))
    H = basis.basis.ravel()[:,None,None] * W[None,:,:] * A[None,:,:]

    for t in range(T):
        # Sample spikes for t-th time bin
        # S[t] =  np.random.negative_binomial(xi, 1.-logistic(psi[t]))
        S[t] =  np.random.rand(N) < logistic(psi[t])

        # Compute change in activation via tensor product
        dpsi = np.tensordot( H, S[t,:], axes=([1, 0]))
        psi[t:t+L,:] += dpsi

    S = S[:T]
    psi = psi[:T]

    return S,psi
Example #22
    def _grad_neg_log_posterior(self, x, n):
        """
        Helper function to compute the negative log likelihood
        """
        assert x.shape == (1+self.N * self.B,)
        self.b[n] = x[0]
        self.weights[n,:] = x[1:]

        # Compute the gradient
        d_ll_d_x = np.zeros_like(x)

        for data in self.data_list:
            S = data["S"][:,n]
            F = data["F"]
            X = self.compute_activation(data, n=n)
            P = logistic(X)
            P = np.clip(P, 1e-32, 1-1e-32)

            # Compute each term in the gradient
            d_ll_d_p  = S / P - self.xi / (1-P)     # 1xT
            d_p_d_psi = dlogistic_dx(P)             # technically TxT diagonal
            d_psi_d_b = 1.0                         # technically Tx1
            d_psi_d_w = F                           # TxNB

            # Multiply em up!
            d_ll_d_x[0]  += (d_ll_d_p * d_p_d_psi * d_psi_d_b).sum()
            d_ll_d_x[1:] += (d_ll_d_p * d_p_d_psi).dot(d_psi_d_w)

        # Compute gradient of the log prior
        d_lp_d_x = np.zeros_like(x)
        d_lp_d_x[1:] += -self.lmbda * np.sign(self.weights[n,:])

        d_lpost_d_x = d_ll_d_x + d_lp_d_x

        # Normalize by T
        d_lpost_d_x /= self.T

        # If self connections are disallowed, remove their gradient
        if not self.allow_self_connections:
            offset = 1 + n * self.B
            d_lpost_d_x[offset:offset+self.B] = 0

        return -d_lpost_d_x
Example #23
    def log_likelihood(self, augmented_data=None, n=None):
        """
        Compute the log likelihood of the augmented data
        :return:
        """
        if augmented_data is None:
            datas = self.data_list
        else:
            datas = [augmented_data]

        ll = 0
        for data in datas:
            S = data["S"][:,n] if n is not None else data["S"]
            Z = self.log_normalizer(S)
            X = self.compute_activation(data, n=n)
            P = logistic(X)
            P = np.clip(P, 1e-32, 1-1e-32)
            ll += (Z + S * np.log(P) + self.xi * np.log(1-P)).sum()

        return ll
Example #24
    def log_likelihood(self, augmented_data=None, n=None):
        """
        Compute the log likelihood of the augmented data
        :return:
        """
        if augmented_data is None:
            datas = self.data_list
        else:
            datas = [augmented_data]

        ll = 0
        for data in datas:
            S = data["S"][:, n] if n is not None else data["S"]
            Z = self.log_normalizer(S)
            X = self.compute_activation(data, n=n)
            P = logistic(X)
            P = np.clip(P, 1e-32, 1 - 1e-32)
            ll += (Z + S * np.log(P) + self.xi * np.log(1 - P)).sum()

        return ll
Example #25
    def _resample_A(self, n_pre, n_post, stats):
        """
        Resample the presence or absence of a connection (synapse)
        :param n_pre:
        :param n_post:
        :param stats:
        :return:
        """
        prior_mu      = self.network.weights.Mu[n_pre, n_post, :]
        prior_cov     = self.network.weights.Sigma[n_pre, n_post, :, :]
        prior_sigmasq = np.diag(prior_cov)

        post_mu, post_cov, post_prec = stats
        post_sigmasq                 = np.diag(post_cov)
        rho                          = self.network.adjacency.P[n_pre, n_post]

        # Compute the log odds ratio
        logdet_prior_cov = np.log(prior_sigmasq).sum()
        logdet_post_cov  = np.log(post_sigmasq).sum()
        logit_rho_post   = logit(rho) \
                           + 0.5 * (logdet_post_cov - logdet_prior_cov) \
                           + 0.5 * post_mu.dot(post_prec).dot(post_mu) \
                           - 0.5 * prior_mu.dot(np.linalg.solve(prior_cov, prior_mu))

        # The truncated normal prior introduces another term,
        # the ratio of the normalizers of the truncated distributions
        logit_rho_post += np.log(normal_cdf((self.ub-post_mu) / np.sqrt(post_sigmasq)) -
                                 normal_cdf((self.lb-post_mu) / np.sqrt(post_sigmasq))).sum()

        logit_rho_post -= np.log(normal_cdf((self.ub-prior_mu) / np.sqrt(prior_sigmasq)) -
                                 normal_cdf((self.lb-prior_mu) / np.sqrt(prior_sigmasq))).sum()

        rho_post = logistic(logit_rho_post)

        # Sample the binary indicator of an edge
        self.A[n_pre, n_post] = np.random.rand() < rho_post
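normal_cdf is assumed here to be the standard normal CDF, used to evaluate the normalizers of the truncated weight distributions. A drop-in sketch using SciPy:

from scipy.stats import norm

def normal_cdf(x):
    # Standard normal CDF, Phi(x).
    return norm.cdf(x)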
Example #26
 def log_likelihood_given_activation(self, S, psi):
     p = logistic(psi)
     p = np.clip(p, 1e-32, 1-1e-32)
     ll = (S * np.log(p) + (1-S) * np.log(1-p))
     return ll
Example #27
 def mean(self, X):
     psi = self.activation(X)
     return logistic(psi)
Example #28
 def expected_S(self, Psi):
     p = logistic(Psi)
     return p
Example #29
 def rvs(self, Psi):
     p = logistic(Psi)
     return np.random.rand(*p.shape) < p
Example #30
 def rvs(self, Psi):
     p = logistic(Psi)
     p = np.clip(p, 1e-32, 1 - 1e-32)
     return np.random.negative_binomial(self.xi, 1 - p)
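A note on the parameterization: np.random.negative_binomial(n, p) takes the probability of success and returns the number of failures before the n-th success, so passing 1 - p here yields counts with mean self.xi * p / (1 - p), exactly what expected_S returns in Examples #40 and #41. A quick empirical check of that convention:

import numpy as np

xi, p = 5.0, 0.3
samples = np.random.negative_binomial(xi, 1 - p, size=200000)
print(samples.mean(), xi * p / (1 - p))   # the two values should roughly agree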
Example #31
    def _log_likelihood_given_activation(self, S, psi, obs_params=None):
        p = logistic(psi)
        p = np.clip(p, 1e-32, 1 - 1e-32)

        ll = (S * np.log(p) + (1 - S) * np.log(1 - p))
        return ll
Example #32
 def rvs(self, psi):
     p = logistic(psi)
     return (np.random.rand(*p.shape) < p).astype(float)
Example #33
 def mean(self, X):
     psi = self.activation(X)
     return logistic(psi)
Example #34
def resample_x(z):
    # Resample x from its (scaled) Bernoulli lkhd
    p = logistic(z / T)
    x_smpl = np.random.rand() < p
    return x_smpl
Example #35
 def expected_S(self, Psi):
     p = logistic(Psi)
     return p
Example #36
D_out, D_in = C.shape


###################
#  generate data  #
###################

truemodel = NegativeBinomialLDS(
    init_dynamics_distn=Gaussian(mu_init, sigma_init),
    dynamics_distn=AutoRegression(A=A,sigma=sigma_states),
    emission_distn=PGEmissions(D_out, D_in, C=C))

T = 2000
data, z_true = truemodel.generate(T)
psi_true = z_true.dot(C.T)
p_true = logistic(psi_true)



###############
#  fit model  #
###############
model = NegativeBinomialLDS(
    init_dynamics_distn=Gaussian(mu_0=np.zeros(D_in), sigma_0=np.eye(D_in),
                                 kappa_0=1.0, nu_0=D_in+1),
    dynamics_distn=AutoRegression(
            sigma=sigma_states, nu_0=D_in+1, S_0=D_in*np.eye(D_in), M_0=np.zeros((D_in, D_in)), K_0=D_in*np.eye(D_in)),
    emission_distn=PGEmissions(D_out, D_in, C=C, sigmasq_C=1.0))
model.add_data(data.X)

N_samples = 1000
Example #37
    def log_likelihood_given_activation(self, psi):
        p   = logistic(psi)
        p   = np.clip(p, 1e-16, 1-1e-16)

        ll = (self.X * np.log(p) + (1-self.X) * np.log(1-p))
        return ll
Example #38
 def rvs(self, Psi):
     p = logistic(Psi)
     p = np.clip(p, 1e-32, 1-1e-32)
     return np.random.negative_binomial(self.xi, 1-p)
Example #39
 def rvs(self, psi):
     p = logistic(psi)
     p = np.clip(p, 1e-32, 1-1e-32)
     return np.random.negative_binomial(self.xi, 1-p).astype(float)
Example #40
 def expected_S(self, Psi):
     p = logistic(Psi)
     p = np.clip(p, 1e-32, 1 - 1e-32)
     return self.xi * p / (1 - p)
Example #41
 def expected_S(self, Psi):
     p = logistic(Psi)
     p = np.clip(p, 1e-32, 1-1e-32)
     return self.xi * p / (1-p)
Example #42
 def rvs(self, Psi):
     p = logistic(Psi)
     return np.random.rand(*p.shape) < p