Code example #1
 import numpy as np
 from numpy.linalg import LinAlgError
 from scipy.linalg import pinvh, solve_triangular
 from scipy.optimize import fmin_l_bfgs_b
 from scipy.special import expit
 from scipy.stats import logistic

 # Method of a Bayesian logistic regression class; _logistic_cost_grad is a
 # module-level helper returning the penalized negative log-likelihood and
 # its gradient.
 def _posterior_dist(self, X, y, A):
     '''
     Use the Laplace approximation to compute the posterior distribution.
     '''
     f = lambda w: _logistic_cost_grad(X, y, w, A)
     w_init = np.random.random(X.shape[1])
     # MAP weights = mean of the Laplace posterior
     Mn = fmin_l_bfgs_b(f,
                        x0=w_init,
                        pgtol=self.tol_solver,
                        maxiter=self.n_iter_solver)[0]
     Xm = np.dot(X, Mn)
     s = expit(Xm)
     B = logistic._pdf(Xm)  # logistic pdf = s * (1 - s); avoids underflow
     # Hessian of the negative log-posterior: X^T B X + diag(A)
     S = np.dot(X.T * B, X)
     np.fill_diagonal(S, np.diag(S) + A)
     t_hat = y - s
     cholesky = True
     # Try Cholesky first; if it fails, fall back on pinvh. On success Sn
     # is the inverse Cholesky factor R^{-1} (so S^{-1} = Sn.T @ Sn); on
     # failure it is the full pseudo-inverse.
     try:
         R = np.linalg.cholesky(S)
         Sn = solve_triangular(R,
                               np.eye(A.shape[0]),
                               check_finite=False,
                               lower=True)
     except LinAlgError:
         Sn = pinvh(S)
         cholesky = False
     return [Mn, Sn, B, t_hat, cholesky]
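
The `avoids underflow` comment refers to the sigmoid derivative s * (1 - s): once `expit` saturates to exactly 1.0 (or 0.0) in float64, the naive product underflows to zero, while SciPy's (private) `logistic._pdf` evaluates the logistic density, which equals that derivative, directly. A minimal sketch of the difference (the test point is illustrative):

 import numpy as np
 from scipy.special import expit
 from scipy.stats import logistic

 x = 40.0
 s = expit(x)              # rounds to exactly 1.0 in float64
 print(s * (1 - s))        # 0.0  -- the naive derivative underflows
 print(logistic._pdf(x))   # ~4.25e-18 -- the density form survives

Note also that on the Cholesky path the snippet returns the inverse factor R^{-1} rather than the covariance itself; the `cholesky` flag tells downstream code which form it received.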
Code example #2
 import numpy as np
 from scipy.linalg import pinvh
 from scipy.optimize import fmin_l_bfgs_b
 from scipy.special import expit
 from scipy.stats import logistic

 # Same helpers as above; intercept_prior sets the prior for the bias term.
 def _posterior_dist(self, X, y, A, intercept_prior):
     '''
     Use the Laplace approximation to compute the posterior distribution.
     '''
     if self.solver == 'lbfgs_b':
         f = lambda w: _logistic_cost_grad(X, y, w, A, intercept_prior)
         w_init = np.random.random(X.shape[1])
         Mn = fmin_l_bfgs_b(f, x0=w_init, pgtol=self.tol_solver,
                            maxiter=self.n_iter_solver)[0]
         Xm = np.dot(X, Mn)
         s = expit(Xm)
         B = logistic._pdf(Xm)  # logistic pdf = s * (1 - s); avoids underflow
         S = np.dot(X.T * B, X)
         np.fill_diagonal(S, np.diag(S) + A)
         # Working response of the penalized IRLS / Newton step (see the
         # note after this example)
         t_hat = Xm + (y - s) / B
         Sn = pinvh(S)
     elif self.solver == 'newton_cg':
         # TODO: Implement Newton-CG
         raise NotImplementedError('Newton Conjugate Gradient optimizer '
                                   'is not currently supported')
     else:
         # Guard against silently returning undefined values for an
         # unrecognized solver name.
         raise ValueError("Unknown solver '{}'".format(self.solver))
     return [Mn, Sn, B, t_hat]
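
Unlike example #1, this variant returns `t_hat = Xm + (y - s) / B`, the working response of the penalized IRLS / Newton step: solving S w = X^T B t_hat with S = X^T B X + diag(A) reproduces one Newton update of the log-posterior. A minimal, self-contained sketch of that identity (the data, prior values, and seed are illustrative):

 import numpy as np
 from scipy.special import expit

 rng = np.random.default_rng(0)
 X = rng.normal(size=(50, 3))
 y = (rng.random(50) < expit(X @ np.array([1.0, -2.0, 0.5]))).astype(float)

 w = np.zeros(3)
 A = np.full(3, 1e-2)                  # diagonal prior precision
 s = expit(X @ w)
 B = s * (1 - s)                       # no saturation at w = 0
 t_hat = X @ w + (y - s) / B           # working response
 S = X.T @ (B[:, None] * X) + np.diag(A)

 # One Newton step of the log-posterior, written two equivalent ways:
 newton = w - np.linalg.solve(S, X.T @ (s - y) + A * w)
 irls = np.linalg.solve(S, X.T @ (B * t_hat))
 print(np.allclose(newton, irls))      # True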
Code example #3
    # Requires numpy as np, scipy.special.expit and scipy.stats.logistic;
    # `core.kernel.rng` and `simulation_timestep` are project-level objects
    # (the simulator's random generator and integration timestep).
    def evolve(self):
        """Advance the synapses by one simulation step: accumulate prior
        and likelihood gradients on active synapses, add Wiener noise,
        then clip parameters and refresh weights and connectivity."""
        if self.learn:
            # Compute the (soft) number of active synapses:
            # 2 * (sigmoid(scale_w * w) - 0.5) = tanh(scale_w * w / 2),
            # a differentiable 0/1 indicator since w >= 0.
            np.add.reduce(2 * (expit(self.scale_w * self.w) - 0.5),
                          axis=1,
                          keepdims=True,
                          out=self.n_syn)

            # Compute the prior: logistic._pdf is the derivative of the
            # sigmoid, zeroed for weights beyond the clipping threshold.
            sigmoid = expit(self.lambd * (self.n_syn_max - self.n_syn))
            grad_sigmoid = logistic._pdf(self.scale_w * self.w)
            grad_sigmoid[self.w > self.grad_sigmoid_clip] = 0
            np.multiply(-self.lambd * self.scale_w * (1 - sigmoid),
                        grad_sigmoid, self.prior)

            # Compute the likelihood.
            if self.cichon_gan_rule:
                if self.destination.branch.branch_dynamics:
                    np.multiply(
                        self.scale_likelihood *
                        np.heaviside(self.destination.branch.pla, 0),
                        self.tr_pre_LTP.val, self.likelihood)
                else:
                    np.multiply(
                        self.scale_likelihood / self.destination.branch.v_thr *
                        self.destination.branch.pla, self.tr_pre_LTP.val,
                        self.likelihood)
                np.add(
                    -self.gamma * self.destination.branch.pla_on *
                    self.tr_pre_LTD.val, self.likelihood, self.likelihood)
            else:
                if self.destination.branch.branch_dynamics:
                    np.multiply(np.heaviside(self.destination.branch.pla, 0),
                                (self.tr_pre.val - self.gamma *
                                 (1 - self.tr_pre.val)), self.likelihood)
                else:
                    np.multiply(
                        self.destination.branch.pla /
                        self.destination.branch.v_thr,
                        (self.tr_pre.val - self.gamma * (1 - self.tr_pre.val)),
                        self.likelihood)

            # Add contribution from prior and likelihood to active synapses.
            if self.cichon_gan_rule:
                np.add(self.theta,
                       np.multiply(
                           self.eta,
                           np.add(self.scale_prior * self.prior,
                                  self.likelihood)),
                       self.theta,
                       where=self.c == 1)
            else:
                np.add(self.theta,
                       np.multiply(
                           self.eta,
                           np.add(self.scale_prior * self.prior,
                                  self.scale_likelihood * self.likelihood)),
                       self.theta,
                       where=self.c == 1)

            # Add noise to all synapses (Euler-Maruyama step of a Wiener
            # process): the increment W_{t+dt} - W_t ~ N(0, dt), so it is
            # drawn as rng.normal(loc=0, scale=np.sqrt(dt)).
            np.add(
                self.theta,
                self.scale_noise * core.kernel.rng.normal(
                    loc=0, scale=np.sqrt(simulation_timestep), size=self.size),
                self.theta)

            # Clip parameters, rectify weights (w = max(0, theta)) and
            # refresh the binary connectivity mask (c = 1 where theta > 0).
            np.clip(self.theta, self.theta_min, self.theta_max, self.theta)
            np.maximum(0, self.theta, self.w)
            np.heaviside(self.theta, 0, self.c)
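
The noise update above is one Euler-Maruyama step of a scaled Wiener process: over a timestep dt the increment W_{t+dt} - W_t is N(0, dt), hence the draw with standard deviation sqrt(dt). A standalone sketch (timestep, scale, and step count are illustrative) showing that the accumulated variance grows as scale^2 * n * dt:

 import numpy as np

 rng = np.random.default_rng(0)
 dt = 1e-3                 # simulation timestep
 scale_noise = 0.1
 theta = np.zeros(100000)

 # n Euler-Maruyama steps: each adds an independent N(0, dt) increment.
 n = 1000
 for _ in range(n):
     theta += scale_noise * rng.normal(loc=0.0, scale=np.sqrt(dt),
                                       size=theta.size)

 print(theta.var())                    # ~0.01
 print(scale_noise**2 * n * dt)        # 0.1**2 * 1000 * 1e-3 = 0.01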