Example 1
def find_distn(lgbinwidth, numlgbins, transient, N, M_t, M_i):
    # Histogram population spiking into numlgbins bins of width lgbinwidth.
    # M_t holds spike times and M_i the corresponding neuron indices; numpy
    # names (zeros, ceil, logical_and, reshape) and Brian's ms unit are
    # assumed in scope via star imports.
    totspkhist = zeros((numlgbins, 1))
    skiptime = transient * ms
    skipbin = int(ceil(skiptime / lgbinwidth))
    for i in xrange(numlgbins):
        step_start = i * lgbinwidth
        step_end = (i + 1) * lgbinwidth
        # one vectorised count of all spikes falling in this bin; the mask
        # over M_t already covers every one of the N neurons, so no
        # per-neuron loop is needed
        totspkhist[i] = len(M_i[logical_and(M_t > step_start,
                                            M_t < step_end)])

    # smoothing (Butterworth filtfilt) was taken out; keep the raw histogram,
    # flattened to a 1d array
    totspkhist_smooth = reshape(totspkhist, len(totspkhist))

    # normalise the histogram into a distribution, skipping the first
    # skiptime to cut out the transient of excessive spiking
    if max(totspkhist_smooth[skipbin:]) > 0:
        totspkdist_smooth = totspkhist_smooth / max(totspkhist_smooth[skipbin:])
    else:
        totspkdist_smooth = totspkhist_smooth
    totspkhist_list = [val for subl in totspkhist for val in subl]
    return [totspkhist, totspkdist_smooth, totspkhist_list]
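A minimal usage sketch with synthetic spike data (hypothetical values, not from the original project). The function expects numpy names and Brian's ms unit in its own scope; a plain float stand-in for ms is enough for illustration:

    from numpy import sort
    from numpy.random import rand, randint

    ms = 0.001                                # stand-in for Brian's ms unit, in seconds
    N = 10                                    # number of neurons
    n_spikes = 500
    M_t = sort(rand(n_spikes)) * 1000 * ms    # spike times in [0, 1) s
    M_i = randint(0, N, n_spikes)             # neuron index of each spike
    hist, distn, hist_list = find_distn(10 * ms, 100, 50, N, M_t, M_i)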
Example 2
    def test_get_gaussian_2d(self):
        # visual check: plot the unnormalised log-posterior next to its
        # Laplace (Gaussian) approximation on a 2d grid; no numeric assertions
        X = asarray([-1, 1])
        X = reshape(X, (len(X), 1))
        y = asarray([+1 if x >= 0 else -1 for x in X])
        covariance = SquaredExponentialCovariance(sigma=1, scale=1)
        likelihood = LogitLikelihood()
        gp = GaussianProcess(y, X, covariance, likelihood)
        laplace = LaplaceApproximation(gp, newton_start=asarray([3, 3]))
        
        f_mode, L, steps = laplace.find_mode_newton(return_full=True)
        gaussian = laplace.get_gaussian(f_mode, L)
        F = linspace(-10, 10, 20)
        D = zeros((len(F), len(F)))
        Q = array(D, copy=True)
        for i in range(len(F)):
            for j in range(len(F)):
                f = asarray([F[i], F[j]])
                D[i, j] = gp.log_posterior_unnormalised(f)
                Q[i, j] = gaussian.log_pdf(f.reshape(1, len(f)))
        
        subplot(1, 2, 1)
        pcolor(F, F, D)
        hold(True)
        plot(steps[:, 0], steps[:, 1])
        plot(f_mode[1], f_mode[0], 'mo', markersize=10)
        hold(False)
        colorbar()
        subplot(1, 2, 2)
        pcolor(F, F, Q)
        hold(True)
        plot(f_mode[1], f_mode[0], 'mo', markersize=10)
        hold(False)
        colorbar()
#        show()
        clf()
Example 3
    def test_mode_newton_2d(self):
        # Newton's method should land within distance 1 of the empirical
        # maximum of the log-posterior evaluated on a coarse 20x20 grid
        X = asarray([-1, 1])
        X = reshape(X, (len(X), 1))
        y = asarray([+1 if x >= 0 else -1 for x in X])
        covariance = SquaredExponentialCovariance(sigma=1, scale=1)
        likelihood = LogitLikelihood()
        gp = GaussianProcess(y, X, covariance, likelihood)
        laplace = LaplaceApproximation(gp, newton_start=asarray([3, 3]))
        
        f_mode, _, steps = laplace.find_mode_newton(return_full=True)
        F = linspace(-10, 10, 20)
        D = zeros((len(F), len(F)))
        for i in range(len(F)):
            for j in range(len(F)):
                f = asarray([F[i], F[j]])
                D[i, j] = gp.log_posterior_unnormalised(f)
           
        idx = unravel_index(D.argmax(), D.shape)
        empirical_max = asarray([F[idx[0]], F[idx[1]]])
        
        pcolor(F, F, D)
        hold(True)
        plot(steps[:, 0], steps[:, 1])
        plot(f_mode[1], f_mode[0], 'mo', markersize=10)
        hold(False)
        colorbar()
        clf()
#        show()
           
        self.assertLessEqual(norm(empirical_max - f_mode), 1)
Example 4
    def test_2(self):
        kernel = GaussianKernel(sigma=2)
        X = reshape(arange(9.0), (3, 3))
        K_chol, I, R, W = incomplete_cholesky(X, kernel, eta=0.999)
        K = kernel.kernel(X)

        self.assertEqual(len(I), 2)
        self.assertEqual(I[0], 0)
        self.assertEqual(I[1], 2)

        self.assertEqual(shape(K_chol), (len(I), len(I)))
        for i in range(len(I)):
            self.assertEqual(K_chol[i, i], K[I[i], I[i]])

        self.assertEqual(shape(R), (len(I), len(X)))
        self.assertAlmostEqual(R[0, 0], 1.000000000000000)
        self.assertAlmostEqual(R[0, 1], 0.034218118311666)
        self.assertAlmostEqual(R[0, 2], 0.000001370959086)
        self.assertAlmostEqual(R[1, 0], 0)
        self.assertAlmostEqual(R[1, 1], 0.034218071400058)
        self.assertAlmostEqual(R[1, 2], 0.999999999999060)

        self.assertEqual(shape(W), (len(I), len(X)))
        self.assertAlmostEqual(W[0, 0], 1.000000000000000)
        self.assertAlmostEqual(W[0, 1], 0.034218071400090)
        self.assertAlmostEqual(W[0, 2], 0)
        self.assertAlmostEqual(W[1, 0], 0)
        self.assertAlmostEqual(W[1, 1], 0.034218071400090)
        self.assertAlmostEqual(W[1, 2], 1)
Example 5
    def test_1(self):
        kernel = GaussianKernel(sigma=10)
        X = reshape(arange(9.0), (3, 3))
        K_chol, I, R, W = incomplete_cholesky(X, kernel, eta=0.8, power=2)
        K = kernel.kernel(X)

        self.assertEqual(len(I), 2)
        self.assertEqual(I[0], 0)
        self.assertEqual(I[1], 2)

        self.assertEqual(shape(K_chol), (len(I), len(I)))
        for i in range(len(I)):
            self.assertEqual(K_chol[i, i], K[I[i], I[i]])

        self.assertEqual(shape(R), (len(I), len(X)))
        self.assertAlmostEqual(R[0, 0], 1.000000000000000)
        self.assertAlmostEqual(R[0, 1], 0.763379494336853)
        self.assertAlmostEqual(R[0, 2], 0.339595525644939)
        self.assertAlmostEqual(R[1, 0], 0)
        self.assertAlmostEqual(R[1, 1], 0.535992421608228)
        self.assertAlmostEqual(R[1, 2], 0.940571570355992)

        self.assertEqual(shape(W), (len(I), len(X)))
        self.assertAlmostEqual(W[0, 0], 1.000000000000000)
        self.assertAlmostEqual(W[0, 1], 0.569858199525808)
        self.assertAlmostEqual(W[0, 2], 0)
        self.assertAlmostEqual(W[1, 0], 0)
        self.assertAlmostEqual(W[1, 1], 0.569858199525808)
        self.assertAlmostEqual(W[1, 2], 1)
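Both tests exercise the same contract: I holds the pivot indices chosen before the tolerance eta is reached, K_chol the corresponding submatrix of the full kernel matrix K, and R a low-rank factor of K. Under the usual incomplete-Cholesky convention (an assumption here, not something the tests assert) K is approximately reconstructed from R alone:

    kernel = GaussianKernel(sigma=10)
    X = reshape(arange(9.0), (3, 3))
    K_chol, I, R, W = incomplete_cholesky(X, kernel, eta=0.8, power=2)
    K = kernel.kernel(X)
    print(norm(K - R.T.dot(R)))   # small whenever K is well approximated by R^T R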
Example 6
    def log_prior(self, f):
        """
        Computes log(p(f)); only possible if K is psd

        f - 1d vector
        """
        assert len(f) == len(self.y)
        f_2d = reshape(f, (1, len(f)))  # log_pdf expects a 2d row vector
        return self.get_gp_prior().log_pdf(f_2d)
Example 7
    def log_prior(self, f):
        """
        Computes log(p(f)); only possible if K is psd

        f - 1d vector
        """
        assert len(f) == len(self.y)
        f_2d = reshape(f, (1, len(f)))  # log_pdf expects a 2d row vector
        return self.get_gp_prior().log_pdf(f_2d)
Example 8
    def construct_proposal(self, y):
        assert len(shape(y)) == 1
        m = MixtureDistribution(self.distribution.dimension, self.num_eigen)
        m.mixing_proportion = Discrete(
            (self.eigvalues + 1) / (sum(self.eigvalues) + self.num_eigen))
        for ii in range(self.num_eigen):
            # scaled eigenvector as Cholesky factor of a rank-one covariance
            L = sqrt(self.dwscale[ii] * self.eigvalues[ii]) * \
                reshape(self.eigvectors[:, ii], (self.distribution.dimension, 1))
            m.components[ii] = Gaussian(y, L, is_cholesky=True, ell=1)
        return m
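The mixing proportion above is a smoothed, normalised weighting of the eigenvalues, so every component keeps non-zero weight even when its eigenvalue is tiny. For hypothetical values eigvalues = [4, 1] and num_eigen = 2:

    print((asarray([4.0, 1.0]) + 1) / (4.0 + 1.0 + 2))   # [5/7, 2/7], sums to 1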
Example 9
def ciBatchMeans(M, N, k):
    # 95% batch-means confidence interval for the mean waiting time; lam, mu
    # and simulateLindleyEfficient are module-level names in the original
    sim = simulateLindleyEfficient(lam, mu, M * N + k)

    # throw away the first k observations, and divide the rest into
    # subruns of length N each
    run = sim[k:(M * N + k)]
    p = reshape(run, (M, N))  # one batch (subrun) per row
    sample = mean(p, axis=1)  # take row means: one mean per batch
    meanW = mean(sample)
    varW = var(sample)
    ci = meanW - 1.96 * sqrt(varW / M), meanW + 1.96 * sqrt(varW / M)
    return ci
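A runnable sketch of how this might be called (the simulator stub below is a hypothetical stand-in for the module's simulateLindleyEfficient, which together with lam and mu lives at module level in the original):

    from numpy import zeros, reshape, mean, var, sqrt
    from numpy.random import exponential

    lam, mu = 0.8, 1.0   # arrival and service rates, lam < mu

    def simulateLindleyEfficient(lam, mu, n):
        # Lindley recursion for M/M/1 waiting times (illustrative only)
        w = zeros(n)
        for i in range(1, n):
            w[i] = max(0.0, w[i - 1] + exponential(1.0 / mu) - exponential(1.0 / lam))
        return w

    print(ciBatchMeans(M=20, N=1000, k=500))   # a (lo, hi) pair around the mean wait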
Example 10
    def assert_file_matrix(self, filename, M):
        # returns False while the file cannot be read yet, so callers can
        # poll; asserts shape and near-equality once it loads
        try:
            with open(filename):
                m = loadtxt(filename)

                # python loads vectors as 1d-arrays, but we want 2d-col-vectors
                if len(shape(m)) == 1:
                    m = reshape(m, (len(m), 1))

                self.assertEqual(M.shape, m.shape)
                self.assertLessEqual(norm(m - M), 1e-5)
                return True
        except IOError:
            return False
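A round-trip sketch of how the helper might be exercised from a test method (hypothetical file name; savetxt from numpy):

    from numpy import arange, reshape, savetxt

    M = reshape(arange(6.0), (3, 2))
    savetxt("m.txt", M)
    self.assertTrue(self.assert_file_matrix("m.txt", M))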
Example 11
    def test_log_mean_exp(self):
        X = asarray([-1, 1])
        X = reshape(X, (len(X), 1))
        y = asarray([+1. if x >= 0 else -1. for x in X])
        covariance = SquaredExponentialCovariance(sigma=1, scale=1)
        likelihood = LogitLikelihood()
        gp = GaussianProcess(y, X, covariance, likelihood)
        laplace = LaplaceApproximation(gp, newton_start=asarray([3, 3]))
        proposal = laplace.get_gaussian()

        n = 200
        prior = gp.get_gp_prior()
        samples = proposal.sample(n).samples

        log_likelihood = asarray([gp.log_likelihood(f) for f in samples])
        log_prior = prior.log_pdf(samples)
        log_proposal = proposal.log_pdf(samples)

        # log importance weights of the proposal samples
        X = log_likelihood + log_prior - log_proposal

        a = log(mean(exp(X)))
        b = GPTools.log_mean_exp(X)

        self.assertLessEqual(a - b, 1e-5)
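For reference, a numerically stable log-mean-exp via the standard max-shift trick (a sketch with numpy's log, mean and exp; presumably equivalent to what GPTools.log_mean_exp computes):

    def log_mean_exp(X):
        m = X.max()                       # shift by the max so exp cannot overflow
        return m + log(mean(exp(X - m)))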
Example 12
    def test_log_mean_exp(self):
        X = asarray([-1, 1])
        X = reshape(X, (len(X), 1))
        y = asarray([+1. if x >= 0 else -1. for x in X])
        covariance = SquaredExponentialCovariance(sigma=1, scale=1)
        likelihood = LogitLikelihood()
        gp = GaussianProcess(y, X, covariance, likelihood)
        laplace = LaplaceApproximation(gp, newton_start=asarray([3, 3]))
        proposal = laplace.get_gaussian()

        n = 200
        prior = gp.get_gp_prior()
        samples = proposal.sample(n).samples

        log_likelihood = asarray([gp.log_likelihood(f) for f in samples])
        log_prior = prior.log_pdf(samples)
        log_proposal = proposal.log_pdf(samples)

        X = log_likelihood + log_prior - log_proposal

        a = log(mean(exp(X)))
        b = GPTools.log_mean_exp(X)

        self.assertLessEqual(a - b, 1e-5)
Example 13
    def mean_and_cov_adapt(self, learn_scale):
        # running mean/covariance estimates, moved towards the current sample
        # with step size learn_scale
        current_1d = reshape(self.current_sample_object.samples,
                             (self.distribution.dimension,))
        difference = current_1d - self.mean_est
        self.cov_est += learn_scale * (outer(difference, difference) - self.cov_est)
        self.mean_est += learn_scale * (current_1d - self.mean_est)
Example 14
    def step(self):
        """
        Performs one Metropolis-Hastings step, updates internal state and returns

        sample_object, proposal_2d, accepted, log_lik, log_ratio where
        sample_object - new or old sample_object (row-vector)
        accepted - boolean whether accepted
        log_lik - log-likelihood of returned sample_object
        log_ratio - log probability of acceptance
        """
        # create proposal around current_sample_object point in first step only
        dim = self.distribution.dimension
        if self.Q is None:
            current_1d = reshape(self.current_sample_object.samples, (dim,))
            self.Q = self.construct_proposal(current_1d)

        # propose sample_object and construct new Q centred at proposal_2d
        proposal_object = self.Q.sample(1)
        proposal_2d = proposal_object.samples
        proposal_1d = reshape(proposal_2d, (dim,))
        Q_new = self.construct_proposal(proposal_1d)

        # 2d view for current_sample_object point
        current_2d = reshape(self.current_sample_object.samples, (1, dim))

        # First find out whether this sampler is Gibbs (which has a full target)
        # or MH (otherwise).
        if isinstance(self.distribution, FullConditionals):
            log_lik_proposal = self.distribution.full_target.log_pdf(
                self.distribution.get_current_state_array())
            accepted = True
            log_ratio = log(1)
        else:
            # do normal MH-step, compute acceptance ratio

            # evaluate both Q
            if not self.is_symmetric:
                log_Q_proposal_given_current = self.Q.log_pdf(proposal_2d)
                log_Q_current_given_proposal = Q_new.log_pdf(current_2d)
            else:
                log_Q_proposal_given_current = 0
                log_Q_current_given_proposal = 0

            log_lik_proposal = self.distribution.log_pdf(proposal_2d)

            log_ratio = log_lik_proposal - self.log_lik_current \
                        + log_Q_current_given_proposal - log_Q_proposal_given_current

            log_ratio = min(log(1), log_ratio)

            accepted = log_ratio > log(rand(1))

        if accepted:
            self.log_lik_current = log_lik_proposal
            sample_object = proposal_object
            self.Q = Q_new
        else:
            sample_object = self.current_sample_object

        # adapt state: position and proposal_2d
        self.current_sample_object = sample_object

        return StepOutput(sample_object, proposal_object, accepted,
                          self.log_lik_current, log_ratio)
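The accept/reject core above is the standard Metropolis-Hastings rule written in log space; in isolation (a sketch, with numpy's log and rand):

    log_ratio = min(0.0, log_ratio)      # log of min(1, ratio)
    accepted = log(rand()) < log_ratio   # accept with probability exp(log_ratio)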
Example 15
    def init(self, start):
        assert len(shape(start)) == 1

        self.current_sample_object = Sample(start)
        start_2d = reshape(start, (1, len(start)))
        self.log_lik_current = self.distribution.log_pdf(start_2d)
Example 16
    def step(self):
        """
        Performs one Metropolis-Hastings step, updates internal state and returns
        
        sample_object, proposal_2d, accepted, log_lik, log_ratio where
        sample_object - new or old sample_object (row-vector)
        accepted - boolean whether accepted
        log_lik - log-likelihood of returned sample_object
        log_ratio - log probability of acceptance
        """
        # create proposal around current_sample_object point in first step only
        dim = self.distribution.dimension
        if self.Q is None:
            current_1d = reshape(self.current_sample_object.samples, (dim, ))
            self.Q = self.construct_proposal(current_1d)

        # propose sample_object and construct new Q centred at proposal_2d
        proposal_object = self.Q.sample(1)
        proposal_2d = proposal_object.samples
        proposal_1d = reshape(proposal_2d, (dim, ))
        Q_new = self.construct_proposal(proposal_1d)

        # 2d view for current_sample_object point
        current_2d = reshape(self.current_sample_object.samples, (1, dim))

        # First find out whether this sampler is Gibbs (which has a full target)
        # or MH (otherwise).
        if isinstance(self.distribution, FullConditionals):
            log_lik_proposal = self.distribution.full_target.log_pdf(
                self.distribution.get_current_state_array())
            accepted = True
            log_ratio = log(1)
        else:
            # do normal MH-step, compute acceptance ratio

            # evaluate both Q
            if not self.is_symmetric:
                log_Q_proposal_given_current = self.Q.log_pdf(proposal_2d)
                log_Q_current_given_proposal = Q_new.log_pdf(current_2d)
            else:
                log_Q_proposal_given_current = 0
                log_Q_current_given_proposal = 0

            log_lik_proposal = self.distribution.log_pdf(proposal_2d)

            log_ratio = log_lik_proposal - self.log_lik_current \
                        + log_Q_current_given_proposal - log_Q_proposal_given_current

            log_ratio = min(log(1), log_ratio)

            accepted = log_ratio > log(rand(1))

        if accepted:
            self.log_lik_current = log_lik_proposal
            sample_object = proposal_object
            self.Q = Q_new
        else:
            sample_object = self.current_sample_object

        # adapt state: position and proposal_2d
        self.current_sample_object = sample_object

        return StepOutput(sample_object, proposal_object, accepted,
                          self.log_lik_current, log_ratio)
Example 17
    def init(self, start):
        assert (len(shape(start)) == 1)

        self.current_sample_object = Sample(start)
        start_2d = reshape(start, (1, len(start)))
        self.log_lik_current = self.distribution.log_pdf(start_2d)
Example 18
    def precompute(self):
        # collect lines for Graphlab graph definition file for full rank case
        graphlab_lines = GraphlabLines(output_filename=self.output_filename)

        # compute all non-symmetric kernels for incoming messages at a node
        print "precomputing (non-symmetric) kernels for incoming messages at a node"
        graphlab_lines.lines.append("# non-observed nodes")
        for node in self.graph:
            added_node = False

            for in_message in self.graph[node]:
                for out_message in self.graph[node]:
                    if in_message == out_message:
                        continue

                    # don't add nodes which have no kernels, and only do so once if they have
                    if not added_node:
                        graphlab_lines.new_non_observed_node(node)
                        added_node = True

                    edge_in_message = (node, in_message)
                    edge_out_message = (out_message, node)

                    lhs = self.data[edge_in_message][0]
                    rhs = self.data[edge_out_message][1]
                    lhs = reshape(lhs, (len(lhs), 1))
                    rhs = reshape(rhs, (len(rhs), 1))
                    K = self.kernel.kernel(lhs, rhs)
                    graphlab_lines.add_non_observed_node(node, out_message, in_message, K)

        print "precomputing kernel (vectors) at observed nodes"
        graphlab_lines.lines.append(os.linesep + "# observed nodes")
        for node, observation in self.observations.items():
            graphlab_lines.new_observed_node(node)

            for out_message in self.graph[node]:
                edge = (out_message, node)
                lhs = self.data[edge][1]
                lhs = reshape(lhs, (len(lhs), 1))
                rhs = [[observation]]
                K = self.kernel.kernel(lhs, rhs)
                graphlab_lines.add_observed_node(node, out_message, K)

        # now precompute systems for inference

        print "precomputing systems for messages from observed nodes"
        graphlab_lines.lines.append(os.linesep + "# edges with observed targets")
        for node, observation in self.observations.items():
            for out_message in self.graph[node]:
                edge = (out_message, node)
                graphlab_lines.new_edge_observed_target(node, out_message)

                data_source = self.data[edge][0]
                data_source = reshape(data_source, (len(data_source), 1))
                data_target = self.data[edge][1]
                data_target = reshape(data_target, (len(data_target), 1))

                # Cholesky factors of the regularised kernel matrices
                Ks = self.kernel.kernel(data_source)
                Kt = self.kernel.kernel(data_target)

                Ls = cholesky(Ks + eye(shape(Ks)[0]) * self.reg_lambda)
                Lt = cholesky(Kt + eye(shape(Kt)[0]) * self.reg_lambda)

                graphlab_lines.add_edge(node, out_message, "L_s", Ls)
                graphlab_lines.add_edge(node, out_message, "L_t", Lt)

        print "precomputing systems for messages from non-observed nodes"
        graphlab_lines.lines.append(os.linesep + "# edges with non-observed targets")
        for edge in self.edges:
            # exclude edges which involve observed nodes
            is_edge_target_observed = len(
                Set(self.observations.keys()).intersection(Set(edge))) > 0
            if not is_edge_target_observed:
                graphlab_lines.new_edge_observed_target(edge[1], edge[0])

                data_source = self.data[edge][0]
                data_source = reshape(data_source, (len(data_source), 1))
                Ks = self.kernel.kernel(data_source)
                Ls = cholesky(Ks + eye(shape(Ks)[0]) * self.reg_lambda)
                graphlab_lines.add_edge(edge[1], edge[0], "L_s", Ls)

        # write graph definition file to disc
        graphlab_lines.flush()
Example 19
    def log_pdf(self, x):
        # view the single 1d point x as a 1 x d row so the batch evaluator applies
        return self.log_pdf_multiple_points(reshape(x, (1, len(x))))