def _initialise(self):
    """
    Initialises internal state. To be called before the MCMC chain starts.
    """
    # fix feature space random basis
    self.omega, self.u = sample_basis(self.D, self.m, self.kernel_gamma)

    # initialise running averages for feature covariance
    self.t = 0

    if self.schedule is not None:
        # start from scratch
        self.mu = np.zeros(self.m)

        # initialise as isotropic
        self.C = np.eye(self.m)
    else:
        # make the user call set_batch_covariance() explicitly
        self.mu = None
        self.C = None
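The adaptation schedule referenced above is a callable that maps the iteration counter t to a blending weight lmbda in (0, 1]. As a hedged illustration (not part of the library), a polynomially decaying schedule of the kind commonly used for vanishing adaptation in adaptive MCMC could look as follows; the function name and exponent are assumptions.

def example_schedule(t, exponent=0.6):
    # Hypothetical schedule: weight used to blend the running mean/covariance
    # with the newest feature vector; decays to zero so adaptation vanishes.
    return 1.0 / (t + 1) ** exponent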
def update(self, z_new, previous_accept_prob):
    """
    Updates the proposal covariance and potentially the scaling parameter, according to the schedule.
    Note that every call increases a counter that is used for the schedule (if set).

    If no schedule is set, this method has no effect other than counting.

    Parameters:
    z_new                  - A 1-dimensional array of size (D)
    previous_accept_prob   - Acceptance probability of the previous iteration
    """
    self.next_iteration()

    if self.schedule is not None:
        # generate updating weight
        lmbda = self.schedule(self.t)

        # project current point into feature space
        phi = feature_map_single(z_new, self.omega, self.u)

        # rank-one update of running mean and covariance
        centred = self.mu - phi
        self.mu = self.mu * (1 - lmbda) + lmbda * phi
        self.C = self.C * (1 - lmbda) + lmbda * np.outer(centred, centred)

        # update scaling parameter if wanted
        if self.acc_star is not None:
            self.update_step_size(previous_accept_prob)
            self.nu2s.append(self.step_size)

        if self.update_kernel_gamma is not None:
            # update sliding window of past samples
            self.past_samples.append(z_new)
            if len(self.past_samples) > self.update_kernel_gamma:
                self.past_samples.popleft()

            num_samples_window = len(self.past_samples)

            # probability of updating the kernel bandwidth
            if self.update_kernel_gamma_schedule is not None:
                update_prob = self.update_kernel_gamma_schedule(self.t)
            else:
                update_prob = 1. / (self.t + 1)

            # update kernel bandwidth (only once the window is full)
            if np.random.rand() < update_prob and num_samples_window >= self.update_kernel_gamma:

                # transform past samples into array
                Z = np.array(self.past_samples)

                # compute new kernel gamma
                print("Checking whether to update kernel_gamma")
                new_kernel_gamma = gamma_median_heuristic(Z, num_samples_window)
                diff = np.abs(new_kernel_gamma - self.kernel_gamma)

                # only update if the change is above the tolerance
                if diff > self.update_kernel_gamma_tol:
                    self.kernel_gamma = new_kernel_gamma

                    # re-sample basis
                    self.omega, self.u = sample_basis(self.D, self.m, self.kernel_gamma)

                    # populate feature covariance from past samples
                    self.set_batch_covariance(Z)

                    print("Updated kernel gamma to %.3f (from %d samples)" % (self.kernel_gamma, num_samples_window))
Example #5
from kameleon_rks.densities.banana import sample_banana
from kameleon_rks.densities.gaussian import sample_gaussian
from old.gaussian_rks import sample_basis, feature_map,\
    feature_map_single, feature_map_grad_single
import matplotlib.pyplot as plt
import numpy as np


np.random.seed(0)

# fix basis for now
D = 2
m = 1000
gamma = .3
omega, u = sample_basis(D, m, gamma)

# sample points in input space
N = 2000
Z = sample_banana(N, D)

# fit Gaussian in feature space
Phi = feature_map(Z, omega, u)
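# Note (assumption, not confirmed by this snippet): in the standard random
# kitchen sinks construction, Phi is an (N, m) matrix with entries of the form
# sqrt(2.0 / m) * cos(Z.dot(omega) + u), where omega holds frequencies drawn
# according to the bandwidth gamma and u holds uniform phases; the exact
# scaling convention used by old.gaussian_rks may differ.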

# mean and covariance, batch version
mu = np.mean(Phi, 0)
eta = 0.01  # small ridge to keep the empirical covariance positive definite
C = np.cov(Phi.T) + eta ** 2 * np.eye(m)
L = np.linalg.cholesky(C)

# step size
eta = 50.
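The batch fit above gives the feature-space mean mu and a Cholesky factor L of the regularised covariance. Purely as an illustration (not necessarily how the original script continues), a single draw from the fitted Gaussian N(mu, C) can be generated from L like this:

# illustrative only: one sample from N(mu, C) via the Cholesky factor
f = mu + L.dot(np.random.randn(m))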