Example 1
    def update(self, z_new, previous_accpept_prob):
        """
        Updates the proposal covariance and potentially scaling parameter, according to schedule.
        Note that every call increases a counter that is used for the schedule (if set)
        
        If not schedule is set, this method does not have any effect unless counting.
        
        Parameters:
        z_new                       - A 1-dimensional array of size (D) of.
        previous_accpept_prob       - Acceptance probability of previous iteration
        """
        self.next_iteration()

        if self.schedule is not None:
            # generate updating weight
            lmbda = self.schedule(self.t)

            # project current point
            phi = feature_map_single(z_new, self.omega, self.u)

            # update
            centred = self.mu - phi
            self.mu = self.mu * (1 - lmbda) + lmbda * phi
            self.C = self.C * (1 - lmbda) + lmbda * np.outer(centred, centred)

            # update scaling parameter if wanted
            if self.acc_star is not None:
                self.update_step_size(previous_accpept_prob)
                self.nu2s.append(self.step_size)

            if self.update_kernel_gamma is not None:
                # update sliding window
                self.past_samples.append(z_new)
                if len(self.past_samples) > self.update_kernel_gamma:
                    self.past_samples.popleft()

                num_samples_window = len(self.past_samples)

                # probability of updating
                if self.update_kernel_gamma_schedule is not None:
                    update_prob = self.update_kernel_gamma_schedule(self.t)
                else:
                    update_prob = 1. / (self.t + 1)

                # update kernel bandwidth (if window full yet)
                if np.random.rand() < update_prob and num_samples_window >= self.update_kernel_gamma:

                    # transform past samples into array
                    Z = np.array(self.past_samples)

                    # compute new kernel gamma
                    print("Checking whether to update kernel_gamma")
                    new_kernel_gamma = gamma_median_heuristic(Z, num_samples_window)
                    diff = np.abs(new_kernel_gamma - self.kernel_gamma)

                    # only update if change above tolerance
                    if diff > self.update_kernel_gamma_tol:
                        self.kernel_gamma = new_kernel_gamma

                        # re-sample basis
                        self.omega, self.u = sample_basis(self.D, self.m, self.kernel_gamma)

                        # populate feature covariance from past samples
                        self.set_batch_covariance(Z)

                        print("Updated kernel gamma to %.3f (from %d samples)" % (self.kernel_gamma, num_samples_window))
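
For reference, `self.schedule` above is expected to return a decaying weight `lmbda` in (0, 1]. The following is a minimal, hypothetical sketch (not taken from the example's code base) of a power-law schedule, together with a toy demonstration that the weighted updates of `mu` and `C` track the mean and covariance of a stream of feature vectors; the Gaussian vectors stand in for the projected points `phi`.

import numpy as np

def power_law_schedule(t, exponent=0.6):
    # hypothetical schedule: lambda_t = (t + 1)^(-exponent); the weight decays
    # to zero so that adaptation vanishes over time
    return 1.0 / (t + 1) ** exponent

# toy demonstration of the weighted updates used in update() above
rng = np.random.RandomState(0)
m = 3                    # feature dimension (self.m in the class above)
mu = np.zeros(m)         # running feature mean (self.mu)
C = np.eye(m)            # running feature covariance (self.C)

for t in range(1, 5001):
    # stand-in for phi = feature_map_single(z_new, omega, u)
    phi = rng.multivariate_normal(np.ones(m), 2.0 * np.eye(m))
    lmbda = power_law_schedule(t)
    centred = mu - phi
    mu = (1 - lmbda) * mu + lmbda * phi
    C = (1 - lmbda) * C + lmbda * np.outer(centred, centred)

print(mu)  # approaches the mean of the stream (all ones)
print(C)   # approaches its covariance (2 * identity)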
Example 3
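The snippet below relies on `Z`, `omega`, `u`, the feature covariance `C`, and the dimension `D` from the surrounding context. For orientation only, here is a minimal sketch of random-Fourier-feature helpers of the kind used in these examples; the cosine construction and the (D, m) gradient layout are assumptions chosen to be consistent with how `grad_phi_y` and `C` are combined below, not necessarily the exact library code.

import numpy as np
import matplotlib.pyplot as plt

def sample_basis(D, m, gamma):
    # assumed random Fourier basis for a Gaussian kernel with bandwidth gamma:
    # frequencies omega ~ N(0, 2*gamma*I) (one column per feature), phases u ~ U[0, 2*pi)
    omega = np.sqrt(2.0 * gamma) * np.random.randn(D, m)
    u = np.random.uniform(0, 2 * np.pi, m)
    return omega, u

def feature_map_single(x, omega, u):
    # assumed feature map: phi_i(x) = sqrt(2/m) * cos(omega_i . x + u_i), shape (m,)
    m = omega.shape[1]
    return np.sqrt(2.0 / m) * np.cos(np.dot(x, omega) + u)

def feature_map_grad_single(x, omega, u):
    # gradient of each feature w.r.t. x, laid out as shape (D, m) so that
    # grad . C . grad.T below yields a (D, D) proposal covariance
    m = omega.shape[1]
    return -np.sqrt(2.0 / m) * np.sin(np.dot(x, omega) + u) * omega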
# step size
eta = 50.

plt.plot(Z[:, 0], Z[:, 1], 'bx')

# proposal plotting colors
colors = ['y', 'r', 'g', 'm', 'black']

# proposals centred at those points
Ys = np.array([[-20, 9.7], [-10, 0], [0,-3], [10, 0], [20, 9.7]])

for j in range(len(colors)):

    # pick point at random, embed, gradient
    y = Ys[j]
    phi_y = feature_map_single(y, omega, u)
    grad_phi_y = feature_map_grad_single(y, omega, u)
    
    # draw a number of proposals at the current point
    n_proposals = 100
    X_star = np.zeros((n_proposals, D))
    
    # generate proposal samples around y, using the feature-space covariance
    for i in range(n_proposals):
        plt.plot(y[0], y[1], '*', markersize=15, color=colors[j])
        
        # construct covariance, adding exploration noise
        R = eta**2 * np.dot(grad_phi_y, np.dot(C, grad_phi_y.T))
        L_R = np.linalg.cholesky(R)
        
        # sample proposal