import unittest

import numpy as np
# GaussianDensity and DegenerateLinearModel are assumed to be imported from
# the project's own modules


class DegenerateModelLearningTestCase(unittest.TestCase):
    def setUp(self):

        # Basic parameters
        self.K = 100
        self.ds = 3
        self.do = 3

        # System matrices
        params = dict()
        params['F'] = np.array([[0.9, 0.8, 0.7], [0, 0.9, 0.8], [0, 0, 0.7]])
        # Rank-2 process noise: Q = vec * diag(val) * vec^T, so Q is singular
        params['rank'] = np.array([2])
        params['vec'] = (1. / np.sqrt(3)) * np.array([[1, 1], [1, 1], [1, -1]])
        params['val'] = np.array([1. / 5, 1. / 2])
        params['H'] = np.identity(self.do)
        params['R'] = 0.1 * np.identity(self.do)
        self.params = params

        # Create model
        prior = GaussianDensity(np.zeros(self.ds), np.identity(self.ds))
        self.model = DegenerateLinearModel(self.ds, self.do, prior,
                                           self.params)

        # Simulate data
        np.random.seed(1)
        self.state, self.observ = self.model.simulate_data(self.K)

        # Create initial estimated model
        est_params = dict()
        est_params['F'] = 0.5 * np.identity(self.ds)
        est_params['rank'] = np.array([2])
        est_params['vec'] = np.array([[1.0, 0.0], [0.0, 1.0], [0.0, 0.0]])
        est_params['val'] = np.array([1, 1])
        est_params['H'] = np.identity(self.do)
        est_params['R'] = np.identity(self.do)
        est_model = DegenerateLinearModel(self.ds, self.do, prior, est_params)
        self.est_model = est_model

        # Set MCMC parameters
        self.num_iter = 2000
        self.num_burn = int(self.num_iter / 5)
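
    # Illustrative only (not from the original test suite): a minimal check
    # that the fixture produced data of the expected shapes. Assumes
    # simulate_data returns (state, observ) arrays of shape (K, ds) and
    # (K, do) respectively.
    def test_simulated_data_shapes(self):
        self.assertEqual(self.state.shape, (self.K, self.ds))
        self.assertEqual(self.observ.shape, (self.K, self.do))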
Example #3
# Imports needed by this snippet; GaussianDensity and DegenerateLinearModel
# come from the project's own modules, and num_iter, K and test_data_file are
# defined earlier in the original script.
import pickle
from copy import deepcopy

import numpy as np
import matplotlib.pyplot as plt

num_burn = int(num_iter / 2)


ds = 4
do = 4
params = dict()
params['F'] = np.array([[0.95, 0.8, 0.8, 0.0], [0, 0.95, -0.5, 0.1],
                        [0, 0, 1.6, -0.8], [0.0, 0.0, 1.0, 0.0]])
params['rank'] = np.array([2])
params['val'] = np.array([1.5, 0.5])
params['vec'] = np.array([[0.5, 0.5, 0.5, 0.5],
                          [1.0 / np.sqrt(2), -1.0 / np.sqrt(2), 0.0, 0.0]]).T
params['Q'] = np.dot(params['vec'],
                     np.dot(np.diag(params['val']), params['vec'].T))
params['H'] = np.identity(do)
params['R'] = 0.03 * np.identity(do)
prior = GaussianDensity(np.zeros(ds), 10*np.identity(ds))

model = DegenerateLinearModel(ds, do, prior, params)
state, observ = model.simulate_data(K)

# Draw it
fig, axs = plt.subplots(nrows=ds, ncols=1)
for dd in range(ds):
    axs[dd].plot(observ[:, dd])

# Save the data
with open(test_data_file, 'wb') as fh:
    pickle.dump([model, state, observ], fh)
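
# To reload the saved data later (sketch, mirroring the dump above):
# with open(test_data_file, 'rb') as fh:
#     model, state, observ = pickle.load(fh)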

est_params = deepcopy(params)
est_params['F'] = 0.5 * np.identity(ds)
est_params['rank'] = np.array([4])
    def smc_reduce_rank(self, rank):
        """
        The main step of the algorithm. Use the previous approximation to
        'propose' parameters for a reduced-rank model, and weight them
        correctly.
        """

        # Create a new SMC approximation
        if rank in self.approx.keys():
            raise ValueError("Already done that one")
        if rank + 1 not in self.approx.keys():
            raise ValueError("Need to do rank {} first.".format(rank + 1))
        self.approx[rank] = DegenerateModelSMCApproximation(
            self.num_samples, self.ds, rank)

        # Create space to store the state trajectories
        self.state[rank] = np.zeros(
            (self.num_samples, self.num_rejuv, self.K, self.ds))

        # Create a store for the filter results for RM in the next iteration
        filters = []

        # Resampling
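        # The stored weights are log-weights: subtracting the max before
        # exponentiating avoids overflow, and normalising yields a valid
        # multinomial resampling distribution.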
        w = self.approx[rank + 1].weight.copy()
        w -= np.max(w)
        w = np.exp(w)
        w /= np.sum(w)
        ancestors = np.random.choice(self.num_samples,
                                     size=self.num_samples,
                                     replace=True,
                                     p=w)
        self.approx[rank].ancestor = ancestors

        # Loop through samples
        for nn in range(self.num_samples):

            if self.verbose:
                print("Sample number {}.".format(nn + 1))

            # Create model object
            ai = ancestors[nn]
            parameters = {
                'F': self.approx[rank + 1].F[ai, :, :].copy(),
                'rank': [rank + 1],
                'val': self.approx[rank + 1].val[ai, :].copy(),
                'vec': self.approx[rank + 1].vec[ai, :, :].copy(),
                'H': self.H,
                'R': self.approx[rank + 1].Rs[ai].copy() * np.identity(self.do)
            }

            model = DegenerateLinearModel(self.ds, self.do,
                                          self.initial_state_prior, parameters)

            # Resample-move with Gibbs sampling to improve diversity
            if (self.filters is not None) and (self.num_rejuv > 0):
                flt = self.filters[ai]
                for ii in range(self.num_rejuv):
                    state = model.backward_simulation(flt)
                    model = sample_transition_within_subspace(
                        model, state, self.hyperparams)
                    model = sample_observation_diagonal_covariance(
                        model, state, self.observ, self.hyperparams)
                    flt, _, old_lhood = model.kalman_filter(self.observ)
                    self.state[rank][nn, ii, :, :] = state
                old_prior = transition_prior(rank, model.parameters['val'],
                                             model.parameters['vec'],
                                             model.parameters['F'],
                                             self.hyperparams)
            else:
                old_prior = self.approx[rank + 1].prior[ai]
                old_lhood = self.approx[rank + 1].lhood[ai]

            # Remove smallest eigenvalue/vector pair
            remVal, remVec = model.remove_min_eigen_value_vector()
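            # remVal/remVec are kept so the extension density and the Jacobian
            # of the dimension-reducing move can be evaluated below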

            # Probabilities for new model
            prior = transition_prior(rank, model.parameters['val'],
                                     model.parameters['vec'],
                                     model.parameters['F'], self.hyperparams)
            flt, _, lhood = model.kalman_filter(self.observ)
            exten = extended_density(remVal, remVec, model.parameters['val'])

            # Jacobian of transformation
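            # (log of the Jacobian determinant for the change of variables
            # from the rank-(rank+1) parameters to the reduced-rank parameters
            # plus the removed eigen-pair)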
            jac = - np.log(2) \
                  - np.sum(np.log(model.parameters['val'])) \
                  + (self.ds - rank - 1)*np.log(remVal) \
                  + np.sum(np.log(model.parameters['val']-remVal))

            # Calculate weight
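            # Log incremental importance weight: new log-target (prior +
            # lhood) minus old log-target, with the extension density and
            # Jacobian correcting for the change of dimension.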
            weight = + prior \
                     + lhood \
                     - old_prior \
                     - old_lhood \
                     - jac \
                     + exten

            # print(lhood - old_lhood)
            # print(prior - old_prior)
            # print(exten)
            # print(jac)

            if self.verbose:
                print("Particle log-weight: {}".format(weight))

            # Store everything
            filters.append(flt)
            self.approx[rank].prior[nn] = prior
            self.approx[rank].lhood[nn] = lhood
            self.approx[rank].weight[nn] = weight
            self.approx[rank].F[nn, :, :] = model.parameters['F'].copy()
            self.approx[rank].val[nn] = model.parameters['val'].copy()
            self.approx[rank].vec[nn] = model.parameters['vec'].copy()
            self.approx[rank].Rs[nn] = model.parameters['R'][0][0]

        # End of particle loop

        # Save the filter results for later
        self.filters = filters

        if self.verbose:
            print("For rank {}, effective sample size: {}".format(
                rank, effective_sample_size(self.approx[rank].weight)))
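
    # A hypothetical driver (sketch, not part of the original class): starting
    # from a full-rank approximation, reduce the noise rank one step at a
    # time. Assumes self.approx has already been initialised at rank self.ds.
    def run_rank_reduction(self):
        for rank in range(self.ds - 1, 0, -1):
            self.smc_reduce_rank(rank)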
import numpy as np
from copy import deepcopy
# GaussianDensity, DegenerateLinearModel and the Gibbs sampler functions used
# below are assumed to be imported from the project's own modules

filename = './results/toy-mcmc-degenerate.p'

K = 100
ds = 3
do = 3

# Ground-truth degenerate model (same system as in the test case above)
params = dict()
params['F'] = np.array([[0.9, 0.8, 0.7], [0, 0.9, 0.8], [0, 0, 0.7]])
params['rank'] = np.array([2])
params['vec'] = (1. / np.sqrt(3)) * np.array([[1, 1], [1, 1], [1, -1]])
params['val'] = np.array([1. / 5, 1. / 2])
params['H'] = np.identity(do)
params['R'] = 0.1 * np.identity(do)

prior = GaussianDensity(np.zeros(ds), 100 * np.identity(ds))
model = DegenerateLinearModel(ds, do, prior, params)

np.random.seed(0)
state, observ = model.simulate_data(K)

# Initial estimated model (cf. the test fixture above)
est_params = deepcopy(params)
est_params['F'] = 0.5 * np.identity(ds)
est_params['rank'] = np.array([2])
est_params['vec'] = np.array([[1, 0], [0, 1], [0, 0]])
est_params['val'] = np.array([1, 1])
est_params['R'] = np.identity(do)
est_model = DegenerateLinearModel(ds, do, prior, est_params)

hyperparams = dict()
hyperparams['nu0'] = params['rank']
hyperparams['rPsi0'] = np.identity(ds)
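
# A minimal sketch (not in the original script) of how the Gibbs sampler might
# proceed, reusing the sweeps called inside smc_reduce_rank above; num_iter
# and the choice of what to store are assumptions.
num_iter = 2000
samples = []
flt, _, lhood = est_model.kalman_filter(observ)
for it in range(num_iter):
    # Draw a state trajectory, then update parameters conditional on it
    state_traj = est_model.backward_simulation(flt)
    est_model = sample_transition_within_subspace(est_model, state_traj,
                                                  hyperparams)
    est_model = sample_observation_diagonal_covariance(est_model, state_traj,
                                                       observ, hyperparams)
    flt, _, lhood = est_model.kalman_filter(observ)
    samples.append(deepcopy(est_model.parameters))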
Example #7
est_params['F'] = np.vstack((np.hstack((Imat, Imat)), np.hstack((Zmat, Imat))))

est_params['Q'] = 0.001 * np.vstack((np.hstack((Imat / 3.0, Imat / 2.0)),
                                     np.hstack((Imat / 2.0, Imat / 1.0))))
est_params['val'], est_params['vec'] = la.eigh(est_params['Q'])
est_params['rank'] = np.array([ds])
# Alternative initialisation: keep only the first eigen-pair for rank 1
# val, vec = la.eigh(est_params['Q'])
# est_params['val'] = val[:1]
# est_params['vec'] = vec[:, :1]
# est_params['rank'] = np.array([1])

est_params['H'] = np.hstack((np.identity(d), np.zeros((d, d))))
est_params['R'] = 0.001 * np.identity(d)

prior = GaussianDensity(np.zeros(ds), 1000 * np.identity(ds))
est_degenerate_model = DegenerateLinearModel(ds, do, prior, est_params)
est_basic_model = BasicLinearModel(ds, do, prior, est_params)
est_naive_model = BasicLinearModel(ds, do, prior, est_params)

# Hyperparameters
hyperparams = dict()
hyperparams['nu0'] = ds
hyperparams['rPsi0'] = 0.001 * np.identity(ds)
hyperparams['Psi0'] = ds * hyperparams['rPsi0']
hyperparams['M0'] = np.zeros((ds, ds))
hyperparams['V0'] = 1E2 * np.identity(ds)
hyperparams['alpha'] = 0.01
hyperparams['a0'] = 1
hyperparams['b0'] = 0.001
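# The roles of these hyperparameters are not shown in this snippet;
# presumably nu0/Psi0 parameterise an inverse-Wishart prior on the noise
# covariance, M0/V0 a matrix-normal prior on F, and a0/b0 an inverse-gamma
# prior on the observation variance.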

# Algorithm parameters