def get_smoothed_res(self, n, owa=False):
    model = self.get_model(n)
    fk_boot = ssm.Bootstrap(ssm=model, data=[np.array(self.series[n])])
    alg_with_mom = particles.SMC(fk=fk_boot, N=100, moments=True)
    alg_with_mom.run()
    # scale: exp(mean/2) maps the log-variance state estimate to a standard deviation
    mu, sigma = model.avg_y, np.exp(alg_with_mom.summaries.moments[0]['mean'] / 2)
    res = np.mean(np.random.normal(mu, sigma, 100))
    return res
Example #2
def smoothing_trajectories(rho, sig2, N=100):
    fk = ssms.Bootstrap(ssm=NeuroXp(rho=rho, sig2=sig2), data=data)
    pf = particles.SMC(fk=fk, N=N, qmc=False, store_history=True)
    pf.run()
    (paths, ar) = pf.hist.backward_sampling(N,
                                            return_ar=True,
                                            linear_cost=True)
    print('Acceptance rate (FFBS-reject): %.3f' % ar)
    return (paths, pf.logLt)
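For reference, the same FFBS-reject pattern on a model that ships with particles; StochVol here is an illustrative stand-in for NeuroXp, and all parameter values below are assumptions, not taken from the original script.

import particles
from particles import state_space_models as ssms

model = ssms.StochVol(mu=-1., rho=.9, sigma=.3)  # illustrative parameters
_, sim_data = model.simulate(100)
pf = particles.SMC(fk=ssms.Bootstrap(ssm=model, data=sim_data), N=100,
                   store_history=True)
pf.run()
# backward_sampling with linear_cost=True runs the O(N) rejection-based FFBS
paths, ar = pf.hist.backward_sampling(10, return_ar=True, linear_cost=True)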
Example #3
def __call__(self, x):
    rho, sig2 = x[0], x[1]
    if rho > 1. or rho < 0. or sig2 <= 0:
        return np.inf
    fk = ssms.Bootstrap(ssm=NeuroXp(rho=rho, sig2=sig2), data=data)
    pf = particles.SMC(fk=fk, N=self.N, qmc=True)
    pf.run()
    self.args.append(x)
    self.lls.append(pf.logLt)
    return -pf.logLt
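A callable like this is typically handed to a derivative-free optimiser. The sketch below is self-contained, with a smooth toy surface standing in for the noisy particle-filter log-likelihood; everything in it is an illustrative assumption.

import numpy as np
from scipy import optimize

def neg_loglik(x):  # toy stand-in for the __call__ above
    rho, sig2 = x
    if rho >= 1. or rho <= 0. or sig2 <= 0.:
        return np.inf
    return (rho - 0.5)**2 + np.log(sig2)**2  # toy surface

# Nelder-Mead: gradient-free, a sensible default since logLt estimates are noisy
res = optimize.minimize(neg_loglik, x0=np.array([0.5, 1.]),
                        method='Nelder-Mead')
print(res.x)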
def generate_aid_x(record_id, data, cir_args, tau, H, N=100000):
    logger.info("Start to generate posterior mean used as benchmark.")
    aid_ssm = CIR(tau=tau, H=H, **cir_args)
    aid_alg = particles.SMC(ssm.Bootstrap(ssm=aid_ssm, data=data), **{
        **default_args, 'N': N
    })
    aid_alg.run()
    store = {
        'aid_x': aid_alg.hist.X,
        'aid_post_mean': aid_alg.summaries.moments
    }
    np.savez(file=f"./Records/CIR{record_id}/aid_x_boot", **store)
    logger.info("Posterior mean successfully generated.")
        cir_args = {
            key: config['PARAMETERS'].getfloat(key)
            for key in config['PARAMETERS'].keys()
        }
        logger.info(
            "Data loaded successfully. Shape of states x: {}; shape of observations y: {}.",
            real_x.shape, real_y.shape)
    else:
        config = CIR_config(record_id).load()
        cir_args = config['MODEL']['PARAMS']
        tau, H = config['MODEL']['OBS']['tau'], config['MODEL']['OBS']['H']

    # Define Feynman-Kac models ======================================================================================
    fk_list = {
        "bootstrap_60k":
        ssm.Bootstrap(ssm=CIR(tau=tau, H=H, **cir_args), data=real_y),
        # "SMC"           : ssm.GuidedPF(ssm=CIR(tau=tau, H=H, **cir_args), data=real_y),
        # "SMCt"         : ssm.GuidedPF(ssm=CIR(tau=tau, H=H, **cir_args, proposal='t', tdf=5), data=real_y),
        # "SMC_mod"       : ssm.GuidedPF(ssm=CIR_mod(tau=tau, H=H, s=50, proposal='boot', **cir_args), data=real_y),
        # #
        # "normal+q811.99": MultiPropPF.MultiPropFK(ssm=CIR(tau=tau, H=H, ptp=0.99, **cir_args), data=real_y,
        #                                      proposals={'normal': 0.8, 'normalq': 0.1, 'normalql': 0.1}),
        # "normal+tq": MultiPropPF.MultiPropFK(ssm=CIR(tau=tau, H=H, tdf=3, **cir_args), data=real_y,
        #                                      proposals={'normal': 0.9, 'tq': 0.05, 'tql': 0.05}),
        # give the normal proposal t's wings (i.e. heavier, t-distributed tails)

        # "normal+q3"    : MultiPropPF.MultiPropFK(ssm=CIR(tau=tau, H=H), data=real_y,
        #                                             proposals={'normal':0.8, 'normalq':0.1, 'normalql':0.1}),
        # 2021-04-08 3:10 enlarge the proportion of right sided tail, expect better performance on extreme quantiles estimate
        #
        # "t5+q811.95": MultiPropPF.MultiPropFK(ssm=CIR(tau=tau, H=H, tdf=5, ptq=0.95, **cir_args), data=real_y,
Example #6
from matplotlib import pyplot as plt
import numpy as np
# import seaborn as sb

import particles
from particles import distributions as dists
from particles import state_space_models as ssm

# set up models, simulate and save data
T = 100
mu0 = 0.
phi0 = 0.9
sigma0 = .5  # true parameters
my_ssm = ssm.DiscreteCox(mu=mu0, phi=phi0, sigma=sigma0)
true_states, data = my_ssm.simulate(T)
fkmod = ssm.Bootstrap(ssm=my_ssm, data=data)

# run particle filter, compute trajectories
N = 100
pf = particles.SMC(fk=fkmod, N=N, store_history=True)
pf.run()
pf.hist.compute_trajectories()

# PLOT
# ====
# sb.set_palette("dark")
plt.style.use('ggplot')
savefigs = False

plt.figure()
plt.xlabel('t')
Example #7

def save_results(new_results):
    all_results.update(new_results)
    with open(results_file, 'wb') as f:
        pickle.dump(all_results, f)


# Evaluate log-likelihood on a grid
###################################
ng = 50
rhos = np.linspace(0., 1., ng)
sig2s = np.linspace(1e-2, 25., ng)  # for sigma=0., returns NaN
ijs = list(itertools.product(range(ng), range(ng)))
fks = {
    ij: ssms.Bootstrap(ssm=NeuroXp(rho=rhos[ij[0]], sig2=sig2s[ij[1]]),
                       data=data)
    for ij in ijs
}
outf = lambda pf: pf.logLt
nruns = 5
print('computing log-likelihood on a grid')
results = particles.multiSMC(fk=fks,
                             N=100,
                             qmc=True,
                             nruns=nruns,
                             nprocs=0,
                             out_func=outf)
save_results({'results': results})
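A hedged sketch of turning these results into a contour plot of the averaged log-likelihood surface; it assumes each result dict carries the fk dict key under 'fk' and the out_func value under 'output', following multiSMC's conventions.

import numpy as np
from matplotlib import pyplot as plt

ll = np.zeros((ng, ng))
for r in results:
    i, j = r['fk']                    # dict key of the fk model that was run
    ll[i, j] += r['output'] / nruns   # average log-lik estimate over runs
plt.contourf(sig2s, rhos, ll, 30)
plt.xlabel('sig2')
plt.ylabel('rho')
plt.colorbar()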


# EM
#parameter values
alpha0 = 0.4
T = 50
dims = range(5, 21, 5)

# instantiate models
models = OrderedDict()
true_loglik, true_filt_means = {}, {}
for d in dims:
    my_ssm = ssm.MVLinearGauss_Guarniero_etal(alpha=alpha0, dx=d)
    _, data = my_ssm.simulate(T)
    truth = my_ssm.kalman_filter(data)
    true_loglik[d] = truth.logpyts.cumsum()
    true_filt_means[d] = truth.filt.means
    models['boot_%i' % d] = ssm.Bootstrap(ssm=my_ssm, data=data)
    models['guided_%i' % d] = ssm.GuidedPF(ssm=my_ssm, data=data)

# Get results
N = 10**4
results = particles.multiSMC(fk=models,
                             qmc=[False, True],
                             N=N,
                             moments=True,
                             nruns=100,
                             nprocs=0)

# Format results
results_mse = []
for d in dims:
    for t in range(T):
Example #9
# parameters
dy = 80  # number of neurons
dx = 6 # 3 for position, 3 for velocity
T = 25
a0 = random.normal(loc=2.5, size=dy)  # from Koyama et al
# the b's are generated uniformly on the unit sphere in R^6 (see Koyama et al)
b0 = random.normal(size=(dy, dx))
b0 = b0 / (linalg.norm(b0, axis=1)[:, np.newaxis])
delta0 = 0.03; tau0 = 1.
x0 = np.zeros(dx)

# models
chosen_ssm = NeuralDecoding(a=a0, b=b0, x0=x0, delta=delta0, tau=tau0)
_, data = chosen_ssm.simulate(T)
models = OrderedDict()
models['boot'] = ssms.Bootstrap(ssm=chosen_ssm, data=data)
models['guided'] = ssms.GuidedPF(ssm=chosen_ssm, data=data)
# models['apf'] = ssms.AuxiliaryPF(ssm=chosen_ssm, data=data)
# Uncomment this if you want to include the APF in the comparison

N = 10**4; nruns = 50
results = particles.multiSMC(fk=models, N=N, nruns=nruns, nprocs=1,
                             collect=[Moments], store_history=True)

## PLOTS
#########
savefigs = True  # False if you don't want to save plots as pdfs
plt.style.use('ggplot')

# arbitrary time
t0 = 9
Example #10
def fkmod(**kwargs):
    return ssm.Bootstrap(ssm=ssm.StochVol(**kwargs), data=data)
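A hedged usage sketch for this factory; the parameter values and the simulated data are illustrative assumptions, not from the original.

import particles
from particles import state_space_models as ssm

_, data = ssm.StochVol(mu=-1., rho=.9, sigma=.3).simulate(100)
pf = particles.SMC(fk=fkmod(mu=-1., rho=.9, sigma=.3), N=100)
pf.run()
print(pf.logLt)  # log-likelihood estimate at these parameter values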
Example #11
from matplotlib import pyplot as plt
import numpy as np
# import seaborn as sb

import particles
from particles import distributions as dists
from particles import state_space_models

# set up models, simulate and save data
T = 100
mu0 = 0.
phi0 = 0.9
sigma0 = .5  # true parameters
ssm = state_space_models.DiscreteCox(mu=mu0, phi=phi0, sigma=sigma0)
true_states, data = ssm.simulate(T)
fkmod = state_space_models.Bootstrap(ssm=ssm, data=data)

# run particle filter, compute trajectories
N = 100
pf = particles.SMC(fk=fkmod, N=N, store_history=True)
pf.run()
Bs = pf.hist.compute_trajectories()

# PLOT
# ====
# sb.set_palette("dark")
plt.style.use('ggplot')
savefigs = True  # False if you don't want to save plots as pdfs

plt.figure()
plt.xlabel('t')
class StochVol(ssm.StateSpaceModel):
    def PX0(self):  # Distribution of X_0
        return dists.Normal(loc=self.mu,
                            scale=self.sigma / np.sqrt(1. - self.rho**2))
    def PX(self, t, xp):  # Distribution of X_t given X_{t-1}=xp
        return dists.Normal(loc=self.mu + self.rho * (xp - self.mu),
                            scale=self.sigma)
    def PY(self, t, xp,
           x):  # Distribution of Y_t given X_t=x (and possibly X_{t-1}=xp)
        return dists.Normal(loc=0., scale=np.exp(x))


my_model = StochVol(mu=-1., rho=.9, sigma=.1)  # actual model
true_states, data = my_model.simulate(
    100)  # we simulate from the model 100 data points

plt.style.use('ggplot')
plt.figure()
plt.plot(data)

fk_model = ssm.Bootstrap(ssm=my_model, data=data)
pf = particles.SMC(fk=fk_model,
                   N=100,
                   resampling='stratified',
                   moments=True,
                   store_history=True)
pf.run()
plt.figure()
plt.plot([yt**2 for yt in data], label='data-squared')
plt.plot([m['mean'] for m in pf.summaries.moments],
         label='filtered volatility')
plt.legend()

#results = particles.multiSMC(fk=fk_model, N=100, nruns=30, qmc={'SMC': False, 'SQMC': True})
#plt.figure()
def fk_mod(T):
    "FeynmanKac object, given T"
    return ssm.Bootstrap(ssm=my_ssm, data=data[:T])
"""

from __future__ import division, print_function

from matplotlib import pyplot as plt
import numpy as np
from scipy import stats

import particles
from particles import state_space_models as ssms

# instantiate model
T = 100
model = ssms.Gordon()
_, data = model.simulate(T)
fk = ssms.Bootstrap(ssm=model, data=data)

# Get results
Ns = [2**k for k in range(6, 21)]
of = lambda pf: {
    'll': pf.logLt,
    'EXt': [m['mean'] for m in pf.summaries.moments]
}
results = particles.multiSMC(fk=fk,
                             qmc={
                                 'smc': False,
                                 'sqmc': True
                             },
                             N=Ns,
                             moments=True,
                             nruns=200,
                             out_func=of)
def fkmod(theta, T):
    return ssms.Bootstrap(ssm=ssmod(theta), data=data[:T])
def log_gamma(x, mu, phi, sigma):
    return stats.norm.logpdf(x, loc=mu, scale=sigma / np.sqrt(1. - phi**2))


if __name__ == '__main__':

    # set up model, simulate data
    T = 100
    mu0 = 0.
    phi0 = .9
    sigma0 = .5  # true parameters
    my_ssm = DiscreteCox_with_add_f(mu=mu0, phi=phi0, sigma=sigma0)
    _, data = my_ssm.simulate(T)

    # FK models
    fkmod = ssms.Bootstrap(ssm=my_ssm, data=data)
    # FK model for information filter: same model with data in reverse
    fk_info = ssms.Bootstrap(ssm=my_ssm, data=data[::-1])

    nruns = 100  # run each algo 100 times
    Ns = [50, 200, 800, 3200, 12800]
    methods = [
        'FFBS_ON', 'FFBS_ON2', 'two-filter_ON', 'two-filter_ON_prop',
        'two-filter_ON2'
    ]

    add_func = partial(psit, mu=mu0, phi=phi0, sigma=sigma0)
    log_gamma_func = partial(log_gamma, mu=mu0, phi=phi0, sigma=sigma0)
    results = utils.multiplexer(f=smoothing_worker, method=methods, N=Ns,
                                fk=fkmod, fk_info=fk_info, add_func=add_func,
                                log_gamma=log_gamma_func, nruns=nruns)
Example #17
dict_prior = {'mu': dists.Normal(scale=2.),
              'sigma': dists.Gamma(a=2., b=2.),
              'rho': dists.Beta(a=9., b=1.)
              }
prior = dists.StructDist(dict_prior)
mu0, sigma0, rho0 = -1.02, 0.178, 0.9702
theta0 = np.array([(mu0, rho0, sigma0)],
                  dtype=[('mu', float), ('rho', float), ('sigma', float)])

ssm_cls = ssm.StochVol
ssmod = ssm_cls(mu=mu0, sigma=sigma0, rho=rho0)


# (QMC-)FFBS as a reference
N = 3000
tic = time.time()
pf = particles.SMC(fk=ssm.Bootstrap(ssm=ssmod, data=data), N=N, qmc=True,
                store_history=True)
pf.run()
smth_traj = pf.hist.backward_sampling_qmc(M=N)
cpu_time_ffbs = time.time() - tic
print('FFBS-QMC: run completed, took %f min' % (cpu_time_ffbs / 60.))


def reject_sv(m, s, y):
    """ Sample from N(m, s^2) times SV likelihood using rejection. 

    SV likelihood (in x) corresponds to y ~ N(0, exp(x)).
    """
    mp = m + 0.5 * s**2 * (-1. + y**2 * np.exp(-m))
    ntries = 0
    while True:
Example #18
def fk_mod(self):
    if self.auxiliary_bootstrap:
        return ssm.AuxiliaryBootstrap(ssm=self.ss_mod, data=self.data)
    else:
        # AuxiliaryBootstrap seems to perform better
        return ssm.Bootstrap(ssm=self.ss_mod, data=self.data)
Example #19
def fkmod(theta):
    mu = theta[0]
    rho = theta[1]
    sigma = theta[2]
    return ssms.Bootstrap(ssm=ssms.StochVol(mu=mu, rho=rho, sigma=sigma),
                          data=data)
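A hedged sketch of evaluating the likelihood at a given theta with this factory; the parameter vector and N are illustrative assumptions, and data must already exist in scope.

import numpy as np
import particles

theta = np.array([-1., .9, .3])  # [mu, rho, sigma], illustrative values
pf = particles.SMC(fk=fkmod(theta), N=200)
pf.run()
print(pf.logLt)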
Example #20
                           Nx=Nx, niter=niter, adaptive=False, rw_cov=rw_cov, 
                           verbose=10)

# Run the algorithms 
####################

for alg_name, alg in algos.items(): 
    print('\nRunning ' + alg_name)
    alg.run()
    print('CPU time: %.2f min' % (alg.cpu_time / 60))

# Compute variances 
###################
thin = int(niter / 100)  # compute average (of variances) over 100 points
thetas = algos['mh'].chain.theta[(burnin - 1)::thin]
fks = {k: ssm.Bootstrap(ssm=ReparamLinGauss(**smc_samplers.rec_to_dict(th)),
                        data=data)
       for k, th in enumerate(thetas)}
outf = lambda pf: pf.logLt
print('Computing variances of log-lik estimates as a function of N')
results = particles.multiSMC(fk=fks, N=Nxs, nruns=4, nprocs=0, out_func=outf)
df = pandas.DataFrame(results)
df_var = df.groupby(['fk', 'N']).var()  # variance as a function of fk and N
df_var = df_var.reset_index()
df_var_mean = df_var.groupby('N').mean()  # mean variance as function of N

# Plots
#######
savefigs = False
plt.style.use('ggplot')
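The original plotting code is not shown here; a minimal log-log sketch of the mean variance against N might look like this (the 'output' column name follows multiSMC's out_func convention, an assumption on our part).

plt.figure()
plt.plot(df_var_mean.index, df_var_mean['output'], 'o-')
plt.xscale('log')
plt.yscale('log')
plt.xlabel('N')
plt.ylabel('var(log-lik estimate)')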

def msjd(theta):
    # mean squared jump distance, summed over the chain's components
    return sum(np.sum(np.diff(theta[p], axis=0)**2) for p in theta.dtype.names)


mu0 = [8.5e-11, 2.7, 10]
sigma0 = np.diag([7e-13**2, 0.005**2, 1])
my_model = ParisLaw(mu=mu0, sigma=sigma0)  # actual model
true_states, data = my_model.simulate(10)  # simulate 10 data points from the model

plt.style.use('ggplot')
plt.figure()
plt.scatter(range(len(data)), data)

data = np.array(data).flatten()
fk_model = ssm.Bootstrap(ssm=my_model,
                         data=data)  # we use the Bootstrap filter
pf = particles.SMC(fk=fk_model,
                   N=1000,
                   resampling='stratified',
                   moments=False,
                   store_history=True)  # the algorithm
pf.run()  # actual computation

# plot
#plt.figure()
#plt.plot([yt**2 for yt in data], label='data-squared')
#plt.plot([m['mean'] for m in pf.summaries.moments], label='filtered volatility')
#plt.legend()

# prior_dict = {'mu':dists.Normal(),
#               'sigma': dists.Gamma(a=1., b=1.),
def fk_mod(T):
    "FeynmanKac object, given T"
    return state_space_models.Bootstrap(ssm=ssm, data=data[:T])
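A hedged usage sketch: this factory makes it easy to look at the estimated log-likelihood of the first T observations (the T values and N below are illustrative assumptions).

for T in (10, 50, 100):
    pf = particles.SMC(fk=fk_mod(T), N=100)
    pf.run()
    print('T=%i, estimated log-likelihood: %f' % (T, pf.logLt))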
Example #23
#parameter values 
alpha0 = 0.3
T = 50
dims = range(5, 21, 5)

# instantiate models 
models = OrderedDict()
true_loglik, true_filt_means = {}, {}
for d in dims: 
    ssm = kalman.MVLinearGauss_Guarniero_etal(alpha=alpha0, dx=d)
    _, data = ssm.simulate(T)
    kf = kalman.Kalman(ssm=ssm, data=data)
    kf.filter()
    true_loglik[d] = np.cumsum(kf.logpyt) 
    true_filt_means[d] = [f.mean for f in kf.filt]
    models['boot_%i' % d] = ssms.Bootstrap(ssm=ssm, data=data)
    models['guided_%i' % d] = ssms.GuidedPF(ssm=ssm, data=data)

# Get results 
N = 10**4 
results = particles.multiSMC(fk=models, qmc=[False, True], N=N, moments=True,
                             nruns=100, nprocs=1)

# Format results 
results_mse = []
for d in dims: 
    for t in range(T): 
        # this returns the estimate of E[X_t(1)|Y_{0:t}]
        estimate = lambda r: r['output'].summaries.moments[t]['mean'][0] 
        for type_fk in ['guided', 'boot']:
            for qmc in [False, True]:  
Example #24
dy = 80  # number of neurons
dx = 6  # 3 for position, 3 for velocity
T = 25
a0 = random.normal(loc=2.5, size=dy)  # from Kass' paper
# the b's are generated uniformly on the unit sphere in R^6 (see Kass' paper)
b0 = random.normal(size=(dy, dx))
b0 = b0 / (linalg.norm(b0, axis=1)[:, np.newaxis])
delta0 = 0.03
tau0 = 1.
x0 = np.zeros(dx)  # TODO

# models
chosen_ssm = NeuralDecoding(a=a0, b=b0, x0=x0, delta=delta0, tau=tau0)
_, data = chosen_ssm.simulate(T)
models = OrderedDict()
models['boot'] = state_space_models.Bootstrap(ssm=chosen_ssm, data=data)
models['guided'] = state_space_models.GuidedPF(ssm=chosen_ssm, data=data)
# models['apf'] = state_space_models.AuxiliaryPF(ssm=chosen_ssm, data=data)
# Uncomment this if you want to include the APF in the comparison

N = 10**4
nruns = 50
results = particles.multiSMC(fk=models,
                             N=N,
                             nruns=nruns,
                             nprocs=1,
                             moments=True,
                             store_history=True)

## PLOTS
#########
Example #25
import particles
from particles import state_space_models as ssm

# Data and parameter values from Pitt & Shephard
raw_data = np.loadtxt('../../datasets/GBP_vs_USD_9798.txt',
                      skiprows=2,
                      usecols=(3, ),
                      comments='(C)')
T = 201
data = 100. * np.diff(np.log(raw_data[:(T + 1)]))
my_ssm = ssm.StochVol(mu=2 * np.log(.5992), sigma=0.178, rho=0.9702)

# FK models
models = OrderedDict()
models['bootstrap'] = ssm.Bootstrap(ssm=my_ssm, data=data)
models['guided'] = ssm.GuidedPF(ssm=my_ssm, data=data)
models['apf'] = ssm.AuxiliaryPF(ssm=my_ssm, data=data)

# Get results
results = particles.multiSMC(fk=models, N=10**3, nruns=250, moments=True)

# Gold standard
bigN = 10**5
bigpf = particles.SMC(fk=models['bootstrap'], N=bigN, qmc=True, moments=True)
print('One SQMC run with N=%i' % bigN)
bigpf.run()
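Before the plots, a hedged sketch of summarising the runs: pulling each run's logLt into a DataFrame and comparing its spread across the three FK models (the 'fk' and 'output' keys follow multiSMC's output convention, an assumption here).

import pandas as pd

df = pd.DataFrame([{'fk': r['fk'], 'll': r['output'].logLt} for r in results])
print(df.groupby('fk')['ll'].var())  # variance of log-lik estimates per model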

# PLOTS
# =====
plt.style.use('ggplot')
dict_prior = {
    'mu': dists.Normal(scale=2.),
    'sigma': dists.Gamma(a=2., b=2.),
    'rho': dists.Beta(a=9., b=1.)
}
prior = dists.StructDist(dict_prior)
mu0, sigma0, rho0 = -1.02, 0.178, 0.9702
theta0 = np.array([(mu0, rho0, sigma0)],
                  dtype=[('mu', float), ('rho', float), ('sigma', float)])

ssm_cls = state_space_models.StochVol
ssm = ssm_cls(mu=mu0, sigma=sigma0, rho=rho0)

# (QMC-)FFBS as a reference
N = 3000
tic = time.time()
pf = particles.SMC(fk=state_space_models.Bootstrap(ssm=ssm, data=data),
                   N=N,
                   qmc=True,
                   store_history=True)
pf.run()
smth_traj = pf.hist.backward_sampling_qmc(M=N)
cpu_time_ffbs = time.time() - tic
print('FFBS-QMC: run completed, took %f min' % (cpu_time_ffbs / 60.))


def reject_sv(m, s, y):
    """ Sample from N(m, s^2) times SV likelihood using rejection. 

    SV likelihood (in x) corresponds to y ~ N(0, exp(x)).
    """
    mp = m + 0.5 * s**2 * (-1. + y**2 * np.exp(-m))