Example #1
0
def mcmc(self, p0=None, nsteps=3000, nwalks=None, tune=None, moves=None, temp=False, seed=None, backend=True, suffix=None, linear=None, resume=False, append=False, update_freq=None, lprob_seed=None, biject=False, report=None, verbose=False, debug=False, **samplerargs):
    """Sample the posterior using the `emcee` ensemble MCMC sampler.

    Parameters
    ----------
    p0 : array, optional
        Initial walker positions. If not given, they are drawn via
        `get_par` ('prior_mean' for tempered runs with temp < 1, 'best'
        otherwise).
    nsteps : int, optional
        Number of sampling iterations (default: 3000).
    nwalks : int, optional
        Number of ensemble walkers. Inferred from `p0` or an existing
        backend where possible.
    tune : int, optional
        Number of trailing draws treated as post-burn-in (default: nsteps/5).
    moves : emcee moves object, optional
        Passed on to `emcee.EnsembleSampler`.
    temp : float, optional
        Likelihood tempering factor; `False` is treated as 1 (no tempering).
    seed : int, optional
        Random seed; defaults to the seed stored in `self.fdict`.
    backend : bool or str, optional
        Whether to store the chain in an HDF5 backend. A string is
        interpreted as the backend file path (default path:
        `<path>/<name>_sampler.h5`).
    resume, append : bool, optional
        Continue sampling on top of an existing backend.
    update_freq : int, optional
        Interval (in iterations) for printing convergence summaries
        (default: nsteps/5).
    biject : bool, optional
        If True, sample in an unbounded space obtained via a logistic
        bijection of the prior bounds.

    Returns
    -------
    None
        The sampler is stored as `self.sampler`; the mode found during
        sampling is written to `self.fdict`.
    """

    import pathos  # NOTE(review): appears unused here; possibly imported for side effects — confirm before removing
    import emcee

    if not hasattr(self, 'ndim'):
        # if it seems to be missing, lets do it.
        # but without guarantee...
        self.prep_estim(load_R=True)

    if seed is None:
        seed = self.fdict['seed']

    self.tune = tune
    if tune is None:
        # default burn-in: the last fifth of the chain
        self.tune = int(nsteps*1/5.)

    if update_freq is None:
        update_freq = int(nsteps/5.)

    if linear is None:
        # use the linear model whenever the filter is the (linear) Kalman filter
        linear = self.filter.name == 'KalmanFilter'

    if 'description' in self.fdict.keys():
        self.description = self.fdict['description']

    self.fdict['biject'] = biject

    from grgrlib.core import serializer

    if hasattr(self, 'pool'):
        from .estimation import create_pool
        create_pool(self)

    # serialize the likelihood so it can be shipped to pool workers
    lprob_global = serializer(self.lprob)

    if isinstance(temp, bool) and not temp:
        # `temp=False` means no tempering
        temp = 1

    def lprob(par): return lprob_global(
        par, linear=linear, verbose=verbose, temp=temp, lprob_seed=lprob_seed or 'set')

    bnd = np.array(self.fdict['prior_bounds'])

    def bjfunc(x):
        # map from the unbounded sampling space back into the prior bounds
        if not biject:
            return x
        x = 1/(1 + np.exp(x))
        return (bnd[1] - bnd[0])*x + bnd[0]

    def rjfunc(x):
        # inverse of `bjfunc`: map bounded parameters to the unbounded space
        if not biject:
            return x
        x = (x - bnd[0])/(bnd[1] - bnd[0])
        return np.log(1/x - 1)

    def lprob_scaled(x): return lprob(bjfunc(x))

    if self.pool:
        self.pool.clear()

    if p0 is None and not resume:
        if temp < 1:
            p0 = get_par(self, 'prior_mean', asdict=False,
                         full=False, nsample=nwalks, verbose=verbose)
        else:
            p0 = get_par(self, 'best', asdict=False, full=False,
                         nsample=nwalks, verbose=verbose)
    elif not resume:
        nwalks = p0.shape[0]

    if backend:

        if isinstance(backend, str):
            # backend_file will only be loaded later if explicitely defined before
            self.fdict['backend_file'] = backend
        try:
            backend = self.fdict['backend_file']
        except KeyError:
            # this is the default case
            suffix = str(suffix) if suffix else '_sampler.h5'
            backend = os.path.join(self.path, self.name+suffix)

        backend = emcee.backends.HDFBackend(backend)

        if not (resume or append):
            if not nwalks:
                raise TypeError(
                    "If neither `resume`, `append` or `p0` is given I need to know the number of walkers (`nwalks`).")
            try:
                backend.reset(nwalks, self.ndim)
            except KeyError as e:
                raise KeyError(
                    str(e) + '. Your `*.h5` file is likeli to be damaged...')
    else:
        backend = None

    if resume:
        # infer the ensemble size from the stored chain
        nwalks = backend.get_chain().shape[1]

    if debug:
        sampler = emcee.EnsembleSampler(nwalks, self.ndim, lprob_scaled)
    else:
        sampler = emcee.EnsembleSampler(
            nwalks, self.ndim, lprob_scaled, moves=moves, pool=self.pool, backend=backend)

    if resume and p0 is None:
        # bugfix: `not p0` raised "ambiguous truth value" for array-valued
        # `p0`; compare against None instead
        p0 = sampler.get_last_sample()

    self.sampler = sampler
    self.temp = temp

    if not verbose:
        # NOTE(review): `np.warnings` was removed in recent numpy releases;
        # may need the plain `warnings` module — confirm target numpy version
        np.warnings.filterwarnings('ignore')

    pbar = None
    if verbose > 2:
        report = report or print
    else:
        pbar = tqdm.tqdm(total=nsteps, unit='sample(s)', dynamic_ncols=True)
        report = report or pbar.write

    # when bijecting, the initial ensemble must live in the unbounded space
    p0 = rjfunc(p0) if biject else p0
    old_tau = np.inf
    cnt = 0

    for result in sampler.sample(p0, iterations=nsteps, **samplerargs):

        if not verbose:
            lls = list(result)[1]
            maf = np.mean(sampler.acceptance_fraction[-update_freq:])*100
            pbar.set_description('[ll/MAF:%s(%1.0e)/%1.0f%%]' %
                                 (str(np.max(lls))[:7], np.std(lls), maf))

        if cnt and update_freq and not cnt % update_freq:

            # periodic convergence summary
            prnttup = '[mcmc:]'.ljust(
                15, ' ') + "Summary from last %s of %s iterations" % (update_freq, cnt)

            if temp < 1:
                prnttup += ' with temp of %s%%' % (np.round(temp*100, 6))

            if self.description is not None:
                prnttup += ' (%s)' % str(self.description)

            prnttup += ':'

            report(prnttup)

            sample = sampler.get_chain()

            # integrated autocorrelation time as the convergence diagnostic
            tau = emcee.autocorr.integrated_time(sample, tol=0)
            min_tau = np.min(tau).round(2)
            max_tau = np.max(tau).round(2)
            dev_tau = np.max(np.abs(old_tau - tau)/tau)

            tau_sign = '>' if max_tau > sampler.iteration/50 else '<'
            dev_sign = '>' if dev_tau > .01 else '<'

            self.mcmc_summary(chain=bjfunc(sample), tune=update_freq,
                              calc_mdd=False, calc_ll_stats=True, out=lambda x: report(str(x)))

            report("Convergence stats: tau is in (%s,%s) (%s%s) and change is %s (%s0.01)." % (
                min_tau, max_tau, tau_sign, sampler.iteration/50, dev_tau.round(3), dev_sign))

        if cnt and update_freq and not (cnt+1) % update_freq:
            # snapshot tau one step before the next summary to measure change
            sample = sampler.get_chain()
            old_tau = emcee.autocorr.integrated_time(sample, tol=0)

        if not verbose:
            pbar.update(1)

        cnt += 1

    if pbar is not None:
        # bugfix: previously `pbar.close()` ran unconditionally and raised
        # NameError when verbose > 2 (no progress bar was created)
        pbar.close()
    if self.pool:
        self.pool.close()

    if not verbose:
        np.warnings.filterwarnings('default')

    # locate the mode among the post-burn-in draws
    log_probs = sampler.get_log_prob()[-self.tune:]
    chain = sampler.get_chain()[-self.tune:]
    chain = chain.reshape(-1, chain.shape[-1])

    arg_max = log_probs.argmax()
    mode_f = log_probs.flat[arg_max]
    mode_x = bjfunc(chain[arg_max].flatten())

    if temp == 1:

        self.fdict['mcmc_mode_x'] = mode_x
        self.fdict['mcmc_mode_f'] = mode_f

        if 'mode_f' in self.fdict.keys() and mode_f < self.fdict['mode_f']:
            print('[mcmc:]'.ljust(15, ' ') + " New mode of %s is below old mode of %s. Rejecting..." %
                  (mode_f, self.fdict['mode_f']))
        else:
            self.fdict['mode_x'] = mode_x
            self.fdict['mode_f'] = mode_f

    self.fdict['datetime'] = str(datetime.now())

    return
Example #2
0
def simulate(self,
             source=None,
             mask=None,
             pars=None,
             resid=None,
             init=None,
             operation=np.multiply,
             linear=False,
             debug=False,
             verbose=False,
             **args):
    """Simulate time series given a series of exogenous innovations.

    Parameters
    ----------
        source : dict, optional
            Dict of `extract` results. Fallback for `pars`, `resid` and
            `init` when those are not given explicitly.
        mask : array
            Mask for eps. Each non-NaN element replaces (via `operation`)
            the corresponding innovation.
        pars : array, optional
            Parameter vector(s); defaults to `source['pars']`.
        resid : array, optional
            Innovation series; defaults to `source['resid']`.
        init : array, optional
            Initial state(s); defaults to the first period of
            `source['means']`.
        operation : callable, optional
            Binary operation combining mask and innovations
            (default: np.multiply).
        linear : bool, optional
            Simulate the linear model (default: False).

    Returns
    -------
    tuple
        (states, observables, (l, k) durations, error flags)
    """
    from grgrlib.core import serializer

    pars = pars if pars is not None else source['pars']
    resi = resid if resid is not None else source['resid']
    init = init if init is not None else np.array(source['means'])[..., 0, :]

    sample = pars, resi, init

    if verbose:
        st = time.time()

    self.debug |= debug

    if hasattr(self, 'pool'):
        from .estimation import create_pool
        create_pool(self)

    # serialize bound methods so they can be shipped to pool workers
    set_par = serializer(self.set_par)
    t_func = serializer(self.t_func)
    obs = serializer(self.obs)

    def runner(arg):

        superflag = False
        par, eps, state = arg

        if mask is not None:
            # replace innovations wherever the mask is defined (non-NaN)
            eps = np.where(np.isnan(mask), eps, operation(np.array(mask), eps))

        set_par(par, **args)

        X = [state]
        Y = [obs(state)]
        K = []
        L = []

        for eps_t in eps:

            state, (l, k), flag = t_func(state,
                                         noise=eps_t,
                                         return_k=True,
                                         linear=linear)

            superflag |= flag

            X.append(state)
            Y.append(obs(state))
            L.append(l)
            K.append(k)

        X = np.array(X)
        Y = np.array(Y)
        LK = np.array((L, K))
        # cleanup: removed dead `K = np.array(K)` — K was never used after
        # LK is assembled

        return X, Y, LK, superflag

    wrap = tqdm.tqdm if verbose else (lambda x, **kwarg: x)

    # batch mode when any of pars/resid/init contains several draws
    if np.ndim(resi) > 2 or np.ndim(pars) > 1 or np.ndim(init) > 2:

        res = wrap(self.mapper(runner, zip(*sample)),
                   unit=' sample(s)',
                   total=len(source['pars']),
                   dynamic_ncols=True)
        res = map2arr(res)

    else:
        res = runner(sample)

    superflag = np.any(res[-1])

    if verbose:
        print('[simulate:]'.ljust(15, ' ') + 'Simulation took ',
              time.time() - st, ' seconds.')

    if superflag and verbose:
        print('[simulate:]'.ljust(15, ' ') +
              'No rational expectations solution found.')

    X, Y, LK, flags = res

    return X, Y, (LK[..., 0, :], LK[..., 1, :]), flags
Example #3
0
def extract(self,
            sample=None,
            nsamples=1,
            precalc=True,
            seed=0,
            nattemps=4,
            accept_failure=False,
            verbose=True,
            debug=False,
            l_max=None,
            k_max=None,
            **npasargs):
    """Extract the timeseries of (smoothed) shocks.

    Parameters
    ----------
    sample : array, optional
        Provide one or several parameter vectors used for which the smoothed shocks are calculated (default is the current `self.par`)
    nsamples : int, optional
        Number of `npas`-draws for each element in `sample`. Defaults to 1
    nattemps : int, optional
        Number of attemps per sample to crunch the sample with a different seed. Defaults to 4
    accept_failure : bool, optional
        If True, return None for samples where all `nattemps` attempts
        failed instead of re-raising the last error. Defaults to False
    seed : int, optional
        Seed for drawing the per-draw RNG seeds. Defaults to 0

    Returns
    -------
    dict
        Dictionary with parameters ('pars'), initial states ('init'),
        smoothed residuals ('resid') and error flags ('flags')
    """

    import tqdm
    import os
    from grgrlib.core import map2arr, serializer

    # if sample is None:
    # sample = self.par

    # normalize a single parameter vector to a one-element list
    if np.ndim(sample) <= 1:
        sample = [sample]

    np.random.seed(seed)

    fname = self.filter.name
    # debugging implies verbosity
    verbose = max(verbose, debug)

    if hasattr(self, 'pool'):
        from .estimation import create_pool
        create_pool(self)

    if fname == 'ParticleFilter':
        raise NotImplementedError

    elif fname == 'KalmanFilter':
        if nsamples > 1:
            print(
                '[extract:]'.ljust(15, ' ') +
                ' Setting `nsamples` to 1 as the linear filter does not rely on sampling.'
            )
        nsamples = 1
        # stash the current debug state so it can be restored after the run
        debug = not hasattr(self, 'debug') or self.debug
        self.debug = True

    else:
        if self.filter.reduced_form:
            # the path-adjustment smoother requires the full (non-reduced) filter
            self.create_filter(R=self.filter.R,
                               N=self.filter.N,
                               reduced_form=False)

            print(
                '[extract:]'.ljust(15, ' ') +
                ' Extraction requires filter in non-reduced form. Recreating filter instance.'
            )

        npas = serializer(self.filter.npas)

    self.debug |= debug

    # serialize bound methods so they survive pickling for pool workers
    if sample[0] is not None:
        set_par = serializer(self.set_par)

    run_filter = serializer(self.run_filter)
    t_func = serializer(self.t_func)
    edim = len(self.shocks)
    xdim = len(self.vv)
    odim = len(self.observables)

    obs_func = serializer(self.obs)
    filter_get_eps = serializer(self.get_eps_lin)

    dimeps = self.dimeps
    dimp = self.dimp

    seeds = np.random.randint(2**31, size=nsamples)  # win explodes with 2**32
    # cartesian product: each parameter vector is paired with every seed
    sample = [(x, y) for x in sample for y in seeds]

    def runner(arg):

        par, seed_loc = arg

        if par is not None:
            set_par(par, l_max=l_max, k_max=k_max)

        res = run_filter(verbose=verbose > 2, seed=seed_loc)

        if fname == 'KalmanFilter':
            # linear case: reconstruct the deterministic path implied by the
            # smoothed means and back out the innovations period by period
            means, covs = res
            res = means.copy()
            resid = np.empty((means.shape[0] - 1, dimeps))

            for t, x in enumerate(means[1:]):
                resid[t] = filter_get_eps(x, res[t])
                res[t + 1] = t_func(res[t], resid[t], linear=True)[0]

            return res[0], resid, 0

        # non-linear case: feed the filter output to the adjustment smoother
        np.random.shuffle(res)
        sample = np.dstack((obs_func(res), res[..., dimp:]))
        inits = res[:, 0, :]

        def t_func_loc(states, eps):
            # wrap the transition so observables and states are stacked in
            # the order `npas` expects

            (q, pobs), flag = t_func(states, eps, get_obs=True)

            return np.hstack((pobs, q)), flag

        # retry up to `nattemps` times before giving up on this sample
        for natt in range(nattemps):
            try:
                init, resid, flags = npas(func=t_func_loc,
                                          X=sample,
                                          init_states=inits,
                                          verbose=max(
                                              len(sample) == 1, verbose - 1),
                                          seed=seed_loc,
                                          nsamples=1,
                                          **npasargs)

                return init, resid[0], flags

            except Exception as e:
                raised_error = e

        if accept_failure:
            print('[extract:]'.ljust(15, ' ') +
                  "got an error: '%s' (after %s unsuccessful attemps)." %
                  (raised_error, natt + 1))
            return None
        else:
            import sys
            # re-raise the last error with attempt count, keeping the traceback
            raise type(raised_error)(str(raised_error) +
                                     ' (after %s unsuccessful attemps).' %
                                     (natt + 1)).with_traceback(
                                         sys.exc_info()[2])

    wrap = tqdm.tqdm if (verbose
                         and len(sample) > 1) else (lambda x, **kwarg: x)
    res = wrap(self.mapper(runner, sample),
               unit=' sample(s)',
               total=len(sample),
               dynamic_ncols=True)
    init, resid, flags = map2arr(res)

    if hasattr(self, 'pool') and self.pool:
        self.pool.close()

    if fname == 'KalmanFilter':
        # restore the debug state stashed above
        self.debug = debug

    if resid.shape[0] == 1:
        # NOTE(review): if `resid` is a plain ndarray this item assignment
        # coerces the DataFrame back to raw values — confirm `map2arr`
        # returns an object array here (cf. the sibling `extract` variant,
        # which rebinds `resid` to the DataFrame instead)
        resid[0] = pd.DataFrame(resid[0],
                                index=self.data.index[:-1],
                                columns=self.shocks)

    edict = {
        'pars': np.array([s[0] for s in sample]),
        'init': init,
        'resid': resid,
        'flags': flags
    }

    return edict
Example #4
0
def irfs(self,
         shocklist,
         pars=None,
         state=None,
         T=30,
         linear=False,
         set_k=False,
         verbose=True,
         debug=False,
         **args):
    """Compute impulse response functions.

    Parameters
    ----------

    shocklist : tuple or list of tuples
        One or several (shockname, size, period) tuples. A bare string is
        read as a unit shock in period 0; a missing period defaults to 0.
    T : int
        Simulation horizon. (default: 30)

    Returns
    -------
    DataFrame, tuple(int,int)
        The simulated series as a pandas.DataFrame object and the expected durations at the constraint
    """

    from grgrlib.core import serializer

    self.debug |= debug

    if not isinstance(shocklist, list):
        shocklist = [shocklist, ]

    if hasattr(self, 'pool'):
        from .estimation import create_pool
        create_pool(self)

    st = time.time()
    shocks = self.shocks
    nstates = len(self.vv)

    # serialize bound methods so they can be shipped to pool workers
    set_par = serializer(self.set_par)
    t_func = serializer(self.t_func)

    # normalize heterogeneous inputs to (name, size, period) triples
    new_shocklist = []
    for entry in shocklist:
        if isinstance(entry, str):
            entry = (entry, 1, 0)
        elif len(entry) == 2:
            # missing period defaults to 0
            entry += 0,
        new_shocklist.append(entry)

    def runner(par):

        paths = np.empty((T, nstates))
        durs_k = np.empty(T)
        durs_l = np.empty(T)

        if np.any(par):
            try:
                set_par(par, **args)
            except ValueError:
                # no solution for this parameter draw: NaN paths, error code 4
                paths[:] = np.nan
                durs_k[:] = np.nan
                durs_l[:] = np.nan
                return paths, durs_k, durs_l, 4

        x_t = np.zeros(nstates) if state is None else state
        errflag = False

        for t in range(T):

            eps_t = np.zeros(len(shocks))
            for name, size, period in new_shocklist:
                if period == t:
                    eps_t[shocks.index(name)] = size

            # an enforced duration shrinks as the horizon proceeds
            k_enforced = max(set_k - t, 0) if set_k else set_k

            x_t, (l, k), flag = t_func(x_t,
                                       eps_t,
                                       set_k=k_enforced,
                                       linear=linear,
                                       return_k=True)

            errflag |= flag

            paths[t, :] = x_t
            durs_l[t] = l
            durs_k[t] = k

        return paths, durs_k, durs_l, errflag

    if pars is not None and np.ndim(pars) > 1:
        # map over a sample of parameter vectors
        res = self.mapper(runner, pars)
        X, K, L, flag = map2arr(res)
    else:
        X, K, L, flag = runner(pars)
        X = pd.DataFrame(X, columns=self.vv)

    if np.any(flag) and verbose:
        print('[irfs:]'.ljust(15, ' ') +
              'No rational expectations solution found at least once.')

    if verbose > 1:
        print('[irfs:]'.ljust(15, ' ') + 'Simulation took ',
              np.round((time.time() - st), 5), ' seconds.')

    return X, (K, L), flag
Example #5
0
File: tools.py  Project: fredojb/pydsge
def simulate(self,
             source=None,
             mask=None,
             pars=None,
             resid=None,
             init=None,
             operation=np.multiply,
             linear=False,
             debug=False,
             verbose=False,
             **args):
    """Simulate time series given a series of exogenous innovations.

    Parameters
    ----------
        source : dict, optional
            Dict of `extract` results. Fallback for `pars`, `resid` and
            `init` when those are not given explicitly.
        mask : array
            Mask for eps. Each non-NaN element replaces (via `operation`)
            the corresponding innovation.
        operation : callable, optional
            Binary operation combining mask and innovations
            (default: np.multiply).
        linear : bool, optional
            Simulate the linear model (default: False).

    Returns
    -------
    tuple
        (states, (l, k) durations, error flags)
    """
    from grgrlib.core import serializer

    pars = pars if pars is not None else source['pars']
    resi = resid if resid is not None else source['resid']
    init = init if init is not None else source['init']

    sample = pars, resi, init

    if verbose:
        st = time.time()

    self.debug |= debug

    if hasattr(self, 'pool'):
        from .estimation import create_pool
        create_pool(self)

    if self.set_par is not None:
        set_par = serializer(self.set_par)
    else:
        set_par = None

    t_func = serializer(self.t_func)
    # cleanup: removed unused `obs = serializer(self.obs)` — never
    # referenced in this variant
    vv_orig = self.vv.copy()

    def runner(arg):

        superflag = False
        par, eps, state = arg

        if mask is not None:
            # replace innovations wherever the mask is defined (non-NaN)
            eps = np.where(np.isnan(mask), eps, operation(np.array(mask), eps))

        if set_par is not None:
            _, vv = set_par(par, return_vv=True, **args)
            # the state is indexed by variable order; a changed ordering
            # would silently scramble the simulated series
            if not np.all(vv == vv_orig):
                raise Exception(
                    'The ordering of variables has changed given different parameters.'
                )

        X = [state]
        L, K = [], []

        for eps_t in eps:

            state, (l, k), flag = t_func(state,
                                         eps_t,
                                         return_k=True,
                                         linear=linear)

            superflag |= flag

            X.append(state)
            L.append(l)
            K.append(k)

        X = np.array(X)
        LK = np.array((L, K))
        # cleanup: removed dead `K = np.array(K)` — K was never used after
        # LK is assembled

        return X, LK, superflag

    wrap = tqdm.tqdm if verbose else (lambda x, **kwarg: x)
    res = wrap(self.mapper(runner, zip(*sample)),
               unit=' sample(s)',
               total=len(source['pars']),
               dynamic_ncols=True)

    X, LK, flags = map2arr(res)

    if verbose > 1:
        print('[simulate:]'.ljust(15, ' ') + 'Simulation took ',
              time.time() - st, ' seconds.')

    if np.any(flags) and verbose:
        print('[simulate:]'.ljust(15, ' ') +
              'No rational expectations solution found (at least once).')

    return X, (LK[..., 0, :], LK[..., 1, :]), flags
Example #6
0
File: tools.py  Project: fredojb/pydsge
def irfs(self,
         shocklist,
         pars=None,
         state=None,
         T=30,
         linear=False,
         set_k=False,
         force_init_equil=None,
         verbose=True,
         debug=False,
         **args):
    """Simulate impulse responses

    Parameters
    ----------

    shocklist : tuple or list of tuples
        Tuple of (shockname, size, period)
    T : int, optional
        Simulation horizon. (default: 30)
    linear : bool, optional
        Simulate linear model (default: False)
    set_k: int or tuple, optional
        Enforce a `k` (or an `(l, k)` pair) at the constraint (defaults to False)
    force_init_equil:
        If set to `False`, the equilibrium will be recalculated every iteration. This may be problematic if there is multiplicity because the algoritm selects the equilibrium with the lowest (l,k) (defaults to True)
    verbose : bool or int, optional
        Level of verbosity (default: 1)

    Returns
    -------
    DataFrame, tuple(int,int)
        The simulated series as a pandas.DataFrame object and the expected durations at the constraint
    """

    from grgrlib.core import serializer

    self.debug |= debug
    if force_init_equil is None:
        # default: keep the initial equilibrium fixed unless a duration is enforced
        force_init_equil = not bool(np.any(set_k))

    if not isinstance(shocklist, list):
        shocklist = [
            shocklist,
        ]

    if hasattr(self, 'pool'):
        from .estimation import create_pool
        create_pool(self)

    st = time.time()
    shocks = self.shocks
    nstates = self.dimx

    # serialize bound methods so they can be shipped to pool workers
    if self.set_par is not None:
        set_par = serializer(self.set_par)
    t_func = serializer(self.t_func)

    # accept all sorts of inputs
    new_shocklist = []

    for vec in shocklist:
        if isinstance(vec, str):
            # bare name: unit-sized shock in period 0
            vec = (vec, 1, 0)
        elif len(vec) == 2:
            # missing period defaults to 0
            vec += 0,
        new_shocklist.append(vec)

    def runner(par):

        X = np.empty((T, nstates))
        K = np.empty(T)
        L = np.empty(T)

        if np.any(par):
            try:
                set_par(par, **args)
            except ValueError:
                # no solution for this parameter draw: NaN paths, error code 4
                X[:] = np.nan
                K[:] = np.nan
                L[:] = np.nan
                return X, K, L, 4

        st_vec = state if state is not None else np.zeros(nstates)

        supererrflag = False
        supermultflag = False
        l, k = 0, 0

        for t in range(T):

            # collect all shocks scheduled for this period
            shk_vec = np.zeros(len(shocks))
            for vec in new_shocklist:
                if vec[2] == t:

                    shock = vec[0]
                    shocksize = vec[1]

                    shock_arg = shocks.index(shock)
                    shk_vec[shock_arg] = shocksize

            # force_init_equil will force recalculation of l,k only if the shock vec is not empty
            if force_init_equil and not np.any(shk_vec):
                # continue last period's equilibrium with durations counted down
                set_k_eff = (l - 1, k) if l else (l, max(k - 1, 0))

                # recompute the endogenous equilibrium to detect multiplicity
                _, (l_endo,
                    k_endo), flag = t_func(st_vec[-(self.dimq - self.dimeps):],
                                           shk_vec,
                                           set_k=None,
                                           linear=linear,
                                           return_k=True)

                multflag = l_endo != set_k_eff[0] or k_endo != set_k_eff[1]
                supermultflag |= multflag

                if verbose > 1 and multflag:
                    print(
                        '[irfs:]'.ljust(15, ' ') +
                        'Multiplicity found in period %s: new eql. %s coexits with old eql. %s.'
                        % (t, (l_endo, k_endo), set_k_eff))

            elif set_k is None:
                set_k_eff = None
            elif isinstance(set_k, tuple):
                # (l, k) pair: count the enforced durations down over the horizon
                set_l_eff, set_k_eff = set_k
                if set_l_eff - t >= 0:
                    set_k_eff = set_l_eff - t, set_k_eff
                else:
                    set_k_eff = 0, max(set_k_eff + set_l_eff - t, 0)
            elif set_k:
                # single integer k: no l-duration, count k down
                set_k_eff = 0, max(set_k - t, 0)
            else:
                set_k_eff = set_k

            if set_k_eff:
                # guard: enforced durations must not exceed the solved horizon
                if set_k_eff[0] > self.lks[0] or set_k_eff[1] > self.lks[1]:
                    raise IndexError('set_k exceeds l_max (%s vs. %s).' %
                                     (set_k_eff, self.lks))

            st_vec, (l, k), flag = t_func(st_vec[-(self.dimq - self.dimeps):],
                                          shk_vec,
                                          set_k=set_k_eff,
                                          linear=linear,
                                          return_k=True)

            if flag and verbose > 1:
                print(
                    '[irfs:]'.ljust(15, ' ') +
                    'No rational expectations solution found in period %s (error flag %s).'
                    % (t, flag))

            supererrflag |= flag

            X[t, :] = st_vec
            L[t] = l
            K[t] = k

        return X, L, K, supererrflag, supermultflag

    if pars is not None and np.ndim(pars) > 1:
        # map over a sample of parameter vectors
        res = self.mapper(runner, pars)
        X, L, K, flag, multflag = map2arr(res)
    else:
        X, L, K, flag, multflag = runner(pars)
        X = pd.DataFrame(X, columns=self.vv)

    if verbose == 1:
        if np.any(flag):
            print('[irfs:]'.ljust(14, ' ') +
                  ' No rational expectations solution(s) found.')
        elif np.any(multflag):
            print('[irfs:]'.ljust(14, ' ') +
                  ' Multiplicity/Multiplicities found.')

    if verbose > 2:
        print('[irfs:]'.ljust(15, ' ') + 'Simulation took ',
              np.round((time.time() - st), 5), ' seconds.')

    return X, np.vstack((L, K)), flag
Example #7
0
def extract(self,
            sample=None,
            nsamples=1,
            precalc=True,
            seed=0,
            nattemps=4,
            accept_failure=False,
            verbose=True,
            debug=False,
            l_max=None,
            k_max=None,
            **npasargs):
    """Extract the timeseries of (smoothed) shocks.

    Parameters
    ----------
    sample : array, optional
        Provide one or several parameter vectors used for which the smoothed shocks are calculated (default is the current `self.par`)
    nsamples : int, optional
        Number of `npas`-draws for each element in `sample`. Defaults to 1
    nattemps : int, optional
        Number of attemps per sample to crunch the sample with a different seed. Defaults to 4
    accept_failure : bool, optional
        If True, failed samples yield None instead of re-raising the last
        error. Defaults to False

    Returns
    -------
    dict
        Dictionary with parameters, smoothed means, observables,
        covariances, residuals and error flags
    """

    import tqdm
    import os
    from grgrlib.core import map2arr, serializer

    if sample is None:
        sample = self.par

    # normalize a single parameter vector to a one-element list
    if np.ndim(sample) <= 1:
        sample = [sample]

    fname = self.filter.name
    # debugging implies verbosity
    verbose = max(verbose, debug)

    if hasattr(self, 'pool'):
        from .estimation import create_pool
        create_pool(self)

    if fname == 'ParticleFilter':
        raise NotImplementedError

    elif fname == 'KalmanFilter':
        if nsamples > 1:
            print(
                '[extract:]'.ljust(15, ' ') +
                ' Setting `nsamples` to 1 as the linear filter is deterministic.'
            )
        nsamples = 1
        # stash the current debug state so it can be restored after the run
        debug = not hasattr(self, 'debug') or self.debug
        self.debug = True

    else:
        npas = serializer(self.filter.npas)

    if self.filter.dim_x != len(self.vv):
        raise RuntimeError(
            'Shape mismatch between dimensionality of filter and model. Maybe you want to set `reduce_sys` to True/False or (re) define the/a new filter?'
        )

    else:
        self.debug |= debug

    # serialize bound methods so they survive pickling for pool workers
    set_par = serializer(self.set_par)
    run_filter = serializer(self.run_filter)
    t_func = serializer(self.t_func)
    obs = serializer(self.obs)
    filter_get_eps = serializer(self.get_eps_lin)
    edim = len(self.shocks)
    xdim = len(self.vv)
    odim = len(self.observables)

    # cartesian product: each parameter vector is paired with every draw index
    sample = [(x, y) for x in sample for y in range(nsamples)]

    def runner(arg):

        par, seed_loc = arg

        if par is not None:
            set_par(par, l_max=l_max, k_max=k_max)

        res = run_filter(verbose=verbose > 2)

        if fname == 'KalmanFilter':
            # linear case: reconstruct the deterministic path implied by the
            # smoothed means and back out the innovations period by period
            means, covs = res
            res = means.copy()
            resid = np.empty((means.shape[0] - 1, edim))

            for t, x in enumerate(means[1:]):
                resid[t] = filter_get_eps(x, res[t])
                res[t + 1] = t_func(res[t], resid[t], linear=True)[0]

            return res, obs(res), covs, resid, 0

        get_eps = filter_get_eps if precalc else None

        # retry with a fresh seed on every attempt before giving up
        for natt in range(nattemps):
            np.random.seed(seed_loc)
            seed_loc = np.random.randint(2**31)  # win explodes with 2**32
            try:
                means, covs, resid, flags = npas(get_eps=get_eps,
                                                 verbose=max(
                                                     len(sample) == 1,
                                                     verbose - 1),
                                                 seed=seed_loc,
                                                 nsamples=1,
                                                 **npasargs)

                return means[0], obs(means[0]), covs, resid[0], flags
            except Exception as e:
                ee = e

        if accept_failure:
            print('[extract:]'.ljust(15, ' ') +
                  "got an error: '%s' (after %s unsuccessful attemps)." %
                  (ee, natt + 1))
            return None
        else:
            import sys
            # re-raise the last error with attempt count, keeping the traceback
            raise type(ee)(str(ee) + ' (after %s unsuccessful attemps).' %
                           (natt + 1)).with_traceback(sys.exc_info()[2])

    wrap = tqdm.tqdm if (verbose
                         and len(sample) > 1) else (lambda x, **kwarg: x)
    res = wrap(self.mapper(runner, sample),
               unit=' sample(s)',
               total=len(sample),
               dynamic_ncols=True)
    # NOTE: `obs` (the serialized observation function) is shadowed here by
    # the stacked observables returned from the runs
    means, obs, covs, resid, flags = map2arr(res)

    if hasattr(self, 'pool') and self.pool:
        self.pool.close()

    if fname == 'KalmanFilter':
        # restore the debug state stashed above
        self.debug = debug

    if means.shape[0] == 1:
        # single sample: return convenient DataFrames instead of raw arrays
        means = pd.DataFrame(means[0], index=self.data.index, columns=self.vv)
        resid = pd.DataFrame(resid[0],
                             index=self.data.index[:-1],
                             columns=self.shocks)

    pars = np.array([s[0] for s in sample])

    edict = {
        'pars': pars.squeeze(),
        'means': means,
        'obs': obs,
        'covs': covs,
        'resid': resid,
        'flags': flags
    }

    return edict
Example #8
0
def simulate(self,
             source,
             mask=None,
             linear=False,
             debug=False,
             verbose=False):
    """Simulate the model forward, feeding in the innovations produced
    by `extract`.

    Parameters
    ----------
        source : dict
            Dict of `extract` results
        mask : array
            Mask for eps. Each non-None element will be replaced.
    """
    from grgrlib.core import serializer

    # one (parameters, innovations, initial state) triple per extracted draw
    draws = zip(source['pars'], source['resid'],
                [m[0] for m in source['means']])

    if verbose:
        st = time.time()

    self.debug |= debug

    if hasattr(self, 'pool'):
        from .estimation import create_pool
        create_pool(self)

    # serialize bound methods so they can be shipped to pool workers
    set_par = serializer(self.set_par)
    t_func = serializer(self.t_func)

    def runner(arg):

        par, eps, x_t = arg
        errflag = False

        if mask is not None:
            # overwrite innovations wherever the mask is defined (non-NaN)
            eps = np.where(np.isnan(mask), eps, np.array(mask) * eps)

        set_par(par)

        states, ls, ks = [x_t], [], []

        for eps_t in eps:

            x_t, (l, k), flag = t_func(x_t,
                                       noise=eps_t,
                                       return_k=True,
                                       linear=linear)

            errflag |= flag

            states.append(x_t)
            ls.append(l)
            ks.append(k)

        return np.array(states), (np.array(ls), np.array(ks)), errflag

    wrap = tqdm.tqdm if verbose else (lambda x, **kwarg: x)

    res = map2arr(wrap(self.mapper(runner, draws),
                       unit=' sample(s)',
                       total=len(source['pars']),
                       dynamic_ncols=True))

    superflag = res[-1].any()

    if verbose:
        print('[simulate:]'.ljust(15, ' ') + 'Simulation took ',
              time.time() - st, ' seconds.')

    if superflag and verbose:
        print('[simulate:]'.ljust(15, ' ') +
              'No rational expectations solution found.')

    X, LK, flags = res

    return X, (LK[:, 0, :], LK[:, 1, :]), flags