Example #1
class BAR2(Model):

    name = "BAR(2) model"
    has_observation_error = False
    μ = global_param(NormalPrior(0, 1))
    ρ1 = global_param(NormalPrior(0, 1), transform="logit", rename="φ1")
    ρ2 = global_param(NormalPrior(0, 1), transform="logit", rename="φ2")

    def ln_joint(self, y, ζ):
        μ, (ρ1, φ1), (ρ2, φ2) = self.unpack(ζ)
        # the linear predictor is unbounded, so pass it as logits
        llhood = Bernoulli(logits=μ + y[1:-1] * ρ1 + y[:-2] * ρ2).log_prob(
            y[2:]).sum()
        lprior = (self.μ_prior.log_prob(μ) + self.φ1_prior.log_prob(φ1) +
                  self.φ2_prior.log_prob(φ2))
        return llhood + lprior
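
To run the likelihood arithmetic in isolation, the sketch below reproduces the
BAR(2) conditional likelihood with plain torch under assumed parameter values;
the Model base class, global_param, the prior classes, and unpack come from the
surrounding library and are not reproduced here.

import torch
from torch.distributions import Bernoulli

torch.manual_seed(0)

# simulate a binary AR(2) series under assumed true values
T, μ, ρ1, ρ2 = 500, -0.5, 0.8, 0.6
y = torch.zeros(T)
y[:2] = torch.bernoulli(torch.full((2,), 0.5))
for t in range(2, T):
    y[t] = Bernoulli(logits=μ + ρ1 * y[t - 1] + ρ2 * y[t - 2]).sample()

# the same conditional likelihood ln_joint computes, vectorized over t
llhood = Bernoulli(logits=μ + y[1:-1] * ρ1 + y[:-2] * ρ2).log_prob(y[2:]).sum()
print(float(llhood))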
Example #2
class StochVolModel(Model):
    name = "Stochastic volatility model"
    b = local_param()
    # λ = global_param(prior=Normal(0, 1e-4))
    σ = global_param(prior=LogNormalPrior(0, 1), rename="α", transform="log")
    φ = global_param(prior=BetaPrior(2, 2), rename="ψ", transform="logit")

    def ln_joint(self, y, ζ):
        # b, λ, (σ, α), (φ, ψ) = self.unpack(ζ)
        b, (σ, α), (φ, ψ) = self.unpack(ζ)
        ar1_sd = torch.pow(1 - torch.pow(φ, 2), -0.5)
        llikelihood = (
            Normal(0, torch.exp(.5 * (σ * b))).log_prob(y).sum()
            # Normal(0, torch.exp(.5 * (λ + σ * b))).log_prob(y).sum()
            + Normal(φ * b[:-1], 1).log_prob(b[1:]).sum()
            + Normal(0., ar1_sd).log_prob(b[0]))
        lprior = (
            self.ψ_prior.log_prob(ψ) + self.α_prior.log_prob(α)
            # + self.λ_prior.log_prob(λ)
        )
        return llikelihood + lprior

    def simulate(self, λ=0., σ=0.5, φ=0.95):
        assert σ > 0 and 0 < φ < 1
        b = torch.zeros(self.input_length)
        φ = torch.tensor(φ)
        ar1_sd = torch.pow(1 - torch.pow(φ, 2), -0.5)
        b[0] = Normal(0, ar1_sd).sample()
        for t in range(1, self.input_length):
            b[t] = Normal(φ * b[t - 1], 1).sample()
        y = Normal(loc=0., scale=torch.exp(0.5 * (λ + σ * b))).sample()
        return y, b

    def sample_observed(self, ζ, y, fc_steps=0):
        b, (σ, α), (φ, ψ) = self.unpack(ζ)
        λ = 0.
        if fc_steps > 0:
            b = torch.cat([b, torch.zeros(fc_steps)])
        for t in range(self.input_length, self.input_length + fc_steps):
            b[t] = b[t - 1] * φ + Normal(0, 1).sample()
        return Normal(loc=0., scale=torch.exp(0.5 * (λ + σ * b))).sample()
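
As a sanity check on the ar1_sd term (the same stationary scale reappears in
sample_observed's forward projection), this standalone sketch simulates a long
unit-noise AR(1) with persistence φ and compares its empirical standard
deviation against the closed form (1 - φ²)^(-1/2) used for b[0]:

import torch
from torch.distributions import Normal

torch.manual_seed(0)
φ, T = 0.95, 50_000
b = torch.zeros(T)
b[0] = Normal(0., (1 - φ ** 2) ** -0.5).sample()
for t in range(1, T):
    b[t] = φ * b[t - 1] + torch.randn(())
print(b.std().item())        # empirical stationary sd
print((1 - φ ** 2) ** -0.5)  # ≈ 3.2026, the closed form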
Example #3
class UnivariateGaussian(Model):
    """Simple univariate Gaussian model.

    For the optimization, we transform σ -> ln(σ) = η to ensure σ > 0.
    """

    name = "Univariate Gaussian model"
    μ = global_param(prior=NormalPrior(0., 10.))
    σ = global_param(prior=LogNormalPrior(0., 10.),
                     rename="η",
                     transform="log")

    def simulate(self, N: int, μ: float, σ: float):
        assert N > 2 and σ > 0
        return Normal(μ, σ).sample((N, )).type(self.dtype).to(self.device)

    def ln_joint(self, y, ζ):
        μ, (σ, η) = self.unpack(ζ)
        ll = Normal(μ, σ).log_prob(y).sum()
        lp = self.μ_prior.log_prob(μ) + self.η_prior.log_prob(η)
        return ll + lp
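
One detail worth noting: ln_joint evaluates the σ prior at the transformed
value η = ln σ, which implies the library's prior classes reparameterize
LogNormalPrior(0, 10) on σ into a Normal(0, 10) density on η. That
change-of-variables identity is easy to verify directly with
torch.distributions (a self-contained check, independent of the library):

import torch
from torch.distributions import LogNormal, Normal

η = torch.tensor(0.7)
σ = torch.exp(η)
# density of η implied by σ ~ LogNormal(0, 10) under σ = exp(η):
# log p(η) = log p(σ) + log|dσ/dη| = log p(σ) + η
lp_via_change_of_vars = LogNormal(0., 10.).log_prob(σ) + η
lp_direct = Normal(0., 10.).log_prob(η)
print(torch.allclose(lp_via_change_of_vars, lp_direct))  # True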
Example #4
class LocalLevelModel(Model):

    name = "Local level model"
    z = local_param()
    γ = global_param(prior=NormalPrior(1, 3))
    η = global_param(prior=LogNormalPrior(0, 3), transform="log", rename="ψ")
    σ = global_param(prior=InvGammaPrior(1, 5), transform="log", rename="ς")
    ρ = global_param(prior=BetaPrior(2, 2), transform="logit", rename="φ")

    def ln_joint(self, y, ζ):
        """Computes the log likelihood plus the log prior at ζ."""
        z, γ, (η, ψ), (σ, ς), (ρ, φ) = self.unpack(ζ)
        ar1_uncond_sd = torch.pow(1 - torch.pow(ρ, 2), -0.5)
        llikelihood = (Normal(γ + η * z, σ).log_prob(y).sum() +
                       Normal(ρ * z[:-1], 1).log_prob(z[1:]).sum() +
                       Normal(0., ar1_uncond_sd).log_prob(z[0]))
        lprior = (self.γ_prior.log_prob(γ) + self.ψ_prior.log_prob(ψ) +
                  self.ς_prior.log_prob(ς) + self.φ_prior.log_prob(φ))
        return llikelihood + lprior

    def simulate(self, γ: float, η: float, σ: float, ρ: float):
        z = torch.empty([self.input_length])
        z[0] = Normal(0, 1 / (1 - ρ**2)**0.5).sample()
        for i in range(1, self.input_length):
            z[i] = ρ * z[i - 1] + Normal(0, 1).sample()
        y = Normal(γ + η * z, σ).sample()
        return y.type(self.dtype).to(self.device), z.type(self.dtype).to(
            self.device)

    def sample_observed(self, ζ, y, fc_steps=0):
        z, γ, (η, ψ), (σ, ς), (ρ, φ) = self.unpack(ζ)
        if fc_steps > 0:
            z = torch.cat([z, torch.zeros(fc_steps)])
        # iteratively project states forward
        for t in range(self.input_length, self.input_length + fc_steps):
            z[t] = z[t - 1] * ρ + Normal(0, 1).sample()
        return Normal(γ + η * z, σ).sample()
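
A self-contained sketch of what sample_observed does when fc_steps > 0, written
with plain torch and hypothetical parameter values (the class's unpack
machinery is assumed and omitted): simulate the AR(1) state, project it
forward, then draw conditionally independent observations along the whole path.

import torch
from torch.distributions import Normal

torch.manual_seed(0)
T, fc_steps = 100, 10
γ, η, σ, ρ = 1.0, 0.5, 0.8, 0.9

# simulate states over the observed window, as in LocalLevelModel.simulate
z = torch.empty(T + fc_steps)
z[0] = Normal(0., (1 - ρ ** 2) ** -0.5).sample()
for t in range(1, T):
    z[t] = ρ * z[t - 1] + torch.randn(())

# iteratively project states forward, as in sample_observed
for t in range(T, T + fc_steps):
    z[t] = ρ * z[t - 1] + torch.randn(())

# conditionally independent draws over both observed and forecast periods
y = Normal(γ + η * z, σ).sample()
print(y[-fc_steps:])  # the forecast path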
Example #5
class FilteredSVModelDualOpt(FilteredStateSpaceModelFreeProposal):
    """ A simple stochastic volatility model for estimating with FIVO.

    .. math::
        x_t = exp(a)exp(z_t/2) ε_t       ε_t ~ Ν(0,1)
        z_t = b + c * z_{t-1} + ν_t    ν_t ~ Ν(0,1)

    The proposal density is

    .. math::
        z_t = d + e * z_{t-1} + η_t    η_t ~ Ν(0,1)

    The model parameter vector ζ collects the SV-model parameters, ζ = {a, b, c};
    the separate proposal parameter vector η collects η = {d, e}.
    """

    name = "Particle filtered stochastic volatility model"
    a = global_param(prior=LogNormalPrior(0, 1), transform="log", rename="α")
    b = global_param(prior=NormalPrior(0, 1))
    c = global_param(prior=BetaPrior(1, 1), transform="logit", rename="ψ")
    d = global_param(prior=NormalPrior(0, 1))
    e = global_param(prior=BetaPrior(1, 1), transform="logit", rename="ρ")

    def __init__(
        self,
        input_length: int,
        num_particles: int = 50,
        resample=True,
        dtype=None,
        device=None,
    ):
        super().__init__(input_length, num_particles, resample, dtype, device)
        self._md = 3
        self._pd = 2  # no σ in proposal yet

    def simulate(self, a, b, c):
        """Simulate from p(y, z | θ)"""
        a, b, c = map(torch.tensor, (a, b, c))
        z_true = torch.empty((self.input_length,))
        z_true[0] = Normal(b, (1 - c ** 2) ** (-.5)).sample()
        for t in range(1, self.input_length):
            z_true[t] = b + c * z_true[t - 1] + Normal(0, 1).sample()
        y = Normal(0, torch.exp(a) * torch.exp(z_true / 2)).sample()
        return (
            y.type(self.dtype).to(self.device),
            z_true.type(self.dtype).to(self.device),
        )

    def conditional_log_prob(self, t, y, z, ζ):
        """Compute log p(x_t, z_t | y_{0:t-1}, z_{0:t-1}, ζ).

        Args:
            t: time index (zero-based)
            y: y_{0:t} vector of points observed up to this point (which may
               actually be longer, but should only be indexed up to t)
            z: z_{0:t} vector of unobserved variables to condition on (ditto,
               array may be longer)
            ζ: parameter to condition on; should be unpacked with self.unpack
        """
        a, b, c = self.unpack_natural_model_parameters(ζ)
        if t == 0:
            log_pzt = Normal(b, (1 - c ** 2) ** (-.5)).log_prob(z[t])
        else:
            log_pzt = Normal(b + c * z[t - 1], 1).log_prob(z[t])
        log_pxt = Normal(0, torch.exp(a) * torch.exp(z[t] / 2)).log_prob(y[t])
        return log_pzt + log_pxt

    def ln_prior(self, ζ: torch.Tensor) -> torch.Tensor:
        a, b, c = self.unpack_natural_model_parameters(ζ)
        return (
            self.a_prior.log_prob(a)
            + self.b_prior.log_prob(b)
            + self.c_prior.log_prob(c)
        )

    def model_parameters(self):
        return [self.a, self.b, self.c]

    def proposal_parameters(self):
        return [self.d, self.e]

    def unpack_natural_model_parameters(self, ζ: torch.Tensor):
        α, b, ψ = ζ[0], ζ[1], ζ[2]
        return self.a_to_α.inv(α), b, self.c_to_ψ.inv(ψ)

    def unpack_natural_proposal_parameters(self, η: torch.Tensor):
        d, ρ = η[0], η[1]
        return d, self.e_to_ρ.inv(ρ)

    def simulate_log_phatN(
        self,
        y: torch.Tensor,
        ζ: torch.Tensor,
        η: torch.Tensor,
        sample: torch.Tensor = None,
    ):
        """Apply particle filter to estimate marginal likelihood log p^(y | ζ)

        This algorithm is subtly different from the one in fivo.py because it
        also takes η as a parameter.
        """
        log_phatN = 0.
        log_N = math.log(self.num_particles)
        log_w = torch.full(
            (self.num_particles,), -log_N, dtype=self.dtype, device=self.device
        )
        Z = None
        proposal = self.proposal_for(y, η)
        for t in range(self.input_length):
            zt = proposal.conditional_sample(t, Z, self.num_particles).unsqueeze(0)
            Z = torch.cat([Z, zt]) if Z is not None else zt
            log_αt = self.conditional_log_prob(
                t, y, Z, ζ
            ) - proposal.conditional_log_prob(t, Z)
            log_phatt = torch.logsumexp(log_w + log_αt, dim=0)
            log_phatN += log_phatt
            log_w += log_αt - log_phatt
            with torch.no_grad():
                ESS = 1. / torch.exp(2 * log_w).sum()
                # ESS ≤ N with equality only for uniform weights, so this
                # threshold triggers resampling at almost every step
                if self.resample and ESS < self.num_particles:
                    a = Categorical(torch.exp(log_w)).sample((self.num_particles,))
                    Z = (Z[:, a]).clone()
                    log_w = torch.full(
                        (self.num_particles,),
                        -log_N,
                        dtype=self.dtype,
                        device=self.device,
                    )
        if sample is not None:
            with torch.no_grad():
                # the sample buffer must cover at least the first
                # input_length entries
                assert sample.shape[0] >= self.input_length
                idxs = Categorical(torch.exp(log_w)).sample()
                sample[: self.input_length] = Z[:, idxs]
        return log_phatN

    def proposal_for(self, y: torch.Tensor, η: torch.Tensor) -> PFProposal:
        """Return the proposal distribution for the given parameters.

        Args:
            y: data vector
            η: proposal parameter vector
        """
        d, e = self.unpack_natural_proposal_parameters(η)
        return AR1Proposal(μ=d, ρ=e, σ=1.)

    @property
    def md(self) -> int:
        """Dimension of the model."""
        return self._md

    @property
    def pd(self) -> int:
        """Dimension of the proposal."""
        return self._pd

    def sample_observed(self, ζ, y, fc_steps=0):
        a, b, c = self.unpack_natural_model_parameters(ζ[:3])
        z = self.sample_unobserved(ζ, y, fc_steps)
        return Normal(0, torch.exp(a) * torch.exp(z / 2)).sample()

    def sample_unobserved(self, ζ, y, fc_steps=0):
        assert y is not None
        a, b, c = self.unpack_natural_model_parameters(ζ[:3])
        # get a sample of states by filtering wrt y
        z = torch.empty((len(y) + fc_steps,))
        self.simulate_log_phatN(y=y, ζ=ζ[:3], η=ζ[3:], sample=z)
        # now project states forward fc_steps
        if fc_steps > 0:
            for t in range(self.input_length, self.input_length + fc_steps):
                z[t] = b + c * z[t - 1] + Normal(0, 1).sample()
        return Normal(0, torch.exp(a) * torch.exp(z / 2)).sample()

    def __repr__(self):
        return (
            f"Stochastic volatility model for dual optimization of model and proposal:\n"
            f"\tx_t = exp(a * z_t/2) ε_t      t=1, …, {self.input_length}\n"
            f"\tz_t = b + c * z_{{t-1}} + ν_t,  t=2, …, {self.input_length}\n"
            f"\tz_1 = b + 1/√(1 - c^2) ν_1\n"
            f"\twhere ε_t, ν_t ~ Ν(0,1)\n\n"
            f"Particle filter with {self.num_particles} particles, AR(1) proposal:\n"
            f"\tz_t = d + e * z_{{t-1}} + η_t,  t=2, …, {self.input_length}\n"
            f"\tz_1 = d + 1/√(1 - e^2) η_1\n"
            f"\twhere η_t ~ Ν(0,1)\n"
        )
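
simulate_log_phatN touches the proposal only through conditional_sample(t, Z,
num_particles) and conditional_log_prob(t, Z). The real AR1Proposal class is
not shown in this section, so the following is a hypothetical sketch of that
interface, consistent with the AR(1) proposal equations in the docstring and
__repr__ above:

import torch
from torch.distributions import Normal

class AR1ProposalSketch:
    """Hypothetical stand-in for AR1Proposal: z_t = μ + ρ z_{t-1} + σ η_t."""

    def __init__(self, μ, ρ, σ=1.):
        self.μ, self.ρ, self.σ = μ, ρ, σ

    def _dist(self, t, Z):
        if t == 0:
            # stationary initial distribution: sd = σ / √(1 - ρ²)
            return Normal(self.μ, self.σ * (1 - self.ρ ** 2) ** -0.5)
        return Normal(self.μ + self.ρ * Z[t - 1], self.σ)

    def conditional_sample(self, t, Z, num_particles):
        # returns a vector of num_particles draws of z_t
        if t == 0:
            return self._dist(t, Z).sample((num_particles,))
        return self._dist(t, Z).sample()  # broadcasts over Z[t-1]

    def conditional_log_prob(self, t, Z):
        # per-particle log density of the stored draws Z[t]
        return self._dist(t, Z).log_prob(Z[t])

Only the two conditional_* methods are exercised by the filtering loop;
everything else about the proposal is left free by that interface.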
Example #6
class FilteredStochasticVolatilityModelFreeProposal(FilteredStateSpaceModel):
    """ A simple stochastic volatility model for estimating with FIVO.

    .. math::
        x_t = exp(a)exp(z_t/2) ε_t       ε_t ~ Ν(0,1)
        z_t = b + c * z_{t-1} + ν_t    ν_t ~ Ν(0,1)

    The proposal density is also an AR(1):

    .. math::
        z_t = d + e * z_{t-1} + f η_t    η_t ~ Ν(0,1)
    """

    name = "Particle filtered stochastic volatility model"
    a = global_param(prior=LogNormalPrior(0, 1), transform="log", rename="α")
    b = global_param(prior=NormalPrior(0, 1))
    c = global_param(prior=BetaPrior(1, 1), transform="logit", rename="ψ")
    d = global_param(prior=NormalPrior(0, 1))
    e = global_param(prior=BetaPrior(1, 1), transform="logit", rename="ρ")
    f = global_param(prior=LogNormalPrior(0, 1), transform="log", rename="ι")

    def simulate(self, a, b, c):
        """Simulate from p(x, z | θ)"""
        a, b, c = map(torch.tensor, (a, b, c))
        z = torch.empty((self.input_length,))
        z[0] = Normal(b, (1 - c ** 2) ** (-.5)).sample()
        for t in range(1, self.input_length):
            z[t] = b + c * z[t - 1] + Normal(0, 1).sample()
        x = Normal(0, torch.exp(a) * torch.exp(z / 2)).sample()
        return x.type(self.dtype).to(self.device), z.type(self.dtype).to(self.device)

    def conditional_log_prob(self, t, y, z, ζ):
        """Compute log p(x_t, z_t | y_{0:t-1}, z_{0:t-1}, ζ).

        Args:
            t: time index (zero-based)
            y: y_{0:t} vector of points observed up to this point (which may
               actually be longer, but should only be indexed up to t)
            z: z_{0:t} vector of unobserved variables to condition on (ditto,
               array may be longer)
            ζ: parameter to condition on; should be unpacked with self.unpack
        """
        a, b, c, _, _, _ = self.unpack_natural(ζ)
        if t == 0:
            log_pzt = Normal(b, (1 - c ** 2) ** (-.5)).log_prob(z[t])
        else:
            log_pzt = Normal(b + c * z[t - 1], 1).log_prob(z[t])
        log_pxt = Normal(0, torch.exp(a) * torch.exp(z[t] / 2)).log_prob(y[t])
        return log_pzt + log_pxt

    def sample_observed(self, ζ, y, fc_steps=0):
        a, _, _, _, _, _ = self.unpack_natural(ζ)
        z = self.sample_unobserved(ζ, y, fc_steps)
        return Normal(0, torch.exp(a) * torch.exp(z / 2)).sample()

    def sample_unobserved(self, ζ, y, fc_steps=0):
        assert y is not None
        a, b, c, _, _, _ = self.unpack_natural(ζ)
        # get a sample of states by filtering wrt y
        z = torch.empty((len(y) + fc_steps,))
        self.simulate_log_phatN(y=y, ζ=ζ, sample=z)
        # now project states forward fc_steps
        if fc_steps > 0:
            for t in range(self.input_length, self.input_length + fc_steps):
                z[t] = b + c * z[t - 1] + Normal(0, 1).sample()
        return Normal(0, torch.exp(a) * torch.exp(z / 2)).sample()

    def proposal_for(self, y: torch.Tensor, ζ: torch.Tensor) -> PFProposal:
        _, _, _, d, e, f = self.unpack_natural(ζ)
        return AR1Proposal(μ=d, ρ=e, σ=f)

    def __repr__(self):
        return (
            f"Stochastic volatility model with parameters {{a, b, c}}:\n"
            f"\tx_t = exp(a * z_t/2) ε_t        t=1,…,{self.input_length}\n"
            f"\tz_t = b + c * z_{{t-1}} + ν_t,    t=2,…,{self.input_length}\n"
            f"\tz_1 = b + 1/√(1 - c^2) ν_1\n"
            f"\twhere ε_t, ν_t ~ Ν(0,1)\n\n"
            f"Filter with {self.num_particles} particles; AR(1) proposal params {{d, e, f}}:\n"
            f"\tz_t = d + e * z_{{t-1}} + f η_t,  t=2,…,{self.input_length}\n"
            f"\tz_1 = d + f/√(1 - e^2) η_1\n"
            f"\twhere η_t ~ Ν(0,1)\n"
        )
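
A hedged usage sketch: this class's own __init__ is not shown, so the
constructor arguments below assume the (input_length, num_particles, resample)
signature that the other FilteredStateSpaceModel subclasses in this section
pass to super().__init__.

model = FilteredStochasticVolatilityModelFreeProposal(
    input_length=200, num_particles=50, resample=True)
x, z = model.simulate(a=0.5, b=1., c=0.95)  # data and true volatility path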
Example #7
class FilteredStochasticVolatilityModelFixedParams(FilteredStateSpaceModel):
    """ A simple stochastic volatility model for estimating with FIVO.

    .. math::
        x_t = exp(a)exp(z_t/2) ε_t       ε_t ~ Ν(0,1)
        z_t = b + c * z_{t-1} + ν_t    ν_t ~ Ν(0,1)
    """

    name = "Particle filtered stochastic volatility model"
    d = global_param(prior=NormalPrior(0, 1))
    e = global_param(prior=BetaPrior(1, 1), transform="logit", rename="ρ")
    f = global_param(prior=LogNormalPrior(0, 1), transform="log")

    def __init__(
        self,
        input_length,
        num_particles,
        resample,
        a=0.5,
        b=1.,
        c=0.95,
        dtype=None,
        device=None,
    ):
        super().__init__(
            input_length=input_length,
            num_particles=num_particles,
            resample=resample,
            dtype=dtype,
            device=device,
        )
        self.a, self.b, self.c = (
            torch.tensor(x, dtype=self.dtype, device=self.device) for x in (a, b, c)
        )

    def simulate(self):
        """Simulate from p(x, z | θ)"""
        z_true = torch.empty((self.input_length,), dtype=self.dtype, device=self.device)
        z_true[0] = Normal(self.b, (1 - self.c ** 2) ** (-.5)).sample()
        for t in range(1, self.input_length):
            z_true[t] = (
                self.b
                + self.c * z_true[t - 1]
                + torch.randn(1, dtype=self.dtype, device=self.device)
            )
        x = Normal(0, torch.exp(self.a) * torch.exp(z_true / 2)).sample()
        return (
            x.type(self.dtype).to(self.device),
            z_true.type(self.dtype).to(self.device),
        )

    def conditional_log_prob(self, t, y, z, ζ):
        """Compute log p(x_t, z_t | y_{0:t-1}, z_{0:t-1}, ζ).

        Args:
            t: time index (zero-based)
            y: y_{0:t} vector of points observed up to this point (which may
               actually be longer, but should only be indexed up to t)
            z: z_{0:t} vector of unobserved variables to condition on (ditto,
               array may be longer)
            ζ: parameter to condition on; should be unpacked with self.unpack
        """
        if t == 0:
            log_pzt = Normal(self.b, (1 - self.c ** 2) ** (-.5)).log_prob(z[t])
        else:
            log_pzt = Normal(self.b + self.c * z[t - 1], 1).log_prob(z[t])
        log_pxt = Normal(0, torch.exp(self.a) * torch.exp(z[t] / 2)).log_prob(y[t])
        return log_pzt + log_pxt

    def sample_observed(self, ζ, y, fc_steps=0):
        z = self.sample_unobserved(ζ, y, fc_steps)
        return Normal(0, torch.exp(self.a) * torch.exp(z / 2)).sample()

    def sample_unobserved(self, ζ, y, fc_steps=0):
        assert y is not None
        # get a sample of states by filtering wrt y
        z = torch.empty((len(y) + fc_steps,))
        self.simulate_log_phatN(y=y, ζ=ζ, sample=z)
        # now project states forward fc_steps
        if fc_steps > 0:
            for t in range(self.input_length, self.input_length + fc_steps):
                z[t] = self.b + self.c * z[t - 1] + Normal(0, 1).sample()
        return Normal(0, torch.exp(self.a) * torch.exp(z / 2)).sample()

    def proposal_for(self, y: torch.Tensor, ζ: torch.Tensor) -> PFProposal:
        d, e, f = self.unpack_natural(ζ)
        # note: the proposal persistence is pinned at 0.95; e is unpacked but unused
        return AR1Proposal(μ=d, ρ=torch.tensor(.95, dtype=self.dtype), σ=f)

    def __repr__(self):
        return (
            f"Stochastic volatility model:\n"
            f"\tx_t = exp(a * z_t/2) ε_t        t=1, …, {self.input_length}\n"
            f"\tz_t = b + c * z_{{t-1}} + ν_t,    t=2, …, {self.input_length}\n"
            f"\tz_1 = b + 1/√(1 - c^2) ν_1\n"
            f"\twhere ε_t, ν_t ~ Ν(0,1)\n\n"
            f"Particle filter with {self.num_particles} particles, AR(1) proposal:\n"
            f"\tz_t = d + e * z_{{t-1}} + f η_t,  t=2, …, {self.input_length}\n"
            f"\tz_1 = d + f/√(1 - e^2) η_1\n"
            f"\twhere η_t ~ Ν(0,1)\n"
        )
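
Because {a, b, c} are fixed at construction, only the proposal parameters
{d, e, f} remain to be estimated. A short usage sketch of the constructor and
simulator defined above:

model = FilteredStochasticVolatilityModelFixedParams(
    input_length=100, num_particles=50, resample=True, a=0.5, b=1., c=0.95)
x, z_true = model.simulate()  # data and the latent volatility path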
Example #8
class FilteredLocalLevelModel(Model):
    """Local level (linear gaussian) model that exploits the Kalman filter."""

    name = "Filtered local level model"
    z0 = global_param(prior=NormalPrior(0, 10))
    σz0 = global_param(prior=LogNormalPrior(1, 1),
                       transform="log",
                       rename="ςz0")
    γ = global_param(prior=NormalPrior(1, 3))
    η = global_param(prior=LogNormalPrior(0, 1), transform="log", rename="ψ")
    σ = global_param(prior=InvGammaPrior(1, 5), transform="log", rename="ς")
    ρ = global_param(prior=BetaPrior(1, 1), transform="logit", rename="φ")

    def ln_joint(self, y, ζ):
        """Computes the log likelihood plus the log prior at ζ."""
        z0, (σz0, ςz0), γ, (η, ψ), (σ, ς), (ρ, φ) = self.unpack(ζ)

        # unroll first iteration of loop to set initial conditions

        # prediction step
        z_pred = ρ * z0
        Σz_pred = ρ**2 * σz0**2 + 1
        y_pred = γ + η * z_pred
        Σy_pred = η**2 * Σz_pred + σ**2

        # correction step
        gain = Σz_pred * η / Σy_pred
        z_upd = z_pred + gain * (y[0] - y_pred)
        Σz_upd = Σz_pred - gain**2 * Σy_pred

        llik = Normal(y_pred, torch.sqrt(Σy_pred)).log_prob(y[0])

        for t in range(2, y.shape[0] + 1):
            i = t - 1
            # prediction step
            z_pred = ρ * z_upd
            Σz_pred = ρ**2 * Σz_upd + 1
            y_pred = γ + η * z_pred
            Σy_pred = η**2 * Σz_pred + σ**2

            # correction step
            gain = Σz_pred * η / Σy_pred
            z_upd = z_pred + gain * (y[i] - y_pred)
            Σz_upd = Σz_pred - gain**2 * Σy_pred

            llik += Normal(y_pred, torch.sqrt(Σy_pred)).log_prob(y[i])

        return llik + self.ln_prior(ζ)

    def simulate(
        self,
        γ: float,
        η: float,
        σ: float,
        ρ: float,
        z0: float = None,
        σz0: float = None,
    ):
        if z0 is None:
            z0 = 0
        if σz0 is None:
            σz0 = 1. / (1 - ρ**2)**0.5
        z = torch.empty([self.input_length])
        z[0] = Normal(z0, σz0).sample()
        for i in range(1, self.input_length):
            z[i] = ρ * z[i - 1] + Normal(0, 1).sample()
        y = Normal(γ + η * z, σ).sample()
        return y, z

    def kalman_smoother(self, y, ζ):
        z0, (σz0, ςz0), γ, (η, ψ), (σ, ς), (ρ, φ) = self.unpack(ζ)

        z_pred = torch.zeros((self.input_length, ))  # z_{t|t-1}
        z_upd = torch.zeros((self.input_length, ))  # z_{t|t}
        Σz_pred = torch.zeros((self.input_length, ))  # Σ_{z_{t|t-1}}
        Σz_upd = torch.zeros((self.input_length, ))  # Σ_{z_{t|t}}
        y_pred = torch.zeros((self.input_length, ))  # y_{t|t-1}
        Σy_pred = torch.zeros((self.input_length, ))  # Σ_{y_{t|t-1}}
        z_smooth = torch.zeros((self.input_length, ))  # z_{t|T}
        Σz_smooth = torch.zeros((self.input_length, ))  # Σ_{z_{t|T}}

        # prediction step
        z_pred[0] = ρ * z0
        Σz_pred[0] = ρ**2 * σz0**2 + 1
        y_pred[0] = γ + η * z_pred[0]
        Σy_pred[0] = η**2 * Σz_pred[0] + σ**2

        # correction step
        gain = Σz_pred[0] * η / Σy_pred[0]
        z_upd[0] = z_pred[0] + gain * (y[0] - y_pred[0])
        Σz_upd[0] = Σz_pred[0] - gain**2 * Σy_pred[0]

        for i in range(1, y.shape[0]):
            # prediction step
            z_pred[i] = ρ * z_upd[i - 1]
            Σz_pred[i] = ρ**2 * Σz_upd[i - 1] + 1
            y_pred[i] = γ + η * z_pred[i]
            Σy_pred[i] = η**2 * Σz_pred[i] + σ**2
            # correction step
            gain = Σz_pred[i] * η / Σy_pred[i]
            z_upd[i] = z_pred[i] + gain * (y[i] - y_pred[i])
            Σz_upd[i] = Σz_pred[i] - gain**2 * Σy_pred[i]

        # smoothing step
        z_smooth[self.input_length - 1] = z_upd[self.input_length - 1]
        Σz_smooth[self.input_length - 1] = Σz_upd[self.input_length - 1]
        for i in range(self.input_length - 2, -1, -1):
            # RTS smoother gain: J_i = Σ_{z_{i|i}} ρ / Σ_{z_{i+1|i}}
            smooth = Σz_upd[i] * ρ / Σz_pred[i + 1]
            z_smooth[i] = z_upd[i] + smooth * (z_smooth[i + 1] - z_pred[i + 1])
            Σz_smooth[i] = Σz_upd[i] - smooth**2 * (Σz_pred[i + 1] -
                                                    Σz_smooth[i + 1])

        return {
            "z_upd": z_upd,
            "Σz_upd": Σz_upd,
            "z_smooth": z_smooth,
            "Σz_smooth": Σz_smooth,
            "y_pred": y_pred,
            "Σy_pred": Σy_pred,
        }

    def filtered_path(self, y, params):
        """Filter path, return final obs."""
        z0, σz0, γ, η, σ, ρ = params
        z = torch.zeros_like(y)
        # unroll first iteration of loop to set initial conditions
        # prediction step
        z_pred = ρ * z0
        Σz_pred = ρ**2 * σz0**2 + 1
        y_pred = γ + η * z_pred
        Σy_pred = η**2 * Σz_pred + σ**2

        # correction step
        gain = Σz_pred * η / Σy_pred
        z_upd = z_pred + gain * (y[0] - y_pred)
        Σz_upd = Σz_pred - gain**2 * Σy_pred
        z[0] = z_upd  # filtered state z_{0|0}; the loop fills z_{i|i} for i ≥ 1

        for t in range(2, y.shape[0] + 1):
            i = t - 1
            # prediction step
            z_pred = ρ * z_upd
            Σz_pred = ρ**2 * Σz_upd + 1
            y_pred = γ + η * z_pred
            Σy_pred = η**2 * Σz_pred + σ**2

            # correction step
            gain = Σz_pred * η / Σy_pred
            z_upd = z_pred + gain * (y[i] - y_pred)
            Σz_upd = Σz_pred - gain**2 * Σy_pred
            z[t - 1] = z_upd

        return z

    def forecast_paths(self, y, post, nsteps=10, ndraws=10_000):
        # loop over posterior draws
        # conditional on draw, obtain x_T draw
        # project x_T+1, x_T+2, ..., x_T+h
        N = len(y)
        z_ext = torch.zeros([N + nsteps])
        y_ext = torch.zeros([N + nsteps])
        y_ext[:N] = y
        fc_paths = torch.zeros([nsteps, ndraws])
        is_mcmc_posterior = getattr(post, 'flatnames', None)
        if is_mcmc_posterior:
            mcmc_draws = post.extract()
            flatnames = ['z0', 'sigma_z0', 'gamma', 'eta', 'sigma', 'rho']
            param_draws = torch.stack(
                [torch.tensor(mcmc_draws[n]) for n in flatnames])
        for i in range(ndraws):
            if is_mcmc_posterior:
                # posterior is matrix of samples
                params = param_draws[:, i % param_draws.shape[1]]
            else:
                params = post.q.sample()
            z_ext[:N] = self.filtered_path(y, params)
            # allow latent states z to evolve
            z0, σz0, γ, η, σ, ρ = params
            for t in range(N, N + nsteps):
                # draw z[t] | z[t-1]
                z_ext[t] = z_ext[t - 1] * ρ + torch.normal(torch.zeros(1))
            # sample conditionally independent ys
            y_ext[N:] = torch.normal(γ + η * z_ext[N:], σ * torch.ones(nsteps))
            fc_paths[:, i] = y_ext[N:]
        return fc_paths
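
The recursion unrolled in ln_joint is the standard scalar Kalman filter. Below
is a self-contained restatement with plain torch and hypothetical parameter
values, accumulating the same one-step-ahead predictive log-likelihood; it may
help when checking the prediction/correction algebra above.

import torch
from torch.distributions import Normal

torch.manual_seed(0)
γ, η, σ, ρ = 1.0, 0.5, 0.8, 0.9
z0, σz0 = 0.0, (1 - ρ ** 2) ** -0.5

# simulate local-level data
T = 200
z = torch.empty(T)
z[0] = z0 + σz0 * torch.randn(())
for t in range(1, T):
    z[t] = ρ * z[t - 1] + torch.randn(())
y = Normal(γ + η * z, σ).sample()

# scalar Kalman filter: predict, correct, accumulate predictive log-lik
z_upd, Σz_upd, llik = z0, σz0 ** 2, 0.
for t in range(T):
    z_pred = ρ * z_upd                     # prediction step
    Σz_pred = ρ ** 2 * Σz_upd + 1
    y_pred = γ + η * z_pred
    Σy_pred = η ** 2 * Σz_pred + σ ** 2
    gain = Σz_pred * η / Σy_pred           # correction step
    z_upd = z_pred + gain * (y[t] - y_pred)
    Σz_upd = Σz_pred - gain ** 2 * Σy_pred
    llik = llik + Normal(y_pred, Σy_pred ** 0.5).log_prob(y[t])
print(float(llik))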
Example #9
class SVModel(FilteredStateSpaceModel):
    """ A simple stochastic volatility model for estimating with FIVO.

    .. math::
        x_t = exp(a)exp(z_t/2) ε_t       ε_t ~ Ν(0,1)
        z_t = b + c * z_{t-1} + ν_t    ν_t ~ Ν(0,1)

    The proposal density is also an AR(1):

    .. math::
        z_t = d + e * z_{t-1} + f η_t    η_t ~ Ν(0,1)
    """

    class SVModelResult(FilteredStateSpaceModel.FIVOResult):
        def forecast(self, steps=1, n=100):
            """Produce forecasts of the specified number of steps ahead.

            Procedure: sample ζ, filter to get p(z_T | y, ζ), project the state
            chain forward, then compute y.
            """
            z_T_draws = torch.zeros(n)
            z_proj_draws = torch.zeros((n, steps))
            y_proj_draws = torch.zeros((n, steps))
            sample = torch.zeros((1, n))
            self.model.num_particles = n  # dodgy hack to simulate more particles
            phatns = torch.zeros((n,))
            for i in range(n):
                ζ = self.q.sample()
                # ζ is in transformed space; map back to natural parameters
                a, b, c, _, _, _ = self.model.unpack_natural(ζ)
                phatns[i] = self.model.simulate_log_phatN(self.y, ζ, sample)
                # just take a single particle's z_T
                z_T_draws[i] = sample[0, random.randint(0, n - 1)]
                z_proj_draws[i, 0] = b + c * z_T_draws[i] + torch.randn(1)
                for j in range(1, steps):
                    z_proj_draws[i, j] = b + c * z_proj_draws[i, j - 1] + torch.randn(1)
                y_proj_draws[i, :] = Normal(
                    0, torch.exp(a) * torch.exp(z_proj_draws[i, :] / 2)
                ).sample()

            kde = stats.gaussian_kde(y_proj_draws[:, -1].cpu().numpy())
            return kde, y_proj_draws.cpu().numpy()

    name = "Particle filtered stochastic volatility model"
    a = global_param(prior=LogNormalPrior(0, 1), transform="log", rename="α")
    b = global_param(prior=NormalPrior(0, 1))
    c = global_param(prior=ModifiedBetaPrior(0.5, 1.5), transform="logit", rename="ψ")
    d = global_param(prior=NormalPrior(0, 1))
    e = global_param(prior=BetaPrior(1, 1), transform="logit", rename="ρ")
    f = global_param(prior=LogNormalPrior(0, 1), transform="log", rename="ι")

    result_type = SVModelResult

    def simulate(self, a, b, c):
        """Simulate from p(x, z | θ)"""
        a, b, c = map(torch.tensor, (a, b, c))
        z = torch.empty((self.input_length,))
        z[0] = Normal(b, (1 - c ** 2) ** (-.5)).sample()
        for t in range(1, self.input_length):
            z[t] = b + c * z[t - 1] + Normal(0, 1).sample()
        x = Normal(0, torch.exp(a) * torch.exp(z / 2)).sample()
        return x.type(self.dtype).to(self.device), z.type(self.dtype).to(self.device)

    def conditional_log_prob(self, t, y, z, ζ):
        """Compute log p(x_t, z_t | y_{0:t-1}, z_{0:t-1}, ζ).

        Args:
            t: time index (zero-based)
            y: y_{0:t} vector of points observed up to this point (which may
               actually be longer, but should only be indexed up to t)
            z: z_{0:t} vector of unobserved variables to condition on (ditto,
               array may be longer)
            ζ: parameter to condition on; should be unpacked with self.unpack
        """
        a, b, c, _, _, _ = self.unpack_natural(ζ)
        if t == 0:
            log_pzt = Normal(b, (1 - c ** 2) ** (-.5)).log_prob(z[t])
        else:
            log_pzt = Normal(b + c * z[t - 1], 1).log_prob(z[t])
        log_pxt = Normal(0, torch.exp(a) * torch.exp(z[t] / 2)).log_prob(y[t])
        return log_pzt + log_pxt

    def sample_observed(self, ζ, y, fc_steps=0):
        a, _, _, _, _, _ = self.unpack_natural(ζ)
        z = self.sample_unobserved(ζ, y, fc_steps)
        return Normal(0, torch.exp(a) * torch.exp(z / 2)).sample()

    def sample_unobserved(self, ζ, y, fc_steps=0):
        assert y is not None
        a, b, c, _, _, _ = self.unpack_natural(ζ)
        # get a sample of states by filtering wrt y
        z = torch.empty((len(y) + fc_steps,))
        self.simulate_log_phatN(y=y, ζ=ζ, sample=z)
        # now project states forward fc_steps
        if fc_steps > 0:
            for t in range(self.input_length, self.input_length + fc_steps):
                z[t] = b + c * z[t - 1] + Normal(0, 1).sample()
        return Normal(0, torch.exp(a) * torch.exp(z / 2)).sample()

    def proposal_for(self, y: torch.Tensor, ζ: torch.Tensor) -> PFProposal:
        _, _, _, d, e, f = self.unpack_natural(ζ)
        return AR1Proposal(μ=d, ρ=e, σ=f)

    def __repr__(self):
        return (
            f"Stochastic volatility model with parameters {{a, b, c}}:\n"
            f"\ty_t = exp(a * z_t/2) ε_t        t=1,…,{self.input_length}\n"
            f"\tz_t = b + c * z_{{t-1}} + ν_t,    t=2,…,{self.input_length}\n"
            f"\tz_1 = b + 1/√(1 - c^2) ν_1\n"
            f"\twhere ε_t, ν_t ~ Ν(0,1)\n\n"
            f"Filter with {self.num_particles} particles; AR(1) proposal params {{d, e, f}}:\n"
            f"\tz_t = d + e * z_{{t-1}} + f η_t,  t=2,…,{self.input_length}\n"
            f"\tz_1 = d + f/√(1 - e^2) η_1\n"
            f"\twhere η_t ~ Ν(0,1)\n"
        )
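
The last step of forecast summarizes the simulated forecast paths with a kernel
density estimate. A minimal, self-contained sketch of that step, using
scipy.stats.gaussian_kde on stand-in draws (hypothetical values, independent of
the class):

import torch
from scipy import stats

torch.manual_seed(0)
# stand-in for y_proj_draws[:, -1]: final-step forecast draws
draws = 0.3 * torch.randn(500)
kde = stats.gaussian_kde(draws.numpy())
# evaluate the estimated forecast density on a grid of y values
grid = torch.linspace(-1., 1., 5).numpy()
print(kde(grid))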