Example #1
0
    def backward_sample(self, steps=1):
        r"""
        Draw a state sequence via backward sampling (the BS step of FFBS),
        conditioning each state on the draw at the next time step:

        .. math:: p(\theta_{t} | \theta_{t + k}, D_t)

        Parameters
        ----------
        steps : int, default 1
            Backward-sampling step size; only 1 is supported.

        Returns
        -------
        ndarray
            Sampled states, shape (nobs + 1, ndim), squeezed.

        Raises
        ------
        NotImplementedError
            If ``steps != 1``.  (NotImplementedError subclasses Exception,
            so callers that caught the old bare Exception still work.)
        """
        from statlib.distributions import rmvnorm

        if steps != 1:
            raise NotImplementedError('only do one step backward sampling '
                                      'for now...')

        T = self.nobs
        mu_draws = np.zeros((T + 1, self.ndim))

        m = self.mu_mode          # filtered modes m_t
        C = self.mu_scale         # filtered scales C_t
        a = self.mu_forc_mode     # forecast modes a_t
        R = self.mu_forc_scale    # forecast scales R_t

        # initialize from the filtered distribution at time T
        mu_draws[T] = rmvnorm(m[T], C[T])

        # walk backwards, sampling theta_t | theta_{t+1}, D_t
        for t in xrange(T - 1, -1, -1):
            # adaptive matrix B_t = C_t G' R_{t+1}^{-1}
            B = chain_dot(C[t], self.G.T, LA.inv(R[t + 1]))

            # conditional mean and scale given the draw at t + 1
            ht = m[t] + np.dot(B, mu_draws[t + 1] - a[t + 1])
            Ht = C[t] - chain_dot(B, R[t + 1], B.T)

            mu_draws[t] = rmvnorm(ht, np.atleast_2d(Ht))

        return mu_draws.squeeze()
Example #2
0
    def backward_sample(self, steps=1):
        r"""
        Generate a draw of the state sequence by backward sampling
        (the BS half of forward-filter backward-sample), using

        .. math:: p(\theta_{t} | \theta_{t + k}, D_t)

        Parameters
        ----------
        steps : int, default 1
            Step size for backward sampling; only 1 is implemented.

        Returns
        -------
        ndarray
            Sampled state sequence of shape (nobs + 1, ndim), squeezed.

        Raises
        ------
        NotImplementedError
            If ``steps != 1``.  (Subclasses Exception, so existing callers
            catching Exception are unaffected.)
        """
        from statlib.distributions import rmvnorm

        if steps != 1:
            raise NotImplementedError('only do one step backward sampling '
                                      'for now...')

        T = self.nobs
        mu_draws = np.zeros((T + 1, self.ndim))

        m = self.mu_mode          # filtered modes m_t
        C = self.mu_scale         # filtered scales C_t
        a = self.mu_forc_mode     # forecast modes a_t
        R = self.mu_forc_scale    # forecast scales R_t

        # start the recursion from the filtered distribution at time T
        mu_draws[T] = rmvnorm(m[T], C[T])

        # sample backwards: theta_t | theta_{t+1}, D_t
        for t in xrange(T - 1, -1, -1):
            # B_t = C_t G' R_{t+1}^{-1}
            B = chain_dot(C[t], self.G.T, LA.inv(R[t + 1]))

            # conditional mean / scale given the time t + 1 draw
            ht = m[t] + np.dot(B, mu_draws[t + 1] - a[t + 1])
            Ht = C[t] - chain_dot(B, R[t + 1], B.T)

            mu_draws[t] = rmvnorm(ht, np.atleast_2d(Ht))

        return mu_draws.squeeze()
Example #3
0
    def _update_mu(v, w, phi, lam):
        # FFBS-style draw of the state mean sequence mu for a univariate
        # AR(1)-evolution DLM.
        #   v   : observation variance
        #   w   : evolution variance
        #   phi : AR / evolution coefficient
        #   lam : per-observation variance scale-mixture weights
        # NOTE: T, p, y, m_, chain_dot, la, dist are free variables from the
        # enclosing scope.

        # allocate result arrays

        mode = np.zeros((T + 1, p))
        a = np.zeros((T + 1, p))
        C = np.zeros((T + 1, p))
        R = np.zeros((T + 1, p))

        # simple priors...
        mode[0] = 0
        C[0] = np.eye(p)

        # Forward filter

        Ft = m_([[1]])
        for i, obs in enumerate(y):
            t = i + 1

            # evolution step (prior used unchanged at t = 1)
            at = phi * mode[t - 1] if t > 1 else mode[0]
            Rt = phi**2 * C[t - 1] + w if t > 1 else C[0]

            # scale-mixture-adjusted observation variance
            Vt = lam[t - 1] * v

            # one-step forecast variance and adaptive vector
            Qt = chain_dot(Ft.T, Rt, Ft) + Vt
            At = np.dot(Rt, Ft) / Qt

            # forecast theta as time t
            ft = np.dot(Ft.T, at)
            err = obs - ft

            # update mean parameters
            mode[t] = at + np.dot(At, err)
            C[t] = Rt - np.dot(At, np.dot(Qt, At.T))
            a[t] = at
            R[t] = Rt

        # Backward sample
        mu = np.zeros((T + 1, p))

        # initial values for smoothed dist'n
        fR = C[-1]
        fm = mode[-1]
        # NOTE(review): this loop runs *forward* and conditions on the
        # filtered mode[t + 1] rather than the previous draw mu[t + 1], so
        # it samples from smoothed marginals instead of the joint FFBS
        # distribution -- confirm this is intended.
        for t in xrange(T + 1):
            if t < T:
                # B_{t} = C_t G_t+1' R_t+1^-1
                B = np.dot(C[t] * phi, la.inv(np.atleast_2d(R[t + 1])))

                # smoothed mean
                fm = mode[t] + np.dot(B, mode[t + 1] - a[t + 1])
                fR = C[t] + chain_dot(B, C[t + 1] - R[t + 1], B.T)

            mu[t] = dist.rmvnorm(fm, np.atleast_2d(fR))

        return mu.squeeze()
Example #4
0
    def _update_mu(v, w, phi, lam):
        # FFBS-style draw of the state mean sequence mu for a univariate
        # AR(1)-evolution DLM.
        #   v   : observation variance
        #   w   : evolution variance
        #   phi : AR / evolution coefficient
        #   lam : per-observation variance scale-mixture weights
        # NOTE: T, p, y, m_, chain_dot, la, dist are free variables from the
        # enclosing scope.

        # allocate result arrays

        mode = np.zeros((T + 1, p))
        a = np.zeros((T + 1, p))
        C = np.zeros((T + 1, p))
        R = np.zeros((T + 1, p))

        # simple priors...
        mode[0] = 0
        C[0] = np.eye(p)

        # Forward filter

        Ft = m_([[1]])
        for i, obs in enumerate(y):
            t = i + 1

            # evolution step (prior used unchanged at t = 1)
            at = phi * mode[t - 1] if t > 1 else mode[0]
            Rt = phi ** 2 * C[t - 1] + w if t > 1 else C[0]

            # scale-mixture-adjusted observation variance
            Vt = lam[t - 1] * v

            # one-step forecast variance and adaptive vector
            Qt = chain_dot(Ft.T, Rt, Ft) + Vt
            At = np.dot(Rt, Ft) / Qt

            # forecast theta as time t
            ft = np.dot(Ft.T, at)
            err = obs - ft

            # update mean parameters
            mode[t] = at + np.dot(At, err)
            C[t] = Rt - np.dot(At, np.dot(Qt, At.T))
            a[t] = at
            R[t] = Rt

        # Backward sample
        mu = np.zeros((T + 1, p))

        # initial values for smoothed dist'n
        fR = C[-1]
        fm = mode[-1]
        # NOTE(review): this loop runs *forward* and conditions on the
        # filtered mode[t + 1] rather than the previous draw mu[t + 1], so
        # it samples from smoothed marginals instead of the joint FFBS
        # distribution -- confirm this is intended.
        for t in xrange(T + 1):
            if t < T:
                # B_{t} = C_t G_t+1' R_t+1^-1
                B = np.dot(C[t] * phi, la.inv(np.atleast_2d(R[t+1])))

                # smoothed mean
                fm = mode[t] + np.dot(B, mode[t+1] - a[t+1])
                fR = C[t] + chain_dot(B, C[t+1] - R[t+1], B.T)

            mu[t] = dist.rmvnorm(fm, np.atleast_2d(fR))

        return mu.squeeze()
Example #5
0
    def backward_smooth(self):
        r"""
        Compute posterior estimates of the state vector given the full data
        set, i.e. :math:`p(\theta_t | D_T)`.

        cf. W&H sections
            4.7 / 4.8: regular smoothing recurrences (Theorem 4.4)
            10.8.4 adjustments for variance discounting

        Notes
        -----
        .. math::

            \theta_{t-k} | D_t \sim T_{n_t}[a_t(-k), (S_t / S_{t-k}) R_t(-k)]

        Returns
        -------
        (filtered state mode,
         filtered state cov,
         filtered degrees of freedom)
        """
        beta = self.var_discount

        T = self.nobs
        a = self.mu_forc_mode    # forecast (prior) state modes a_t
        R = self.mu_forc_scale   # forecast (prior) state scales R_t

        # start from filtered quantities; entries are overwritten in place
        # while walking backwards
        fdf = self.df.copy()
        fS = self.var_est.copy()
        fm = self.mu_mode.copy()
        fC = self.mu_scale.copy()

        # C carries the *unscaled* smoothed scale recursion; fC[t] stores
        # the variance-discount-rescaled version, so the two must be kept
        # separate.
        C = fC[T]
        for t in xrange(T - 1, -1, -1):
            # adaptive matrix B_t = C_t G' R_{t+1}^{-1}
            B = chain_dot(fC[t], self.G.T, LA.inv(R[t+1]))

            # degrees-of-freedom / variance-estimate smoothing, W&H p. 364
            fdf[t] = (1 - beta) * fdf[t] + beta * fdf[t + 1]
            fS[t] = 1 / ((1 - beta) / fS[t] + beta / fS[t+1])

            # state smoothing recurrences, W&H p. 113 (Theorem 4.4)
            fm[t] = fm[t] + np.dot(B, fm[t+1] - a[t+1])
            C = fC[t] + chain_dot(B, C - R[t+1], B.T)
            # rescale by S_T / S_t per the T-distribution form in Notes
            fC[t] = C * fS[T] / fS[t]

        return fm, fC, fdf
Example #6
0
    def backward_smooth(self):
        r"""
        Compute posterior estimates of the state vector given the full data
        set, i.e. :math:`p(\theta_t | D_T)`.

        cf. W&H sections
            4.7 / 4.8: regular smoothing recurrences (Theorem 4.4)
            10.8.4 adjustments for variance discounting

        Notes
        -----
        .. math::

            \theta_{t-k} | D_t \sim T_{n_t}[a_t(-k), (S_t / S_{t-k}) R_t(-k)]

        Returns
        -------
        (filtered state mode,
         filtered state cov,
         filtered degrees of freedom)
        """
        beta = self.var_discount

        T = self.nobs
        a = self.mu_forc_mode    # forecast (prior) state modes a_t
        R = self.mu_forc_scale   # forecast (prior) state scales R_t

        # start from filtered quantities; entries are overwritten in place
        # while walking backwards
        fdf = self.df.copy()
        fS = self.var_est.copy()
        fm = self.mu_mode.copy()
        fC = self.mu_scale.copy()

        # C carries the *unscaled* smoothed scale recursion; fC[t] stores
        # the variance-discount-rescaled version, so the two must be kept
        # separate.
        C = fC[T]
        for t in xrange(T - 1, -1, -1):
            # adaptive matrix B_t = C_t G' R_{t+1}^{-1}
            B = chain_dot(fC[t], self.G.T, LA.inv(R[t + 1]))

            # degrees-of-freedom / variance-estimate smoothing, W&H p. 364
            fdf[t] = (1 - beta) * fdf[t] + beta * fdf[t + 1]
            fS[t] = 1 / ((1 - beta) / fS[t] + beta / fS[t + 1])

            # state smoothing recurrences, W&H p. 113 (Theorem 4.4)
            fm[t] = fm[t] + np.dot(B, fm[t + 1] - a[t + 1])
            C = fC[t] + chain_dot(B, C - R[t + 1], B.T)
            # rescale by S_T / S_t per the T-distribution form in Notes
            fC[t] = C * fS[T] / fS[t]

        return fm, fC, fdf
Example #7
0
        def calc_update(jt, jtp):
            # Forecast scale R_t for the (j_t, j_{t-1}) model pair; t and
            # self come from the enclosing scope.
            model = self.models[jt]
            G = model.G
            prior_scale = self.mu_scale[t - 1, jtp]
            if t <= 1:
                # use prior for t=1, because Mike does
                return prior_scale
            # only discount after first time step! hmm
            Wt = model.get_Wt(prior_scale)
            return chain_dot(G, prior_scale, G.T) + Wt
Example #8
0
        def calc_update(jt, jtp):
            # Forecast scale R_t for the (jt, jtp) model combination; t and
            # self are free variables from the enclosing scope.
            model = self.models[jt]
            prior_scale = self.mu_scale[t - 1, jtp]
            if t > 1:
                # discounting applies only after the first time step
                Wt = model.get_Wt(prior_scale)
                return chain_dot(model.G, prior_scale, model.G.T) + Wt
            # at t = 1, fall back to the prior scale, because Mike does
            return prior_scale
Example #9
0
 def get_Wt(self, Cprior):
     """Evolution variance W_t implied by the discount factors (Eq. 12.18)."""
     # inflate the prior scale diagonal by (1/delta - 1) per component
     inflation = 1 / self.deltas - 1
     disc = np.diag(Cprior) * inflation
     return chain_dot(self.G, np.diag(disc), self.G.T)
Example #10
0
 def calc_update(jt, jtp):
     # One-step forecast variance for model pair (jt, jtp): F' R F plus the
     # multiplier-adjusted prior observation variance.  Ft, Rt, t and self
     # come from the enclosing scope.
     obs_var = self.var_est[t - 1, jtp] * self.models[jt].obs_var_mult
     return chain_dot(Ft.T, Rt[jt, jtp], Ft) + obs_var
Example #11
0
def filter_python(Y, F, G, delta, beta, df0, v0, m0, C0):
    """
    Univariate DLM update equations with unknown observation variance.

    Forward-filters the observations, discounting both the state evolution
    and the learning of the observation variance.

    Parameters
    ----------
    Y : ndarray (nobs,)
        Observations y_1 ... y_nobs.
    F : ndarray (nobs, ndim)
        Regression vectors; row i is F_t' for t = i + 1.
    G : ndarray (ndim, ndim) or None
        State evolution matrix; None means identity evolution.
    delta : float
        State discount factor.
    beta : float
        Variance discount factor.
    df0, v0 : float
        Prior degrees of freedom and prior variance estimate.
    m0, C0 : ndarray
        Prior state mode and scale.

    Returns
    -------
    (mode, a, C, df, S, Q, R) : tuple of ndarrays
        Index 0 holds the prior where defined; Q is indexed from 0.
    """
    nobs = len(Y)
    # BUG FIX: len(G) raised TypeError when G is None, even though the
    # update step below explicitly supports G is None -- fall back to the
    # number of columns of F in that case.
    ndim = len(G) if G is not None else np.atleast_2d(F).shape[1]

    # allocate result arrays
    mode = nan_array(nobs + 1, ndim)
    a = nan_array(nobs + 1, ndim)
    C = nan_array(nobs + 1, ndim, ndim)
    R = nan_array(nobs + 1, ndim, ndim)

    S = nan_array(nobs + 1)
    Q = nan_array(nobs)
    df = nan_array(nobs + 1)

    mode[0] = mt = m0
    C[0] = Ct = C0
    df[0] = nt = df0
    S[0] = St = v0

    dt = df0 * v0

    # filter loop
    for i in range(nobs):
        obs = Y[i]

        # column vector, for W&H notational consistency
        Ft = F[i:i + 1].T

        # advance index: y_1 through y_nobs, 0 is prior
        t = i + 1

        # derive the innovation variance from the discount factor; only
        # discount after the first time step
        at = mt
        Rt = Ct
        if t > 1:
            if G is not None:
                at = np.dot(G, mt)
                Rt = chain_dot(G, Ct, G.T) / delta
            else:
                Rt = Ct / delta

        # one-step forecast variance and adaptive vector
        Qt = chain_dot(Ft.T, Rt, Ft) + St
        At = np.dot(Rt, Ft) / Qt

        # one-step forecast and forecast error
        ft = np.dot(Ft.T, at)
        e = obs - ft

        # update mean parameters
        mode[t] = mt = at + np.dot(At, e)
        # discounted variance learning (W&H Sec. 10.8)
        dt = beta * dt + St * e * e / Qt
        nt = beta * nt + 1
        St = dt / nt

        S[t] = St
        Ct = (S[t] / S[t - 1]) * (Rt - np.dot(At, At.T) * Qt)
        Ct = (Ct + Ct.T) / 2  # symmetrize

        df[t] = nt
        Q[t - 1] = Qt

        C[t] = Ct
        a[t] = at
        R[t] = Rt

    return mode, a, C, df, S, Q, R
Example #12
0
def _mvfilter_python(Y, F, G, V, delta, beta, df0, v0, m0, C0):
    """
    Matrix-variate DLM update equations.

    Parameters
    ----------
    Y : ndarray (nobs, k)
        Observation matrix, one k-vector per row.
    F : ndarray (nobs, p)
        Regression vectors.
    G : ndarray (p, p) or None
        State evolution matrix; None means identity evolution.
    V : scalar
        V_t observation variance multiplier.
    delta, beta : float
        State / variance discount factors.
    df0 : float
        Prior degrees of freedom.
    v0 : ndarray (k, k)
        Prior covariance estimate S_0.
    m0, C0 : ndarray
        Prior state mode and scale.

    Returns
    -------
    (mode, a, C, S, Q)
    """
    nobs, k = Y.shape
    p = F.shape[1]
    mode = nan_array(nobs + 1, p, k) # theta posterior mode
    C = nan_array(nobs + 1, p, p) # theta posterior scale
    a = nan_array(nobs + 1, p, k) # theta forecast mode
    Q = nan_array(nobs) # variance multiplier term
    D = nan_array(nobs + 1, k, k) # scale matrix
    S = nan_array(nobs + 1, k, k) # covariance estimate D / n
    df = nan_array(nobs + 1)

    mode[0] = m0
    C[0] = C0
    df[0] = df0
    S[0] = v0

    Mt = m0
    Ct = C0
    n = df0
    d = n + k - 1
    # BUG FIX: the original referenced an undefined name D0 (NameError).
    # The scale matrix consistent with S[0] = v0 and St = Dt / n -- and with
    # dt = df0 * v0 in the univariate filter -- is df0 * v0.
    Dt = df0 * v0
    St = Dt / n

    # filter loop
    for t in xrange(1, nobs + 1):
        obs = Y[t - 1:t].T
        Ft = F[t - 1:t].T

        # evolution step: derive innovation variance from the discount
        # factor
        if G is not None:
            at = np.dot(G, Mt)
            Rt = chain_dot(G, Ct, G.T) / delta
        else:
            at = Mt
            Rt = Ct / delta

        # forecast error, variance multiplier, adaptive vector
        et = obs - np.dot(at.T, Ft)
        qt = chain_dot(Ft.T, Rt, Ft) + V
        At = np.dot(Rt, Ft) / qt

        # discounted degrees-of-freedom update
        n = beta * n
        b = (n + k - 1) / d
        n = n + 1
        d = n + k - 1

        # update scale matrix / covariance estimate / mean parameters
        Dt = b * Dt + np.dot(et, et.T) / qt
        St = Dt / n
        Mt = at + np.dot(At, et.T)
        Ct = Rt - np.dot(At, At.T) * qt

        C[t] = Ct
        df[t] = n
        S[t] = (St + St.T) / 2  # symmetrize
        mode[t] = Mt
        D[t] = Dt
        a[t] = at
        Q[t - 1] = qt

    return mode, a, C, S, Q
Example #13
0
def filter_python(Y, F, G, delta, beta, df0, v0, m0, C0):
    """
    Univariate DLM update equations with unknown observation variance.

    Forward-filters the observations, discounting both the state evolution
    and the learning of the observation variance.

    Parameters
    ----------
    Y : ndarray (nobs,)
        Observations y_1 ... y_nobs.
    F : ndarray (nobs, ndim)
        Regression vectors; row i is F_t' for t = i + 1.
    G : ndarray (ndim, ndim) or None
        State evolution matrix; None means identity evolution.
    delta : float
        State discount factor.
    beta : float
        Variance discount factor.
    df0, v0 : float
        Prior degrees of freedom and prior variance estimate.
    m0, C0 : ndarray
        Prior state mode and scale.

    Returns
    -------
    (mode, a, C, df, S, Q, R) : tuple of ndarrays
        Index 0 holds the prior where defined; Q is indexed from 0.
    """
    nobs = len(Y)
    # BUG FIX: len(G) raised TypeError when G is None, even though the
    # update step below explicitly supports G is None -- fall back to the
    # number of columns of F in that case.
    ndim = len(G) if G is not None else np.atleast_2d(F).shape[1]

    # allocate result arrays
    mode = nan_array(nobs + 1, ndim)
    a = nan_array(nobs + 1, ndim)
    C = nan_array(nobs + 1, ndim, ndim)
    R = nan_array(nobs + 1, ndim, ndim)

    S = nan_array(nobs + 1)
    Q = nan_array(nobs)
    df = nan_array(nobs + 1)

    mode[0] = mt = m0
    C[0] = Ct = C0
    df[0] = nt = df0
    S[0] = St = v0

    dt = df0 * v0

    # filter loop
    for i in range(nobs):
        obs = Y[i]

        # column vector, for W&H notational consistency
        Ft = F[i:i + 1].T

        # advance index: y_1 through y_nobs, 0 is prior
        t = i + 1

        # derive the innovation variance from the discount factor; only
        # discount after the first time step
        at = mt
        Rt = Ct
        if t > 1:
            if G is not None:
                at = np.dot(G, mt)
                Rt = chain_dot(G, Ct, G.T) / delta
            else:
                Rt = Ct / delta

        # one-step forecast variance and adaptive vector
        Qt = chain_dot(Ft.T, Rt, Ft) + St
        At = np.dot(Rt, Ft) / Qt

        # one-step forecast and forecast error
        ft = np.dot(Ft.T, at)
        e = obs - ft

        # update mean parameters
        mode[t] = mt = at + np.dot(At, e)
        # discounted variance learning (W&H Sec. 10.8)
        dt = beta * dt + St * e * e / Qt
        nt = beta * nt + 1
        St = dt / nt

        S[t] = St
        Ct = (S[t] / S[t - 1]) * (Rt - np.dot(At, At.T) * Qt)
        Ct = (Ct + Ct.T) / 2  # symmetrize

        df[t] = nt
        Q[t - 1] = Qt

        C[t] = Ct
        a[t] = at
        R[t] = Rt

    return mode, a, C, df, S, Q, R
Example #14
0
def _mvfilter_python(Y, F, G, V, delta, beta, df0, v0, m0, C0):
    """
    Matrix-variate DLM update equations.

    Parameters
    ----------
    Y : ndarray (nobs, k)
        Observation matrix, one k-vector per row.
    F : ndarray (nobs, p)
        Regression vectors.
    G : ndarray (p, p) or None
        State evolution matrix; None means identity evolution.
    V : scalar
        V_t observation variance multiplier.
    delta, beta : float
        State / variance discount factors.
    df0 : float
        Prior degrees of freedom.
    v0 : ndarray (k, k)
        Prior covariance estimate S_0.
    m0, C0 : ndarray
        Prior state mode and scale.

    Returns
    -------
    (mode, a, C, S, Q)
    """
    nobs, k = Y.shape
    p = F.shape[1]
    mode = nan_array(nobs + 1, p, k)  # theta posterior mode
    C = nan_array(nobs + 1, p, p)  # theta posterior scale
    a = nan_array(nobs + 1, p, k)  # theta forecast mode
    Q = nan_array(nobs)  # variance multiplier term
    D = nan_array(nobs + 1, k, k)  # scale matrix
    S = nan_array(nobs + 1, k, k)  # covariance estimate D / n
    df = nan_array(nobs + 1)

    mode[0] = m0
    C[0] = C0
    df[0] = df0
    S[0] = v0

    Mt = m0
    Ct = C0
    n = df0
    d = n + k - 1
    # BUG FIX: the original referenced an undefined name D0 (NameError).
    # The scale matrix consistent with S[0] = v0 and St = Dt / n -- and with
    # dt = df0 * v0 in the univariate filter -- is df0 * v0.
    Dt = df0 * v0
    St = Dt / n

    # filter loop
    for t in xrange(1, nobs + 1):
        obs = Y[t - 1:t].T
        Ft = F[t - 1:t].T

        # evolution step: derive innovation variance from the discount
        # factor
        if G is not None:
            at = np.dot(G, Mt)
            Rt = chain_dot(G, Ct, G.T) / delta
        else:
            at = Mt
            Rt = Ct / delta

        # forecast error, variance multiplier, adaptive vector
        et = obs - np.dot(at.T, Ft)
        qt = chain_dot(Ft.T, Rt, Ft) + V
        At = np.dot(Rt, Ft) / qt

        # discounted degrees-of-freedom update
        n = beta * n
        b = (n + k - 1) / d
        n = n + 1
        d = n + k - 1

        # update scale matrix / covariance estimate / mean parameters
        Dt = b * Dt + np.dot(et, et.T) / qt
        St = Dt / n
        Mt = at + np.dot(At, et.T)
        Ct = Rt - np.dot(At, At.T) * qt

        C[t] = Ct
        df[t] = n
        S[t] = (St + St.T) / 2  # symmetrize
        mode[t] = Mt
        D[t] = Dt
        a[t] = at
        Q[t - 1] = qt

    return mode, a, C, S, Q
Example #15
0
 def get_Wt(self, Cprior):
     """Return the evolution variance W_t implied by the component
     discount factors (Eq. 12.18)."""
     # each diagonal element of the prior scale is inflated by 1/delta - 1
     scaled = np.diag(np.diag(Cprior) * (1 / self.deltas - 1))
     return chain_dot(self.G, scaled, self.G.T)
Example #16
0
 def calc_update(jt, jtp):
     # One-step forecast variance Q_t for the (jt, jtp) model pair.
     # Ft, Rt, t and self are free variables from the enclosing scope.
     mult = self.models[jt].obs_var_mult
     prior_obs_var = self.var_est[t - 1, jtp] * mult
     return prior_obs_var + chain_dot(Ft.T, Rt[jt, jtp], Ft)