Example 1
    def __init__(self, y, F, models, order, prior_model_prob,
                 m0=None, C0=None, n0=None, s0=None, approx_steps=1.):
        # y : observed series (with a date index); F : regression design
        # models : dict of component DLM specifications keyed by name
        # order : model names fixing the mixture ordering
        # prior_model_prob : prior probability for each model name
        # m0, C0 : prior state mean and scale
        # n0, s0 : prior degrees of freedom and observation variance estimate
        self.approx_steps = approx_steps
        self.dates = y.index
        self.y = np.array(y)
        self.nobs = len(y)

        if self.y.ndim != 1:
            raise Exception('y must be a 1-dimensional time series')

        F = np.array(F)
        if F.ndim == 1:
            F = F.reshape((len(F), 1))
        self.F = F

        self.ndim = self.F.shape[1]
        self.nmodels = len(models)

        self.names = order
        self.models = [models[name] for name in order]
        self.prior_model_prob = np.array([prior_model_prob[name]
                                          for name in order])

        # only can do one step back for now
        self.approx_steps = 1
        # self.approx_steps = int(approx_steps)

        # set up result storage for all the models
        self.marginal_prob = nan_array(self.nobs + 1, self.nmodels)
        self.post_prob = nan_array(self.nobs + 1,
                                   self.nmodels, self.nmodels)

        self.mu_mode = nan_array(self.nobs + 1, self.nmodels, self.ndim)
        self.mu_forc_mode = nan_array(self.nobs + 1, self.nmodels, self.ndim)
        self.mu_scale = nan_array(self.nobs + 1, self.nmodels,
                                  self.ndim, self.ndim)
        self.mu_forc_var = nan_array(self.nobs + 1, self.nmodels, self.nmodels,
                                     self.ndim, self.ndim)

        self.forecast = np.zeros((self.nobs + 1, self.nmodels))

        # set in initial values
        self.marginal_prob[0] = self.prior_model_prob
        self.mu_mode[0] = m0
        self.mu_scale[0] = C0

        # observation variance stuff
        self.df = n0 + np.arange(self.nobs + 1) # precompute
        self.var_est = nan_array(self.nobs + 1, self.nmodels)
        self.var_scale = nan_array(self.nobs + 1, self.nmodels)
        self.var_est[0] = s0
        self.var_scale[0] = s0 * n0

        # forecasts are computed via mixture for now
        self.forc_var = nan_array(self.nobs, self.nmodels, self.nmodels)

        self._compute_parameters()
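
All of these examples preallocate their result arrays with a nan_array helper that is not shown on this page. A minimal sketch, consistent with how it is called here (positional shape arguments, NaN-filled float result):

import numpy as np


def nan_array(*shape):
    # allocate a float array of the requested shape, filled with NaN so that
    # any slot that is never written stays visibly missing
    arr = np.empty(shape, dtype=float)
    arr.fill(np.nan)
    return arr
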
Example 2
    def _fill_updates(self, func, shape):
        # evaluate func(jt, jtp) for every (current, prior) model pair and
        # stack the results into an (nmodels, nmodels) + shape array
        total_shape = (self.nmodels, self.nmodels) + shape
        result = nan_array(*total_shape)
        for jt in range(self.nmodels):
            for jtp in range(self.nmodels):
                result[jt, jtp] = np.squeeze(func(jt, jtp))

        return result
Example 3
    def _collapse_params(self, pstar, mt, Ct):
        # moment-matching collapse of the per-(jt, jtp) modes and scales to a
        # single mode / scale per current model jt
        coll_C = nan_array(self.nmodels, self.ndim, self.ndim)
        coll_m = nan_array(self.nmodels, self.ndim)
        for jt in range(self.nmodels):
            C = np.zeros((self.ndim, self.ndim))
            m = np.zeros(self.ndim)

            # collapse modes
            for jtp in range(self.nmodels):
                m += pstar[jt, jtp] * mt[jt, jtp]

            coll_m[jt] = m

            # collapse scales
            for jtp in range(self.nmodels):
                mdev = coll_m[jt] - mt[jt, jtp]
                C += pstar[jt, jtp] * (Ct[jt, jtp] + np.outer(mdev, mdev))

            coll_C[jt] = C

        return coll_m, coll_C
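
In equation form, _collapse_params is the standard moment-matching (mixture collapsing) step: for each current model j, the mixture over the prior model index j' is replaced by a single Gaussian whose first two moments match the mixture,

\[
m_j = \sum_{j'} p^{*}_{j,j'}\, m_{j,j'}, \qquad
C_j = \sum_{j'} p^{*}_{j,j'} \left[ C_{j,j'} + (m_j - m_{j,j'})(m_j - m_{j,j'})^{\top} \right].
\]
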
Example 4
    def _collapse_var(self, St, post_prob, marginal_post):
        # collapse per-(jt, jtp) variance estimates by averaging precisions,
        # weighted by the posterior model probabilities
        result = nan_array(self.nmodels)

        # TODO: vectorize
        for jt in range(self.nmodels):
            prec = 0
            for jtp in range(self.nmodels):
                prec += post_prob[jt, jtp] / (St[jt, jtp] * marginal_post[jt])

            result[jt] = 1 / prec

        return result
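
_collapse_var does the analogous collapse for the observation variance estimates, working on the precision scale: the collapsed precision for model j is the posterior-probability-weighted average of the per-path precisions,

\[
S_j^{-1} = \sum_{j'} \frac{p(j_t = j,\ j_{t-1} = j')}{S_{j,j'}\; p(j_t = j)} ,
\]

where the joint and marginal posterior model probabilities are the post_prob and marginal_post arguments.
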
Example 5
File: dlm.py  Project: wesm/statlib
def filter_python(Y, F, G, delta, beta, df0, v0, m0, C0):
    """
    Univariate DLM update equations with unknown observation variance

    Y : observations
    F : regression design matrix, one row per observation
    G : state transition matrix
    delta : state discount
    beta : variance discount
    df0, v0 : prior degrees of freedom and observation variance estimate
    m0, C0 : prior state mean and scale
    """
    # cdef:
    #     Py_ssize_t i, t, nobs, ndim
    #     ndarray[double_t, ndim=1] df, Q, S
    #     ndarray a, C, R, mode

    #     ndarray at, mt, Ft, At, Ct, Rt
    #     double_t obs, ft, e, nt, dt, St, Qt

    nobs = len(Y)
    ndim = len(G)

    mode = nan_array(nobs + 1, ndim)
    a = nan_array(nobs + 1, ndim)
    C = nan_array(nobs + 1, ndim, ndim)
    R = nan_array(nobs + 1, ndim, ndim)

    S = nan_array(nobs + 1)
    Q = nan_array(nobs)
    df = nan_array(nobs + 1)

    mode[0] = mt = m0
    C[0] = Ct = C0
    df[0] = nt = df0
    S[0] = St = v0

    dt = df0 * v0

    # main filtering loop over the observations
    for i in range(nobs):
        obs = Y[i]

        # column vector, for W&H notational consistency
        # Ft = F[i]
        Ft = F[i:i+1].T

        # advance index: y_1 through y_nobs, 0 is prior
        t = i + 1

        # derive innovation variance from discount factor
        at = mt
        Rt = Ct
        if t > 1:
            # only discount after first time step?
            if G is not None:
                at = np.dot(G, mt)
                Rt = chain_dot(G, Ct, G.T) / delta
            else:
                Rt = Ct / delta

        Qt = chain_dot(Ft.T, Rt, Ft) + St
        At = np.dot(Rt, Ft) / Qt

        # one-step forecast of the observation at time t
        ft = np.dot(Ft.T, at)
        e = obs - ft

        # update mean parameters
        mode[t] = mt = at + np.dot(At, e)
        dt = beta * dt + St * e * e / Qt
        nt = beta * nt + 1
        St = dt / nt

        S[t] = St
        Ct = (S[t] / S[t-1]) * (Rt - np.dot(At, At.T) * Qt)
        Ct = (Ct + Ct.T) / 2 # symmetrize

        df[t] = nt
        Q[t-1] = Qt

        C[t] = Ct
        a[t] = at
        R[t] = Rt

    return mode, a, C, df, S, Q, R
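
filter_python (and the matrix-variate version below) also relies on a chain_dot helper that is not reproduced here. A minimal sketch, assuming it simply folds np.dot over its arguments from left to right:

from functools import reduce

import numpy as np


def chain_dot(*arrays):
    # chain_dot(A, B, C) == A.dot(B).dot(C)
    return reduce(np.dot, arrays)


# quick self-check
A = np.arange(4.).reshape(2, 2)
assert np.allclose(chain_dot(A, A, A), A.dot(A).dot(A))
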
Example 6
File: dlm.py  Project: wesm/statlib
def _mvfilter_python(Y, F, G, V, delta, beta, df0, v0, m0, C0):
    """
    Matrix-variate DLM update equations

    Y : (nobs x k) observation matrix
    F : regression design matrix, one row per observation
    G : state transition matrix, or None to skip the transition step
    V : V_t sequence
    delta : state discount
    beta : variance discount
    df0, v0 : prior degrees of freedom and (k x k) covariance estimate
    m0, C0 : prior state mode and scale
    """
    nobs, k = Y.shape
    p = F.shape[1]
    mode = nan_array(nobs + 1, p, k) # theta posterior mode
    C = nan_array(nobs + 1, p, p) # theta posterior scale
    a = nan_array(nobs + 1, p, k) # theta forecast mode
    Q = nan_array(nobs) # variance multiplier term
    D = nan_array(nobs + 1, k, k) # scale matrix
    S = nan_array(nobs + 1, k, k) # covariance estimate D / n
    df = nan_array(nobs + 1)

    mode[0] = m0
    C[0] = C0
    df[0] = df0
    S[0] = v0

    Mt = m0
    Ct = C0
    n = df0
    d = n + k - 1
    Dt = df0 * v0  # initial scale matrix, so that St = Dt / n starts at v0
    St = Dt / n

    # main filtering loop over the observations
    for t in range(1, nobs + 1):
        obs = Y[t - 1:t].T
        Ft = F[t - 1:t].T

        # derive innovation variance from discount factor
        # only discount after first time step?
        if G is not None:
            at = np.dot(G, Mt)
            Rt = chain_dot(G, Ct, G.T) / delta
        else:
            at = Mt
            Rt = Ct / delta

        et = obs - np.dot(at.T, Ft)
        qt = chain_dot(Ft.T, Rt, Ft) + V
        At = np.dot(Rt, Ft) / qt

        # update mean parameters
        n = beta * n
        b = (n + k - 1) / d
        n = n + 1
        d = n + k - 1

        Dt = b * Dt + np.dot(et, et.T) / qt
        St = Dt / n
        Mt = at + np.dot(At, et.T)
        Ct = Rt - np.dot(At, At.T) * qt

        C[t] = Ct
        df[t] = n
        S[t] = (St + St.T) / 2
        mode[t] = Mt
        D[t] = Dt
        a[t] = at
        Q[t - 1] = qt

    return mode, a, C, S, Q
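
For reference, the recursions implemented in the loop above are, with b the discount factor applied to the scale matrix D:

\[
\begin{aligned}
a_t &= G\,M_{t-1}, & R_t &= G\,C_{t-1}G^{\top}/\delta, \\
e_t &= y_t - a_t^{\top}F_t, & q_t &= F_t^{\top}R_t F_t + V, \\
A_t &= R_t F_t / q_t, & M_t &= a_t + A_t e_t^{\top}, \\
C_t &= R_t - A_t A_t^{\top} q_t, & D_t &= b\,D_{t-1} + e_t e_t^{\top}/q_t, \\
S_t &= D_t / n_t . & &
\end{aligned}
\]
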