Example #1
    def test_ewma(self):
        ewma = EWMAVariance()

        sv = ewma.starting_values(self.resids)
        assert_equal(sv.shape[0], ewma.num_params)

        bounds = ewma.bounds(self.resids)
        assert_equal(len(bounds), 0)
        var_bounds = ewma.variance_bounds(self.resids)
        backcast = ewma.backcast(self.resids)
        parameters = np.array([])

        names = ewma.parameter_names()
        names_target = []
        assert_equal(names, names_target)

        ewma.compute_variance(parameters, self.resids, self.sigma2,
                              backcast, var_bounds)
        cond_var_direct = np.zeros_like(self.sigma2)
        parameters = np.array([0.0, 0.06, 0.94])
        rec.garch_recursion(parameters,
                            self.resids ** 2.0,
                            np.sign(self.resids),
                            cond_var_direct,
                            1, 0, 1, self.T, backcast, var_bounds)
        # sigma3 = np.zeros_like(self.sigma2)
        # sigma3[0] = backcast
        # for t in range(1, self.T):
        #     sigma3[t] = 0.94 * sigma3[t-1] + 0.06 * self.resids[t-1]**2.0

        assert_allclose(self.sigma2 / cond_var_direct,
                        np.ones_like(self.sigma2))

        A, b = ewma.constraints()
        A_target = np.empty((0, 0))
        b_target = np.empty((0,))
        assert_array_equal(A, A_target)
        assert_array_equal(b, b_target)
        state = np.random.get_state()
        rng = Normal()
        sim_data = ewma.simulate(parameters, self.T, rng.simulate([]))
        np.random.set_state(state)
        e = np.random.standard_normal(self.T + 500)
        initial_value = 1.0

        sigma2 = np.zeros(self.T + 500)
        data = np.zeros(self.T + 500)
        sigma2[0] = initial_value
        data[0] = np.sqrt(initial_value)
        for t in range(1, self.T + 500):
            sigma2[t] = 0.94 * sigma2[t - 1] + 0.06 * data[t - 1] ** 2.0
            data[t] = e[t] * np.sqrt(sigma2[t])

        data = data[500:]
        sigma2 = sigma2[500:]
        assert_almost_equal(data - sim_data[0] + 1.0, np.ones_like(data))
        assert_almost_equal(sigma2 / sim_data[1], np.ones_like(sigma2))

        assert_equal(ewma.num_params, 0)
        assert_equal(ewma.name, 'EWMA/RiskMetrics')
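
The non-obvious step in this test is that EWMAVariance (default smoothing parameter 0.94) is checked against the generic garch_recursion with the fixed parameter vector [0.0, 0.06, 0.94], i.e. a GARCH(1,1) with omega = 0, alpha = 1 - lam and beta = lam. A minimal NumPy sketch of the recursion being verified (the function name is illustrative, not part of the arch API):

    import numpy as np

    def ewma_variance(resids, lam=0.94, backcast=None):
        # EWMA/RiskMetrics: sigma2[t] = (1 - lam) * resids[t-1]**2 + lam * sigma2[t-1]
        sigma2 = np.empty(resids.shape[0])
        # garch_recursion seeds the recursion from the backcast, so sigma2[0] == backcast
        sigma2[0] = np.mean(resids ** 2) if backcast is None else backcast
        for t in range(1, resids.shape[0]):
            sigma2[t] = (1 - lam) * resids[t - 1] ** 2 + lam * sigma2[t - 1]
        return sigma2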
Example #2
    def test_garch_no_symmetric(self):
        garch = GARCH(p=0, o=1, q=1)

        sv = garch.starting_values(self.resids)
        assert_equal(sv.shape[0], garch.num_params)

        bounds = garch.bounds(self.resids)
        assert_equal(bounds[0], (0.0, 10.0 * np.mean(self.resids ** 2.0)))
        assert_equal(bounds[1], (0.0, 2.0))
        assert_equal(bounds[2], (0.0, 1.0))
        var_bounds = garch.variance_bounds(self.resids)
        backcast = garch.backcast(self.resids)
        parameters = np.array([.1, .1, .8])

        names = garch.parameter_names()
        names_target = ['omega', 'gamma[1]', 'beta[1]']
        assert_equal(names, names_target)

        garch.compute_variance(parameters, self.resids, self.sigma2,
                               backcast, var_bounds)
        cond_var_direct = np.zeros_like(self.sigma2)
        rec.garch_recursion(parameters,
                            self.resids ** 2.0,
                            np.sign(self.resids),
                            cond_var_direct,
                            0, 1, 1, self.T, backcast, var_bounds)
        assert_allclose(self.sigma2, cond_var_direct)

        a, b = garch.constraints()
        a_target = np.vstack((np.eye(3), np.array([[0, -0.5, -1.0]])))
        b_target = np.array([0.0, 0.0, 0.0, -1.0])
        assert_array_equal(a, a_target)
        assert_array_equal(b, b_target)
        state = self.rng.get_state()
        rng = Normal()
        rng.random_state.set_state(state)
        sim_data = garch.simulate(parameters, self.T, rng.simulate([]))
        self.rng.set_state(state)
        e = self.rng.standard_normal(self.T + 500)
        initial_value = 1.0
        sigma2 = np.zeros(self.T + 500)
        data = np.zeros(self.T + 500)
        for t in range(self.T + 500):
            sigma2[t] = parameters[0]
            shock = 0.5 * initial_value if t == 0 else \
                data[t - 1] ** 2.0 * (data[t - 1] < 0)
            sigma2[t] += parameters[1] * shock
            lagged_value = initial_value if t == 0 else sigma2[t - 1]
            sigma2[t] += parameters[2] * lagged_value
            data[t] = e[t] * np.sqrt(sigma2[t])
        data = data[500:]
        sigma2 = sigma2[500:]
        assert_almost_equal(data - sim_data[0] + 1.0, np.ones_like(data))
        assert_almost_equal(sigma2 / sim_data[1], np.ones_like(sigma2))

        assert_equal(garch.p, 0)
        assert_equal(garch.o, 1)
        assert_equal(garch.q, 1)
        assert_equal(garch.num_params, 3)
        assert_equal(garch.name, 'GJR-GARCH')
Example #3
    def test_riskmetrics(self):
        rm06 = RiskMetrics2006()

        sv = rm06.starting_values(self.resids)
        assert_equal(sv.shape[0], rm06.num_params)

        bounds = rm06.bounds(self.resids)
        assert_equal(len(bounds), 0)
        var_bounds = rm06.variance_bounds(self.resids)
        backcast = rm06.backcast(self.resids)
        assert_equal(backcast.shape[0], 14)
        parameters = np.array([])

        names = rm06.parameter_names()
        names_target = []
        assert_equal(names, names_target)

        # TODO: Test variance fit by RM06
        rm06.compute_variance(parameters, self.resids, self.sigma2,
                              backcast, var_bounds)

        A, b = rm06.constraints()
        A_target = np.empty((0, 0))
        b_target = np.empty((0,))
        assert_array_equal(A, A_target)
        assert_array_equal(b, b_target)

        # TODO: Test RM06 Simulation
        state = np.random.get_state()
        rng = Normal()
        sim_data = rm06.simulate(parameters, self.T, rng.simulate([]))

        assert_equal(rm06.num_params, 0)
        assert_equal(rm06.name, 'RiskMetrics2006')
Example #4
    def test_ewma(self):
        ewma = EWMAVariance()

        sv = ewma.starting_values(self.resids)
        assert_equal(sv.shape[0], ewma.num_params)

        bounds = ewma.bounds(self.resids)
        assert_equal(len(bounds), 0)
        var_bounds = ewma.variance_bounds(self.resids)
        backcast = ewma.backcast(self.resids)
        parameters = np.array([])

        names = ewma.parameter_names()
        names_target = []
        assert_equal(names, names_target)

        ewma.compute_variance(parameters, self.resids, self.sigma2,
                              backcast, var_bounds)
        cond_var_direct = np.zeros_like(self.sigma2)
        parameters = np.array([0.0, 0.06, 0.94])
        rec.garch_recursion(parameters,
                            self.resids ** 2.0,
                            np.sign(self.resids),
                            cond_var_direct,
                            1, 0, 1, self.T, backcast, var_bounds)

        assert_allclose(self.sigma2 / cond_var_direct,
                        np.ones_like(self.sigma2))

        a, b = ewma.constraints()
        a_target = np.empty((0, 0))
        b_target = np.empty((0,))
        assert_array_equal(a, a_target)
        assert_array_equal(b, b_target)
        state = self.rng.get_state()
        rng = Normal()
        rng.random_state.set_state(state)
        sim_data = ewma.simulate(parameters, self.T, rng.simulate([]))
        self.rng.set_state(state)
        e = self.rng.standard_normal(self.T + 500)
        initial_value = 1.0

        sigma2 = np.zeros(self.T + 500)
        data = np.zeros(self.T + 500)
        sigma2[0] = initial_value
        data[0] = np.sqrt(initial_value)
        for t in range(1, self.T + 500):
            sigma2[t] = 0.94 * sigma2[t - 1] + 0.06 * data[t - 1] ** 2.0
            data[t] = e[t] * np.sqrt(sigma2[t])

        data = data[500:]
        sigma2 = sigma2[500:]
        assert_almost_equal(data - sim_data[0] + 1.0, np.ones_like(data))
        assert_almost_equal(sigma2 / sim_data[1], np.ones_like(sigma2))

        assert_equal(ewma.num_params, 0)
        assert_equal(ewma.name, 'EWMA/RiskMetrics')
        assert isinstance(ewma.__str__(), str)
        txt = ewma.__repr__()
        assert str(hex(id(ewma))) in txt
Example #5
    def test_riskmetrics(self):
        rm06 = RiskMetrics2006()

        sv = rm06.starting_values(self.resids)
        assert_equal(sv.shape[0], rm06.num_params)

        bounds = rm06.bounds(self.resids)
        assert_equal(len(bounds), 0)
        var_bounds = rm06.variance_bounds(self.resids)
        backcast = rm06.backcast(self.resids)
        assert_equal(backcast.shape[0], 14)
        parameters = np.array([])

        names = rm06.parameter_names()
        names_target = []
        assert_equal(names, names_target)

        # TODO: Test variance fit by RM06
        rm06.compute_variance(parameters, self.resids, self.sigma2, backcast,
                              var_bounds)

        A, b = rm06.constraints()
        A_target = np.empty((0, 0))
        b_target = np.empty((0, ))
        assert_array_equal(A, A_target)
        assert_array_equal(b, b_target)

        # TODO: Test RM06 Simulation
        state = np.random.get_state()
        rng = Normal()
        sim_data = rm06.simulate(parameters, self.T, rng.simulate([]))

        assert_equal(rm06.num_params, 0)
        assert_equal(rm06.name, 'RiskMetrics2006')
Example #6
    def test_garch_no_symmetric(self):
        garch = GARCH(p=0, o=1, q=1)

        sv = garch.starting_values(self.resids)
        assert_equal(sv.shape[0], garch.num_params)

        bounds = garch.bounds(self.resids)
        assert_equal(bounds[0], (0.0, 10.0 * np.mean(self.resids ** 2.0)))
        assert_equal(bounds[1], (0.0, 2.0))
        assert_equal(bounds[2], (0.0, 1.0))
        var_bounds = garch.variance_bounds(self.resids)
        backcast = garch.backcast(self.resids)
        parameters = np.array([.1, .1, .8])

        names = garch.parameter_names()
        names_target = ['omega', 'gamma[1]', 'beta[1]']
        assert_equal(names, names_target)

        garch.compute_variance(parameters, self.resids, self.sigma2,
                               backcast, var_bounds)
        cond_var_direct = np.zeros_like(self.sigma2)
        rec.garch_recursion(parameters,
                            self.resids ** 2.0,
                            np.sign(self.resids),
                            cond_var_direct,
                            0, 1, 1, self.T, backcast, var_bounds)
        assert_allclose(self.sigma2, cond_var_direct)

        A, b = garch.constraints()
        A_target = np.vstack((np.eye(3), np.array([[0, -0.5, -1.0]])))
        b_target = np.array([0.0, 0.0, 0.0, -1.0])
        assert_array_equal(A, A_target)
        assert_array_equal(b, b_target)
        state = np.random.get_state()
        rng = Normal()
        sim_data = garch.simulate(parameters, self.T, rng.simulate([]))
        np.random.set_state(state)
        e = np.random.standard_normal(self.T + 500)
        initial_value = 1.0
        sigma2 = np.zeros(self.T + 500)
        data = np.zeros(self.T + 500)
        for t in range(self.T + 500):
            sigma2[t] = parameters[0]
            shock = 0.5 * initial_value if t == 0 else \
                data[t - 1] ** 2.0 * (data[t - 1] < 0)
            sigma2[t] += parameters[1] * shock
            lagged_value = initial_value if t == 0 else sigma2[t - 1]
            sigma2[t] += parameters[2] * lagged_value
            data[t] = e[t] * np.sqrt(sigma2[t])
        data = data[500:]
        sigma2 = sigma2[500:]
        assert_almost_equal(data - sim_data[0] + 1.0, np.ones_like(data))
        assert_almost_equal(sigma2 / sim_data[1], np.ones_like(sigma2))

        assert_equal(garch.p, 0)
        assert_equal(garch.o, 1)
        assert_equal(garch.q, 1)
        assert_equal(garch.num_params, 3)
        assert_equal(garch.name, 'GJR-GARCH')
Example #7
    def test_garch(self):
        garch = GARCH()

        sv = garch.starting_values(self.resids)
        assert_equal(sv.shape[0], garch.num_params)

        bounds = garch.bounds(self.resids)
        assert_equal(bounds[0], (0.0, 10.0 * np.mean(self.resids**2.0)))
        assert_equal(bounds[1], (0.0, 1.0))
        assert_equal(bounds[2], (0.0, 1.0))
        backcast = garch.backcast(self.resids)
        w = 0.94**np.arange(75)
        assert_almost_equal(backcast,
                            np.sum((self.resids[:75]**2) * (w / w.sum())))
        var_bounds = garch.variance_bounds(self.resids)
        parameters = np.array([.1, .1, .8])
        garch.compute_variance(parameters, self.resids, self.sigma2, backcast,
                               var_bounds)
        cond_var_direct = np.zeros_like(self.sigma2)
        rec.garch_recursion(parameters, self.resids**2.0, np.sign(self.resids),
                            cond_var_direct, 1, 0, 1, self.T, backcast,
                            var_bounds)
        assert_allclose(self.sigma2, cond_var_direct)

        A, b = garch.constraints()
        A_target = np.vstack((np.eye(3), np.array([[0, -1.0, -1.0]])))
        b_target = np.array([0.0, 0.0, 0.0, -1.0])
        assert_array_equal(A, A_target)
        assert_array_equal(b, b_target)
        state = np.random.get_state()
        rng = Normal()
        sim_data = garch.simulate(parameters, self.T, rng.simulate([]))
        np.random.set_state(state)
        e = np.random.standard_normal(self.T + 500)
        initial_value = 1.0
        sigma2 = np.zeros(self.T + 500)
        data = np.zeros(self.T + 500)
        for t in range(self.T + 500):
            sigma2[t] = parameters[0]
            shock = initial_value if t == 0 else data[t - 1]**2.0
            sigma2[t] += parameters[1] * shock
            lagged_value = initial_value if t == 0 else sigma2[t - 1]
            sigma2[t] += parameters[2] * lagged_value
            data[t] = e[t] * np.sqrt(sigma2[t])
        data = data[500:]
        sigma2 = sigma2[500:]
        assert_almost_equal(data / sim_data[0], np.ones_like(data))
        assert_almost_equal(sigma2 / sim_data[1], np.ones_like(sigma2))

        names = garch.parameter_names()
        names_target = ['omega', 'alpha[1]', 'beta[1]']
        assert_equal(names, names_target)

        assert_equal(garch.name, 'GARCH')
        assert_equal(garch.num_params, 3)
        assert_equal(garch.power, 2.0)
        assert_equal(garch.p, 1)
        assert_equal(garch.o, 0)
        assert_equal(garch.q, 1)
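
The GARCH variants in these examples (plain GARCH, and the GJR cases with p=0 or q=0) all cross-check the same GJR-GARCH(1,1,1) update, of which the others are restrictions. A hedged plain-Python restatement of that recursion (helper name illustrative; pre-sample terms are replaced by the backcast, with the asymmetric term halved, matching the simulation loops in these tests):

    import numpy as np

    def gjr_garch_variance(resids, omega, alpha, gamma, beta, backcast):
        # sigma2[t] = omega + alpha*e[t-1]**2 + gamma*e[t-1]**2*(e[t-1] < 0) + beta*sigma2[t-1]
        T = resids.shape[0]
        sigma2 = np.empty(T)
        for t in range(T):
            if t == 0:
                shock2, asym, lagged_var = backcast, 0.5 * backcast, backcast
            else:
                shock2 = resids[t - 1] ** 2
                asym = shock2 * (resids[t - 1] < 0)
                lagged_var = sigma2[t - 1]
            sigma2[t] = omega + alpha * shock2 + gamma * asym + beta * lagged_var
        return sigma2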
Example #8
    def test_arch(self):
        arch = ARCH()

        sv = arch.starting_values(self.resids)
        assert_equal(sv.shape[0], arch.num_params)

        bounds = arch.bounds(self.resids)
        assert_equal(bounds[0], (0.0, 10.0 * np.mean(self.resids ** 2.0)))
        assert_equal(bounds[1], (0.0, 1.0))

        backcast = arch.backcast(self.resids)
        w = 0.94 ** np.arange(75)
        assert_almost_equal(backcast,
                            np.sum((self.resids[:75] ** 2) * (w / w.sum())))

        parameters = np.array([0.5, 0.7])
        var_bounds = arch.variance_bounds(self.resids)
        arch.compute_variance(parameters, self.resids, self.sigma2, backcast,
                              var_bounds)
        cond_var_direct = np.zeros_like(self.sigma2)
        rec.arch_recursion(parameters, self.resids, cond_var_direct, 1,
                           self.T, backcast, var_bounds)
        assert_allclose(self.sigma2, cond_var_direct)

        a, b = arch.constraints()
        a_target = np.vstack((np.eye(2), np.array([[0, -1.0]])))
        b_target = np.array([0.0, 0.0, -1.0])
        assert_array_equal(a, a_target)
        assert_array_equal(b, b_target)
        state = self.rng.get_state()
        rng = Normal()
        rng.random_state.set_state(state)
        sim_data = arch.simulate(parameters, self.T, rng.simulate([]))
        self.rng.set_state(state)
        e = self.rng.standard_normal(self.T + 500)
        initial_value = 1.0
        sigma2 = np.zeros(self.T + 500)
        data = np.zeros(self.T + 500)
        for t in range(self.T + 500):
            sigma2[t] = parameters[0]
            shock = initial_value if t == 0 else data[t - 1] ** 2.0
            sigma2[t] += parameters[1] * shock
            data[t] = e[t] * np.sqrt(sigma2[t])
        data = data[500:]
        sigma2 = sigma2[500:]
        assert_almost_equal(data - sim_data[0] + 1.0, np.ones_like(data))
        assert_almost_equal(sigma2 / sim_data[1], np.ones_like(sigma2))

        names = arch.parameter_names()
        names_target = ['omega', 'alpha[1]']
        assert_equal(names, names_target)

        assert_equal(arch.name, 'ARCH')
        assert_equal(arch.num_params, 2)
        assert_equal(arch.p, 1)
        assert isinstance(arch.__str__(), str)
        txt = arch.__repr__()
        assert str(hex(id(arch))) in txt
Example #9
    def test_arch(self):
        arch = ARCH()

        sv = arch.starting_values(self.resids)
        assert_equal(sv.shape[0], arch.num_params)

        bounds = arch.bounds(self.resids)
        assert_equal(bounds[0], (0.0, 10.0 * np.mean(self.resids ** 2.0)))
        assert_equal(bounds[1], (0.0, 1.0))

        backcast = arch.backcast(self.resids)
        w = 0.94 ** np.arange(75)
        assert_almost_equal(backcast,
                            np.sum((self.resids[:75] ** 2) * (w / w.sum())))

        parameters = np.array([0.5, 0.7])
        var_bounds = arch.variance_bounds(self.resids)
        arch.compute_variance(parameters, self.resids, self.sigma2, backcast,
                              var_bounds)
        cond_var_direct = np.zeros_like(self.sigma2)
        rec.arch_recursion(parameters, self.resids, cond_var_direct, 1,
                           self.T, backcast, var_bounds)
        assert_allclose(self.sigma2, cond_var_direct)

        A, b = arch.constraints()
        A_target = np.vstack((np.eye(2), np.array([[0, -1.0]])))
        b_target = np.array([0.0, 0.0, -1.0])
        assert_array_equal(A, A_target)
        assert_array_equal(b, b_target)
        state = np.random.get_state()
        rng = Normal()
        sim_data = arch.simulate(parameters, self.T, rng.simulate([]))
        np.random.set_state(state)
        e = np.random.standard_normal(self.T + 500)
        initial_value = 1.0
        sigma2 = np.zeros(self.T + 500)
        data = np.zeros(self.T + 500)
        for t in range(self.T + 500):
            sigma2[t] = parameters[0]
            shock = initial_value if t == 0 else data[t - 1] ** 2.0
            sigma2[t] += parameters[1] * shock
            data[t] = e[t] * np.sqrt(sigma2[t])
        data = data[500:]
        sigma2 = sigma2[500:]
        assert_almost_equal(data - sim_data[0] + 1.0, np.ones_like(data))
        assert_almost_equal(sigma2 / sim_data[1], np.ones_like(sigma2))

        names = arch.parameter_names()
        names_target = ['omega', 'alpha[1]']
        assert_equal(names, names_target)

        assert_equal(arch.name, 'ARCH')
        assert_equal(arch.num_params, 2)
        assert_equal(arch.p, 1)
        assert isinstance(arch.__str__(), str)
        txt = arch.__repr__()
        assert str(hex(id(arch))) in txt
Example #10
    def test_garch_power(self):
        garch = GARCH(power=1.0)
        assert_equal(garch.num_params, 3)
        assert_equal(garch.name, 'AVGARCH')
        assert_equal(garch.power, 1.0)

        sv = garch.starting_values(self.resids)
        assert_equal(sv.shape[0], garch.num_params)

        bounds = garch.bounds(self.resids)
        assert_equal(bounds[0], (0.0, 10.0 * np.mean(np.abs(self.resids))))
        assert_equal(bounds[1], (0.0, 1.0))
        assert_equal(bounds[2], (0.0, 1.0))
        var_bounds = garch.variance_bounds(self.resids)
        backcast = garch.backcast(self.resids)
        w = 0.94 ** np.arange(75)
        assert_almost_equal(backcast,
                            np.sum(np.abs(self.resids[:75]) * (w / w.sum())))

        parameters = np.array([.1, .1, .8])
        garch.compute_variance(parameters, self.resids, self.sigma2, backcast,
                               var_bounds)
        cond_var_direct = np.zeros_like(self.sigma2)
        rec.garch_recursion(parameters,
                            np.abs(self.resids),
                            np.sign(self.resids),
                            cond_var_direct,
                            1, 0, 1, self.T, backcast, var_bounds)
        cond_var_direct **= 2.0  # Square since recursion does not apply power
        assert_allclose(self.sigma2, cond_var_direct)

        a, b = garch.constraints()
        a_target = np.vstack((np.eye(3), np.array([[0, -1.0, -1.0]])))
        b_target = np.array([0.0, 0.0, 0.0, -1.0])
        assert_array_equal(a, a_target)
        assert_array_equal(b, b_target)
        state = self.rng.get_state()
        rng = Normal()
        rng.random_state.set_state(state)
        sim_data = garch.simulate(parameters, self.T, rng.simulate([]))
        self.rng.set_state(state)
        e = self.rng.standard_normal(self.T + 500)
        initial_value = 1.0
        sigma = np.zeros(self.T + 500)
        data = np.zeros(self.T + 500)
        for t in range(self.T + 500):
            sigma[t] = parameters[0]
            shock = initial_value if t == 0 else np.abs(data[t - 1])
            sigma[t] += parameters[1] * shock
            lagged_value = initial_value if t == 0 else sigma[t - 1]
            sigma[t] += parameters[2] * lagged_value
            data[t] = e[t] * sigma[t]
        data = data[500:]
        sigma2 = sigma[500:] ** 2.0
        assert_almost_equal(data - sim_data[0] + 1.0, np.ones_like(data))
        assert_almost_equal(sigma2 / sim_data[1], np.ones_like(sigma2))
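
With power=1.0 the recursion runs on absolute residuals and conditional standard deviations rather than squares, which is why the test squares cond_var_direct and the simulated sigma path before comparing. A small sketch of that absolute-value (AVGARCH) update, assuming the same backcast convention (name illustrative):

    import numpy as np

    def avgarch_variance(resids, omega, alpha, beta, backcast):
        # sigma[t] = omega + alpha*|e[t-1]| + beta*sigma[t-1]; squared at the end
        T = resids.shape[0]
        sigma = np.empty(T)
        for t in range(T):
            abs_shock = backcast if t == 0 else np.abs(resids[t - 1])
            lagged = backcast if t == 0 else sigma[t - 1]
            sigma[t] = omega + alpha * abs_shock + beta * lagged
        return sigma ** 2  # conditional variance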
Example #11
    def test_arch_multiple_lags(self):
        arch = ARCH(p=5)

        sv = arch.starting_values(self.resids)
        assert_equal(sv.shape[0], arch.num_params)

        bounds = arch.bounds(self.resids)
        assert_equal(bounds[0], (0.0, 10.0 * np.mean(self.resids ** 2.0)))
        for i in range(1, 6):
            assert_equal(bounds[i], (0.0, 1.0))
        var_bounds = arch.variance_bounds(self.resids)
        backcast = arch.backcast(self.resids)
        parameters = np.array([0.25, 0.17, 0.16, 0.15, 0.14, 0.13])
        arch.compute_variance(parameters, self.resids, self.sigma2, backcast,
                              var_bounds)
        cond_var_direct = np.zeros_like(self.sigma2)
        rec.arch_recursion(parameters, self.resids, cond_var_direct, 5,
                           self.T, backcast, var_bounds)
        assert_allclose(self.sigma2, cond_var_direct)

        a, b = arch.constraints()
        a_target = np.vstack((np.eye(6),
                              np.array([[0, -1.0, -1.0, -1.0, -1.0, -1.0]])))
        b_target = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0])
        assert_array_equal(a, a_target)
        assert_array_equal(b, b_target)
        state = self.rng.get_state()
        rng = Normal()
        rng.random_state.set_state(state)
        sim_data = arch.simulate(parameters, self.T, rng.simulate([]))
        self.rng.set_state(state)
        e = self.rng.standard_normal(self.T + 500)
        initial_value = 1.0
        sigma2 = np.zeros(self.T + 500)
        data = np.zeros(self.T + 500)
        for t in range(self.T + 500):
            sigma2[t] = parameters[0]
            for i in range(5):
                if t - i - 1 < 0:
                    sigma2[t] += parameters[i + 1] * initial_value
                else:
                    sigma2[t] += parameters[i + 1] * data[t - i - 1] ** 2.0
            data[t] = e[t] * np.sqrt(sigma2[t])
        data = data[500:]
        sigma2 = sigma2[500:]
        assert_almost_equal(data - sim_data[0] + 1.0, np.ones_like(data))
        assert_almost_equal(sigma2 / sim_data[1], np.ones_like(sigma2))

        names = arch.parameter_names()
        names_target = ['omega']
        names_target.extend(['alpha[' + str(i + 1) + ']' for i in range(5)])
        assert_equal(names, names_target)

        assert_equal(arch.num_params, 6)
        assert_equal(arch.name, 'ARCH')
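
For ARCH(p) there is no lagged-variance term: the conditional variance is omega plus a weighted sum of the last p squared residuals, with pre-sample values replaced by a backcast (the simulation loop above uses initial_value in that role). A compact sketch, with an illustrative name rather than the arch API:

    import numpy as np

    def arch_variance(resids, omega, alphas, backcast):
        # sigma2[t] = omega + sum_i alphas[i] * e[t-1-i]**2, with backcast for t-1-i < 0
        T = resids.shape[0]
        sigma2 = np.empty(T)
        for t in range(T):
            sigma2[t] = omega
            for i, a in enumerate(alphas):
                lag = t - 1 - i
                sigma2[t] += a * (resids[lag] ** 2 if lag >= 0 else backcast)
        return sigma2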
Example #12
    def test_garch_power(self):
        garch = GARCH(power=1.0)
        assert_equal(garch.num_params, 3)
        assert_equal(garch.name, 'AVGARCH')
        assert_equal(garch.power, 1.0)

        sv = garch.starting_values(self.resids)
        assert_equal(sv.shape[0], garch.num_params)

        bounds = garch.bounds(self.resids)
        assert_equal(bounds[0], (0.0, 10.0 * np.mean(np.abs(self.resids))))
        assert_equal(bounds[1], (0.0, 1.0))
        assert_equal(bounds[2], (0.0, 1.0))
        var_bounds = garch.variance_bounds(self.resids)
        backcast = garch.backcast(self.resids)
        w = 0.94 ** np.arange(75)
        assert_almost_equal(backcast,
                            np.sum(np.abs(self.resids[:75]) * (w / w.sum())))

        parameters = np.array([.1, .1, .8])
        garch.compute_variance(parameters, self.resids, self.sigma2, backcast,
                               var_bounds)
        cond_var_direct = np.zeros_like(self.sigma2)
        rec.garch_recursion(parameters,
                            np.abs(self.resids),
                            np.sign(self.resids),
                            cond_var_direct,
                            1, 0, 1, self.T, backcast, var_bounds)
        cond_var_direct **= 2.0  # Square since recursion does not apply power
        assert_allclose(self.sigma2, cond_var_direct)

        A, b = garch.constraints()
        A_target = np.vstack((np.eye(3), np.array([[0, -1.0, -1.0]])))
        b_target = np.array([0.0, 0.0, 0.0, -1.0])
        assert_array_equal(A, A_target)
        assert_array_equal(b, b_target)
        state = np.random.get_state()
        rng = Normal()
        sim_data = garch.simulate(parameters, self.T, rng.simulate([]))
        np.random.set_state(state)
        e = np.random.standard_normal(self.T + 500)
        initial_value = 1.0
        sigma = np.zeros(self.T + 500)
        data = np.zeros(self.T + 500)
        for t in range(self.T + 500):
            sigma[t] = parameters[0]
            shock = initial_value if t == 0 else np.abs(data[t - 1])
            sigma[t] += parameters[1] * shock
            lagged_value = initial_value if t == 0 else sigma[t - 1]
            sigma[t] += parameters[2] * lagged_value
            data[t] = e[t] * sigma[t]
        data = data[500:]
        sigma2 = sigma[500:] ** 2.0
        assert_almost_equal(data - sim_data[0] + 1.0, np.ones_like(data))
        assert_almost_equal(sigma2 / sim_data[1], np.ones_like(sigma2))
Example #13
    def test_garch_no_lagged_vol(self):
        garch = GARCH(p=1, o=1, q=0)
        sv = garch.starting_values(self.resids)
        assert_equal(sv.shape[0], garch.num_params)

        bounds = garch.bounds(self.resids)
        assert_equal(bounds[0], (0.0, 10.0 * np.mean(self.resids**2.0)))
        assert_equal(bounds[1], (0.0, 1.0))
        assert_equal(bounds[2], (-1.0, 2.0))

        backcast = garch.backcast(self.resids)
        parameters = np.array([.5, .25, .5])
        var_bounds = garch.variance_bounds(self.resids)

        garch.compute_variance(parameters, self.resids, self.sigma2, backcast,
                               var_bounds)
        cond_var_direct = np.zeros_like(self.sigma2)
        rec.garch_recursion(parameters, self.resids**2.0, np.sign(self.resids),
                            cond_var_direct, 1, 1, 0, self.T, backcast,
                            var_bounds)
        assert_allclose(self.sigma2, cond_var_direct)

        A, b = garch.constraints()
        A_target = np.vstack((np.eye(3), np.array([[0, -1.0, -0.5]])))
        A_target[2, 1] = 1.0
        b_target = np.array([0.0, 0.0, 0.0, -1.0])
        assert_array_equal(A, A_target)
        assert_array_equal(b, b_target)
        state = np.random.get_state()
        rng = Normal()
        sim_data = garch.simulate(parameters, self.T, rng.simulate([]))
        np.random.set_state(state)
        e = np.random.standard_normal(self.T + 500)
        initial_value = 1.0
        sigma2 = np.zeros(self.T + 500)
        data = np.zeros(self.T + 500)
        for t in range(self.T + 500):
            sigma2[t] = parameters[0]
            shock = initial_value if t == 0 else data[t - 1]**2.0
            sigma2[t] += parameters[1] * shock
            shock = 0.5 * initial_value if t == 0 else \
                (data[t - 1] ** 2.0) * (data[t - 1] < 0)
            sigma2[t] += parameters[2] * shock
            data[t] = e[t] * np.sqrt(sigma2[t])
        data = data[500:]
        sigma2 = sigma2[500:]
        assert_almost_equal(data - sim_data[0] + 1.0, np.ones_like(data))
        assert_almost_equal(sigma2 / sim_data[1], np.ones_like(sigma2))

        assert_equal(garch.p, 1)
        assert_equal(garch.o, 1)
        assert_equal(garch.q, 0)
        assert_equal(garch.num_params, 3)
        assert_equal(garch.name, 'GJR-GARCH')
Example #14
    def test_arch_multiple_lags(self):
        arch = ARCH(p=5)

        sv = arch.starting_values(self.resids)
        assert_equal(sv.shape[0], arch.num_params)

        bounds = arch.bounds(self.resids)
        assert_equal(bounds[0], (0.0, 10.0 * np.mean(self.resids ** 2.0)))
        for i in range(1, 6):
            assert_equal(bounds[i], (0.0, 1.0))
        var_bounds = arch.variance_bounds(self.resids)
        backcast = arch.backcast(self.resids)
        parameters = np.array([0.25, 0.17, 0.16, 0.15, 0.14, 0.13])
        arch.compute_variance(parameters, self.resids, self.sigma2, backcast,
                              var_bounds)
        cond_var_direct = np.zeros_like(self.sigma2)
        rec.arch_recursion(parameters, self.resids, cond_var_direct, 5,
                           self.T, backcast, var_bounds)
        assert_allclose(self.sigma2, cond_var_direct)

        A, b = arch.constraints()
        A_target = np.vstack((np.eye(6),
                              np.array([[0, -1.0, -1.0, -1.0, -1.0, -1.0]])))
        b_target = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0])
        assert_array_equal(A, A_target)
        assert_array_equal(b, b_target)
        state = np.random.get_state()
        rng = Normal()
        sim_data = arch.simulate(parameters, self.T, rng.simulate([]))
        np.random.set_state(state)
        e = np.random.standard_normal(self.T + 500)
        initial_value = 1.0
        sigma2 = np.zeros(self.T + 500)
        data = np.zeros(self.T + 500)
        for t in range(self.T + 500):
            sigma2[t] = parameters[0]
            for i in range(5):
                if t - i - 1 < 0:
                    sigma2[t] += parameters[i + 1] * initial_value
                else:
                    sigma2[t] += parameters[i + 1] * data[t - i - 1] ** 2.0
            data[t] = e[t] * np.sqrt(sigma2[t])
        data = data[500:]
        sigma2 = sigma2[500:]
        assert_almost_equal(data - sim_data[0] + 1.0, np.ones_like(data))
        assert_almost_equal(sigma2 / sim_data[1], np.ones_like(sigma2))

        names = arch.parameter_names()
        names_target = ['omega']
        names_target.extend(['alpha[' + str(i + 1) + ']' for i in range(5)])
        assert_equal(names, names_target)

        assert_equal(arch.num_params, 6)
        assert_equal(arch.name, 'ARCH')
Example #15
    def test_empty_mean(self):
        mod = HARX(self.y, None, None, False, volatility=ConstantVariance(),
                   distribution=Normal())
        res = mod.fit()

        mod = ZeroMean(self.y, volatility=ConstantVariance(), distribution=Normal())
        res_z = mod.fit()

        assert res.num_params == res_z.num_params
        assert_series_equal(res.params, res_z.params)
        assert res.loglikelihood == res_z.loglikelihood
Example #16
    def test_constant_variance(self):
        cv = ConstantVariance()

        sv = cv.starting_values(self.resids)
        assert_equal(sv.shape[0], cv.num_params)

        bounds = cv.bounds(self.resids)
        mean_square = np.mean(self.resids ** 2.0)
        assert_almost_equal(bounds[0],
                            (self.resid_var / 100000.0, 10.0 * mean_square))

        backcast = cv.backcast(self.resids)
        var_bounds = cv.variance_bounds(self.resids)
        assert_almost_equal(self.resid_var, backcast)

        parameters = np.array([self.resid_var])

        cv.compute_variance(parameters, self.resids, self.sigma2, backcast,
                            var_bounds)
        assert_allclose(np.ones_like(self.sigma2) * self.resid_var,
                        self.sigma2)

        a, b = cv.constraints()
        a_target = np.eye(1)
        b_target = np.array([0.0])
        assert_array_equal(a, a_target)
        assert_array_equal(b, b_target)

        state = self.rng.get_state()
        rng = Normal()
        rng.random_state.set_state(state)
        sim_data = cv.simulate(parameters, self.T, rng.simulate([]))
        self.rng.set_state(state)
        e = self.rng.standard_normal(self.T + 500)
        sigma2 = np.zeros(self.T + 500)
        sigma2[:] = parameters[0]
        data = np.zeros(self.T + 500)
        data[:] = np.sqrt(sigma2) * e
        data = data[500:]
        sigma2 = sigma2[500:]

        names = cv.parameter_names()
        names_target = ['sigma2']
        assert_equal(names, names_target)

        assert_almost_equal(data - sim_data[0] + 1.0, np.ones_like(data))
        assert_almost_equal(sigma2 / sim_data[1], np.ones_like(sigma2))

        assert_equal(cv.num_params, 1)
        assert_equal(cv.name, 'Constant Variance')
        assert isinstance(cv.__str__(), str)
        txt = cv.__repr__()
        assert str(hex(id(cv))) in txt
Example #17
    def test_constant_variance(self):
        cv = ConstantVariance()

        sv = cv.starting_values(self.resids)
        assert_equal(sv.shape[0], cv.num_params)

        bounds = cv.bounds(self.resids)
        mean_square = np.mean(self.resids ** 2.0)
        assert_almost_equal(bounds[0],
                            (self.resid_var / 100000.0, 10.0 * mean_square))

        backcast = cv.backcast(self.resids)
        var_bounds = cv.variance_bounds(self.resids)
        assert_almost_equal(self.resid_var, backcast)

        parameters = np.array([self.resid_var])

        cv.compute_variance(parameters, self.resids, self.sigma2, backcast,
                            var_bounds)
        assert_allclose(np.ones_like(self.sigma2) * self.resid_var,
                        self.sigma2)

        A, b = cv.constraints()
        A_target = np.eye(1)
        b_target = np.array([0.0])
        assert_array_equal(A, A_target)
        assert_array_equal(b, b_target)

        state = np.random.get_state()
        rng = Normal()
        sim_data = cv.simulate(parameters, self.T, rng.simulate([]))
        np.random.set_state(state)
        e = np.random.standard_normal(self.T + 500)
        sigma2 = np.zeros(self.T + 500)
        sigma2[:] = parameters[0]
        data = np.zeros(self.T + 500)
        data[:] = np.sqrt(sigma2) * e
        data = data[500:]
        sigma2 = sigma2[500:]

        names = cv.parameter_names()
        names_target = ['sigma2']
        assert_equal(names, names_target)

        assert_almost_equal(data - sim_data[0] + 1.0, np.ones_like(data))
        assert_almost_equal(sigma2 / sim_data[1], np.ones_like(sigma2))

        assert_equal(cv.num_params, 1)
        assert_equal(cv.name, 'Constant Variance')
        assert isinstance(cv.__str__(), str)
        txt = cv.__repr__()
        assert str(hex(id(cv))) in txt
Example #18
    def test_egarch_100(self):
        egarch = EGARCH(p=1, o=0, q=0)

        sv = egarch.starting_values(self.resids)
        assert_equal(sv.shape[0], egarch.num_params)

        backcast = egarch.backcast(self.resids)
        w = 0.94 ** np.arange(75)
        backcast_test = np.sum((self.resids[:75] ** 2) * (w / w.sum()))
        assert_almost_equal(backcast, np.log(backcast_test))

        var_bounds = egarch.variance_bounds(self.resids)
        parameters = np.array([.1, .4])
        egarch.compute_variance(parameters, self.resids, self.sigma2, backcast,
                                var_bounds)
        cond_var_direct = np.zeros_like(self.sigma2)
        lnsigma2 = np.empty(self.T)
        std_resids = np.empty(self.T)
        abs_std_resids = np.empty(self.T)
        rec.egarch_recursion(parameters, self.resids, cond_var_direct, 1, 0, 0,
                             self.T, backcast, var_bounds, lnsigma2,
                             std_resids, abs_std_resids)
        assert_allclose(self.sigma2, cond_var_direct)

        state = self.rng.get_state()
        rng = Normal()
        rng.random_state.set_state(state)
        sim_data = egarch.simulate(parameters, self.T, rng.simulate([]))
        self.rng.set_state(state)
        e = self.rng.standard_normal(self.T + 500)
        initial_value = 0.1 / (1 - 0.95)
        lnsigma2 = np.zeros(self.T + 500)
        lnsigma2[0] = initial_value
        sigma2 = np.zeros(self.T + 500)
        sigma2[0] = np.exp(lnsigma2[0])
        data = np.zeros(self.T + 500)
        data[0] = np.sqrt(sigma2[0]) * e[0]
        norm_const = np.sqrt(2 / np.pi)
        for t in range(1, self.T + 500):
            lnsigma2[t] = parameters[0]
            lnsigma2[t] += parameters[1] * (np.abs(e[t - 1]) - norm_const)

        sigma2 = np.exp(lnsigma2)
        data = e * np.sqrt(sigma2)

        data = data[500:]
        sigma2 = sigma2[500:]

        assert_almost_equal(data - sim_data[0] + 1.0, np.ones_like(data))
        assert_almost_equal(sigma2 / sim_data[1], np.ones_like(sigma2))
Example #19
    def test_egarch_100(self):
        egarch = EGARCH(p=1, o=0, q=0)

        sv = egarch.starting_values(self.resids)
        assert_equal(sv.shape[0], egarch.num_params)

        backcast = egarch.backcast(self.resids)
        w = 0.94 ** np.arange(75)
        backcast_test = np.sum((self.resids[:75] ** 2) * (w / w.sum()))
        assert_almost_equal(backcast, np.log(backcast_test))

        var_bounds = egarch.variance_bounds(self.resids)
        parameters = np.array([.1, .4])
        egarch.compute_variance(parameters, self.resids, self.sigma2, backcast,
                                var_bounds)
        cond_var_direct = np.zeros_like(self.sigma2)
        lnsigma2 = np.empty(self.T)
        std_resids = np.empty(self.T)
        abs_std_resids = np.empty(self.T)
        rec.egarch_recursion(parameters, self.resids, cond_var_direct, 1, 0, 0,
                             self.T, backcast, var_bounds, lnsigma2,
                             std_resids, abs_std_resids)
        assert_allclose(self.sigma2, cond_var_direct)

        state = np.random.get_state()
        rng = Normal()
        sim_data = egarch.simulate(parameters, self.T, rng.simulate([]))
        np.random.set_state(state)
        e = np.random.standard_normal(self.T + 500)
        initial_value = 0.1 / (1 - 0.95)
        lnsigma2 = np.zeros(self.T + 500)
        lnsigma2[0] = initial_value
        sigma2 = np.zeros(self.T + 500)
        sigma2[0] = np.exp(lnsigma2[0])
        data = np.zeros(self.T + 500)
        data[0] = np.sqrt(sigma2[0]) * e[0]
        norm_const = np.sqrt(2 / np.pi)
        for t in range(1, self.T + 500):
            lnsigma2[t] = parameters[0]
            lnsigma2[t] += parameters[1] * (np.abs(e[t - 1]) - norm_const)

        sigma2 = np.exp(lnsigma2)
        data = e * np.sqrt(sigma2)

        data = data[500:]
        sigma2 = sigma2[500:]

        assert_almost_equal(data - sim_data[0] + 1.0, np.ones_like(data))
        assert_almost_equal(sigma2 / sim_data[1], np.ones_like(sigma2))
Example #20
    def test_errors(self):
        with pytest.raises(ValueError):
            ARX(self.y, lags=np.array([[1, 2], [3, 4]]))
        x = self.rng.randn(self.y.shape[0] + 1, 1)
        with pytest.raises(ValueError):
            ARX(self.y, x=x)
        with pytest.raises(ValueError):
            HARX(self.y, lags=np.eye(3))
        with pytest.raises(ValueError):
            ARX(self.y, lags=-1)
        with pytest.raises(ValueError):
            ARX(self.y, x=self.rng.randn(1, 1), lags=-1)

        ar = ARX(self.y, lags=1)
        with self.assertRaises(ValueError):
            d = Normal()
            ar.volatility = d

        with self.assertRaises(ValueError):
            v = GARCH()
            ar.distribution = v
        x = self.rng.randn(1000, 1)
        with pytest.raises(ValueError):
            ar.simulate(np.ones(5), 100, x=x)
        with pytest.raises(ValueError):
            ar.simulate(np.ones(5), 100)
        with pytest.raises(ValueError):
            ar.simulate(np.ones(3), 100, initial_value=self.rng.randn(10))

        with self.assertRaises(ValueError):
            ar.volatility = ConstantVariance()
            ar.fit(cov_type='unknown')
Example #21
    def test_normal(self):
        dist = Normal()
        ll1 = dist.loglikelihoood([], self.resids, self.sigma2)
        scipy_dist = stats.norm
        ll2 = scipy_dist.logpdf(self.resids, scale=np.sqrt(self.sigma2)).sum()
        assert_almost_equal(ll1, ll2)

        assert_equal(dist.num_params, 0)

        bounds = dist.bounds(self.resids)
        assert_equal(len(bounds), 0)

        a, b = dist.constraints()
        assert_equal(len(a), 0)

        assert_array_equal(dist.starting_values(self.resids), np.empty((0,)))
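
The comparison with scipy works because the Gaussian log-density with mean zero and variance sigma2 is, per observation, -0.5*(log(2*pi) + log(sigma2) + resid**2/sigma2). A quick standalone check of that identity with made-up values (purely illustrative):

    import numpy as np
    from scipy import stats

    resids = np.array([0.3, -1.2, 0.5])
    sigma2 = np.array([1.0, 1.5, 0.8])
    ll_manual = -0.5 * (np.log(2 * np.pi) + np.log(sigma2) + resids ** 2 / sigma2)
    assert np.allclose(ll_manual, stats.norm.logpdf(resids, scale=np.sqrt(sigma2)))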
Example #22
    def setup_class(cls):
        cls.rng = RandomState(1234)
        cls.T = 1000
        cls.resids = cls.rng.standard_normal(cls.T)
        zm = ZeroMean()
        zm.volatility = GARCH()
        seed = 12345
        random_state = np.random.RandomState(seed)
        zm.distribution = Normal(random_state=random_state)
        sim_data = zm.simulate(np.array([0.1, 0.1, 0.8]), 1000)
        with pytest.raises(ValueError):
            zm.simulate(np.array([0.1, 0.1, 0.8]), 1000, initial_value=3.0)
        date_index = pd.date_range("2000-12-31", periods=1000, freq="W")
        cls.y = sim_data.data.values
        cls.y_df = pd.DataFrame(
            cls.y[:, None], columns=["LongVariableName"], index=date_index
        )

        cls.y_series = pd.Series(
            cls.y, name="VeryVeryLongLongVariableName", index=date_index
        )
        x = cls.resids + cls.rng.standard_normal(cls.T)
        cls.x = x[:, None]
        cls.x_df = pd.DataFrame(cls.x, columns=["LongExogenousName"])
        cls.resid_var = np.var(cls.resids)
        cls.sigma2 = np.zeros_like(cls.resids)
        cls.backcast = 1.0
Example #23
    def test_fixed_variance(self):
        variance = np.arange(1000.0) + 1.0
        fv = FixedVariance(variance)
        fv.start, fv.stop = 0, 1000
        parameters = np.array([2.0])
        resids = self.resids
        sigma2 = np.empty_like(resids)
        backcast = fv.backcast(resids)
        var_bounds = fv.variance_bounds(resids, 2.0)
        fv.compute_variance(parameters, resids, sigma2, backcast, var_bounds)
        sv = fv.starting_values(resids)
        cons = fv.constraints()
        bounds = fv.bounds(resids)
        assert_allclose(sigma2, 2.0 * variance)
        assert_allclose(sv, (resids / np.sqrt(variance)).var())
        assert var_bounds.shape == (resids.shape[0], 2)
        assert fv.num_params == 1
        assert fv.parameter_names() == ['scale']
        assert fv.name == 'Fixed Variance'
        assert_equal(cons[0], np.ones((1, 1)))
        assert_equal(cons[1], np.zeros(1))
        assert_equal(bounds[0][0], sv[0] / 100000.0)

        sigma2 = np.empty(500)
        fv.start = 250
        fv.stop = 750
        fv.compute_variance(parameters, resids[250:750], sigma2, backcast,
                            var_bounds)
        assert_allclose(sigma2, 2.0 * variance[250:750])

        fv = FixedVariance(variance, unit_scale=True)
        fv.start, fv.stop = 0, 1000
        sigma2 = np.empty_like(resids)
        parameters = np.empty(0)
        fv.compute_variance(parameters, resids, sigma2, backcast, var_bounds)
        sv = fv.starting_values(resids)
        cons = fv.constraints()
        bounds = fv.bounds(resids)

        assert_allclose(sigma2, variance)
        assert_allclose(sv, np.empty(0))
        assert fv.num_params == 0
        assert fv.parameter_names() == []
        assert fv.name == 'Fixed Variance (Unit Scale)'
        assert_equal(cons[0], np.empty((0, 0)))
        assert_equal(cons[1], np.empty((0)))
        assert bounds == []
        rng = Normal()
        with pytest.raises(NotImplementedError):
            fv.simulate(parameters, 1000, rng)

        fv = FixedVariance(variance, unit_scale=True)
        fv.start, fv.stop = 123, 731
        sigma2 = np.empty_like(resids)
        parameters = np.empty(0)
        assert fv.start == 123
        assert fv.stop == 731
        with pytest.raises(ValueError):
            fv.compute_variance(parameters, resids, sigma2, backcast,
                                var_bounds)
Example #24
def test_autoscale():
    rs = np.random.RandomState(34254321)
    dist = Normal(random_state=rs)
    am = arch_model(None)
    am.distribution = dist
    data = am.simulate([0, 0.0001, 0.05, 0.94], nobs=1000)
    am = arch_model(data.data)
    with pytest.warns(DataScaleWarning):
        res = am.fit(disp=DISPLAY)
    assert_almost_equal(res.scale, 1.0)

    am = arch_model(data.data, rescale=True)
    res_auto = am.fit(disp=DISPLAY)
    assert_almost_equal(res_auto.scale, 10.0)

    am = arch_model(10 * data.data)
    res_manual = am.fit(disp=DISPLAY)
    assert_series_equal(res_auto.params, res_manual.params)

    res_no = arch_model(data.data, rescale=False).fit(disp=DISPLAY)
    assert res_no.scale == 1.0

    am = arch_model(10000 * data.data, rescale=True)
    res_big = am.fit(disp=DISPLAY)
    assert_almost_equal(res_big.scale, 0.1)
Example #25
def test_invalid_params():
    pits = np.arange(1, 100.0) / 100.0
    dist = Normal()
    with pytest.raises(ValueError):
        dist.ppf(pits, [1.0])
    dist = StudentsT()
    with pytest.raises(ValueError):
        dist.ppf(pits, [1.0])
Example #26
    def test_optimization_options(self):
        norm = Normal(random_state=RandomState([12891298, 843084]))
        am = arch_model(None)
        am.distribution = norm
        data = am.simulate(np.array([0.0, 0.1, 0.1, 0.85]), 2500)
        am = arch_model(data.data)
        std = am.fit(disp=DISPLAY)
        loose = am.fit(tol=1e-2, disp=DISPLAY)
        assert std.loglikelihood >= loose.loglikelihood
        with warnings.catch_warnings(record=True) as w:
            short = am.fit(options={"maxiter": 3}, disp=DISPLAY)
        assert len(w) == 1
        assert std.loglikelihood >= short.loglikelihood
        assert short.convergence_flag != 0
Example #27
    def test_normal(self):
        dist = Normal()
        ll1 = dist.loglikelihood([], self.resids, self.sigma2)
        scipy_dist = stats.norm
        ll2 = scipy_dist.logpdf(self.resids, scale=np.sqrt(self.sigma2)).sum()
        assert_almost_equal(ll1, ll2)

        assert_equal(dist.num_params, 0)

        bounds = dist.bounds(self.resids)
        assert_equal(len(bounds), 0)

        a, b = dist.constraints()
        assert_equal(len(a), 0)

        assert_array_equal(dist.starting_values(self.resids), np.empty((0, )))
Example #28
    def test_cgarch(self):
        cgarch = CGARCH()
        sv = cgarch.starting_values(self.resids)
        assert_equal(sv.shape[0], cgarch.num_params)
        parameters = np.array([0.1, 0.4, 0.06, 0.8, 0.2])
        bounds = cgarch.bounds(self.resids)
        assert_equal(bounds[0], (0, 1))
        assert_equal(bounds[1], (0, 1))
        assert_equal(bounds[2], (0, 1))
        assert_equal(bounds[3], (0.79, 1))
        assert_equal(bounds[4], (0, 1))
        backcast = cgarch.backcast(self.resids)
        w = 0.94 ** np.arange(75)
        assert_almost_equal(backcast,
                            np.sum((self.resids[:75] ** 2) * (w / w.sum())))
        var_bounds = cgarch.variance_bounds(self.resids)
        cgarch.compute_variance(parameters, self.resids, self.sigma2,
                                backcast, var_bounds)
        cond_var_direct = np.zeros_like(self.sigma2)
        g2 = np.ndarray(self.T)
        q2 = g2.copy()
        rec.cgarch_recursion(parameters,
                             self.resids ** 2.0,
                             cond_var_direct,
                             backcast,
                             var_bounds, g2, q2)
        assert_allclose(self.sigma2, cond_var_direct)

        a, b = cgarch.constraints()
        a_target = np.array([[0, 1, 0, 0, -1], [-1, -1, 0, 1, 0], [0, 0, 0, -1, 0]])
        b_target = np.array([0, 0, -1])
        assert_array_equal(a, a_target)
        assert_array_equal(b, b_target)
        # test simulated data
        state = np.random.get_state()
        rng = Normal()
        sim_data = cgarch.simulate(parameters, self.T, rng.simulate([]))
        np.random.set_state(state)
        e = np.random.standard_normal(self.T + 500)
        alpha, beta, omega, rho, phi = parameters
        converted_params = cgarch._covertparams(parameters)
        fromgarch = converted_params[0] / (1 - np.sum(converted_params[1:]))
        fromcg = omega / (1 - rho)
        aver = (fromcg + fromgarch) / 2
        if (aver > 0.0) and (aver < 0.2):
            initial_value = aver
        else:
            initial_value = 0.1
        sigma2 = np.zeros(self.T + 500)
        sigma2[0] = initial_value
        g2 = np.ndarray(self.T + 500)
        q2 = g2.copy()
        q2[0] = initial_value * 0.65
        g2[0] = initial_value - q2[0]
        data = np.zeros(self.T + 500)
        data[0] = e[0] * np.sqrt(sigma2[0])

        for i in range(1, self.T + 500):
            g2[i] = alpha * (data[i - 1]**2 - q2[i - 1]) + beta * g2[i - 1]
            q2[i] = omega + rho * q2[i - 1] + phi * (data[i - 1]**2 - sigma2[i - 1])
            sigma2[i] = g2[i] + q2[i]
            data[i] = e[i] * (np.sqrt(sigma2[i]))

        data = data[500:]
        sigma2 = sigma2[500:]
        assert_almost_equal(data / sim_data[0], np.ones_like(data))
        assert_almost_equal(sigma2 / sim_data[1], np.ones_like(sigma2))

        names = cgarch.parameter_names()
        names_target = ["alpha", "beta", "omega", "rho", "phi"]
        assert_equal(names, names_target)

        assert isinstance(cgarch.__str__(), str)

        assert_equal(cgarch.name, 'ComponentGARCH')
        assert_equal(cgarch.num_params, 5)
        assert_equal(cgarch.p, 2)
        assert_equal(cgarch.q, 2)
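
The simulation loop above spells out the two-equation component recursion being verified: a persistent component q2 and a transitory component g2 whose sum is the conditional variance. Restated compactly (sketch only; the name and the choice of initial component values are illustrative):

    import numpy as np

    def cgarch_variance(resids, alpha, beta, omega, rho, phi, g0, q0):
        # g2[t] = alpha*(e[t-1]**2 - q2[t-1]) + beta*g2[t-1]
        # q2[t] = omega + rho*q2[t-1] + phi*(e[t-1]**2 - sigma2[t-1])
        # sigma2[t] = g2[t] + q2[t]
        T = resids.shape[0]
        g2, q2, sigma2 = np.empty(T), np.empty(T), np.empty(T)
        g2[0], q2[0] = g0, q0
        sigma2[0] = g0 + q0
        for t in range(1, T):
            g2[t] = alpha * (resids[t - 1] ** 2 - q2[t - 1]) + beta * g2[t - 1]
            q2[t] = omega + rho * q2[t - 1] + phi * (resids[t - 1] ** 2 - sigma2[t - 1])
            sigma2[t] = g2[t] + q2[t]
        return sigma2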
Example #29
    def test_egarch(self):
        egarch = EGARCH(p=1, o=1, q=1)

        sv = egarch.starting_values(self.resids)
        assert_equal(sv.shape[0], egarch.num_params)

        bounds = egarch.bounds(self.resids)
        assert_equal(len(bounds), egarch.num_params)
        const = np.log(10000.0)
        lnv = np.log(np.mean(self.resids ** 2.0))
        assert_equal(bounds[0], (lnv - const, lnv + const))
        assert_equal(bounds[1], (-np.inf, np.inf))
        assert_equal(bounds[2], (-np.inf, np.inf))
        assert_equal(bounds[3], (0.0, 1.0))
        backcast = egarch.backcast(self.resids)

        w = 0.94 ** np.arange(75)
        backcast_test = np.sum((self.resids[:75] ** 2) * (w / w.sum()))
        assert_almost_equal(backcast, np.log(backcast_test))

        var_bounds = egarch.variance_bounds(self.resids)
        parameters = np.array([.1, .1, -.1, .95])
        egarch.compute_variance(parameters, self.resids, self.sigma2, backcast,
                                var_bounds)
        cond_var_direct = np.zeros_like(self.sigma2)
        lnsigma2 = np.empty(self.T)
        std_resids = np.empty(self.T)
        abs_std_resids = np.empty(self.T)
        rec.egarch_recursion(parameters, self.resids, cond_var_direct, 1, 1, 1,
                             self.T, backcast, var_bounds, lnsigma2,
                             std_resids, abs_std_resids)
        assert_allclose(self.sigma2, cond_var_direct)

        a, b = egarch.constraints()
        a_target = np.vstack((np.array([[0, 0, 0, -1.0]])))
        b_target = np.array([-1.0])
        assert_array_equal(a, a_target)
        assert_array_equal(b, b_target)

        state = np.random.get_state()
        rng = Normal()
        sim_data = egarch.simulate(parameters, self.T, rng.simulate([]))
        np.random.set_state(state)
        e = np.random.standard_normal(self.T + 500)
        initial_value = 0.1 / (1 - 0.95)
        lnsigma2 = np.zeros(self.T + 500)
        lnsigma2[0] = initial_value
        sigma2 = np.zeros(self.T + 500)
        sigma2[0] = np.exp(lnsigma2[0])
        data = np.zeros(self.T + 500)
        data[0] = np.sqrt(sigma2[0]) * e[0]
        norm_const = np.sqrt(2 / np.pi)
        for t in range(1, self.T + 500):
            lnsigma2[t] = parameters[0]
            lnsigma2[t] += parameters[1] * (np.abs(e[t - 1]) - norm_const)
            lnsigma2[t] += parameters[2] * e[t - 1]
            lnsigma2[t] += parameters[3] * lnsigma2[t - 1]

        sigma2 = np.exp(lnsigma2)
        data = e * np.sqrt(sigma2)

        data = data[500:]
        sigma2 = sigma2[500:]

        assert_almost_equal(data - sim_data[0] + 1.0, np.ones_like(data))
        assert_almost_equal(sigma2 / sim_data[1], np.ones_like(sigma2))

        names = egarch.parameter_names()
        names_target = ['omega', 'alpha[1]', 'gamma[1]', 'beta[1]']
        assert_equal(names, names_target)
        assert_equal(egarch.name, 'EGARCH')
        assert_equal(egarch.num_params, 4)

        assert_equal(egarch.p, 1)
        assert_equal(egarch.o, 1)
        assert_equal(egarch.q, 1)
        assert isinstance(egarch.__str__(), str)
        txt = egarch.__repr__()
        assert str(hex(id(egarch))) in txt

        with pytest.raises(ValueError):
            EGARCH(p=0, o=0, q=1)
        with pytest.raises(ValueError):
            EGARCH(p=1, o=1, q=-1)
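
The EGARCH update works on log-variance, driven by the standardized innovations through a symmetric term |e| - sqrt(2/pi) and a leverage term. A plain restatement of the EGARCH(1,1,1) step from the simulation loop above (illustrative helper, not the arch API):

    import numpy as np

    def egarch_variance(std_innov, omega, alpha, gamma, beta, lnsigma2_0):
        # lnsigma2[t] = omega + alpha*(|e[t-1]| - sqrt(2/pi)) + gamma*e[t-1] + beta*lnsigma2[t-1]
        T = std_innov.shape[0]
        lnsigma2 = np.empty(T)
        lnsigma2[0] = lnsigma2_0
        norm_const = np.sqrt(2 / np.pi)  # E|Z| for a standard normal Z
        for t in range(1, T):
            lnsigma2[t] = (omega
                           + alpha * (np.abs(std_innov[t - 1]) - norm_const)
                           + gamma * std_innov[t - 1]
                           + beta * lnsigma2[t - 1])
        return np.exp(lnsigma2)  # conditional variance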
Example #30
    def test_harch(self):
        harch = HARCH(lags=[1, 5, 22])

        sv = harch.starting_values(self.resids)
        assert_equal(sv.shape[0], harch.num_params)

        bounds = harch.bounds(self.resids)
        assert_equal(bounds[0], (0.0, 10.0 * np.mean(self.resids ** 2.0)))
        assert_equal(bounds[1], (0.0, 1.0))
        assert_equal(bounds[2], (0.0, 1.0))
        assert_equal(bounds[3], (0.0, 1.0))
        var_bounds = harch.variance_bounds(self.resids)
        backcast = harch.backcast(self.resids)
        w = 0.94 ** np.arange(75)
        assert_almost_equal(backcast,
                            np.sum((self.resids[:75] ** 2) * (w / w.sum())))

        parameters = np.array([.1, .4, .3, .2])

        var_bounds = harch.variance_bounds(self.resids)
        harch.compute_variance(parameters, self.resids, self.sigma2,
                               backcast, var_bounds)
        cond_var_direct = np.zeros_like(self.sigma2)
        lags = np.array([1, 5, 22], dtype=np.int32)
        rec.harch_recursion(parameters,
                            self.resids,
                            cond_var_direct,
                            lags,
                            self.T,
                            backcast,
                            var_bounds)

        names = harch.parameter_names()
        names_target = ['omega', 'alpha[1]', 'alpha[5]', 'alpha[22]']
        assert_equal(names, names_target)

        assert_allclose(self.sigma2, cond_var_direct)

        a, b = harch.constraints()
        a_target = np.vstack((np.eye(4), np.array([[0, -1.0, -1.0, -1.0]])))
        b_target = np.array([0.0, 0.0, 0.0, 0.0, -1.0])
        assert_array_equal(a, a_target)
        assert_array_equal(b, b_target)
        state = np.random.get_state()
        rng = Normal()
        sim_data = harch.simulate(parameters, self.T, rng.simulate([]))
        np.random.set_state(state)
        e = np.random.standard_normal(self.T + 500)
        sigma2 = np.zeros(self.T + 500)
        data = np.zeros(self.T + 500)
        lagged = np.zeros(22)
        for t in range(self.T + 500):
            sigma2[t] = parameters[0]
            lagged[:] = backcast
            if t > 0:
                if t == 1:
                    lagged[0] = data[0] ** 2.0
                elif t < 22:
                    lagged[:t] = data[t - 1::-1] ** 2.0
                else:
                    lagged = data[t - 1:t - 22:-1] ** 2.0

            shock1 = data[t - 1] ** 2.0 if t > 0 else backcast
            if t >= 5:
                shock5 = np.mean(data[t - 5:t] ** 2.0)
            else:
                shock5 = 0.0
                for i in range(5):
                    shock5 += data[t - i - 1] if t - i - 1 >= 0 else backcast
                shock5 = shock5 / 5.0

            if t >= 22:
                shock22 = np.mean(data[t - 22:t] ** 2.0)
            else:
                shock22 = 0.0
                for i in range(22):
                    shock22 += data[t - i - 1] if t - i - 1 >= 0 else backcast
                shock22 = shock22 / 22.0

            sigma2[t] += parameters[1] * shock1 \
                + parameters[2] * shock5 \
                + parameters[3] * shock22

            data[t] = e[t] * np.sqrt(sigma2[t])
        data = data[500:]
        sigma2 = sigma2[500:]
        assert_almost_equal(data - sim_data[0] + 1.0, np.ones_like(data))
        assert_almost_equal(sigma2 / sim_data[1], np.ones_like(sigma2))

        assert_equal(harch.name, 'HARCH')
        assert_equal(harch.lags, [1, 5, 22])
        assert_equal(harch.num_params, 4)
        assert isinstance(harch.__str__(), str)
        txt = harch.__repr__()
        assert str(hex(id(harch))) in txt
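
The simulation loop in this test mirrors the HARCH recursion exercised through rec.harch_recursion: the conditional variance is an intercept plus weighted averages of squared shocks over each lag window, with the backcast standing in for pre-sample observations. Below is a minimal NumPy-only sketch of that recursion; `harch_variance` is a hypothetical helper name, not a library function, and the pre-sample handling is an assumption of the sketch.

import numpy as np

def harch_variance(resids, omega, alphas, lags, backcast):
    """sigma2[t] = omega + sum_i alpha_i * mean of the last lag_i squared shocks."""
    T = resids.shape[0]
    sigma2 = np.zeros(T)
    for t in range(T):
        sigma2[t] = omega
        for alpha, lag in zip(alphas, lags):
            # mean of the last `lag` squared residuals, padded with the backcast
            sq = [resids[t - i - 1] ** 2 if t - i - 1 >= 0 else backcast
                  for i in range(lag)]
            sigma2[t] += alpha * np.mean(sq)
    return sigma2

With the test's parameters, harch_variance(resids, 0.1, [0.4, 0.3, 0.2], [1, 5, 22], backcast) should track cond_var_direct up to how the earliest observations are padded.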
Example #32
def arch_model(
    y: Optional[ArrayLike],
    x: Optional[ArrayLike] = None,
    mean: str = "Constant",
    lags: Optional[Union[int, List[int], NDArray]] = 0,
    vol: str = "Garch",
    p: Union[int, List[int]] = 1,
    o: int = 0,
    q: int = 1,
    power: float = 2.0,
    dist: str = "Normal",
    hold_back: Optional[int] = None,
    rescale: Optional[bool] = None,
) -> HARX:
    """
    Initialization of common ARCH model specifications

    Parameters
    ----------
    y : {ndarray, Series, None}
        The dependent variable
    x : {np.array, DataFrame}, optional
        Exogenous regressors.  Ignored if model does not permit exogenous
        regressors.
    mean : str, optional
        Name of the mean model.  Currently supported options are: 'Constant',
        'Zero', 'LS', 'AR', 'ARX', 'HAR' and  'HARX'
    lags : int or list (int), optional
        Either a scalar integer value indicating lag length or a list of
        integers specifying lag locations.
    vol : str, optional
        Name of the volatility model.  Currently supported options are:
        'GARCH' (default), 'ARCH', 'EGARCH', 'FIGARCH' and 'HARCH'
    p : int, optional
        Lag order of the symmetric innovation
    o : int, optional
        Lag order of the asymmetric innovation
    q : int, optional
        Lag order of lagged volatility or equivalent
    power : float, optional
        Power to use with GARCH and related models
    dist : str, optional
        Name of the error distribution.  Currently supported options are:

            * Normal: 'normal', 'gaussian' (default)
            * Student's t: 't', 'studentst'
            * Skewed Student's t: 'skewstudent', 'skewt'
            * Generalized Error Distribution: 'ged', 'generalized error'

    hold_back : int
        Number of observations at the start of the sample to exclude when
        estimating model parameters.  Used when comparing models with different
        lag lengths to estimate on the common sample.
    rescale : bool
        Flag indicating whether to automatically rescale data if the scale
        of the data is likely to produce convergence issues when estimating
        model parameters. If False, the model is estimated on the data without
        transformation.  If True, then y is rescaled and the new scale is
        reported in the estimation results.

    Returns
    -------
    model : ARCHModel
        Configured ARCH model

    Examples
    --------
    >>> import datetime as dt
    >>> import pandas_datareader.data as web
    >>> djia = web.get_data_fred('DJIA')
    >>> returns = 100 * djia['DJIA'].pct_change().dropna()

    A basic GARCH(1,1) with a constant mean can be constructed using only
    the return data

    >>> from arch.univariate import arch_model
    >>> am = arch_model(returns)

    Alternative mean and volatility processes can be directly specified

    >>> am = arch_model(returns, mean='AR', lags=2, vol='harch', p=[1, 5, 22])

    This example demonstrates the construction of a zero mean process
    with a TARCH volatility process and Student t error distribution

    >>> am = arch_model(returns, mean='zero', p=1, o=1, q=1,
    ...                 power=1.0, dist='StudentsT')

    Notes
    -----
    Inputs that are not relevant for a particular specification, such as `lags`
    when `mean='zero'`, are silently ignored.
    """
    known_mean = ("zero", "constant", "harx", "har", "ar", "arx", "ls")
    known_vol = ("arch", "figarch", "garch", "harch", "constant", "egarch")
    known_dist = (
        "normal",
        "gaussian",
        "studentst",
        "t",
        "skewstudent",
        "skewt",
        "ged",
        "generalized error",
    )
    mean = mean.lower()
    vol = vol.lower()
    dist = dist.lower()
    if mean not in known_mean:
        raise ValueError("Unknown model type in mean")
    if vol.lower() not in known_vol:
        raise ValueError("Unknown model type in vol")
    if dist.lower() not in known_dist:
        raise ValueError("Unknown model type in dist")

    if mean == "harx":
        am = HARX(y, x, lags, hold_back=hold_back, rescale=rescale)
    elif mean == "har":
        am = HARX(y, None, lags, hold_back=hold_back, rescale=rescale)
    elif mean == "arx":
        am = ARX(y, x, lags, hold_back=hold_back, rescale=rescale)
    elif mean == "ar":
        am = ARX(y, None, lags, hold_back=hold_back, rescale=rescale)
    elif mean == "ls":
        am = LS(y, x, hold_back=hold_back, rescale=rescale)
    elif mean == "constant":
        am = ConstantMean(y, hold_back=hold_back, rescale=rescale)
    else:  # mean == "zero"
        am = ZeroMean(y, hold_back=hold_back, rescale=rescale)

    if vol in ("arch", "garch", "figarch",
               "egarch") and not isinstance(p, int):
        raise TypeError(
            "p must be a scalar int for all volatility processes except HARCH."
        )

    if vol == "constant":
        v: VolatilityProcess = ConstantVariance()
    elif vol == "arch":
        assert isinstance(p, int)
        v = ARCH(p=p)
    elif vol == "figarch":
        assert isinstance(p, int)
        v = FIGARCH(p=p, q=q)
    elif vol == "garch":
        assert isinstance(p, int)
        v = GARCH(p=p, o=o, q=q, power=power)
    elif vol == "egarch":
        assert isinstance(p, int)
        v = EGARCH(p=p, o=o, q=q)
    else:  # vol == 'harch'
        v = HARCH(lags=p)

    if dist in ("skewstudent", "skewt"):
        d: Distribution = SkewStudent()
    elif dist in ("studentst", "t"):
        d = StudentsT()
    elif dist in ("ged", "generalized error"):
        d = GeneralizedError()
    else:  # ('gaussian', 'normal')
        d = Normal()

    am.volatility = v
    am.distribution = d

    return am
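
As a quick end-to-end illustration of the dispatch above, the sketch below builds a GJR-GARCH(1, 1, 1) model with Student's t errors on simulated data and fits it. The simulated series and the `disp="off"` fit option are assumptions for the example, not taken from the code above.

import numpy as np
import pandas as pd
from arch.univariate import arch_model

# purely illustrative return-like series
returns = pd.Series(np.random.default_rng(0).standard_normal(1000))

am = arch_model(returns, mean="Constant", vol="GARCH", p=1, o=1, q=1, dist="t")
res = am.fit(disp="off")  # assumed option to silence optimizer output
print(res.summary())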
Example #33
def test_bad_input():
    with pytest.raises(TypeError):
        Normal(random_state="random_state")
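
For contrast, a construction that the type check accepts passes an actual numpy.random.RandomState instance, or nothing at all; a minimal sketch, assuming the `random_state` keyword shown in the failing call above:

import numpy as np

Normal(random_state=np.random.RandomState(0))  # a RandomState instance is accepted
Normal()                                       # default construction, no explicit seed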
Example #34
    def test_egarch(self):
        egarch = EGARCH(p=1, o=1, q=1)

        sv = egarch.starting_values(self.resids)
        assert_equal(sv.shape[0], egarch.num_params)

        bounds = egarch.bounds(self.resids)
        assert_equal(len(bounds), egarch.num_params)
        const = np.log(10000.0)
        lnv = np.log(np.mean(self.resids ** 2.0))
        assert_equal(bounds[0], (lnv - const, lnv + const))
        assert_equal(bounds[1], (-np.inf, np.inf))
        assert_equal(bounds[2], (-np.inf, np.inf))
        assert_equal(bounds[3], (0.0, 1.0))
        backcast = egarch.backcast(self.resids)

        w = 0.94 ** np.arange(75)
        backcast_test = np.sum((self.resids[:75] ** 2) * (w / w.sum()))
        assert_almost_equal(backcast, np.log(backcast_test))

        var_bounds = egarch.variance_bounds(self.resids)
        parameters = np.array([.1, .1, -.1, .95])
        egarch.compute_variance(parameters, self.resids, self.sigma2, backcast,
                                var_bounds)
        cond_var_direct = np.zeros_like(self.sigma2)
        lnsigma2 = np.empty(self.T)
        std_resids = np.empty(self.T)
        abs_std_resids = np.empty(self.T)
        rec.egarch_recursion(parameters, self.resids, cond_var_direct, 1, 1, 1,
                             self.T, backcast, var_bounds, lnsigma2, std_resids,
                             abs_std_resids)
        assert_allclose(self.sigma2, cond_var_direct)

        A, b = egarch.constraints()
        A_target = np.vstack((np.array([[0, 0, 0, -1.0]])))
        b_target = np.array([-1.0])
        assert_array_equal(A, A_target)
        assert_array_equal(b, b_target)

        state = np.random.get_state()
        rng = Normal()
        sim_data = egarch.simulate(parameters, self.T, rng.simulate([]))
        np.random.set_state(state)
        e = np.random.standard_normal(self.T + 500)
        initial_value = 0.1 / (1 - 0.95)
        lnsigma2 = np.zeros(self.T + 500)
        lnsigma2[0] = initial_value
        sigma2 = np.zeros(self.T + 500)
        sigma2[0] = np.exp(lnsigma2[0])
        data = np.zeros(self.T + 500)
        data[0] = np.sqrt(sigma2[0]) * e[0]
        norm_const = np.sqrt(2 / np.pi)
        for t in range(1, self.T + 500):
            lnsigma2[t] = parameters[0]
            lnsigma2[t] += parameters[1] * (np.abs(e[t - 1]) - norm_const)
            lnsigma2[t] += parameters[2] * e[t - 1]
            lnsigma2[t] += parameters[3] * lnsigma2[t - 1]

        sigma2 = np.exp(lnsigma2)
        data = e * np.sqrt(sigma2)

        data = data[500:]
        sigma2 = sigma2[500:]

        assert_almost_equal(data - sim_data[0] + 1.0, np.ones_like(data))
        assert_almost_equal(sigma2 / sim_data[1], np.ones_like(sigma2))

        names = egarch.parameter_names()
        names_target = ['omega', 'alpha[1]', 'gamma[1]', 'beta[1]']
        assert_equal(names, names_target)
        assert_equal(egarch.name, 'EGARCH')
        assert_equal(egarch.num_params, 4)

        assert_equal(egarch.p, 1)
        assert_equal(egarch.o, 1)
        assert_equal(egarch.q, 1)
Example #35
    def test_ewma_estimated(self):
        ewma = EWMAVariance(lam=None)

        sv = ewma.starting_values(self.resids)
        assert sv == 0.94
        assert sv.shape[0] == ewma.num_params

        bounds = ewma.bounds(self.resids)
        assert len(bounds) == 1
        assert bounds == [(0, 1)]

        ewma.variance_bounds(self.resids)

        backcast = ewma.backcast(self.resids)
        w = 0.94 ** np.arange(75)
        assert_almost_equal(backcast,
                            np.sum((self.resids[:75] ** 2) * (w / w.sum())))

        parameters = np.array([0.9234])

        var_bounds = ewma.variance_bounds(self.resids)
        ewma.compute_variance(parameters, self.resids, self.sigma2, backcast, var_bounds)
        cond_var_direct = np.zeros_like(self.sigma2)
        cond_var_direct[0] = backcast
        parameters = np.array([0, 1 - parameters[0], parameters[0]])
        rec.garch_recursion(parameters,
                            self.resids ** 2.0,
                            np.sign(self.resids),
                            cond_var_direct,
                            1, 0, 1, self.T, backcast, var_bounds)
        assert_allclose(self.sigma2, cond_var_direct)
        assert_allclose(self.sigma2 / cond_var_direct, np.ones_like(self.sigma2))

        names = ewma.parameter_names()
        names_target = ['lam']
        assert_equal(names, names_target)

        a, b = ewma.constraints()
        a_target = np.ones((1, 1))
        b_target = np.zeros((1,))
        assert_array_equal(a, a_target)
        assert_array_equal(b, b_target)

        assert_equal(ewma.num_params, 1)
        assert_equal(ewma.name, 'EWMA/RiskMetrics')
        assert isinstance(ewma.__str__(), str)
        txt = ewma.__repr__()
        assert str(hex(id(ewma))) in txt

        state = self.rng.get_state()
        rng = Normal()
        rng.random_state.set_state(state)
        lam = parameters[-1]
        sim_data = ewma.simulate([lam], self.T, rng.simulate([]))
        self.rng.set_state(state)
        e = self.rng.standard_normal(self.T + 500)
        initial_value = 1.0

        sigma2 = np.zeros(self.T + 500)
        data = np.zeros(self.T + 500)
        sigma2[0] = initial_value
        data[0] = np.sqrt(initial_value)
        for t in range(1, self.T + 500):
            sigma2[t] = lam * sigma2[t - 1] + (1-lam) * data[t - 1] ** 2.0
            data[t] = e[t] * np.sqrt(sigma2[t])

        data = data[500:]
        sigma2 = sigma2[500:]
        assert_almost_equal(data - sim_data[0] + 1.0, np.ones_like(data))
        assert_almost_equal(sigma2 / sim_data[1], np.ones_like(sigma2))
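
The test above relies on EWMA being a restricted GARCH(1, 1): with smoothing parameter lam, the recursion is sigma2[t] = lam * sigma2[t-1] + (1 - lam) * resids[t-1]**2, which corresponds to (omega, alpha, beta) = (0, 1 - lam, lam). A self-contained NumPy sketch of that equivalence, started from the backcast in both cases; the function names here are illustrative, not library functions.

import numpy as np

def ewma_variance(resids, lam, backcast):
    # EWMA recursion: sigma2[t] = lam * sigma2[t-1] + (1 - lam) * resids[t-1]**2
    sigma2 = np.empty_like(resids)
    sigma2[0] = backcast
    for t in range(1, resids.shape[0]):
        sigma2[t] = lam * sigma2[t - 1] + (1 - lam) * resids[t - 1] ** 2
    return sigma2

def garch11_variance(resids, omega, alpha, beta, backcast):
    # GARCH(1,1) recursion, using the backcast for both lagged terms at t = 0
    sigma2 = np.empty_like(resids)
    sigma2[0] = omega + (alpha + beta) * backcast
    for t in range(1, resids.shape[0]):
        sigma2[t] = omega + alpha * resids[t - 1] ** 2 + beta * sigma2[t - 1]
    return sigma2

resids = np.random.default_rng(0).standard_normal(500)
lam, bc = 0.9234, resids.var()
assert np.allclose(ewma_variance(resids, lam, bc),
                   garch11_variance(resids, 0.0, 1 - lam, lam, bc))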
Example #36
def arch_model(y,
               x=None,
               mean='Constant',
               lags=0,
               vol='Garch',
               p=1,
               o=0,
               q=1,
               power=2.0,
               dist='Normal',
               hold_back=None):
    """
    Convenience function to simplify initialization of ARCH models

    Parameters
    ----------
    y : {ndarray, Series, None}
        The dependent variable
    x : {np.array, DataFrame}, optional
        Exogenous regressors.  Ignored if model does not permit exogenous
        regressors.
    mean : str, optional
        Name of the mean model.  Currently supported options are: 'Constant',
        'Zero', 'LS', 'AR', 'ARX', 'HAR' and 'HARX'
    lags : int or list (int), optional
        Either a scalar integer value indicating lag length or a list of
        integers specifying lag locations.
    vol : str, optional
        Name of the volatility model.  Currently supported options are:
        'GARCH' (default), 'EGARCH', 'ARCH' and 'HARCH'
    p : int, optional
        Lag order of the symmetric innovation
    o : int, optional
        Lag order of the asymmetric innovation
    q : int, optional
        Lag order of lagged volatility or equivalent
    power : float, optional
        Power to use with GARCH and related models
    dist : str, optional
        Name of the error distribution.  Currently supported options are:

            * Normal: 'normal', 'gaussian' (default)
            * Student's t: 't', 'studentst'
            * Skewed Student's t: 'skewstudent', 'skewt'
            * Generalized Error Distribution: 'ged', 'generalized error'

    hold_back : int
        Number of observations at the start of the sample to exclude when
        estimating model parameters.  Used when comparing models with different
        lag lengths to estimate on the common sample.

    Returns
    -------
    model : ARCHModel
        Configured ARCH model

    Examples
    --------
    >>> import datetime as dt
    >>> import pandas_datareader.data as web
    >>> djia = web.get_data_fred('DJIA')
    >>> returns = 100 * djia['DJIA'].pct_change().dropna()

    A basic GARCH(1,1) with a constant mean can be constructed using only
    the return data

    >>> from arch.univariate import arch_model
    >>> am = arch_model(returns)

    Alternative mean and volatility processes can be directly specified

    >>> am = arch_model(returns, mean='AR', lags=2, vol='harch', p=[1, 5, 22])

    This example demonstrates the construction of a zero mean process
    with a TARCH volatility process and Student t error distribution

    >>> am = arch_model(returns, mean='zero', p=1, o=1, q=1,
    ...                 power=1.0, dist='StudentsT')

    Notes
    -----
    Inputs that are not relevant for a particular specification, such as `lags`
    when `mean='zero'`, are silently ignored.
    """
    known_mean = ('zero', 'constant', 'harx', 'har', 'ar', 'arx', 'ls')
    known_vol = ('arch', 'garch', 'harch', 'constant', 'egarch')
    known_dist = ('normal', 'gaussian', 'studentst', 't', 'skewstudent',
                  'skewt', 'ged', 'generalized error')
    mean = mean.lower()
    vol = vol.lower()
    dist = dist.lower()
    if mean not in known_mean:
        raise ValueError('Unknown model type in mean')
    if vol.lower() not in known_vol:
        raise ValueError('Unknown model type in vol')
    if dist.lower() not in known_dist:
        raise ValueError('Unknown model type in dist')

    if mean == 'zero':
        am = ZeroMean(y, hold_back=hold_back)
    elif mean == 'constant':
        am = ConstantMean(y, hold_back=hold_back)
    elif mean == 'harx':
        am = HARX(y, x, lags, hold_back=hold_back)
    elif mean == 'har':
        am = HARX(y, None, lags, hold_back=hold_back)
    elif mean == 'arx':
        am = ARX(y, x, lags, hold_back=hold_back)
    elif mean == 'ar':
        am = ARX(y, None, lags, hold_back=hold_back)
    else:
        am = LS(y, x, hold_back=hold_back)

    if vol == 'constant':
        v = ConstantVariance()
    elif vol == 'arch':
        v = ARCH(p=p)
    elif vol == 'garch':
        v = GARCH(p=p, o=o, q=q, power=power)
    elif vol == 'egarch':
        v = EGARCH(p=p, o=o, q=q)
    else:  # vol == 'harch'
        v = HARCH(lags=p)

    if dist in ('skewstudent', 'skewt'):
        d = SkewStudent()
    elif dist in ('studentst', 't'):
        d = StudentsT()
    elif dist in ('ged', 'generalized error'):
        d = GeneralizedError()
    else:  # ('gaussian', 'normal')
        d = Normal()

    am.volatility = v
    am.distribution = d

    return am
Example #37
    def test_harch(self):
        harch = HARCH(lags=[1, 5, 22])

        sv = harch.starting_values(self.resids)
        assert_equal(sv.shape[0], harch.num_params)

        bounds = harch.bounds(self.resids)
        assert_equal(bounds[0], (0.0, 10.0 * np.mean(self.resids ** 2.0)))
        assert_equal(bounds[1], (0.0, 1.0))
        assert_equal(bounds[2], (0.0, 1.0))
        assert_equal(bounds[3], (0.0, 1.0))
        var_bounds = harch.variance_bounds(self.resids)
        backcast = harch.backcast(self.resids)
        w = 0.94 ** np.arange(75)
        assert_almost_equal(backcast,
                            np.sum((self.resids[:75] ** 2) * (w / w.sum())))

        parameters = np.array([.1, .4, .3, .2])

        var_bounds = harch.variance_bounds(self.resids)
        harch.compute_variance(parameters, self.resids, self.sigma2,
                               backcast, var_bounds)
        cond_var_direct = np.zeros_like(self.sigma2)
        lags = np.array([1, 5, 22], dtype=np.int32)
        rec.harch_recursion(parameters,
                            self.resids,
                            cond_var_direct,
                            lags,
                            self.T,
                            backcast,
                            var_bounds)

        names = harch.parameter_names()
        names_target = ['omega', 'alpha[1]', 'alpha[5]', 'alpha[22]']
        assert_equal(names, names_target)

        assert_allclose(self.sigma2, cond_var_direct)

        A, b = harch.constraints()
        A_target = np.vstack((np.eye(4), np.array([[0, -1.0, -1.0, -1.0]])))
        b_target = np.array([0.0, 0.0, 0.0, 0.0, -1.0])
        assert_array_equal(A, A_target)
        assert_array_equal(b, b_target)
        state = np.random.get_state()
        rng = Normal()
        sim_data = harch.simulate(parameters, self.T, rng.simulate([]))
        np.random.set_state(state)
        e = np.random.standard_normal(self.T + 500)
        initial_value = 1.0
        sigma2 = np.zeros(self.T + 500)
        data = np.zeros(self.T + 500)
        lagged = np.zeros(22)
        for t in range(self.T + 500):
            sigma2[t] = parameters[0]
            lagged[:] = backcast
            if t > 0:
                if t == 1:
                    lagged[0] = data[0] ** 2.0
                elif t < 22:
                    lagged[:t] = data[t - 1::-1] ** 2.0
                else:
                    lagged = data[t - 1:t - 22:-1] ** 2.0

            shock1 = data[t - 1] ** 2.0 if t > 0 else backcast
            if t >= 5:
                shock5 = np.mean(data[t - 5:t] ** 2.0)
            else:
                shock5 = 0.0
                for i in range(5):
                    shock5 += data[t - i - 1] if t - i - 1 >= 0 else backcast
                shock5 = shock5 / 5.0

            if t >= 22:
                shock22 = np.mean(data[t - 22:t] ** 2.0)
            else:
                shock22 = 0.0
                for i in range(22):
                    shock22 += data[t - i - 1] if t - i - 1 >= 0 else backcast
                shock22 = shock22 / 22.0

            sigma2[t] += parameters[1] * shock1 \
                + parameters[2] * shock5 \
                + parameters[3] * shock22

            data[t] = e[t] * np.sqrt(sigma2[t])
        data = data[500:]
        sigma2 = sigma2[500:]
        assert_almost_equal(data - sim_data[0] + 1.0, np.ones_like(data))
        assert_almost_equal(sigma2 / sim_data[1], np.ones_like(sigma2))

        assert_equal(harch.name, 'HARCH')
        assert_equal(harch.lags, [1, 5, 22])
        assert_equal(harch.num_params, 4)
Example #38
def simulated_data(request):
    rs = np.random.RandomState(1)
    zm = ZeroMean(volatility=GARCH(), distribution=Normal(rs))
    sim_data = zm.simulate(np.array([0.1, 0.1, 0.88]), 1000)
    return np.asarray(sim_data.data) if request.param else sim_data.data
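
For reference, simulate on the mean models returns a DataFrame, and this fixture hands back either its `data` column or an ndarray view of it depending on `request.param`. A small sketch of inspecting the simulated output; the column names are stated from memory and should be treated as an assumption.

rs = np.random.RandomState(1)
zm = ZeroMean(volatility=GARCH(), distribution=Normal(rs))
sim = zm.simulate(np.array([0.1, 0.1, 0.88]), 1000)
print(list(sim.columns))  # expected: ['data', 'volatility', 'errors']
print(sim.data.head())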
Example #39
    def test_midas_symmetric(self):
        midas = MIDASHyperbolic()

        sv = midas.starting_values(self.resids)
        assert_equal(sv.shape[0], midas.num_params)

        bounds = midas.bounds(self.resids)
        assert_equal(bounds[0], (0.0, 10.0 * np.mean(self.resids ** 2.0)))
        assert_equal(bounds[1], (0.0, 1.0))
        assert_equal(bounds[2], (0.0, 1.0))
        backcast = midas.backcast(self.resids)
        w = 0.94 ** np.arange(75)
        assert_almost_equal(backcast, np.sum((self.resids[:75] ** 2) * (w / w.sum())))
        var_bounds = midas.variance_bounds(self.resids)
        parameters = np.array([.1, .9, .4])
        midas.compute_variance(parameters, self.resids, self.sigma2, backcast, var_bounds)
        cond_var_direct = np.zeros_like(self.sigma2)
        weights = midas._weights(parameters)
        theta = parameters[-1]
        j = np.arange(1, 22 + 1)
        direct_weights = gamma(j + theta) / (gamma(j+1)*gamma(theta))
        direct_weights = direct_weights / direct_weights.sum()
        assert_allclose(weights, direct_weights)
        resids = self.resids
        direct_params = parameters.copy()
        direct_params[-1] = 0.0  # recursion expects [omega, alpha, gamma]; gamma = 0 when symmetric
        rec.midas_recursion_python(direct_params, weights, resids, cond_var_direct, self.T,
                                   backcast, var_bounds)
        assert_allclose(self.sigma2, cond_var_direct)

        a, b = midas.constraints()
        a_target = np.zeros((5, 3))
        a_target[0, 0] = 1
        a_target[1, 1] = 1
        a_target[2, 1] = -1
        a_target[3, 2] = 1
        a_target[4, 2] = -1
        b_target = np.array([0.0, 0.0, -1.0, 0.0, -1.0])
        assert_array_equal(a, a_target)
        assert_array_equal(b, b_target)
        state = self.rng.get_state()
        rng = Normal()
        rng.random_state.set_state(state)
        sim_data = midas.simulate(parameters, self.T, rng.simulate([]))
        self.rng.set_state(state)
        e = self.rng.standard_normal(self.T + 500)
        initial_value = 1.0
        sigma2 = np.zeros(self.T + 500)
        data = np.zeros(self.T + 500)
        sigma2[:22] = initial_value
        omega, alpha = parameters[:2]
        for t in range(22, self.T + 500):
            sigma2[t] = omega
            for i in range(22):
                shock = initial_value if t - i - 1 < 22 else data[t - i - 1] ** 2.0
                sigma2[t] += alpha * weights[i] * shock
            data[t] = e[t] * np.sqrt(sigma2[t])
        data = data[500:]
        sigma2 = sigma2[500:]
        assert_almost_equal(sigma2 / sim_data[1], np.ones_like(sigma2))
        assert_almost_equal(data / sim_data[0], np.ones_like(data))

        names = midas.parameter_names()
        names_target = ['omega', 'alpha', 'theta']
        assert_equal(names, names_target)

        assert isinstance(midas.__str__(), str)
        txt = midas.__repr__()
        assert str(hex(id(midas))) in txt

        assert_equal(midas.name, 'MIDAS Hyperbolic')
        assert_equal(midas.num_params, 3)
        assert_equal(midas.m, 22)

        with pytest.warns(InitialValueWarning):
            parameters = np.array([.1, 1.1, .4])
            midas.simulate(parameters, self.T, rng.simulate([]))
Example #40
    def test_garch(self):
        garch = GARCH()

        sv = garch.starting_values(self.resids)
        assert_equal(sv.shape[0], garch.num_params)

        bounds = garch.bounds(self.resids)
        assert_equal(bounds[0], (0.0, 10.0 * np.mean(self.resids ** 2.0)))
        assert_equal(bounds[1], (0.0, 1.0))
        assert_equal(bounds[2], (0.0, 1.0))
        backcast = garch.backcast(self.resids)
        w = 0.94 ** np.arange(75)
        assert_almost_equal(backcast,
                            np.sum((self.resids[:75] ** 2) * (w / w.sum())))
        var_bounds = garch.variance_bounds(self.resids)
        parameters = np.array([.1, .1, .8])
        garch.compute_variance(parameters, self.resids, self.sigma2,
                               backcast, var_bounds)
        cond_var_direct = np.zeros_like(self.sigma2)
        rec.garch_recursion(parameters,
                            self.resids ** 2.0,
                            np.sign(self.resids),
                            cond_var_direct,
                            1, 0, 1, self.T, backcast, var_bounds)
        assert_allclose(self.sigma2, cond_var_direct)

        a, b = garch.constraints()
        a_target = np.vstack((np.eye(3), np.array([[0, -1.0, -1.0]])))
        b_target = np.array([0.0, 0.0, 0.0, -1.0])
        assert_array_equal(a, a_target)
        assert_array_equal(b, b_target)
        state = np.random.get_state()
        rng = Normal()
        sim_data = garch.simulate(parameters, self.T, rng.simulate([]))
        np.random.set_state(state)
        e = np.random.standard_normal(self.T + 500)
        initial_value = 1.0
        sigma2 = np.zeros(self.T + 500)
        data = np.zeros(self.T + 500)
        for t in range(self.T + 500):
            sigma2[t] = parameters[0]
            shock = initial_value if t == 0 else data[t - 1] ** 2.0
            sigma2[t] += parameters[1] * shock
            lagged_value = initial_value if t == 0 else sigma2[t - 1]
            sigma2[t] += parameters[2] * lagged_value
            data[t] = e[t] * np.sqrt(sigma2[t])
        data = data[500:]
        sigma2 = sigma2[500:]
        assert_almost_equal(data / sim_data[0], np.ones_like(data))
        assert_almost_equal(sigma2 / sim_data[1], np.ones_like(sigma2))

        names = garch.parameter_names()
        names_target = ['omega', 'alpha[1]', 'beta[1]']
        assert_equal(names, names_target)

        assert isinstance(garch.__str__(), str)
        txt = garch.__repr__()
        assert str(hex(id(garch))) in txt

        assert_equal(garch.name, 'GARCH')
        assert_equal(garch.num_params, 3)
        assert_equal(garch.power, 2.0)
        assert_equal(garch.p, 1)
        assert_equal(garch.o, 0)
        assert_equal(garch.q, 1)
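
One detail in the simulation above: initial_value = 1.0 appears to be the unconditional variance implied by the parameters, omega / (1 - alpha - beta) = 0.1 / (1 - 0.1 - 0.8) = 1.0, so the recursion starts at its long-run level. A one-line check:

import numpy as np

omega, alpha, beta = 0.1, 0.1, 0.8
assert np.isclose(omega / (1 - alpha - beta), 1.0)  # long-run GARCH(1,1) variance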
Example #41
def simulated_data():
    rs = np.random.RandomState(1)
    zm = ZeroMean(volatility=GARCH(), distribution=Normal(rs))
    sim_data = zm.simulate(np.array([0.1, 0.1, 0.88]), 1000)
    return sim_data.data
Example #42
    def test_midas_asymmetric(self):
        midas = MIDASHyperbolic(33, asym=True)

        sv = midas.starting_values(self.resids)
        assert_equal(sv.shape[0], midas.num_params)

        bounds = midas.bounds(self.resids)
        assert_equal(bounds[0], (0.0, 10.0 * np.mean(self.resids ** 2.0)))
        assert_equal(bounds[1], (0.0, 1.0))
        assert_equal(bounds[2], (-1.0, 2.0))
        assert_equal(bounds[3], (0.0, 1.0))
        backcast = midas.backcast(self.resids)
        w = 0.94 ** np.arange(75)
        assert_almost_equal(backcast, np.sum((self.resids[:75] ** 2) * (w / w.sum())))
        var_bounds = midas.variance_bounds(self.resids)
        parameters = np.array([.1, .3, 1.2, .4])
        midas.compute_variance(parameters, self.resids, self.sigma2, backcast, var_bounds)
        cond_var_direct = np.zeros_like(self.sigma2)
        weights = midas._weights(parameters)
        wlen = len(weights)
        theta = parameters[-1]
        j = np.arange(1, wlen+1)
        direct_weights = gammaln(j + theta) - gammaln(j + 1) - gammaln(theta)
        direct_weights = np.exp(direct_weights)
        direct_weights = direct_weights/direct_weights.sum()
        assert_allclose(direct_weights, weights)
        resids = self.resids
        direct_params = parameters[:3].copy()
        rec.midas_recursion_python(direct_params, weights, resids, cond_var_direct, self.T,
                                   backcast, var_bounds)
        assert_allclose(self.sigma2, cond_var_direct)

        a, b = midas.constraints()
        a_target = np.zeros((5, 4))
        a_target[0, 0] = 1
        a_target[1, 1] = 1
        a_target[1, 2] = 1
        a_target[2, 1] = -1
        a_target[2, 2] = -0.5
        a_target[3, 3] = 1
        a_target[4, 3] = -1
        b_target = np.array([0.0, 0.0, -1.0, 0.0, -1.0])
        assert_array_equal(a, a_target)
        assert_array_equal(b, b_target)
        state = self.rng.get_state()
        rng = Normal()
        rng.random_state.set_state(state)
        burn = wlen
        sim_data = midas.simulate(parameters, self.T, rng.simulate([]), burn=burn)
        self.rng.set_state(state)
        e = self.rng.standard_normal(self.T + burn)
        initial_value = 1.0
        sigma2 = np.zeros(self.T + burn)
        data = np.zeros(self.T + burn)
        sigma2[:wlen] = initial_value
        omega, alpha, gamma = parameters[:3]
        for t in range(wlen, self.T + burn):
            sigma2[t] = omega
            for i in range(wlen):
                if t - i - 1 < wlen:
                    shock = initial_value
                    coeff = (alpha + 0.5 * gamma) * weights[i]
                else:
                    shock = data[t - i - 1] ** 2.0
                    coeff = (alpha + gamma * (data[t - i - 1] < 0)) * weights[i]
                sigma2[t] += coeff * shock
            data[t] = e[t] * np.sqrt(sigma2[t])
        data = data[burn:]
        sigma2 = sigma2[burn:]
        assert_almost_equal(data / sim_data[0], np.ones_like(data))
        assert_almost_equal(sigma2 / sim_data[1], np.ones_like(sigma2))

        names = midas.parameter_names()
        names_target = ['omega', 'alpha', 'gamma', 'theta']
        assert_equal(names, names_target)

        assert isinstance(midas.__str__(), str)
        txt = midas.__repr__()
        assert str(hex(id(midas))) in txt

        assert_equal(midas.name, 'MIDAS Hyperbolic')
        assert_equal(midas.num_params, 4)
        assert_equal(midas.m, 33)

        with pytest.warns(InitialValueWarning):
            parameters = np.array([.1, .3, 1.6, .4])
            midas.simulate(parameters, self.T, rng.simulate([]))
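
The two MIDAS tests compute the hyperbolic lag weights, proportional to Gamma(j + theta) / (Gamma(j + 1) * Gamma(theta)), in two equivalent ways: the symmetric test takes the gamma ratio directly, while this one works in logs via gammaln and exponentiates, which stays stable for long lag windows. A short sketch of the equivalence, assuming scipy.special is available as in the tests; `hyperbolic_weights` is an illustrative name.

import numpy as np
from scipy.special import gamma, gammaln

def hyperbolic_weights(m, theta):
    # normalized hyperbolic MIDAS weights computed in log space
    j = np.arange(1, m + 1)
    w = np.exp(gammaln(j + theta) - gammaln(j + 1) - gammaln(theta))
    return w / w.sum()

m, theta = 22, 0.4
j = np.arange(1, m + 1)
direct = gamma(j + theta) / (gamma(j + 1) * gamma(theta))
direct = direct / direct.sum()
assert np.allclose(hyperbolic_weights(m, theta), direct)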
Example #43
    def __init__(self, random_state=None):
        DistributionMixin.__init__(self)
        N.__init__(self, random_state)