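    # GJR-GARCH with no symmetric ARCH term (p=0, o=1, q=1).  The conditional
    # variance recursion being checked is
    #     sigma2[t] = omega + gamma[1] * resids[t-1]**2 * (resids[t-1] < 0)
    #                 + beta[1] * sigma2[t-1]
    # and the final constraint row [0, -0.5, -1.0] >= -1.0 corresponds to the
    # persistence restriction gamma[1] / 2 + beta[1] <= 1.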
    def test_garch_no_symmetric(self):
        garch = GARCH(p=0, o=1, q=1)

        sv = garch.starting_values(self.resids)
        assert_equal(sv.shape[0], garch.num_params)

        bounds = garch.bounds(self.resids)
        assert_equal(bounds[0], (0.0, 10.0 * np.mean(self.resids ** 2.0)))
        assert_equal(bounds[1], (0.0, 2.0))
        assert_equal(bounds[2], (0.0, 1.0))

        var_bounds = garch.variance_bounds(self.resids)
        backcast = garch.backcast(self.resids)
        parameters = np.array([.1, .1, .8])

        names = garch.parameter_names()
        names_target = ['omega', 'gamma[1]', 'beta[1]']
        assert_equal(names, names_target)

        garch.compute_variance(parameters, self.resids, self.sigma2,
                               backcast, var_bounds)
        cond_var_direct = np.zeros_like(self.sigma2)
        rec.garch_recursion(parameters, self.resids ** 2.0,
                            np.sign(self.resids), cond_var_direct,
                            0, 1, 1, self.T, backcast, var_bounds)
        assert_allclose(self.sigma2, cond_var_direct)

        A, b = garch.constraints()
        A_target = np.vstack((np.eye(3), np.array([[0, -0.5, -1.0]])))
        b_target = np.array([0.0, 0.0, 0.0, -1.0])
        assert_array_equal(A, A_target)
        assert_array_equal(b, b_target)

        state = np.random.get_state()
        rng = Normal()
        sim_data = garch.simulate(parameters, self.T, rng.simulate([]))

        np.random.set_state(state)
        e = np.random.standard_normal(self.T + 500)
        initial_value = 1.0
        sigma2 = np.zeros(self.T + 500)
        data = np.zeros(self.T + 500)
        for t in range(self.T + 500):
            sigma2[t] = parameters[0]
            shock = 0.5 * initial_value if t == 0 else \
                data[t - 1] ** 2.0 * (data[t - 1] < 0)
            sigma2[t] += parameters[1] * shock
            lagged_value = initial_value if t == 0 else sigma2[t - 1]
            sigma2[t] += parameters[2] * lagged_value
            data[t] = e[t] * np.sqrt(sigma2[t])
        data = data[500:]
        sigma2 = sigma2[500:]
        assert_almost_equal(data - sim_data[0] + 1.0, np.ones_like(data))
        assert_almost_equal(sigma2 / sim_data[1], np.ones_like(sigma2))

        assert_equal(garch.p, 0)
        assert_equal(garch.o, 1)
        assert_equal(garch.q, 1)
        assert_equal(garch.num_params, 3)
        assert_equal(garch.name, 'GJR-GARCH')
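    # power=1.0 selects the absolute-value GARCH (AVGARCH), which models the
    # conditional standard deviation directly,
    #     sigma[t] = omega + alpha[1] * |resids[t-1]| + beta[1] * sigma[t-1],
    # so the direct recursion output is squared before being compared with the
    # conditional variances produced by compute_variance.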
    def test_garch_power(self):
        garch = GARCH(power=1.0)
        assert_equal(garch.num_params, 3)
        assert_equal(garch.name, 'AVGARCH')
        assert_equal(garch.power, 1.0)

        sv = garch.starting_values(self.resids)
        assert_equal(sv.shape[0], garch.num_params)

        bounds = garch.bounds(self.resids)
        assert_equal(bounds[0], (0.0, 10.0 * np.mean(np.abs(self.resids))))
        assert_equal(bounds[1], (0.0, 1.0))
        assert_equal(bounds[2], (0.0, 1.0))

        var_bounds = garch.variance_bounds(self.resids)
        backcast = garch.backcast(self.resids)
        w = 0.94 ** np.arange(75)
        assert_almost_equal(backcast,
                            np.sum(np.abs(self.resids[:75]) * (w / w.sum())))

        parameters = np.array([.1, .1, .8])
        garch.compute_variance(parameters, self.resids, self.sigma2,
                               backcast, var_bounds)
        cond_var_direct = np.zeros_like(self.sigma2)
        rec.garch_recursion(parameters, np.abs(self.resids),
                            np.sign(self.resids), cond_var_direct,
                            1, 0, 1, self.T, backcast, var_bounds)
        cond_var_direct **= 2.0  # Square since recursion does not apply power
        assert_allclose(self.sigma2, cond_var_direct)

        A, b = garch.constraints()
        A_target = np.vstack((np.eye(3), np.array([[0, -1.0, -1.0]])))
        b_target = np.array([0.0, 0.0, 0.0, -1.0])
        assert_array_equal(A, A_target)
        assert_array_equal(b, b_target)

        state = np.random.get_state()
        rng = Normal()
        sim_data = garch.simulate(parameters, self.T, rng.simulate([]))

        np.random.set_state(state)
        e = np.random.standard_normal(self.T + 500)
        initial_value = 1.0
        sigma = np.zeros(self.T + 500)
        data = np.zeros(self.T + 500)
        for t in range(self.T + 500):
            sigma[t] = parameters[0]
            shock = initial_value if t == 0 else np.abs(data[t - 1])
            sigma[t] += parameters[1] * shock
            lagged_value = initial_value if t == 0 else sigma[t - 1]
            sigma[t] += parameters[2] * lagged_value
            data[t] = e[t] * sigma[t]
        data = data[500:]
        sigma2 = sigma[500:] ** 2.0
        assert_almost_equal(data - sim_data[0] + 1.0, np.ones_like(data))
        assert_almost_equal(sigma2 / sim_data[1], np.ones_like(sigma2))
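    # GJR-GARCH without lagged volatility (p=1, o=1, q=0),
    #     sigma2[t] = omega + alpha[1] * resids[t-1]**2
    #                 + gamma[1] * resids[t-1]**2 * (resids[t-1] < 0).
    # gamma[1] may be negative, so the third constraint row is modified to
    # require alpha[1] + gamma[1] >= 0, and the final row corresponds to
    # alpha[1] + gamma[1] / 2 <= 1.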
    def test_garch_no_lagged_vol(self):
        garch = GARCH(p=1, o=1, q=0)

        sv = garch.starting_values(self.resids)
        assert_equal(sv.shape[0], garch.num_params)

        bounds = garch.bounds(self.resids)
        assert_equal(bounds[0], (0.0, 10.0 * np.mean(self.resids ** 2.0)))
        assert_equal(bounds[1], (0.0, 1.0))
        assert_equal(bounds[2], (-1.0, 2.0))

        backcast = garch.backcast(self.resids)
        parameters = np.array([.5, .25, .5])
        var_bounds = garch.variance_bounds(self.resids)
        garch.compute_variance(parameters, self.resids, self.sigma2,
                               backcast, var_bounds)
        cond_var_direct = np.zeros_like(self.sigma2)
        rec.garch_recursion(parameters, self.resids ** 2.0,
                            np.sign(self.resids), cond_var_direct,
                            1, 1, 0, self.T, backcast, var_bounds)
        assert_allclose(self.sigma2, cond_var_direct)

        A, b = garch.constraints()
        A_target = np.vstack((np.eye(3), np.array([[0, -1.0, -0.5]])))
        A_target[2, 1] = 1.0
        b_target = np.array([0.0, 0.0, 0.0, -1.0])
        assert_array_equal(A, A_target)
        assert_array_equal(b, b_target)

        state = np.random.get_state()
        rng = Normal()
        sim_data = garch.simulate(parameters, self.T, rng.simulate([]))

        np.random.set_state(state)
        e = np.random.standard_normal(self.T + 500)
        initial_value = 1.0
        sigma2 = np.zeros(self.T + 500)
        data = np.zeros(self.T + 500)
        for t in range(self.T + 500):
            sigma2[t] = parameters[0]
            shock = initial_value if t == 0 else data[t - 1] ** 2.0
            sigma2[t] += parameters[1] * shock
            shock = 0.5 * initial_value if t == 0 else \
                (data[t - 1] ** 2.0) * (data[t - 1] < 0)
            sigma2[t] += parameters[2] * shock
            data[t] = e[t] * np.sqrt(sigma2[t])
        data = data[500:]
        sigma2 = sigma2[500:]
        assert_almost_equal(data - sim_data[0] + 1.0, np.ones_like(data))
        assert_almost_equal(sigma2 / sim_data[1], np.ones_like(sigma2))

        assert_equal(garch.p, 1)
        assert_equal(garch.o, 1)
        assert_equal(garch.q, 0)
        assert_equal(garch.num_params, 3)
        assert_equal(garch.name, 'GJR-GARCH')
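    # Plain GARCH(1,1),
    #     sigma2[t] = omega + alpha[1] * resids[t-1]**2 + beta[1] * sigma2[t-1],
    # with the persistence constraint alpha[1] + beta[1] <= 1.  The simulated
    # path is rebuilt by hand from the same random state and the
    # 500-observation burn-in is discarded before comparing with simulate().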
    def test_garch(self):
        garch = GARCH()

        sv = garch.starting_values(self.resids)
        assert_equal(sv.shape[0], garch.num_params)

        bounds = garch.bounds(self.resids)
        assert_equal(bounds[0], (0.0, 10.0 * np.mean(self.resids ** 2.0)))
        assert_equal(bounds[1], (0.0, 1.0))
        assert_equal(bounds[2], (0.0, 1.0))

        backcast = garch.backcast(self.resids)
        w = 0.94 ** np.arange(75)
        assert_almost_equal(backcast,
                            np.sum((self.resids[:75] ** 2) * (w / w.sum())))

        var_bounds = garch.variance_bounds(self.resids)
        parameters = np.array([.1, .1, .8])
        garch.compute_variance(parameters, self.resids, self.sigma2,
                               backcast, var_bounds)
        cond_var_direct = np.zeros_like(self.sigma2)
        rec.garch_recursion(parameters, self.resids ** 2.0,
                            np.sign(self.resids), cond_var_direct,
                            1, 0, 1, self.T, backcast, var_bounds)
        assert_allclose(self.sigma2, cond_var_direct)

        A, b = garch.constraints()
        A_target = np.vstack((np.eye(3), np.array([[0, -1.0, -1.0]])))
        b_target = np.array([0.0, 0.0, 0.0, -1.0])
        assert_array_equal(A, A_target)
        assert_array_equal(b, b_target)

        state = np.random.get_state()
        rng = Normal()
        sim_data = garch.simulate(parameters, self.T, rng.simulate([]))

        np.random.set_state(state)
        e = np.random.standard_normal(self.T + 500)
        initial_value = 1.0
        sigma2 = np.zeros(self.T + 500)
        data = np.zeros(self.T + 500)
        for t in range(self.T + 500):
            sigma2[t] = parameters[0]
            shock = initial_value if t == 0 else data[t - 1] ** 2.0
            sigma2[t] += parameters[1] * shock
            lagged_value = initial_value if t == 0 else sigma2[t - 1]
            sigma2[t] += parameters[2] * lagged_value
            data[t] = e[t] * np.sqrt(sigma2[t])
        data = data[500:]
        sigma2 = sigma2[500:]
        assert_almost_equal(data / sim_data[0], np.ones_like(data))
        assert_almost_equal(sigma2 / sim_data[1], np.ones_like(sigma2))

        names = garch.parameter_names()
        names_target = ['omega', 'alpha[1]', 'beta[1]']
        assert_equal(names, names_target)

        assert_true(isinstance(garch.__str__(), str))
        txt = garch.__repr__()
        assert_true(str(hex(id(garch))) in txt)

        assert_equal(garch.name, 'GARCH')
        assert_equal(garch.num_params, 3)
        assert_equal(garch.power, 2.0)
        assert_equal(garch.p, 1)
        assert_equal(garch.o, 0)
        assert_equal(garch.q, 1)