def func(self, _params: np.ndarray):
    # split params
    if self.use_mu:
        mu = _params[-1]
    else:
        mu = self.mu_arr
    const, alpha, beta, _ = np.split(_params, self.split_index)
    alpha = ilogit(alpha)
    beta = ilogit(beta)
    const = np.exp(const)

    square_arr = (self.arr - mu) ** 2
    arch_x_arr = stack_delay_arr(square_arr, self.alpha_num)
    if self.beta_num > 0:
        # estimate latent_arr
        self.latent_arr[self.beta_num:] = alpha.dot(arch_x_arr) + const
        self.latent_arr[:self.beta_num] = square_arr.mean()
        garch_recursion(self.latent_arr, beta)
    else:
        self.latent_arr = alpha.dot(arch_x_arr) + const

    loss = -logpdf(self.y_arr, mu, self.latent_arr[self.beta_num:]).mean()
    return loss
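# NOTE: a minimal sketch of the helpers the loss above relies on; the real
# ilogit, stack_delay_arr and logpdf live elsewhere in this repo and may
# differ in detail. The assumptions made here: ilogit maps unconstrained
# params into (0, 1), stack_delay_arr builds the lag matrix consumed by
# alpha.dot(...), and logpdf is the Gaussian log-density parameterised by
# variance rather than standard deviation.
import numpy as np

def ilogit(_x: np.ndarray) -> np.ndarray:
    # inverse-logit transform, elementwise
    return 1.0 / (1.0 + np.exp(-_x))

def stack_delay_arr(_arr: np.ndarray, _num: int) -> np.ndarray:
    # row i holds the lag-(i + 1) values of _arr,
    # giving a (_num, len(_arr) - _num) matrix
    return np.stack([_arr[_num - 1 - i: len(_arr) - 1 - i] for i in range(_num)])

def logpdf(_x: np.ndarray, _mu, _var: np.ndarray) -> np.ndarray:
    # Gaussian log-density with mean _mu and variance _var
    return -0.5 * (np.log(2.0 * np.pi * _var) + (_x - _mu) ** 2 / _var)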
def func(self, _params: np.ndarray):
    # split params
    if self.use_mu:
        mu = _params[-1]
    else:
        mu = self.mu_arr
    const, phi, theta, alpha, beta, _ = np.split(_params, self.split_index)
    const = np.exp(const)
    alpha = ilogit(alpha)
    beta = ilogit(beta)

    # arma part
    if self.phi_num > 0:
        mu = phi.dot(self.x_arr) + mu
    if self.theta_num > 0:
        self.latent_arma_arr[:self.theta_num] = 0.0
        self.latent_arma_arr[self.theta_num:] = self.y_arr - mu
        arma_recursion(self.latent_arma_arr, theta)
    else:
        self.latent_arma_arr = self.y_arr - mu

    # garch part
    new_info_arr = self.latent_arma_arr[self.theta_num:]
    square_arr = new_info_arr ** 2
    arch_x_arr = stack_delay_arr(square_arr, self.alpha_num)
    if self.beta_num > 0:
        # estimate latent_arr
        self.latent_garch_arr[self.beta_num:] = alpha.dot(arch_x_arr) + const
        self.latent_garch_arr[:self.beta_num] = square_arr.mean()
        garch_recursion(self.latent_garch_arr, beta)
    else:
        self.latent_garch_arr = alpha.dot(arch_x_arr) + const

    loss = -logpdf(
        new_info_arr[self.alpha_num:], 0,
        self.latent_garch_arr[self.beta_num:],
    ).mean()
    return loss
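# NOTE: one possible shape of the two recursions called above; the repo's own
# garch_recursion and arma_recursion (typically JIT-compiled loops) may differ,
# so treat these as illustrative sketches only.

def garch_recursion(_latent_arr: np.ndarray, _beta_arr: np.ndarray):
    # in place: add the beta-weighted lags of the conditional variance to the
    # pre-filled ARCH part, completing
    # sigma2_t = const + alpha . eps2_lags + beta . sigma2_lags
    num = len(_beta_arr)
    for t in range(num, len(_latent_arr)):
        for j in range(num):
            _latent_arr[t] += _beta_arr[j] * _latent_arr[t - 1 - j]

def arma_recursion(_latent_arr: np.ndarray, _theta_arr: np.ndarray):
    # in place: strip the theta-weighted lags of the previous innovations from
    # the pre-filled residuals, leaving the innovation series eps_t
    num = len(_theta_arr)
    for t in range(num, len(_latent_arr)):
        for j in range(num):
            _latent_arr[t] -= _theta_arr[j] * _latent_arr[t - 1 - j]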
def optimize(
    self,
    _init_params: np.ndarray,
    _max_iter: int = 200,
    _disp: bool = False,
):
    # equality constraint: the ilogit-transformed alpha and beta coefficients
    # (the slice between split_index[2] and split_index[4]) must sum to one;
    # minimize is scipy.optimize.minimize with the SLSQP solver
    cons = {
        'type': 'eq',
        'fun': lambda x: ilogit(x[self.split_index[2]:self.split_index[4]]).sum() - 1.0,
    }
    ret = minimize(
        self.func,
        _init_params,
        constraints=cons,
        method='SLSQP',
        options={'maxiter': _max_iter, 'disp': _disp},
    )
    return ret.x
def optimize(
    self,
    _init_params: np.ndarray,
    _max_iter: int = 50,
    _disp: bool = False,
):
    # equality constraint: the ilogit-transformed alpha and beta coefficients
    # (params 1 .. alpha_num + beta_num) must sum to one
    cons = {
        'type': 'eq',
        'fun': lambda x: ilogit(x[1:self.alpha_num + self.beta_num + 1]).sum() - 1.0,
    }
    ret = minimize(
        self.func,
        _init_params,
        constraints=cons,
        method='SLSQP',
        options={'maxiter': _max_iter, 'disp': _disp},
    )
    return ret.x
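# NOTE: a hypothetical usage sketch for the GARCH-only estimator. The
# constructor name `GARCH` and its argument names are assumptions; only the
# parameter layout [log(const), logit(alpha...), logit(beta...), mu] follows
# from the np.split call and the constraint slice used above.
ret_arr = np.random.default_rng(0).normal(0.0, 0.01, 1000)  # placeholder return series
model = GARCH(ret_arr, _alpha_num=1, _beta_num=1, _use_mu=True)  # hypothetical constructor
init_params = np.array([
    np.log(1e-4),    # const on the log scale
    0.0,             # logit(alpha_1) -> alpha_1 = 0.5
    0.0,             # logit(beta_1)  -> beta_1 = 0.5, so the constraint holds at the start
    ret_arr.mean(),  # mu
])
fitted_params = model.optimize(init_params, _max_iter=50, _disp=True)
print(model.getAlphas(), model.getBetas())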
def getBetas(self) -> np.ndarray:
    return ilogit(self.logit_beta_arr)
def getAlphas(self) -> np.ndarray:
    return ilogit(self.logit_alpha_arr)