beta0_logit, beta_logit = logistic.intercept_, logistic.coef_[0]

# conditional probability predicted from last observation
p_beta_logit = logistic.predict_proba(z_cubic[[-1], :])[0, 1]

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_lfm_executive_summary-implementation-step03): Perform generalized probabilistic inference

annualized_vol = np.sqrt(s2_xz[1, 1]) * np.sqrt(252)

# uniform base probabilities and base expectation of the factor
p_base = np.ones(z_t.shape[0]) / z_t.shape[0]
mu_base = z_t @ p_base

# view constraints: inequality on the second moment, equality on the mean
z_ineq = -np.atleast_2d(z_t**2)
mu_view_ineq = -np.atleast_1d(sig_view**2 + mu_base**2)
z_eq = np.atleast_2d(z_t)
mu_view_eq = np.atleast_1d(mu_base)

# updated probabilities via minimum relative entropy
p_upd = min_rel_entropy_sp(p_base, z_ineq, mu_view_ineq, z_eq, mu_view_eq,
                           normalize=False)

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_lfm_executive_summary-implementation-step04): Fit linear state-space model

h_t = fit_state_space(z_t, k_=1, p=p_upd)[0]

# ## [Step 5](https://www.arpm.co/lab/redirect.php?permalink=s_lfm_executive_summary-implementation-step05): Fit logistic model with Lasso penalty

C = 1 / lambda_lasso
logistic_lasso = LogisticRegression(penalty='l1', C=C, class_weight='balanced',
                                    solver='liblinear', random_state=1,
n_cum_trans, tau_hl_credit)

# constraints on the credit transition matrix via minimum relative entropy
# monotonicity constraint (initialize; see the illustrative sketch after this cell)
a = {}
a[0] = np.diagflat(np.ones((1, c_)), 1) -\
    np.diagflat(np.ones((1, c_+1)), 0)
a[0] = a[0][:-1]
b = np.zeros((c_))
# probability constraint (each row sums to one)
a_eq = np.ones((1, c_ + 1))
b_eq = np.array([1])

# minimum relative entropy
p_credit = np.zeros((c_, c_ + 1))
for c in range(c_):
    p_credit[c, :] = min_rel_entropy_sp(p_credit_prior[c, :], a[c], b,
                                        a_eq, b_eq, False)
    # update monotonicity constraint
    a_temp = a.get(c).copy()
    a_temp[c, :] = -a_temp[c, :]
    a[c + 1] = a_temp.copy()

# default constraint: the default state is absorbing
default_constraint = np.append(np.zeros(c_), 1).reshape(1, -1)
p_credit = np.r_[p_credit, default_constraint]
# -

# ## [Step 6](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step02-implementation-step06): Save databases

# +
dates = dates[1:]
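# To make the monotonicity machinery in the credit transition step above
# concrete, the sketch below prints the initial constraint matrix a[0] for a
# small illustrative number of non-default ratings (c_ = 3 is an assumption
# for display only, not the value used in the checklist). Each row encodes
# p[k+1] - p[k] <= 0; flipping the sign of row c at iteration c effectively
# keeps each fitted row of the transition matrix peaked around its diagonal.
import numpy as np

c_demo = 3  # illustrative number of non-default ratings
a0 = np.diagflat(np.ones((1, c_demo)), 1) -\
    np.diagflat(np.ones((1, c_demo + 1)), 0)
a0 = a0[:-1]
print(a0)
# [[-1.  1.  0.  0.]
#  [ 0. -1.  1.  0.]
#  [ 0.  0. -1.  1.]]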
    return np.array([sk * sig**3 + 3 * mu * sig**2 + mu**3])


def mu_view_eq(mu, sig):
    return np.array([mu, mu**2 + sig**2])


z_ineq_base = -v(x)[:1]
mu_ineq_base = -mu_view_ineq(mu_x_base, sig_x_base, sk_x_base)
z_eq_base = v(x)[1:]
mu_view_eq_base = mu_view_eq(mu_x_base, sig_x_base)

p_base = min_rel_entropy_sp(p_base_unif, z_ineq_base, mu_ineq_base,
                            z_eq_base, mu_view_eq_base, normalize=False)
# -

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_entropy_view-implementation-step03): Compute updated probabilities

# +
# Generate parameters specifying constraints for updated distribution
z_ineq = v(x)[:1]
mu_ineq = mu_view_ineq(-mu_x_base, sig_x_base, -sk_x_base)
z_eq = v(x)[1:]
mu_view_eq = mu_view_eq(-mu_x_base, sig_x_base)
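# For intuition, min_rel_entropy_sp returns the probabilities that minimize the
# relative entropy sum_j p_j*log(p_j / p_pri_j) subject to the linear view
# constraints. The sketch below is an illustrative analogue built on scipy's
# SLSQP solver, not the ARPM implementation; the helper name, the numerical
# floor and the toy inputs are assumptions, and the inequality views are taken
# with the convention z_ineq @ p <= mu_ineq.
import numpy as np
from scipy.optimize import minimize


def min_rel_entropy_slsqp(p_pri, z_ineq, mu_ineq, z_eq, mu_eq):
    # objective: relative entropy of p with respect to the prior p_pri
    def obj(p):
        return np.sum(p * np.log(np.maximum(p, 1e-12) / p_pri))

    cons = [{'type': 'eq', 'fun': lambda p: z_eq @ p - mu_eq},
            {'type': 'eq', 'fun': lambda p: np.sum(p) - 1.0}]
    if z_ineq is not None:
        # scipy convention: 'ineq' means fun(p) >= 0
        cons.append({'type': 'ineq', 'fun': lambda p: mu_ineq - z_ineq @ p})
    res = minimize(obj, p_pri, bounds=[(1e-12, 1.0)] * p_pri.size,
                   constraints=cons, method='SLSQP')
    return res.x


# toy check: tilt a uniform distribution so that the mean of z becomes 0.3
z = np.linspace(-1.0, 1.0, 21)
p_pri = np.ones(21) / 21
p_post = min_rel_entropy_slsqp(p_pri, None, None,
                               np.atleast_2d(z), np.array([0.3]))
print(p_post @ z)  # approximately 0.3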
# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_fit_discrete_markov_chain-implementation-step02): Compute final transition matrix

# +
# probability constraint
a_eq = np.ones((1, c_+1))
b_eq = np.array([1])
# monotonicity constraint (initialize)
a_ineq = {}
a_ineq[0] = np.diagflat(np.ones((1, c_)), 1) -\
    np.diagflat(np.ones((1, c_+1)), 0)
a_ineq[0] = a_ineq[0][:-1]
b_ineq = np.zeros((c_))

# relative entropy minimization
p = np.zeros((c_, c_+1))
for c in range(c_):
    p[c, :] = min_rel_entropy_sp(p_[c, :], a_ineq[c], b_ineq, a_eq, b_eq,
                                 False)
    # update monotonicity constraint
    a_temp = a_ineq.get(c).copy()
    a_temp[c, :] = -a_temp[c, :]
    a_ineq[c+1] = a_temp.copy()

# default constraint
p = np.r_[p, np.array([np.r_[np.zeros(7), 1]])]
# -

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_fit_discrete_markov_chain-implementation-step03): Compute cdf

# +
f = np.cumsum(p[r-1, :])
# -
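# One typical use of the cumulative row f is inverse-transform simulation of
# the next rating. The sketch below illustrates this with a made-up transition
# row; the numbers and the uniform draw are illustrative, not the fitted matrix.
import numpy as np

p_row = np.array([0.02, 0.05, 0.85, 0.05, 0.02, 0.005, 0.003, 0.002])
f_row = np.cumsum(p_row)  # same operation as f = np.cumsum(p[r-1, :])

u = 0.95  # uniform scenario
next_rating = np.searchsorted(f_row, u)  # smallest index with f_row >= u
print(f_row, next_rating)  # next_rating is 3 for this toy row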
    z_i = z_eq[i]
    covariance[i] = np.cov(z_i)
    if np.linalg.matrix_rank(covariance[i]) > 1:
        effrank[i] = eff_rank(np.corrcoef(z_i))
# -

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_views_cond_exp-implementation-step03): Compute updated probabilities

# +
i_san_check = np.where(effrank > 1)[0]
p_upd_i = np.zeros((j_, j_))
entropy = np.zeros(j_)
for i in range(j_):
    if i in i_san_check:
        p_upd_i[i] = min_rel_entropy_sp(p_base, None, None, z_eq[i],
                                        mu_view_eq_c, normalize=False)
        entropy[i] = p_upd_i[i] @ np.log(p_upd_i[i] / p_base)

p_upd_san = p_upd_i[i_san_check]
p_upd_ihat = p_upd_san[np.argmin(entropy[i_san_check])]
p_upd = p_upd_ihat[np.argsort(np.argsort(v(x)))]
# -

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_views_cond_exp-implementation-step04): Compute additive/multiplicative confidence-weighted probabilities

p_c_add = c * p_upd + (1 - c) * p_base
p_c_mul = p_upd ** c * p_base ** (1 - c) /\
    np.sum(p_upd ** c * p_base ** (1 - c))
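# The confidence c in Step 4 interpolates between the base and the updated
# probabilities. A minimal sketch with toy vectors (the values and the four
# scenarios are illustrative) shows that both the additive and the
# multiplicative pooling recover p_base at c = 0 and p_upd at c = 1:
import numpy as np

p_base_demo = np.array([0.25, 0.25, 0.25, 0.25])
p_upd_demo = np.array([0.40, 0.30, 0.20, 0.10])

for c_demo in (0.0, 0.5, 1.0):
    add = c_demo * p_upd_demo + (1 - c_demo) * p_base_demo
    mul = p_upd_demo ** c_demo * p_base_demo ** (1 - c_demo)
    mul = mul / np.sum(mul)  # multiplicative pooling needs renormalization
    print(c_demo, add, mul)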