def setup_class(cls):
    endog_bin = (endog > endog.mean()).astype(int)
    cls.cov_type = 'cluster'

    # GLM with Gaussian family and normal-CDF link, cluster-robust covariance
    mod1 = GLM(endog_bin, exog,
               family=families.Gaussian(link=links.CDFLink()))
    cls.res1 = mod1.fit(cov_type='cluster', cov_kwds=dict(groups=group))

    # reference: discrete Probit with the same cluster-robust covariance
    mod1 = smd.Probit(endog_bin, exog)
    cls.res2 = mod1.fit(cov_type='cluster', cov_kwds=dict(groups=group))

def setup_class(cls):
    endog_bin = (endog > endog.mean()).astype(int)
    cls.cov_type = 'cluster'

    # GLM Binomial with probit link fit by Newton, cluster-robust covariance
    mod1 = GLM(endog_bin, exog,
               family=families.Binomial(link=links.probit()))
    cls.res1 = mod1.fit(method='newton', cov_type='cluster',
                        cov_kwds=dict(groups=group))

    # reference: discrete Probit with the same cluster-robust covariance
    mod1 = smd.Probit(endog_bin, exog)
    cls.res2 = mod1.fit(cov_type='cluster', cov_kwds=dict(groups=group))

    cls.rtol = 1e-6

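# A minimal sketch of the comparison these fixtures support: for the
# Binomial/probit setup above, the GLM fit and the discrete Probit fit
# should agree in both coefficients and cluster-robust standard errors.
# The method name and the use of numpy's assert_allclose are illustrative
# assumptions, not the project's actual test mixin.
from numpy.testing import assert_allclose

def test_params_and_bse(self):
    # compare point estimates and cluster-robust standard errors of the
    # GLM probit fit (res1) against the discrete Probit fit (res2)
    rtol = getattr(self, 'rtol', 1e-6)
    assert_allclose(self.res1.params, self.res2.params, rtol=rtol)
    assert_allclose(self.res1.bse, self.res2.bse, rtol=rtol)
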
import numpy as np
import statsmodels.api as sm
from scipy.stats import multivariate_normal


def compute_marg_likelihood_and_NSE(X, y, iters, init, hypers):
    '''
    Compute the marginal likelihood from the Gibbs sampler output
    following Chib (1995).

    X (ndarray): exogenous variables
    y (array-like): endogenous variable
    iters (int): length of the MCMC chain
    init (dict): initialisation parameters
    hypers (array-like): hyper-parameters

    returns (tuple): the log marginal likelihood (normalizing constant)
    and its numerical standard error
    '''
    # Initialisation: prior mean and covariance of beta
    a, A = init['a'], init['A']
    beta, beta_z, B = GibbsSampler(X, y, iters, init, hypers)
    beta_star = np.array(beta).mean(axis=0)
    beta_z = np.array(beta_z)

    # Marginal likelihood computation, Chib (1995), p. 7, right column
    # First term: probit log-likelihood evaluated at beta_star
    log_like = sm.Probit(endog=y, exog=X).loglike(params=beta_star)

    # Second term: log prior density at beta_star
    prior = multivariate_normal.logpdf(x=beta_star, mean=a, cov=A)

    # Third term: posterior ordinate at beta_star, estimated by averaging
    # the conditional densities over the Gibbs draws
    conditional_densities = np.array([
        multivariate_normal.pdf(x=beta_star, mean=beta_z[i], cov=B)
        for i in range(iters)
    ])
    posterior = np.log(conditional_densities.mean())
    # pdf returns a large number...; the list comprehension could be improved

    # Log marginal likelihood
    log_marg_likelihood = log_like + prior - posterior

    # Numerical standard error of the posterior ordinate estimate
    NSE = np.sqrt(compute_var_h(conditional_densities, q=10)
                  / (conditional_densities.mean() ** 2))

    return log_marg_likelihood, NSE
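
# Minimal usage sketch for compute_marg_likelihood_and_NSE: it simulates a
# small probit data set and calls the function. GibbsSampler and
# compute_var_h are assumed to be defined elsewhere in this module, the
# prior settings in `init` (keys 'a' and 'A') follow the function body, and
# the contents of `hypers` are a placeholder since its expected structure is
# not shown here.
import numpy as np

rng = np.random.default_rng(0)
n, k = 500, 3
X = np.column_stack([np.ones(n), rng.normal(size=(n, k - 1))])
beta_true = np.array([0.5, -1.0, 0.8])
y = (X @ beta_true + rng.normal(size=n) > 0).astype(int)  # probit DGP

init = {'a': np.zeros(k), 'A': 10.0 * np.eye(k)}  # prior mean and covariance
hypers = {}  # placeholder; depends on what GibbsSampler expects

log_ml, nse = compute_marg_likelihood_and_NSE(X, y, iters=5000,
                                               init=init, hypers=hypers)
print(f"log marginal likelihood: {log_ml:.3f} (NSE: {nse:.5f})")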