def nllf(p):
    a0, alpha_Base, alpha_Trt, alpha_BT, alpha_Age, alpha_V4, tau_b1, tau_b = p[:8]
    b = p[8:8+N*T].reshape(N, T)
    b1 = p[8+N*T:8+N*T+N]

    mu = exp(a0
             + alpha_Base * (log_Base4[:, None] - log_Base4_bar)
             + alpha_Trt * (Trt[:, None] - Trt_bar)   # treatment main effect (Trt = treatment indicator, Trt_bar its mean)
             + alpha_BT * (BT[:, None] - BT_bar)
             + alpha_Age * (log_Age[:, None] - log_Age_bar)
             + alpha_V4 * (V4[None, :] - V4_bar)
             + b1[:, None] + b)

    cost = 0
    cost += np.sum(dpois_llf(y, mu))
    cost += np.sum(dnorm_llf(b, 0.0, tau_b))
    cost += np.sum(dnorm_llf(b1, 0.0, tau_b1))
    cost += dnorm_llf(a0, 0, 1e-4)
    cost += dnorm_llf(alpha_Base, 0, 1e-4)
    cost += dnorm_llf(alpha_Trt, 0, 1e-4)
    cost += dnorm_llf(alpha_BT, 0, 1e-4)
    cost += dnorm_llf(alpha_Age, 0, 1e-4)
    cost += dnorm_llf(alpha_V4, 0, 1e-4)
    cost += dgamma_llf(tau_b1, 1e-3, 1e-3)
    cost += dgamma_llf(tau_b, 1e-3, 1e-3)
    return -cost
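# The log-likelihood helpers used throughout this listing (dnorm_llf, dpois_llf,
# dgamma_llf, ...) are assumed rather than shown. A minimal sketch under the BUGS
# conventions (Normal parameterized by precision tau, Gamma by shape/rate); the
# actual helpers may differ in detail.
import numpy as np
from scipy.special import gammaln

def dnorm_llf(x, mu, tau):
    # log Normal(x | mu, 1/tau), with tau a precision
    return 0.5*np.log(tau) - 0.5*np.log(2*np.pi) - 0.5*tau*(np.asarray(x) - mu)**2

def dpois_llf(x, mu):
    # log Poisson(x | mu)
    return np.asarray(x)*np.log(mu) - mu - gammaln(np.asarray(x) + 1)

def dgamma_llf(x, a, b):
    # log Gamma(x | shape a, rate b)
    return a*np.log(b) + (a - 1)*np.log(x) - b*x - gammaln(a)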
def nllf(p):
    beta_age, beta_sex = p[:2]
    beta_dis_ = np.empty(4)
    beta_dis_[0] = beta_dis[0]
    beta_dis_[1:] = p[2:5]
    alpha, r, tau = p[5:8]
    b = p[8:]

    mu = exp(alpha
             + beta_age * age
             + beta_sex * sex[:, None]
             + beta_dis_[disease[:, None] - 1]   # numpy is 0-origin
             + b[:, None])

    cost = 0
    cost += np.sum(dweib_C_llf(t, r, mu, lower=t_cen))
    cost += np.sum(dnorm_llf(b, 0.0, tau))
    cost += dnorm_llf(alpha, 0, 0.0001)
    cost += dnorm_llf(beta_age, 0, 0.0001)
    cost += dnorm_llf(beta_sex, 0, 0.0001)
    cost += np.sum(dnorm_llf(beta_dis_[1:], 0, 0.0001))
    cost += dgamma_llf(tau, 1e-3, 1e-3)
    cost += dgamma_llf(r, 1, 1e-3)
    return -cost
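# dweib_C_llf is not shown either. The sketch below assumes the BUGS Weibull
# parameterization, f(t) = r*mu*t**(r-1)*exp(-mu*t**r), and that lower > 0 marks a
# right-censored case known only to exceed `lower` (contributing log S(lower)),
# while lower == 0 marks an exactly observed failure time; the real helper's
# censoring convention may differ.
def dweib_C_llf(t, r, mu, lower=0):
    t, lower = np.asarray(t, float), np.asarray(lower, float)
    logpdf = np.log(r) + np.log(mu) + (r - 1)*np.log(t) - mu*t**r   # observed failure time
    logsurv = -mu*lower**r                                          # censored: log S(lower)
    return np.where(lower > 0, logsurv, logpdf)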
def nllf(p):
    theta, tau_with, tau_btw = p[:3]
    mu = p[3:]

    cost = 0
    cost += np.sum(dnorm_llf(mu, theta, tau_btw))
    cost += np.sum(dnorm_llf(y, mu[:, None], tau_with))
    cost += dgamma_llf(tau_with, 0.001, 0.001)
    cost += dgamma_llf(tau_btw, 0.001, 0.001)
    cost += dnorm_llf(theta, 0, 1e-6)
    return -cost
def nllf(p):
    beta, tau, dL0, b = p[0], p[1], p[2:2 + T], p[2 + T:]

    Y = step(obs_t[0:N, None] - t[None, 0:T] + eps)
    dN = Y * step(t[None, 1:T + 1] - obs_t[0:N, None] - eps) * fail[0:N, None]
    Idt = Y * exp(beta * Z[0:N, None] + b[pair[0:N, None] - 1]) * dL0[None, 0:T]

    cost = 0
    cost += np.sum(dpois_llf(dN, Idt))
    cost += np.sum(dgamma_llf(dL0, mu, c))
    cost += np.sum(dnorm_llf(b, 0.0, tau))
    cost += dgamma_llf(tau, 0.001, 0.001)
    cost += dnorm_llf(beta, 0.0, 0.000001)
    return -cost
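# step() mirrors the BUGS step function (1 where its argument is >= 0, else 0).
# In the counting-process construction above, Y[i, j] flags whether subject i is
# still at risk at the start of interval j and dN[i, j] whether it fails inside
# interval j. A minimal sketch:
def step(x):
    # indicator of x >= 0, returned as a float array so it can scale rates
    return (np.asarray(x) >= 0).astype(float)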
def nllf(p):
    alpha, beta = p[0:N], p[N:2 * N]
    alpha_c, alpha_tau, beta_c, beta_tau, tau_c = p[2 * N:2 * N + 5]

    mu = alpha[:, None] + beta[:, None] * (x[None, :] - xbar)

    cost = 0.
    cost += np.sum(dnorm_llf(Y, mu, tau_c))
    cost += np.sum(dnorm_llf(alpha, alpha_c, alpha_tau))
    cost += np.sum(dnorm_llf(beta, beta_c, beta_tau))
    cost += dgamma_llf(tau_c, 0.001, 0.001)
    cost += dnorm_llf(alpha_c, 0.0, 1e-6)
    cost += dgamma_llf(alpha_tau, 0.001, 0.001)
    cost += dnorm_llf(beta_c, 0.0, 1e-6)
    cost += dgamma_llf(beta_tau, 0.001, 0.001)
    return -cost
def nllf(p):
    mu, phi, pi, tau1, tau2 = p[:5]
    delta = p[5:]

    m = mu + sign_T * phi / 2 + sign_k[None, :] * pi / 2 + delta[:, None]

    cost = 0
    cost += np.sum(dnorm_llf(Y, m, tau1))
    cost += np.sum(dnorm_llf(delta, 0.0, tau2))
    cost += dgamma_llf(tau1, 0.001, 0.001)
    cost += dgamma_llf(tau2, 0.001, 0.001)
    cost += dnorm_llf(mu, 0, 1e-6)
    cost += dnorm_llf(phi, 0, 1e-6)
    cost += dnorm_llf(pi, 0, 1e-6)
    return -cost
def nllf(p):
    tau, alpha, beta = p[0], p[1:Nage + 1], p[Nage + 1:Nage + 1 + K]
    alpha = alpha.copy()   # copy so the constraint below does not write back into p
    alpha[0] = 0.          # alpha 0 is not a fitting parameter

    mu = exp(log(pyr) + alpha[age_index] + beta[year_index])

    # RW(2) smoothing prior on the year effects
    betamean = np.empty(beta.shape)
    betamean[0] = 2 * beta[1] - beta[2]
    betamean[1] = (2 * beta[0] + 4 * beta[2] - beta[3]) / 5
    betamean[2:K - 2] = (4 * beta[1:K - 3] + 4 * beta[3:K - 1] - beta[0:K - 4] - beta[4:K]) / 6
    betamean[-2] = (2 * beta[-1] + 4 * beta[-3] - beta[-4]) / 5
    betamean[-1] = 2 * beta[-2] - beta[-3]

    logRR = beta - beta[4]   # derived log relative risk (not used in the cost)

    Nneighs = np.empty(beta.shape, 'i')
    Nneighs[0] = 1
    Nneighs[1] = 5
    Nneighs[2:K - 2] = 6
    Nneighs[-2] = 5
    Nneighs[-1] = 1
    betaprec = Nneighs * tau

    tau_like = Nneighs * beta * (beta - betamean)
    d = 0.0001 + np.sum(tau_like) / 2
    r = 0.0001 + K / 2

    cost = 0
    cost += np.sum(dpois_llf(cases, mu))
    cost += np.sum(dnorm_llf(beta, betamean, betaprec))
    cost += np.sum(dnorm_llf(alpha[1:], 0, 1e-6))
    cost += dgamma_llf(tau, r, d)
    return -cost
def nllf(p):
    theta = p[:K*3].reshape(K, 3)
    mu, tau = p[K*3:K*3+3], p[K*3+3:K*3+3+3]
    tauC = p[K*3+3+3]

    phi0, phi1, phi2 = exp(theta[:, 0]), expm1(theta[:, 1]), -exp(theta[:, 2])
    eta = phi0[:, None] / (1 + phi1[:, None] * exp(phi2[:, None] * x[None, :]))

    cost = 0
    cost += np.sum(dnorm_llf(Y, eta, tauC))
    cost += np.sum(dnorm_llf(theta, mu[None, :], tau[None, :]))
    cost += dgamma_llf(tauC, 0.001, 0.001)
    cost += np.sum(dnorm_llf(mu, 0, 1e-4))
    cost += np.sum(dgamma_llf(tau, 1e-3, 1e-3))
    return -cost
def nllf(p):
    if PRIOR > 0:
        sigma = np.array([sigma_prior[PRIOR-1](p[0])])
        mu = np.array([p[1]])
    else:
        #inv_tau_sqrd_1, tau_sqrd_2, tau_3, B0, D0, tau_sqrd_6 = p[:6]
        sigma = np.array([f(v) for f, v in zip(sigma_prior, p[:6])])
        mu = p[6:12]
    tau = 1/np.sqrt(sigma)

    cost = 0
    for _ in range(MARGINALIZATION_COUNT):
        theta = np.random.normal(mu[:, None], sigma[:, None], size=(Nprior, Nstudy))
        pc = np.random.rand(Nprior, Nstudy)
        pt = ilogit(theta + logit(pc))
        cost += np.sum(dnorm_llf(theta, mu[:, None], tau[:, None]))
        cost += np.sum(dbin_llf(rt[None, :], pt, nt[None, :]))
        cost += np.sum(dbin_llf(rc[None, :], pc, nc[None, :]))
    cost /= MARGINALIZATION_COUNT

    #cost += np.sum(dunif_llf(mu, -10, 10))
    # Prior 1: Gamma(0.001, 0.001) on inv.tau.sqrd
    if PRIOR == 1:
        cost += dgamma_llf(p[0], 0.001, 0.001)
    elif PRIOR == 0:
        cost += dgamma_llf(p[0], 0.001, 0.001)
    # Prior 2: Uniform(0, 50) on tau.sqrd
    #cost += dunif_llf(tau_sqrd_2, 0, 50)
    # Prior 3: Uniform(0, 50) on tau
    #cost += dunif_llf(tau_3, 0, 50)
    # Prior 4: Uniform shrinkage on tau.sqrd
    #cost += dunif_llf(B0, 0, 1)
    # Prior 5: Dumouchel on tau
    #cost += dunif_llf(D0, 0, 1)
    # Prior 6: Half-Normal on tau.sqrd
    if PRIOR == 6:
        cost += dnorm_llf(p[0], 0, prior_6_p0)
    elif PRIOR == 0:
        cost += dnorm_llf(p[5], 0, prior_6_p0)
    return -cost
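# ilogit/logit and the binomial helper are likewise assumed. A sketch using the
# standard definitions (dbin_llf is the full binomial log-pmf):
import numpy as np
from scipy.special import expit, logit as _logit, gammaln

def ilogit(x):
    # inverse logit: 1 / (1 + exp(-x))
    return expit(x)

def logit(p):
    return _logit(p)

def dbin_llf(r, p, n):
    # log Binomial(r | n, p)
    return (gammaln(n + 1) - gammaln(r + 1) - gammaln(n - r + 1)
            + r*np.log(p) + (n - r)*np.log1p(-p))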
def nllf(p):
    alpha, beta, tau, gamma = p
    mu = alpha - beta * gamma**x

    cost = 0
    cost += np.sum(dnorm_llf(Y, mu, tau))
    cost += dgamma_llf(tau, 0.001, 0.001)
    return -cost
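# Each nllf in this listing is a negative log-posterior, so a point estimate can
# be obtained by handing it to a general-purpose minimizer. A sketch for the
# four-parameter model directly above (p = [alpha, beta, tau, gamma]); the
# starting point and bounds are illustrative assumptions, not recommended
# settings:
import numpy as np
from scipy.optimize import minimize

p0 = np.array([1.0, 1.0, 1.0, 0.9])      # alpha, beta, tau, gamma
result = minimize(nllf, p0, method="L-BFGS-B",
                  bounds=[(None, None), (None, None), (1e-6, None), (0.5, 1.0)])
print(result.x, -result.fun)             # parameter estimate and log-posterior at the optimum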
def nllf(p):
    beta, dL0 = p[0], p[1:]

    Y = step(obs_t[:N, None] - t[None, :T] + eps)
    dN = Y * step(t[None, 1:] - obs_t[:, None] - eps) * fail[:, None]
    Idt = Y * exp(beta * Z[:, None]) * dL0[None, :]

    cost = 0
    cost += np.sum(dpois_llf(dN, Idt))
    cost += np.sum(dgamma_llf(dL0, mu, c))
    cost += dnorm_llf(beta, 0.0, 0.000001)
    return -cost
def nllf(p):
    b, tau, mu = p[:-2], p[-2], p[-1]
    prob = ilogit(b)

    cost = 0
    cost += np.sum(dnorm_llf(b, mu, tau))
    cost += np.sum(dbin_llf(r, prob, n))
    cost += dnorm_llf(mu, 0.0, 1e-6)
    cost += dgamma_llf(tau, 0.001, 0.001)
    return -cost
def nllf(p):
    alpha0, alpha1, alpha2, alpha12, tau = p[:5]
    b = p[5:]
    prob = ilogit(alpha0 + alpha1 * x1 + alpha2 * x2 + alpha12 * x1 * x2 + b)

    cost = 0
    cost += np.sum(dbin_llf(r, prob, n))
    cost += np.sum(dnorm_llf(b, 0., tau))
    cost += dnorm_llf(alpha0, 0.0, 1e-6)
    cost += dnorm_llf(alpha1, 0.0, 1e-6)
    cost += dnorm_llf(alpha2, 0.0, 1e-6)
    cost += dnorm_llf(alpha12, 0.0, 1e-6)
    cost += dgamma_llf(tau, 0.001, 0.001)
    return -cost
def nllf(p):
    alpha, beta, gamma, tau = p[:4]
    lambda_ = p[4:].reshape(doses, plates)

    mu = exp(alpha + beta * log(x[:, None] + 10) + gamma * x[:, None] + lambda_)

    cost = 0
    cost += np.sum(dpois_llf(y, mu))
    cost += np.sum(dnorm_llf(lambda_, 0.0, tau))
    cost += dnorm_llf(alpha, 0.0, 1e-6)
    cost += dnorm_llf(beta, 0.0, 1e-6)
    cost += dnorm_llf(gamma, 0.0, 1e-6)
    cost += dgamma_llf(tau, 0.001, 0.001)
    return -cost
def nllf(pars):
    d, delta_new, tau = pars[0:3]
    mu, delta = pars[3:3+Num], pars[3+Num:3+2*Num]

    pc = ilogit(mu)
    pt = ilogit(mu + delta)

    cost = 0
    cost += np.sum(dbin_llf(rc, pc, nc))
    cost += np.sum(dbin_llf(rt, pt, nt))
    cost += np.sum(dnorm_llf(mu, 0, 1e-5))
    cost += np.sum(dt_llf(delta, d, tau, 4))
    cost += dnorm_llf(d, 0, 1e-6)
    cost += dgamma_llf(tau, 0.001, 0.001)
    cost += dt_llf(delta_new, d, tau, 4)
    return -cost
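# dt_llf(x, mu, tau, k) is taken here to be the BUGS-style non-standardized
# Student-t log-density with location mu, precision tau and k degrees of
# freedom; a sketch:
import numpy as np
from scipy.special import gammaln

def dt_llf(x, mu, tau, k):
    # log t_k(x | mu, tau), with tau acting as a precision (inverse squared scale)
    z2 = tau*(np.asarray(x) - mu)**2
    return (gammaln((k + 1)/2) - gammaln(k/2)
            + 0.5*np.log(tau/(k*np.pi)) - (k + 1)/2*np.log1p(z2/k))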
def nllf(p):
    lambda1, P1, theta, tau = p[:4]
    T[1:-1] = p[4:4 + N - 2]

    lambda2 = lambda1 + theta
    P2 = 1 - P1  # P must sum to 1 for dirichlet
    P = np.array([P1, P2])
    T_int = np.asarray(np.floor(T), 'i')
    mu = np.array([lambda1, lambda2])[T_int - 1]

    cost = 0
    cost += np.sum(dnorm_llf(y, mu, tau))
    cost += np.sum(dcat_llf(T_int, P))
    cost += ddirich_llf(P, alpha)
    cost += dunif_llf(theta, 0, 1000)
    cost += dnorm_llf(lambda1, 0.0, 1e-6)   # vague prior on lambda1, as in the BUGS Eyes example
    cost += dgamma_llf(tau, 0.001, 0.001)
    return -cost
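# dcat_llf and ddirich_llf sketches, assuming 1-based category codes (BUGS
# convention) with the category probabilities along the last axis of p:
import numpy as np
from scipy.special import gammaln

def dcat_llf(x, p):
    # log P(category x), x in 1..K, with p[..., k-1] = P(category k)
    x = np.asarray(x, dtype=int)
    p = np.broadcast_to(np.asarray(p), x.shape + np.shape(p)[-1:])
    return np.log(np.take_along_axis(p, x[..., None] - 1, axis=-1)[..., 0])

def ddirich_llf(p, alpha):
    # log Dirichlet(p | alpha)
    alpha = np.asarray(alpha, float)
    return (gammaln(alpha.sum()) - gammaln(alpha).sum()
            + np.sum((alpha - 1)*np.log(p)))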
def nllf(p):
    theta = p[:K * 3].reshape(K, 3)
    mu, tau_L = p[K * 3:K * 3 + 3], p[K * 3 + 3:K * 3 + 3 + n_tau_L]
    tauC = p[K * 3 + 3 + n_tau_L]
    tau = expand_pd(tau_L, 3)

    phi0, phi1, phi2 = exp(theta[:, 0]), expm1(theta[:, 1]), -exp(theta[:, 2])
    eta = phi0[:, None] / (1 + phi1[:, None] * exp(phi2[:, None] * x[None, :]))

    cost = 0
    cost += np.sum(dnorm_llf(Y, eta, tauC))
    cost += sum(dmnorm_llf(theta_k, mu, tau) for theta_k in theta)
    cost += dgamma_llf(tauC, 0.001, 0.001)
    cost += dmnorm_llf(mu, mean, prec)
    cost += dwish_llf(tau, R, 3)
    return -cost
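# dmnorm_llf and dwish_llf are assumed to follow the BUGS precision-matrix
# conventions (expand_pd, which is assumed to expand tau_L into a 3x3
# positive-definite precision matrix, is not sketched). dwish_llf is written
# only up to an additive constant, which is harmless here since R and the
# degrees of freedom are fixed:
import numpy as np

def dmnorm_llf(x, mu, T):
    # log MVN(x | mu, T**-1) with precision matrix T
    d = np.asarray(x, float) - mu
    _, logdet = np.linalg.slogdet(T)
    return 0.5*logdet - 0.5*len(d)*np.log(2*np.pi) - 0.5*(d @ T @ d)

def dwish_llf(X, R, k):
    # log Wishart(X | R, k) kernel: ((k - p - 1)/2) log|X| - tr(R X)/2 + const
    p_dim = np.shape(X)[0]
    _, logdet = np.linalg.slogdet(X)
    return 0.5*(k - p_dim - 1)*logdet - 0.5*np.trace(np.asarray(R) @ X)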
def nllf(p):
    beta, pi, kappa, tau = p[:4]
    a = p[4:7]
    b = p[7:]   # placeholder; replaced by the Monte Carlo draws in the loop below

    ## quick rejection of unordered a points
    if not (-1000 <= a[0] <= a[1] <= a[2] <= 1000):
        return inf
    if tau <= 0:
        return inf
    sigma = 1 / sqrt(tau)

    mu = beta * treat/2 + pi * period/2 + kappa * carry
    prob = np.empty((N, T, Ncut+1))

    ## Marginalize over random effects (b[N] ~ N(0, tau))
    cost = 0
    for _ in range(MARGINALIZATION_COUNT):
        b = np.random.normal(0.0, sigma, size=N)
        #cost += np.sum(dnorm_llf(b, 0, tau))
        Q = ilogit(-(a[None, None, :] + mu[group, :, None] + b[:, None, None]))
        prob[:, :, 0] = 1 - Q[:, :, 0]
        for j in range(1, Ncut):
            prob[:, :, j] = Q[:, :, j-1] - Q[:, :, j]
        prob[:, :, -1] = Q[:, :, -1]
        cost += np.sum(dcat_llf(response, prob))
    cost /= MARGINALIZATION_COUNT

    cost += dnorm_llf(beta, 0, 1e-6)
    cost += dnorm_llf(pi, 0, 1e-6)
    cost += dnorm_llf(kappa, 0, 1e-6)
    cost += dgamma_llf(tau, 0.001, 0.001)
    ## ordered cut points for underlying continuous latent variable
    #cost += dflat_llf(a[0]) if -1000 <= a[0] <= a[1] else -inf
    #cost += dflat_llf(a[1]) if a[0] <= a[1] <= a[2] else -inf
    #cost += dflat_llf(a[2]) if a[1] <= a[2] <= 1000 else -inf
    ## PAK: model looks over-parameterized: anchor a[1]
    #cost += dnorm_llf(a[0], 0.707, 1/0.1364**2)
    return -cost
def nllf(p):
    alpha, beta1, beta2, tau = p[:4]
    mu, b = p[4:4 + K], p[4 + K:]
    # median values from fit
    #alpha, beta1, beta2, tau = 0.5793, -0.0457, 0.007004, 1/0.08059**2

    logPsi = alpha + beta1 * year + beta2 * (year * year - 22) + b
    p0 = ilogit(mu)
    p1 = ilogit(mu + logPsi)

    cost = 0
    cost += np.sum(dbin_llf(r0, p0, n0))
    cost += np.sum(dbin_llf(r1, p1, n1))
    cost += np.sum(dnorm_llf(b, 0, tau))
    cost += np.sum(dnorm_llf(mu, 0, 1e-6))
    cost += dnorm_llf(alpha, 0, 1e-6)
    cost += dnorm_llf(beta1, 0, 1e-6)
    cost += dnorm_llf(beta2, 0, 1e-6)
    cost += dgamma_llf(tau, 0.001, 0.001)
    return -cost