def nllf(pars):
    d, delta_new, tau = pars[0:3]
    mu, delta = pars[3:3+Num], pars[3+Num:3+2*Num]
    pc = ilogit(mu)           # control-group probabilities
    pt = ilogit(mu + delta)   # treatment-group probabilities
    cost = 0
    cost += np.sum(dbin_llf(rc, pc, nc))
    cost += np.sum(dbin_llf(rt, pt, nt))
    cost += np.sum(dnorm_llf(mu, 0, 1e-5))
    cost += np.sum(dt_llf(delta, d, tau, 4))   # t(4) random study effects
    cost += dnorm_llf(d, 0, 1e-6)
    cost += dgamma_llf(tau, 0.001, 0.001)
    cost += dt_llf(delta_new, d, tau, 4)       # predicted effect for a new study
    return -cost
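# ---------------------------------------------------------------------------
# The helpers used throughout (dbin_llf, dnorm_llf, dt_llf, dgamma_llf,
# dbern_llf, dcat_llf, dflat_llf, ilogit, logit) are assumed, not shown in
# this listing.  A minimal sketch of plausible definitions follows, using the
# BUGS parameterizations: dnorm takes a precision tau = 1/sigma**2, dgamma
# takes shape and rate, dt is the scaled Student-t with precision tau, and
# dcat categories are numbered from 1.  Treat these as illustrative stand-ins,
# not the definitions from the original source.
import numpy as np
from numpy import exp, sqrt, inf        # bare names used by the models below
from scipy.special import expit, gammaln, logit
from scipy.stats import t as student_t

def ilogit(x):
    """Inverse logit (expit): 1/(1 + exp(-x))."""
    return expit(x)

def dnorm_llf(x, mu, tau):
    """log N(x | mu, 1/tau) with tau the precision."""
    return 0.5*np.log(tau/(2*np.pi)) - 0.5*tau*(np.asarray(x) - mu)**2

def dbin_llf(r, p, n):
    """log Binomial(r | n, p)."""
    return (gammaln(n + 1) - gammaln(r + 1) - gammaln(n - r + 1)
            + r*np.log(p) + (n - r)*np.log1p(-p))

def dbern_llf(x, p):
    """log Bernoulli(x | p)."""
    return x*np.log(p) + (1 - x)*np.log1p(-p)

def dgamma_llf(x, a, b):
    """log Gamma(x | shape=a, rate=b)."""
    return a*np.log(b) - gammaln(a) + (a - 1)*np.log(x) - b*x

def dt_llf(x, mu, tau, df):
    """log scaled Student-t: (x - mu)*sqrt(tau) ~ t(df)."""
    return student_t.logpdf((np.asarray(x) - mu)*np.sqrt(tau), df) + 0.5*np.log(tau)

def dcat_llf(x, p):
    """log Categorical(x | p), with categories numbered from 1."""
    idx = np.asarray(x, 'i') - 1
    return np.log(np.take_along_axis(np.asarray(p), idx[..., None], axis=-1).squeeze(-1))

def dflat_llf(x):
    """Improper flat prior: contributes zero to the log posterior."""
    return 0.0
# ---------------------------------------------------------------------------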
def nllf(p):
    delta, alpha, state = p[0], p[1], p[2:]
    beta = exp(alpha)       # unused here; reported by post()
    theta = ilogit(delta)
    P = np.array([ilogit(alpha), 0])
    state = np.asarray(np.floor(state), 'i')
    #state1 = state + 1  # zero-indexing in numpy
    #prop = P[state]  # unused
    cost = 0
    cost += np.sum(dbin_llf(y, P[state], t))
    cost += np.sum(dbern_llf(state, theta))
    cost += dnorm_llf(alpha, 0, 1e-4)
    cost += dnorm_llf(delta, 0, 1e-4)
    return -cost
def nllf(p):
    alpha, beta, theta = p[:T], p[T], p[T + 1:]
    # item-response probabilities; renamed from p to avoid shadowing the
    # parameter vector
    prob = ilogit(beta * theta[:, None] - alpha[None, :])
    cost = 0
    cost += np.sum(dbern_llf(r, prob))
    cost += np.sum(dnorm_llf(theta, 0.0, 1.0))
    cost += np.sum(dnorm_llf(alpha, 0, 0.0001))
    cost += dflat_llf(beta)
    return -cost
def nllf(p):
    b, tau, mu = p[:-2], p[-2], p[-1]
    prob = ilogit(b)   # per-group event probabilities
    cost = 0
    cost += np.sum(dnorm_llf(b, mu, tau))
    cost += np.sum(dbin_llf(r, prob, n))
    cost += dnorm_llf(mu, 0.0, 1e-6)
    cost += dgamma_llf(tau, 0.001, 0.001)
    return -cost
def nllf(p):
    alpha, beta1, beta2, tau = p[:4]
    mu, b = p[4:4 + K], p[4 + K:]
    # median values from fit
    #alpha, beta1, beta2, tau = 0.5793, -0.0457, 0.007004, 1/0.08059**2
    logPsi = alpha + beta1 * year + beta2 * (year * year - 22) + b
    p0 = ilogit(mu)            # control probability
    p1 = ilogit(mu + logPsi)   # exposed probability
    cost = 0
    cost += np.sum(dbin_llf(r0, p0, n0))
    cost += np.sum(dbin_llf(r1, p1, n1))
    cost += np.sum(dnorm_llf(b, 0, tau))
    cost += np.sum(dnorm_llf(mu, 0, 1e-6))
    cost += dnorm_llf(alpha, 0, 1e-6)
    cost += dnorm_llf(beta1, 0, 1e-6)
    cost += dnorm_llf(beta2, 0, 1e-6)
    cost += dgamma_llf(tau, 0.001, 0.001)
    return -cost
def nllf(p):
    beta0C, beta, q = p[0], p[1], p[6]
    phi = p[2:6].reshape(2, 2)   # misclassification probabilities phi[x, d]
    x[x_index] = p[7:]           # impute the missing exposures
    prob = ilogit(beta0C + beta*np.floor(x))
    x_int, d_int = [np.asarray(np.floor(v), 'i') for v in (x, d)]
    cost = 0
    cost += np.sum(dbern_llf(x_int, q))   # exposure incidence: x ~ Bern(q)
    cost += np.sum(dbern_llf(d, prob))    # disease given exposure
    cost += np.sum(dbern_llf(w, phi[x_int, d_int]))
    cost += dnorm_llf(beta0C, 0, 0.00001)
    cost += dnorm_llf(beta, 0, 0.00001)
    return -cost
def nllf(p):
    alpha0, alpha1, alpha2, alpha12, tau = p[:5]
    b = p[5:]
    prob = ilogit(alpha0 + alpha1 * x1 + alpha2 * x2 + alpha12 * x1 * x2 + b)
    cost = 0
    cost += np.sum(dbin_llf(r, prob, n))
    cost += np.sum(dnorm_llf(b, 0., tau))
    cost += dnorm_llf(alpha0, 0.0, 1e-6)
    cost += dnorm_llf(alpha1, 0.0, 1e-6)
    cost += dnorm_llf(alpha2, 0.0, 1e-6)
    cost += dnorm_llf(alpha12, 0.0, 1e-6)
    cost += dgamma_llf(tau, 0.001, 0.001)
    return -cost
def nllf(p):
    if PRIOR > 0:
        # single-prior fit: one variance parameter and one pooled mean
        sigma = np.array([sigma_prior[PRIOR-1](p[0])])
        mu = np.array([p[1]])
    else:
        # simultaneous fit of all six priors
        #inv_tau_sqrd_1, tau_sqrd_2, tau_3, B0, D0, tau_sqrd_6 = p[:6]
        sigma = np.array([f(v) for f, v in zip(sigma_prior, p[:6])])
        mu = p[6:12]
    tau = 1/np.sqrt(sigma)
    cost = 0
    # Marginalize over the per-study effects theta and control rates pc
    for _ in range(MARGINALIZATION_COUNT):
        theta = np.random.normal(mu[:, None], sigma[:, None], size=(Nprior, Nstudy))
        pc = np.random.rand(Nprior, Nstudy)
        pt = ilogit(theta + logit(pc))
        cost += np.sum(dnorm_llf(theta, mu[:, None], tau[:, None]))
        cost += np.sum(dbin_llf(rt[None, :], pt, nt[None, :]))
        cost += np.sum(dbin_llf(rc[None, :], pc, nc[None, :]))
    cost /= MARGINALIZATION_COUNT
    #cost += np.sum(dunif_llf(mu, -10, 10))
    # Prior 1: Gamma(0.001, 0.001) on inv.tau.sqrd
    if PRIOR in (0, 1):
        cost += dgamma_llf(p[0], 0.001, 0.001)
    # Prior 2: Uniform(0, 50) on tau.sqrd
    #cost += dunif_llf(tau_sqrd_2, 0, 50)
    # Prior 3: Uniform(0, 50) on tau
    #cost += dunif_llf(tau_3, 0, 50)
    # Prior 4: Uniform shrinkage on tau.sqrd
    #cost += dunif_llf(B0, 0, 1)
    # Prior 5: Dumouchel on tau
    #cost += dunif_llf(D0, 0, 1)
    # Prior 6: Half-Normal on tau.sqrd
    if PRIOR == 6:
        cost += dnorm_llf(p[0], 0, prior_6_p0)
    elif PRIOR == 0:
        cost += dnorm_llf(p[5], 0, prior_6_p0)
    return -cost
def nllf(p):
    beta, pi, kappa, tau = p[:4]
    a = p[4:7]
    b = p[7:]   # unused: the random effects are sampled below instead
    ## quick rejection of unordered a points
    if not (-1000 <= a[0] <= a[1] <= a[2] <= 1000):
        return inf
    if tau <= 0:
        return inf
    sigma = 1 / sqrt(tau)
    mu = beta * treat/2 + pi * period/2 + kappa * carry
    prob = np.empty((N, T, Ncut+1))
    ## Marginalize over random effects (b[N] ~ N(0, tau))
    cost = 0
    for _ in range(MARGINALIZATION_COUNT):
        b = np.random.normal(0.0, sigma, size=N)
        #cost += np.sum(dnorm_llf(b, 0, tau))
        Q = ilogit(-(a[None, None, :] + mu[group, :, None] + b[:, None, None]))
        prob[:, :, 0] = 1 - Q[:, :, 0]
        for j in range(1, Ncut):
            prob[:, :, j] = Q[:, :, j-1] - Q[:, :, j]
        prob[:, :, -1] = Q[:, :, -1]
        cost += np.sum(dcat_llf(response, prob))
    cost /= MARGINALIZATION_COUNT
    cost += dnorm_llf(beta, 0, 1e-6)
    cost += dnorm_llf(pi, 0, 1e-6)
    cost += dnorm_llf(kappa, 0, 1e-6)
    cost += dgamma_llf(tau, 0.001, 0.001)
    ## ordered cut points for underlying continuous latent variable
    #cost += dflat_llf(a[0]) if -1000 <= a[0] <= a[1] else -inf
    #cost += dflat_llf(a[1]) if a[0] <= a[1] <= a[2] else -inf
    #cost += dflat_llf(a[2]) if a[1] <= a[2] <= 1000 else -inf
    ## PAK: model looks over-parameterized: anchor a[1]
    #cost += dnorm_llf(a[0], 0.707, 1/0.1364**2)
    return -cost
def bones(pars):
    theta = pars[:13]
    # round the fitted values for the missing grades to whole categories
    grade[missing_grade_index] = np.floor(pars[13:] + 0.5)
    p = np.empty((nChild, nInd, 5))  # max(ncat) is 5
    Q = ilogit(delta[None, :, None] * (theta[:, None, None] - gamma[None, :, :]))
    p[:, :, 1:-1] = Q[:, :, :-1] - Q[:, :, 1:]
    p[:, :, 0] = 1 - Q[:, :, 0]
    # top category for each indicator; paired indices so that indicator j
    # only sets its own column ncat[j]-1
    idx = np.arange(nInd)
    p[:, idx, ncat - 1] = Q[:, idx, ncat - 2]
    cost = 0
    cost += np.sum(dnorm_llf(theta, 0.0, 0.001))
    for i in range(nChild):
        for j in range(nInd):
            if 1 <= grade[i, j] <= ncat[j]:
                cost += dcat_llf(grade[i, j], p[i, j, :ncat[j]])
            else:
                return inf   # invalid grade: reject the point
    return -cost
def post(p):
    delta, alpha, state = p[0], p[1], p[2:]   # state is not needed here
    beta = exp(alpha)
    theta = ilogit(delta)
    return beta, theta
def post(p):
    b, tau, mu = p[:-2], p[-2], p[-1]
    prob = ilogit(b)          # per-group probabilities
    pop_mean = ilogit(mu)     # population mean probability
    sigma = 1.0 / sqrt(tau)   # population standard deviation
    # stacks correctly when p is a column-per-sample parameter matrix
    return np.vstack((prob, pop_mean, sigma))
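# ---------------------------------------------------------------------------
# Each nllf above returns a negative log posterior, so any general-purpose
# minimizer or MCMC sampler can drive it.  A minimal sketch using
# scipy.optimize.minimize against the b/tau/mu random-effects model (the one
# paired with the post() above); the starting point x0 and the bounds are
# hypothetical choices, not values from the source.
from scipy.optimize import minimize

x0 = np.concatenate([np.zeros(len(n)),   # one logit b per group
                     [1.0],              # precision tau
                     [0.0]])             # population mean mu
bounds = [(-10, 10)]*len(n) + [(1e-6, None)] + [(-10, 10)]  # keep tau > 0
result = minimize(nllf, x0, method="L-BFGS-B", bounds=bounds)
print("MAP estimate:", result.x)
print("log posterior:", -result.fun)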