def EIC(mean, std, best):
    # Constrained expected improvement: standard EI on the objective
    # (output row 0), weighted by the probability that each constraint
    # output (rows 1:) is non-negative. `np` is assumed to be jax.numpy
    # and `norm` jax.scipy.stats.norm, as in the EI snippet below.
    delta = -(mean[0, :] - best)
    deltap = np.maximum(delta, 0.)  # improvement is non-negative
    Z = delta / std[0, :]
    EI = deltap - np.abs(deltap) * norm.cdf(-Z) + std[0, :] * norm.pdf(Z)
    constraints = np.prod(norm.cdf(mean[1:, :] / std[1:, :]), axis=0)
    return -EI[0] * constraints[0]  # negated so the acquisition can be minimized
def EI(mean, std, best):
    # Expected improvement, following
    # https://people.orie.cornell.edu/pfrazier/Presentations/2011.11.INFORMS.Tutorial.pdf
    delta = -(mean - best)
    deltap = np.maximum(delta, 0.)  # improvement is non-negative
    Z = delta / std
    EI = deltap - np.abs(deltap) * norm.cdf(-Z) + std * norm.pdf(Z)
    return -EI[0]  # negated so the acquisition can be minimized
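# A minimal usage sketch for the two acquisition functions above, assuming
# `np` is jax.numpy and `norm` is jax.scipy.stats.norm; the posterior
# mean/std arrays below are invented for illustration.
import jax.numpy as np
from jax.scipy.stats import norm

best = 0.1                           # incumbent (best observed objective value)
mean = np.array([[0.3, -0.2, 0.05],  # row 0: objective posterior mean
                 [1.5, 0.4, -0.1]])  # rows 1+: constraint posterior means
std = np.array([[0.2, 0.3, 0.1],
                [0.5, 0.5, 0.5]])

print(EI(mean[0, :], std[0, :], best))  # unconstrained EI on the objective row
print(EIC(mean, std, best))             # EI weighted by feasibility probability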
def actor_loss(actor_params, fixed_critic_params, env_dynamics, batch):
    # Risk-sensitive actor loss: the critic predicts the mean q_s and the
    # variance upsilon_s of the return, and the actor maximizes a Gaussian
    # closed-form CVaR, q_s - (phi(alpha) / Phi(alpha)) * sqrt(upsilon_s),
    # with the risk level alpha fed in as part of the batch.
    state, _, _, _, alpha = batch
    inputs = np.concatenate((state, alpha), 1)
    action = actor_forward(actor_params, inputs)
    inputs = np.concatenate((state, action, alpha), 1)
    q_s, upsilon_s = critic_forward(fixed_critic_params, inputs)
    cvar = q_s - (norm.pdf(alpha) / norm.cdf(alpha)) * np.sqrt(upsilon_s)
    return -cvar.mean()  # negated because optimizers minimize
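# A sanity check on the closed form above (a sketch, not from the original
# source): for X ~ N(mu, sigma^2), the lower-tail conditional expectation
# E[X | X <= mu + z*sigma] equals mu - sigma * phi(z) / Phi(z), which is the
# quantity q_s - (phi(alpha) / Phi(alpha)) * sqrt(upsilon_s) computes, with
# `alpha` playing the role of the z-score cutoff.
import numpy as onp
from scipy.stats import norm

mu, sigma, z = 1.0, 2.0, -0.5
samples = mu + sigma * onp.random.default_rng(0).standard_normal(1_000_000)
tail_mean = samples[samples <= mu + z * sigma].mean()
closed_form = mu - sigma * norm.pdf(z) / norm.cdf(z)
print(tail_mean, closed_form)  # the two agree to a few decimal places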
def normal_cdf(self, x, mu=0, sigma=1):
    """
    The cumulative distribution function for the Normal distribution

    Example:

        >>> import pyhf
        >>> pyhf.set_backend("jax")
        >>> pyhf.tensorlib.normal_cdf(0.8)
        DeviceArray(0.7881446, dtype=float64)
        >>> values = pyhf.tensorlib.astensor([0.8, 2.0])
        >>> pyhf.tensorlib.normal_cdf(values)
        DeviceArray([0.7881446 , 0.97724987], dtype=float64)

    Args:
        x (:obj:`tensor` or :obj:`float`): The observed value of the random variable to evaluate the CDF for
        mu (:obj:`tensor` or :obj:`float`): The mean of the Normal distribution
        sigma (:obj:`tensor` or :obj:`float`): The standard deviation of the Normal distribution

    Returns:
        JAX ndarray: The CDF

    """
    return norm.cdf(x, loc=mu, scale=sigma)
def cdf(self, value):
    return norm.cdf(value, loc=self._loc, scale=self._scale)
def LW_LCBC(mean, std, weights, kappa=2.0, threshold=3.0):
    # Likelihood-weighted lower confidence bound with constraints: an LCB on
    # the objective (row 0) whose exploration term is scaled by the input
    # weights, multiplied by the probability that each constraint output
    # (rows 1:) is non-negative.
    lcb = mean[0, :] - threshold - kappa * std[0, :] * weights
    constraints = np.prod(norm.cdf(mean[1:, :] / std[1:, :]), axis=0)
    return lcb[0] * constraints[0]
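# The `constraints` factor in EIC and LW_LCBC above is the posterior
# probability that each constraint model is non-negative: for
# g(x) ~ N(mu, sigma^2), P(g(x) >= 0) = Phi(mu / sigma). A quick standalone
# check with invented values:
from scipy.stats import norm

mu, sigma = 0.4, 0.5
print(norm.cdf(mu / sigma))          # P(g >= 0) ~= 0.788
print(1.0 - norm.cdf(0, mu, sigma))  # the same probability, written directly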
def logredshiftprior(x, a, b):
    # Log-density of a Normal(a, b) prior on redshift, truncated to the range
    # [0, zmax]; `zmax` is a free variable assumed to be defined in the
    # enclosing scope.
    lognorm = np.log(norm.cdf(zmax, loc=a, scale=b) - norm.cdf(0, loc=a, scale=b))
    return norm.logpdf(x, loc=a, scale=b) - lognorm
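# A hedged usage sketch: with `zmax` bound in the enclosing scope (the value
# below is invented), the truncated prior integrates to one over [0, zmax].
import numpy as np
from scipy.stats import norm
from scipy.integrate import quad

zmax = 4.0  # illustrative upper redshift bound, not from the original snippet
a, b = 1.0, 0.8
total, _ = quad(lambda z: np.exp(logredshiftprior(z, a, b)), 0.0, zmax)
print(total)  # ~= 1.0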
def cdf(idx):
    # Look up a standard-Gaussian bin edge, push it through the posterior
    # Normal(mean, stdd) CDF, and quantize the result to `post_prec` bits of
    # precision for the entropy coder.
    x = std_gaussian_bins(prior_prec)[idx]
    return _nearest_uint(norm.cdf(x, mean, stdd) * (1 << post_prec))
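# The quantization step can be illustrated standalone (values invented;
# `std_gaussian_bins` and `_nearest_uint` come from the surrounding codebase):
import numpy as np
from scipy.stats import norm

post_prec = 16                  # bits of precision for the entropy coder
mean, stdd = 0.0, 1.0
x = -0.675                      # an illustrative bin edge
q = int(np.rint(norm.cdf(x, mean, stdd) * (1 << post_prec)))
print(q, q / (1 << post_prec))  # integer CDF value and its dequantized form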
def constrain(self, unconstrained_x: jnp.ndarray):
    # Map unconstrained reals to the box [prior_mins, prior_maxs]: the
    # standard-normal CDF sends R to (0, 1), which is then rescaled.
    return self.prior_mins + norm.cdf(unconstrained_x) * (self.prior_maxs - self.prior_mins)
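# A minimal sketch of how `constrain` reparameterizes an unconstrained sampler
# into a box prior; the class skeleton and bounds are invented for illustration.
import jax.numpy as jnp
from jax.scipy.stats import norm

class BoxPrior:
    def __init__(self, prior_mins, prior_maxs):
        self.prior_mins = prior_mins
        self.prior_maxs = prior_maxs

    def constrain(self, unconstrained_x: jnp.ndarray):
        return self.prior_mins + norm.cdf(unconstrained_x) * (self.prior_maxs - self.prior_mins)

prior = BoxPrior(jnp.array([0.0, -1.0]), jnp.array([1.0, 1.0]))
print(prior.constrain(jnp.array([0.0, 2.0])))  # [0.5, ~0.954]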
def predict(self, X, w):
    # Probit predictions: class probabilities from the Gaussian CDF of the
    # linear scores, then hard labels at the 0.5 threshold.
    p = jnorm.cdf(jnp.dot(X, w))
    y = np.array((p > 0.5), dtype='int32')
    return y, p
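# A usage sketch for the probit prediction above, assuming `jnp` is jax.numpy,
# `jnorm` is jax.scipy.stats.norm, and `np` is plain NumPy; the design matrix
# and weights are toy values.
import numpy as np
import jax.numpy as jnp
from jax.scipy.stats import norm as jnorm

X = jnp.array([[1.0, 0.5],
               [1.0, -2.0]])      # toy design matrix (bias column + feature)
w = jnp.array([0.2, 1.0])         # toy weight vector
p = jnorm.cdf(jnp.dot(X, w))      # probit probabilities: ~[0.758, 0.036]
y = np.array(p > 0.5, dtype='int32')
print(y, p)                       # -> [1 0]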
w_pred = compute_w_gmm(X_star, **kwargs)

# Compute the upper and lower bounds of the posterior distributions
lower = mean - 2.0 * std
upper = mean + 2.0 * std
print(mean.shape, std.shape, lower.shape, upper.shape)

# Evaluate the acquisition function
acq_fn1 = lambda x: gp_model.constrained_acquisition(x, **kwargs)
LW_LCBCacq = vmap(acq_fn1)(X_star)

# Compute the ratio and weights derived from the constraints, and convert
# everything to NumPy for plotting
ratio1 = mean[1, :] / std[1, :]
weight1 = norm.cdf(ratio1)
LW_LCBCacq = onp.array(LW_LCBCacq)
mean = onp.array(mean)
std = onp.array(std)
ratio1 = onp.array(ratio1)
weight1 = onp.array(weight1)
y_f_pred = onp.array(mean[0, :])
y1_c_pred = onp.array(mean[1, :])
y_f_std = onp.array(std[0, :])