Example #1
def statistics(net_params: List[jnp.ndarray], deq_params: List[jnp.ndarray],
               rng: random.PRNGKey):
    # Split pseudo-random number key.
    rng, rng_sample, rng_xobs, rng_kl = random.split(rng, 4)
    # Compute comparison statistics.
    _, xsph, _ = ode_forward(rng_sample, net_params, 10000, 4)
    xobs = rejection_sampling(rng_xobs, len(xsph), 4, embedded_sphere_density)
    mean_mse = jnp.square(jnp.linalg.norm(xsph.mean(0) - xobs.mean(0)))
    cov_mse = jnp.square(jnp.linalg.norm(jnp.cov(xsph.T) - jnp.cov(xobs.T)))
    approx = importance_density(rng_kl, net_params, deq_params, 10000, xsph)
    target = embedded_sphere_density(xsph)
    w = target / approx
    Z = jnp.nanmean(w)
    log_approx = jnp.log(approx)
    log_target = jnp.log(target)
    klqp = jnp.nanmean(log_approx - log_target) + jnp.log(Z)
    ess = jnp.square(jnp.nansum(w)) / jnp.nansum(jnp.square(w))
    ress = 100 * ess / len(w)
    del w, Z, log_approx, approx, log_target, target, xsph
    approx = importance_density(rng_kl, net_params, deq_params, 10000, xobs)
    log_approx = jnp.log(approx)
    target = embedded_sphere_density(xobs)
    w = approx / target
    Z = jnp.nanmean(w)
    log_target = jnp.log(target)
    klpq = jnp.nanmean(log_target - log_approx) + jnp.log(Z)
    del w, Z, log_approx, approx, log_target, target
    method = 'deqode ({})'.format('ELBO' if args.elbo_loss else 'KL')
    print(
        r'{} - Mean MSE: {:.5f} - Covariance MSE: {:.5f} - KL$(q\Vert p)$ = {:.5f} - KL$(p\Vert q)$ = {:.5f} - Rel. ESS: {:.2f}%'
        .format(method, mean_mse, cov_mse, klqp, klpq, ress))
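The block above follows a pattern that recurs in Examples #2 and #10: self-normalized importance weights yield both a KL estimate and a relative effective sample size. Below is a minimal self-contained sketch of that shared computation; the `log_approx_fn`, `log_target_fn`, and `sampler` callables are hypothetical stand-ins for the model-specific functions used in the examples.

import jax.numpy as jnp
from jax import random
from jax.scipy.stats import norm

def importance_diagnostics(rng, log_approx_fn, log_target_fn, sampler,
                           num_samples=10000):
    # Draw from the approximation q and evaluate both densities in log space.
    x = sampler(rng, num_samples)
    log_approx = log_approx_fn(x)    # log q(x)
    log_target = log_target_fn(x)    # possibly unnormalized log p(x)
    # Importance weights p(x) / q(x); NaNs are skipped by the nan-reductions.
    w = jnp.exp(log_target - log_approx)
    Z = jnp.nanmean(w)               # estimate of the normalizing constant
    klqp = jnp.nanmean(log_approx - log_target) + jnp.log(Z)
    # Kish effective sample size, reported as a percentage of the sample size.
    ess = jnp.square(jnp.nansum(w)) / jnp.nansum(jnp.square(w))
    return klqp, 100 * ess / num_samples

# Hypothetical usage with q = N(0, 1) and unnormalized target exp(-|x|):
klqp, ress = importance_diagnostics(
    random.PRNGKey(0),
    log_approx_fn=norm.logpdf,
    log_target_fn=lambda x: -jnp.abs(x),
    sampler=lambda rng, n: random.normal(rng, (n,)))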
Example #2
def main():
    # Set pseudo-random number generator keys.
    rng = random.PRNGKey(args.seed)
    rng, rng_net = random.split(rng, 2)
    rng, rng_sample, rng_xobs, rng_basis = random.split(rng, 4)
    rng, rng_fwd, rng_rev = random.split(rng, 3)
    rng, rng_kl = random.split(rng, 2)

    # Initialize the parameters of the ambient vector field network.
    _, params = net_init(rng_net, (-1, 4))
    opt_state = opt_init(params)

    for it in range(args.num_steps):
        opt_state, kl = step(opt_state, it, args.num_samples)
        print('iter.: {} - kl: {:.4f}'.format(it, kl))

    params = get_params(opt_state)
    count = lambda x: jnp.prod(jnp.array(x.shape))
    num_params = jnp.array(
        tree_util.tree_map(count,
                           tree_util.tree_flatten(params)[0])).sum()
    print('number of parameters: {}'.format(num_params))

    # Compute comparison statistics.
    xsph, log_approx = manifold_ode_log_prob(params, rng_sample, 10000)
    xobs = rejection_sampling(rng_xobs, len(xsph), 3, embedded_sphere_density)
    mean_mse = jnp.square(jnp.linalg.norm(xsph.mean(0) - xobs.mean(0)))
    cov_mse = jnp.square(jnp.linalg.norm(jnp.cov(xsph.T) - jnp.cov(xobs.T)))
    approx = jnp.exp(log_approx)
    target = embedded_sphere_density(xsph)
    w = target / approx
    Z = jnp.nanmean(w)
    log_approx = jnp.log(approx)
    log_target = jnp.log(target)
    klqp = jnp.nanmean(log_approx - log_target) + jnp.log(Z)
    ess = jnp.square(jnp.nansum(w)) / jnp.nansum(jnp.square(w))
    ress = 100 * ess / len(w)
    del w, Z, log_approx, approx, log_target, target
    log_approx = manifold_reverse_ode_log_prob(params, rng_kl, xobs)
    approx = jnp.exp(log_approx)
    target = embedded_sphere_density(xobs)
    w = approx / target
    Z = jnp.nanmean(w)
    log_target = jnp.log(target)
    klpq = jnp.nanmean(log_target - log_approx) + jnp.log(Z)
    del w, Z, log_approx, approx, log_target, target
    print(
        r'manode - Mean MSE: {:.5f} - Covariance MSE: {:.5f} - KL$(q\Vert p)$ = {:.5f} - KL$(p\Vert q)$ = {:.5f} - Rel. ESS: {:.2f}%'
        .format(mean_mse, cov_mse, klqp, klpq, ress))
Example #3
    def _predict_transition_probabilities_jax(
        X: np.ndarray,
        W: np.ndarray,
        softmax_scale: float = 1,
    ):
        # pearson correlation
        W -= W.mean(axis=1)[:, None]
        X -= X.mean()

        W_norm = jnp.linalg.norm(W, axis=1)
        X_norm = jnp.linalg.norm(X)
        denom = X_norm * W_norm

        mask = jnp.isclose(denom, 0)
        denom = jnp.where(jnp.isclose(denom, 0), 1, denom)  # essential

        x = W.dot(X) / denom

        numerator = x * softmax_scale
        numerator = jnp.exp(numerator - jnp.nanmax(numerator))
        numerator = jnp.where(mask, 0, numerator)  # essential

        softmax = numerator / jnp.nansum(numerator)

        return softmax
Example #4
  def _mean_sen_spe_utility(particle_weights, particles):
    """Expected mean sensitivity/specificity of the marginal predictor.

    This function returns the mean sensitivity/specificity utility of a
    distribution encoded as a weighted sum of Dirac measures at particles. The
    mean sensitivity/specificity utility is the expected mean
    sensitivity/specificity of the marginal distribution thresholded at
    'threshold' as predictor.

    Args:
     particle_weights: weights of particles
     particles: particles summarizing belief about infection status

    Returns:
     The mean sensitivity/specificity utility of the distribution.
    """
    num_patients = particles.shape[1]
    marginal = np.sum(particle_weights[:, np.newaxis] * particles, axis=0)
    y_pred = marginal > threshold  # 'threshold' is captured from the enclosing scope
    total_pos = np.sum(particles, axis=1)
    total_neg = num_patients - total_pos
    sensitivities = np.sum(y_pred * particles, axis=1) / total_pos
    specificities = np.sum((1-y_pred) * (1-particles), axis=1) / total_neg
    sum_sen_spe = sensitivities + specificities
    return np.nansum(sum_sen_spe * particle_weights) / 2
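A hypothetical usage sketch, treating the nested function as if it were module-level and fixing `threshold` explicitly (in the original it is bound in the enclosing scope):

import numpy as np

threshold = 0.5  # assumed; captured from the enclosing scope in the original
# Belief over 4 patients encoded by 3 weighted particles of infection status.
particle_weights = np.array([0.5, 0.3, 0.2])
particles = np.array([[1, 0, 1, 0],
                      [1, 1, 0, 0],
                      [0, 0, 1, 1]])
print(_mean_sen_spe_utility(particle_weights, particles))  # 0.75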
Example #5
  def test_autograd_sinkhorn(self, lse_mode):
    """Test gradient w.r.t. probability weights."""
    d = 3
    n, m = 11, 13
    eps = 1e-3  # perturbation magnitude
    keys = jax.random.split(self.rng, 5)
    x = jax.random.uniform(keys[0], (n, d))
    y = jax.random.uniform(keys[1], (m, d))
    a = jax.random.uniform(keys[2], (n,)) + eps
    b = jax.random.uniform(keys[3], (m,)) + eps
    # Adding zero weights to test proper handling.
    a = a.at[0].set(0.0)  # .at[].set() replaces the removed jax.ops.index_update API
    b = b.at[3].set(0.0)
    a = a / jnp.sum(a)
    b = b / jnp.sum(b)
    geom = pointcloud.PointCloud(x, y, epsilon=0.1)

    def reg_ot(a, b):
      return sinkhorn.sinkhorn(geom, a=a, b=b, lse_mode=lse_mode).reg_ot_cost

    reg_ot_and_grad = jax.jit(jax.value_and_grad(reg_ot))
    _, grad_reg_ot = reg_ot_and_grad(a, b)
    delta = jax.random.uniform(keys[4], (n,))
    delta = delta * (a > 0)  # ensures only perturbing non-zero coords.
    delta = delta - jnp.sum(delta) / jnp.sum(a > 0)  # center perturbation
    delta = delta * (a > 0)  # ensures only perturbing non-zero coords.
    reg_ot_delta_plus = reg_ot(a + eps * delta, b)
    reg_ot_delta_minus = reg_ot(a - eps * delta, b)
    delta_dot_grad = jnp.nansum(delta * grad_reg_ot)
    self.assertFalse(jnp.any(jnp.isnan(delta_dot_grad)))
    self.assertAllClose(delta_dot_grad,
                        (reg_ot_delta_plus - reg_ot_delta_minus) / (2 * eps),
                        rtol=1e-03, atol=1e-02)
Example #6
    def _softmax_masked_jax(x: np.ndarray, mask: np.ndarray,
                            softmax_scale) -> np.ndarray:
        numerator = x * softmax_scale
        numerator = jnp.exp(numerator - jnp.nanmax(numerator))
        numerator = jnp.where(mask, 0, numerator)  # essential

        return numerator / jnp.nansum(numerator)
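A quick check of the masking behavior with hypothetical inputs: masked entries receive exactly zero probability while the remaining entries still normalize to one.

import numpy as np
import jax.numpy as jnp

x = np.array([2.0, 1.0, 0.5])
mask = np.array([False, False, True])  # mask out the third entry
probs = _softmax_masked_jax(x, mask, softmax_scale=1.0)
# probs[2] == 0.0 and probs[0] + probs[1] == 1.0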
Example #7
  def auc_utility(particle_weights, particles):
    """Expected AUC of the marginal predictor as utility.

    This function returns the AUC utility of a distribution encoded as a
    weighted sum of Dirac measures at particles. The AUC utility is the expected
    AUC of the marginal distribution as predictor.

    Args:
     particle_weights: weights of particles
     particles: particles summarizing belief about infection status

    Returns:
     The AUC utility of the distribution.
    """
    marginal = np.sum(particle_weights[:, np.newaxis] * particles, axis=0)
    sorted_particles = particles[:, np.argsort(marginal)]
    false_count = np.cumsum(1 - sorted_particles, axis=1)
    area = np.sum(sorted_particles * false_count, axis=1)
    aucs = area / (
        false_count[:, -1] * (sorted_particles.shape[1] - false_count[:, -1]))
    return np.nansum(aucs * particle_weights)
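A hypothetical sanity check: a single particle whose true labels are perfectly ranked by the marginal yields an AUC of 1.

import numpy as np

particle_weights = np.array([1.0])
particles = np.array([[0, 1, 0, 1]])
# marginal = [0, 1, 0, 1]; sorting the labels by it gives [0, 0, 1, 1]
print(auc_utility(particle_weights, particles))  # 1.0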
Example #8
 def entropy_fn(preferences: ArrayLike, epsilon=epsilon):
   probs = _argmax_with_random_tie_breaking(preferences)
   probs = _mix_with_uniform(probs, epsilon)
   return -jnp.nansum(probs * jnp.log(probs), axis=-1)
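`jnp.nansum` is what makes these entropy helpers safe when a probability is exactly zero: `0 * jnp.log(0)` evaluates to NaN, and `nansum` treats the term as zero. A minimal illustration with a point-mass distribution:

import jax.numpy as jnp

probs = jnp.array([1.0, 0.0, 0.0])
terms = probs * jnp.log(probs)  # [0., nan, nan]
print(-jnp.sum(terms))          # nan
print(-jnp.nansum(terms))       # -0.0, i.e. zero entropy for a point mass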
Example #9
 def entropy_fn(preferences: ArrayLike):
   probs = _argmax_with_random_tie_breaking(preferences)
   return -jnp.nansum(probs * jnp.log(probs), axis=-1)
Example #10
xamb = forward(bij_params, bij_fns, xamb)
xtor = jnp.mod(xamb, 2.0 * jnp.pi)
lp = induced_torus_log_density(bij_params, bij_fns, xtor)
xobs = rejection_sampling(rng_xobs, len(xtor), torus_density, args.beta)

# Compute comparison statistics.
mean_mse = jnp.square(jnp.linalg.norm(xtor.mean(0) - xobs.mean(0)))
cov_mse = jnp.square(jnp.linalg.norm(jnp.cov(xtor.T) - jnp.cov(xobs.T)))
approx = jnp.exp(lp)
target = torus_density(xtor)
w = target / approx
Z = jnp.nanmean(w)
log_approx = jnp.log(approx)
log_target = jnp.log(target)
klqp = jnp.nanmean(log_approx - log_target) + jnp.log(Z)
ess = jnp.square(jnp.nansum(w)) / jnp.nansum(jnp.square(w))
ress = 100 * ess / len(w)
del w, Z, log_approx, log_target
log_approx = induced_torus_log_density(bij_params, bij_fns, xobs)
approx = jnp.exp(log_approx)
target = torus_density(xobs)
log_target = jnp.log(target)
w = approx / target
Z = jnp.mean(w)
klpq = jnp.mean(log_target - log_approx) + jnp.log(Z)
del w, Z, log_approx, log_target
print(
    r'direct - {} - seed: {} - Mean MSE: {:.5f} - Covariance MSE: {:.5f} - KL$(q\Vert p)$ = {:.5f} - KL$(p\Vert q)$ = {:.5f} - Rel. ESS: {:.2f}%'
    .format(args.density, args.seed, mean_mse, cov_mse, klqp, klpq, ress))

fig, axes = plt.subplots(1, 2, figsize=(10, 4))
Example #11
def nansum(a, axis=None, dtype=None, keepdims=None):
  if isinstance(a, JaxArray): a = a.value
  r = jnp.nansum(a=a, axis=axis, dtype=dtype, keepdims=keepdims)
  return r if axis is None else JaxArray(r)
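Hypothetical usage, assuming `JaxArray` is the array wrapper this module defines: a full reduction returns the raw scalar, while an axis reduction is re-wrapped.

a = JaxArray(jnp.array([[1.0, jnp.nan], [2.0, 3.0]]))
nansum(a)          # 6.0 as a raw scalar (axis is None)
nansum(a, axis=0)  # JaxArray([3., 3.])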
Example #12
 def entropy_fn(logits: Array):
     probs = jax.nn.softmax(logits / temperature)
     probs = _mix_with_uniform(probs, epsilon)
     return -jnp.nansum(probs * jnp.log(probs), axis=-1)
Example #13
def get_reparametrized_errors(agg_res):
    if isinstance(agg_res.model, dict):
        model = agg_res.params
    else:
        model = agg_res.model

    disk = model['disk']
    if 'bulge' in model:
        bulge = model['bulge']
    else:
        bulge = EMPTY_SERSIC
    if 'bar' in model:
        bar = model['bar']
    else:
        bar = EMPTY_SERSIC

    disk_e = agg_res.errors['disk']
    if 'bulge' in agg_res.errors:
        bulge_e = agg_res.errors['bulge']
    else:
        bulge_e = EMPTY_SERSIC_ERR
    if 'bar' in agg_res.errors:
        bar_e = agg_res.errors['bar']
    else:
        bar_e = EMPTY_SERSIC_ERR

    errs = pd.DataFrame([],
                        columns=['disk', 'bulge', 'bar', 'spiral'],
                        dtype=jnp.float64)
    errs['disk'] = disk_e.copy()
    # it is possible that we have zero error for ellipticity, which will
    # cause problems. Instead, fix it as a small value

    errs.loc['q', 'disk'] = max(0.001, errs.loc['q', 'disk'])
    errs.loc['L', 'disk'] = jnp.inf
    errs.loc['I', 'disk'] = jnp.nan
    errs.loc['n', 'disk'] = jnp.nan
    errs.loc['c', 'disk'] = jnp.nan

    errs['bulge'] = bulge_e.copy()
    errs.loc['q', 'bulge'] = max(0.001, errs.loc['q', 'bulge'])
    errs.loc['scale', 'bulge'] = bulge.Re / disk.Re * jnp.sqrt(
        bulge_e.Re**2 / bulge.Re**2 + disk_e.Re**2 / disk.Re**2)
    errs.loc['frac', 'bulge'] = jnp.inf
    errs.loc['I', 'bulge'] = jnp.nan
    errs.loc['Re', 'bulge'] = jnp.nan
    errs.loc['c', 'bulge'] = jnp.nan

    errs['bar'] = bar_e.copy()
    errs.loc['q', 'bar'] = max(0.001, errs.loc['q', 'bar'])
    errs.loc['scale',
             'bar'] = bar.Re / disk.Re * jnp.sqrt(bar_e.Re**2 / bar.Re**2 +
                                                  disk_e.Re**2 / disk.Re**2)
    errs.loc['frac', 'bar'] = jnp.inf
    errs.loc['I', 'bar'] = jnp.nan
    errs.loc['Re', 'bar'] = jnp.nan

    errs.loc['mux', 'centre'] = jnp.sqrt(
        jnp.nansum(jnp.array((disk_e.mux**2, bulge_e.mux**2))))
    errs.loc['muy', 'centre'] = jnp.sqrt(
        jnp.nansum(jnp.array((disk_e.muy**2, bulge_e.muy**2))))

    for i in range(len(agg_res.spiral_arms)):
        errs.loc['I.{}'.format(i), 'spiral'] = jnp.inf
        errs.loc['falloff.{}'.format(i), 'spiral'] = jnp.inf
        errs.loc['spread.{}'.format(i), 'spiral'] = jnp.inf
        errs.loc['A.{}'.format(i), 'spiral'] = 0.01
        errs.loc['phi.{}'.format(i), 'spiral'] = 1
        errs.loc['t_min.{}'.format(i), 'spiral'] = jnp.deg2rad(0.5)
        errs.loc['t_max.{}'.format(i), 'spiral'] = jnp.deg2rad(0.5)
    return df_to_dict(errs)
Example #14
 def entropy_fn(preferences: rlax.ArrayLike, legal: rlax.ArrayLike, epsilon=epsilon):
     probs = DQNPolicy._argmax_with_random_tie_breaking(preferences)
     probs = DQNPolicy._mix_with_legal_uniform(probs, epsilon, legal)
     return -jnp.nansum(probs * jnp.log(probs), axis=-1)
Example #15
 def nanmean(x):
     return np.nansum(x)/(x.size-np.sum(np.isnan(x)))
Example #16
 def E5(self, r):
     """
     E[log q(Z)]
     """
     terms = r * jnp.log(r)
     return jnp.nansum(terms)
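Here `jnp.nansum` again guards the r log r terms, since responsibilities that are exactly zero would otherwise produce NaN. A sketch with a hypothetical responsibility matrix:

import jax.numpy as jnp

r = jnp.array([[1.0, 0.0],
               [0.5, 0.5]])
# The 0 * log(0) entry is NaN; nansum drops it, leaving 2 * 0.5 * log(0.5).
print(jnp.nansum(r * jnp.log(r)))  # about -0.6931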