Example #1
import jax
import jax.numpy as jnp
import elegy as eg


class CNN(eg.Module):
    @eg.compact
    def __call__(self, x: jnp.ndarray) -> jnp.ndarray:
        # Normalize the input
        x = x.astype(jnp.float32) / 255.0

        # Block 1
        x = eg.Conv(32, [3, 3], strides=[2, 2])(x)
        x = eg.Dropout(0.05)(x)
        x = jax.nn.relu(x)

        # Block 2
        x = eg.Conv(64, [3, 3], strides=[2, 2])(x)
        x = eg.BatchNorm()(x)
        x = eg.Dropout(0.1)(x)
        x = jax.nn.relu(x)

        # Block 3
        x = eg.Conv(128, [3, 3], strides=[2, 2])(x)

        # Global average pooling
        x = x.mean(axis=(1, 2))

        # Classification layer
        x = eg.Linear(10)(x)

        return x
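A minimal training sketch for this module, assuming Elegy's high-level Model API roughly as shown in its README; the data arrays and the loss/metric names are placeholders, not a confirmed setup:

import numpy as np
import optax

# Placeholder data; substitute a real dataset (e.g. MNIST) here.
X_train = np.zeros((64, 28, 28, 1), dtype=np.uint8)
y_train = np.zeros((64,), dtype=np.int32)

model = eg.Model(
    module=CNN(),
    loss=eg.losses.Crossentropy(),   # assumed names from Elegy's docs
    metrics=eg.metrics.Accuracy(),
    optimizer=optax.adam(1e-3),
)
model.fit(inputs=X_train, labels=y_train, epochs=2, batch_size=32)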
Example #2
import jax
import jax.numpy as jnp
from flax import linen


class CNN(linen.Module):
    @linen.compact
    def __call__(self, x: jnp.ndarray, training: bool) -> jnp.ndarray:
        # Normalize the input
        x = x.astype(jnp.float32) / 255.0

        # Block 1
        x = linen.Conv(32, [3, 3], strides=[2, 2])(x)
        x = linen.Dropout(0.05, deterministic=not training)(x)
        x = jax.nn.relu(x)

        # Block 2
        x = linen.Conv(64, [3, 3], strides=[2, 2])(x)
        x = linen.BatchNorm(use_running_average=not training)(x)
        x = linen.Dropout(0.1, deterministic=not training)(x)
        x = jax.nn.relu(x)

        # Block 3
        x = linen.Conv(128, [3, 3], strides=[2, 2])(x)

        # Global average pooling
        x = x.mean(axis=(1, 2))

        # Classification layer
        x = linen.Dense(10)(x)

        return x
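A minimal init/apply sketch for the Flax version; shapes are illustrative:

import jax
import jax.numpy as jnp

model = CNN()
x = jnp.zeros((1, 32, 32, 3), dtype=jnp.uint8)  # illustrative image batch

# With training=False, Dropout is deterministic and BatchNorm reads its
# running statistics, so only the "params" RNG is needed at init time.
variables = model.init(jax.random.PRNGKey(0), x, training=False)
logits = model.apply(variables, x, training=False)

# For a training step, supply a dropout RNG and let batch stats update:
# model.apply(variables, x, training=True,
#             rngs={"dropout": jax.random.PRNGKey(1)}, mutable=["batch_stats"])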
Example #3
import jax.numpy as jnp


def richardson_component_prior(data: jnp.ndarray):
    """
    Compute the parameters of the Gaussian prior distribution on Gaussian
    component means, as described by Richardson and Green in
    https://people.maths.bris.ac.uk/~mapjg/papers/RichardsonGreenRSSB.pdf, p. 735

    Args:
        data (jnp.ndarray(shape=(Npoints, dim))): input data

    Returns:
        mu_bar (jnp.ndarray(shape=(dim,))): prior component mean
        sigma2_mu (float): prior component variance
    """
    mu_bar = data.mean(axis=0)
    R = jnp.abs(data - mu_bar).max()  # largest deviation from the data mean
    sigma2_mu = 0.5 * R * R

    return mu_bar, sigma2_mu
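A quick sanity check on synthetic data; shapes and values are illustrative:

import numpy as np
import jax.numpy as jnp

data = jnp.asarray(np.random.default_rng(0).normal(size=(500, 2)))
mu_bar, sigma2_mu = richardson_component_prior(data)
print(mu_bar.shape, float(sigma2_mu))  # (2,) and a positive scalar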
Example #4
from typing import Optional

import numpy as np
import matplotlib.pyplot as plt


def plot_final_bounds(x: np.ndarray,
                      y: np.ndarray,
                      xstar: np.ndarray,
                      bounds: np.ndarray,
                      data_xstar: Optional[np.ndarray],
                      data_ystar: Optional[np.ndarray],
                      coeff_2sls: Optional[np.ndarray] = None,
                      x_kiv: Optional[np.ndarray] = None,
                      y_kiv: Optional[np.ndarray] = None) -> plt.Figure:
  fig = plt.figure()
  plt.scatter(x, y, **data_kwargs)  # data_kwargs: module-level scatter styling dict
  plt.plot(xstar, bounds[:, 0], 'g--x', label="lower", lw=2, markersize=10)
  plt.plot(xstar, bounds[:, 1], 'r--x', label="upper", lw=2, markersize=10)
  if data_xstar is not None and data_ystar is not None:
    if data_ystar.ndim > 1:
      data_ystar = data_ystar.mean(0)
    plt.plot(data_xstar, data_ystar, label="$E[Y | do(x^*)]$", lw=2)
  if coeff_2sls is not None:
    tt = np.linspace(np.min(x), np.max(x), 10)
    y_2sls = coeff_2sls[0] + coeff_2sls[1] * tt
    plt.plot(tt, y_2sls, ls='dotted', c="tab:purple", lw=2, label="2sls")
  if x_kiv is not None and y_kiv is not None:
    plt.plot(x_kiv, y_kiv, ls='dashdot', c="tab:olive", lw=2, label="KIV")

  def get_limits(vals):
    lo = np.min(vals)
    hi = np.max(vals)
    extend = (hi - lo) / 15.
    return lo - extend, hi + extend

  plt.xlim(get_limits(x))
  plt.ylim(get_limits(y))
  plt.xlabel('x')
  plt.ylabel('y')
  plt.title("Lower and upper bound on actual effect")
  plt.legend()
  return fig
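A hypothetical call on synthetic data; assuming everything lives in one script, the sketch defines a stand-in for the module-level `data_kwargs` dict the function expects:

import numpy as np

data_kwargs = dict(s=8, alpha=0.4, color="gray", label="data")  # hypothetical styling

rng = np.random.default_rng(0)
x = rng.normal(size=200)
y = 2.0 * x + rng.normal(size=200)
xstar = np.linspace(-2.0, 2.0, 20)
bounds = np.stack([2.0 * xstar - 1.0, 2.0 * xstar + 1.0], axis=1)  # toy bounds

fig = plot_final_bounds(x, y, xstar, bounds, data_xstar=xstar, data_ystar=2.0 * xstar)
fig.savefig("bounds.png")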
Example #5
    def _test_mean(self,
                   vals: jnp.ndarray,
                   precision: float = 3.0):
        # Check the empirical mean is within `precision` (L1 norm) of the truth.
        samp_mean = vals.mean(axis=0)
        self.assertLess(jnp.abs(self.true_unconstrained_params - samp_mean).sum(), precision)
Example #6
    def _test_mean(self,
                   val: jnp.ndarray,
                   precision: float = 3.0):
        # Same check, here against the posterior mean stored on the test case.
        samp_mean = val.mean(axis=0)
        self.assertLess(jnp.abs(self.posterior_mean - samp_mean).sum(), precision)
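A self-contained sketch of how such a helper sits inside a test case; the fixture value and the sample source are hypothetical:

import unittest

import jax
import jax.numpy as jnp


class PosteriorMeanTest(unittest.TestCase):
    def setUp(self):
        self.posterior_mean = jnp.array([0.0, 1.0])  # hypothetical known answer

    def _test_mean(self, val: jnp.ndarray, precision: float = 3.0):
        samp_mean = val.mean(axis=0)
        self.assertLess(jnp.abs(self.posterior_mean - samp_mean).sum(), precision)

    def test_sampler_recovers_mean(self):
        # Stand-in for draws from a real posterior sampler.
        samples = self.posterior_mean + 0.1 * jax.random.normal(
            jax.random.PRNGKey(0), (1000, 2))
        self._test_mean(samples)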