Example #1
def test_CovOp(plot=False, center=False):
    from scipy.stats import multivariate_normal
    import pylab as pl

    nsamps = 1000
    samps_unif = nsamps
    regul_C_ref = 0.0001
    D = 1
    gk_x = GaussianKernel(0.2)

    targ = mixt(D, [multivariate_normal(3 * np.ones(D), np.eye(D) * 0.7**2),
                    multivariate_normal(7 * np.ones(D), np.eye(D) * 1.5**2)],
                [0.5, 0.5])
    out_samps = targ.rvs(nsamps).reshape([nsamps, 1]).astype(float)
    out_fvec = FiniteVec(gk_x, out_samps, np.ones(nsamps), center = center)
    out_meanemb = out_fvec.sum()
    

    x = np.linspace(-2.5, 15, samps_unif)[:, np.newaxis].astype(float)
    ref_fvec = FiniteVec(gk_x, x, np.ones(len(x)), center = center)
    ref_elem = ref_fvec.sum()

    C_samps = CovOp(out_fvec, regul=regul_C_ref, center = center)
    unif_obj = C_samps.solve(out_meanemb).dens_proj()
    C_ref = CovOp(ref_fvec, regul=regul_C_ref, center = center)
    dens_obj = C_ref.solve(out_meanemb).dens_proj()
    targp = np.exp(targ.logpdf(ref_fvec.insp_pts.squeeze())).squeeze()
    estp = np.squeeze(inner(dens_obj, ref_fvec))
    estp2 = np.squeeze(inner(dens_obj, ref_fvec))  # identical to estp in this snippet
    est_sup = unif_obj(x).squeeze()
    assert (np.abs(targp.squeeze()-estp).mean() < 0.8), "Estimated density strongly deviates from true density"
    if plot:
        pl.plot(ref_fvec.insp_pts.squeeze(), estp/np.max(estp) * np.max(targp), "b--", label="scaled estimate")
        pl.plot(ref_fvec.insp_pts.squeeze(), estp2/np.max(estp2) * np.max(targp), "g-.", label="scaled estimate (uns)")
        pl.plot(ref_fvec.insp_pts.squeeze(), targp, label = "truth")
        pl.plot(x.squeeze(), est_sup.squeeze(), label = "support")
        
        #pl.plot(ref_fvec.inspace_points.squeeze(), np.squeeze(inner(unif_obj, ref_fvec)), label="unif")
        pl.legend(loc="best")
        pl.show()
    assert np.std(est_sup) < 0.15, "Estimated support has high variance at the data points, while it should be almost constant."
Example #2
  def test_rollout(self, variant):
    peep = 0
    pip = 1
    use_noise = False
    loss_fn = lambda x, y: (jnp.abs(x - y)).mean()
    loss = jnp.array(0.)
    duration = 0.87
    dt = 0.03
    tt = jnp.linspace(0, duration, int(duration / dt))
    variant_rollout = variant(rollout, static_argnums=(6,))
    value, _ = jax.value_and_grad(variant_rollout)(self.controller, self.sim,
                                                   tt, use_noise, peep, pip,
                                                   loss_fn, loss)
    print('test_rollout loss: ' + str(value))
    self.assertTrue(jnp.allclose(float(value), 11.748882))
Example #3
    def test_jvp_jit(self):
        f_dex = primitive(
            dex.eval(r'\x:((Fin 10) => Float) y:((Fin 10) => Float). '
                     'for i. x.i * x.i + 2.0 * y.i'))

        def f_jax(x, y):
            return x**2 + 2 * y

        x = jnp.arange(10.)
        y = jnp.linspace(-0.2, 0.5, num=10)
        u = jnp.linspace(0.1, 0.3, num=10)
        v = jnp.linspace(2.0, -5.0, num=10)

        def jvp_dex(args, tangents):
            return jax.jvp(f_dex, args, tangents)

        def jvp_jax(args, tangents):
            return jax.jvp(f_jax, args, tangents)

        output_dex, tangent_dex = jax.jit(jvp_dex)((x, y), (u, v))
        output_jax, tangent_jax = jax.jit(jvp_jax)((x, y), (u, v))

        np.testing.assert_allclose(output_dex, output_jax)
        np.testing.assert_allclose(tangent_dex, tangent_jax)
Example #4
  def test_float_weights_should_give_close_output(self, weight_prec):
    inputs = random.uniform(self.rng_key, shape=(2, 3))
    model, state = self.init_model_with_1_layer(
        inputs, num_features=4, weight_prec=weight_prec)
    float_weights = jnp.linspace(-1 / 3, 1 / 3, num=12).reshape((3, 4))

    exp_output_without_quant = jnp.matmul(inputs, float_weights)
    state = state.unfreeze()
    state['params']['kernel'] = float_weights
    state = flax.core.freeze(state)
    outputs_with_quant = model.apply(state, inputs, padding_mask=None)
    onp.testing.assert_raises(AssertionError, onp.testing.assert_array_equal,
                              outputs_with_quant, exp_output_without_quant)
    test_utils.assert_all_close_prec(exp_output_without_quant,
                                     outputs_with_quant, weight_prec)
Example #5
def test_non_conjugate_rv(n):
    key = jr.PRNGKey(123)
    f = posterior = Prior(kernel=RBF()) * Bernoulli()
    x = jnp.sort(jr.uniform(key, shape=(n, 1), minval=-1.0, maxval=1.0), axis=0)
    y = 0.5 * jnp.sign(jnp.cos(3 * x + jr.normal(key, shape=x.shape) * 0.05)) + 0.5
    D = Dataset(X=x, y=y)

    sample_points = jnp.linspace(-1.0, 1.0, num=n).reshape(-1, 1)

    hyperparams = {"lengthscale": jnp.array([1.0]), "variance": jnp.array([1.0])}
    params = complete(hyperparams, posterior, x.shape[0])
    rv = random_variable(f, params, D)
    assert isinstance(rv, Callable)
    fstar = rv(sample_points)
    assert isinstance(fstar, tfd.ProbitBernoulli)
Example #6
def generate_dataset(n_i, n_b, n_cx, n_ct):
    x_i = jnp.linspace(*domain[:, 0], n_i).reshape((-1, 1))
    t_i = jnp.zeros_like(x_i)
    u_i = u0_fn(x_i, t_i)
    v_i = v0_fn(x_i, t_i)

    x_l = jnp.ones((n_b, 1)) * domain[0, 0]
    x_r = jnp.ones((n_b, 1)) * domain[1, 0]
    t_b = jnp.linspace(*domain[:, 1], n_b).reshape((-1, 1))
    u_l = ul_fn(x_l, t_b)
    u_r = ur_fn(x_r, t_b)
    v_l = vl_fn(x_l, t_b)
    v_r = vr_fn(x_r, t_b)

    x_c = jnp.linspace(*domain[:, 0], n_cx).reshape((-1, 1))
    t_c = jnp.linspace(*domain[:, 1], n_ct).reshape((-1, 1))
    xt_c = tensor_grid([x_c, t_c])

    dirichlet = dataset_Dirichlet(jnp.vstack([x_i, x_l, x_r]),
                                  jnp.vstack([t_i, t_b, t_b]),
                                  jnp.vstack([u_i, u_l, u_r]),
                                  jnp.vstack([v_i, v_l, v_r]))
    collocation = dataset_Collocation(xt_c[:, 0:1], xt_c[:, 1:2])
    return dirichlet, collocation
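generate_dataset depends on module-level names (domain, the initial/boundary-condition functions, tensor_grid, and the two dataset containers) defined elsewhere in the project. A minimal sketch of the two pieces whose shapes matter here, under assumed conventions; the bounds and the tensor_grid implementation are illustrative guesses, not the project's actual definitions:

import jax.numpy as jnp

# Assumed layout: column 0 holds the spatial bounds, column 1 the time bounds,
# so jnp.linspace(*domain[:, 0], n) spans x and jnp.linspace(*domain[:, 1], n) spans t.
domain = jnp.array([[0.0, 0.0],
                    [1.0, 2.0]])  # hypothetical: x in [0, 1], t in [0, 2]

def tensor_grid(columns):
    # Hypothetical helper: Cartesian product of 1-D column vectors,
    # returned as one grid point per row.
    mesh = jnp.meshgrid(*[c.ravel() for c in columns], indexing="ij")
    return jnp.stack([m.ravel() for m in mesh], axis=-1)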
Example #7
def plot_vi_gp(obj, mu, Sigma, X, y):
    gp = GaussianProcess(GaussianLinearKernel(0., 0., 0., 0., 0., 0.), X, y)

    xlim, = obj.boundaries
    x_gt = np.linspace(xlim[0], xlim[1], 100)
    xx = np.linspace(xlim[0] - 2, xlim[1] + 2, 200)

    plt.plot(x_gt, obj.evaluate_without_noise(x_gt), c='c')

    plt.title("Gaussian Process Regression")
    mu = mu.flatten()
    for _ in range(500):
        sample_gp_parameter = onp.random.multivariate_normal(mu, Sigma)
        gp.set_kernel_parameters(*sample_gp_parameter)
        function_sample = gp.get_sample(xx.reshape((-1, 1)))
        plt.plot(xx, function_sample, alpha=0.3, c='C0')
        plt.scatter(gp.array_dataset,
                    gp.array_objective_function_values,
                    c='m',
                    marker="+",
                    zorder=1000,
                    s=(30, ))
        plt.pause(0.01)
    plt.show()
Example #8
def meshgrid(height, width, is_homogeneous=True):
    """Construct a 2D meshgrid.
    Args:
        height: height of the grid
        width: width of the grid
        is_homogeneous: whether to return in homogeneous coordinates
    Returns:
        x, y grid coordinates [2 (3 if homogeneous), height, width]
    """
    x_t = jnp.matmul(
        jnp.ones(shape=[height, 1]),
        jnp.transpose(jnp.expand_dims(jnp.linspace(-1.0, 1.0, width), 1),
                      [1, 0]))
    y_t = jnp.matmul(jnp.expand_dims(jnp.linspace(-1.0, 1.0, height), 1),
                     jnp.ones(shape=[1, width]))
    x_t = (x_t + 1.0) * 0.5 * jnp.array(width - 1, dtype='float32')
    y_t = (y_t + 1.0) * 0.5 * jnp.array(height - 1, dtype='float32')
    if is_homogeneous:
        ones = jnp.ones_like(x_t)
        coords = jnp.stack([x_t, y_t, ones], axis=0)
    else:
        coords = jnp.stack([x_t, y_t], axis=0)
    # coords = jnp.tile(jnp.expand_dims(coords, 0), [batch, 1, 1, 1])
    return coords
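A quick shape check for meshgrid; this only calls the function as defined above:

coords = meshgrid(height=4, width=5)              # homogeneous by default
assert coords.shape == (3, 4, 5)                  # x, y, and a row of ones
assert float(coords[0].min()) == 0.0              # x runs from 0 to width - 1
assert float(coords[0].max()) == 4.0
coords_xy = meshgrid(4, 5, is_homogeneous=False)
assert coords_xy.shape == (2, 4, 5)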
Example #9
def generate_donut(nmeans=10, nsamps_per_mean=50):
    from scipy.stats import multivariate_normal

    def pol2cart(theta, rho):
        x = (rho * np.cos(theta)).reshape(-1, 1)
        y = (rho * np.sin(theta)).reshape(-1, 1)
        return np.concatenate([x, y], axis=1)

    comp_distribution = multivariate_normal(np.zeros(2), np.eye(2) / 100)
    means = pol2cart(np.linspace(0, 2 * np.pi, nmeans + 1)[:-1], 1)

    rvs = comp_distribution.rvs(nmeans * nsamps_per_mean) + np.repeat(means, nsamps_per_mean, 0)
    true_dens = lambda samps: np.exp(
        location_mixture_logpdf(samps, means, np.ones(nmeans) / nmeans, comp_distribution))
    return rvs, means, true_dens
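A minimal sketch of the returned shapes; note that calling true_dens requires location_mixture_logpdf, which is not defined in this snippet:

samps, means, true_dens = generate_donut(nmeans=10, nsamps_per_mean=50)
assert samps.shape == (500, 2)   # 10 clusters of 50 two-dimensional points
assert means.shape == (10, 2)    # cluster centers on the unit circle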
Example #10
def generate_data():
    T = 1000
    tec = jnp.cumsum(15. * random.normal(random.PRNGKey(0), shape=(T, )))
    TEC_CONV = -8.4479745e6  # mTECU/Hz
    freqs = jnp.linspace(121e6, 168e6, 24)
    phase = tec[:, None] / freqs * TEC_CONV
    Y = jnp.concatenate([jnp.cos(phase), jnp.sin(phase)], axis=1)
    Y_obs = Y + 0.75 * random.normal(random.PRNGKey(1), shape=Y.shape)
    # Y_obs[500:550:2, :] += 3. * random.normal(random.PRNGKey(1),shape=Y[500:550:2, :].shape)
    Sigma = 0.5**2 * jnp.eye(48)
    Omega = jnp.diag(jnp.array([30.]))**2
    mu0 = jnp.zeros(1)
    Gamma0 = jnp.diag(jnp.array([200.]))**2
    amp = jnp.ones_like(phase)
    return Gamma0, Omega, Sigma, T, Y_obs, amp, mu0, tec, freqs
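A shape sketch for the synthetic TEC data above, using only the function as written:

Gamma0, Omega, Sigma, T, Y_obs, amp, mu0, tec, freqs = generate_data()
assert tec.shape == (1000,) and freqs.shape == (24,)
assert Y_obs.shape == (1000, 48)   # cos and sin of the phase, concatenated
assert Sigma.shape == (48, 48) and Gamma0.shape == (1, 1)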
Example #11
def create_grid(samples_per_dim, value_range=(-1.0, 1.0)):
  """Creates a tensor with equidistant entries from -1 to +1 in each dim.

  Args:
    samples_per_dim: Number of points to have along each dimension.
    value_range: In each dimension, points will go from value_range[0] to
      value_range[1].

  Returns:
      A tensor of shape [samples_per_dim] + [len(samples_per_dim)].
  """
  s = [jnp.linspace(value_range[0], value_range[1], n) for n in samples_per_dim]
  pe = jnp.stack(jnp.meshgrid(*s, sparse=False, indexing="ij"), axis=-1)
  return jnp.array(pe)
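A quick check of create_grid; the expected shape follows from the docstring:

grid = create_grid(samples_per_dim=(2, 3))
assert grid.shape == (2, 3, 2)        # [samples_per_dim] + [len(samples_per_dim)]
assert float(grid[0, 0, 0]) == -1.0   # corners sit at the ends of value_range
assert float(grid[-1, -1, 1]) == 1.0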
Example #12
def test_CP():
    from pOP import CP as pCP
    x = np.linspace(0, 2, num=10)
    z = (x - x[0]) * 2. / (x[-1] - x[0]) - 1.
    cp1 = CP(0., 2., np.array([], dtype=np.int32), 5)
    cp2 = CP(0., 2., np.array([], dtype=np.int32), 10)
    Fc1 = cp1.H(x, 0, False)
    Fc2 = cp2.H(x, 3, False)

    z = z.reshape(10, 1)
    Fp1 = pCP(z, 4)
    Fp2 = pCP(z, 9, d=3)

    assert (np.linalg.norm(Fc1 - Fp1, ord='fro') < 1e-14)
    assert (np.linalg.norm(Fc2 - Fp2, ord='fro') < 1e-14)
Example #13
def test_mean_and_var_mid():
    t0 = 0.0
    t1 = 3.0
    y0 = np.linspace(0.1, 0.9, D)
    num_samples = 500

    vals = onp.zeros((num_samples, D))
    for i in range(num_samples):
        rng = random.PRNGKey(i)
        bm = make_brownian_motion(t0, np.zeros(y0.shape), t1, rng)
        vals[i, :] = bm(t1 / 2.0)

    print(np.mean(vals), np.var(vals))
    assert np.allclose(np.mean(vals), 0.0, atol=1e-1, rtol=1e-1)
    assert np.allclose(np.var(vals), t1 / 2.0, atol=1e-1, rtol=1e-1)
Example #14
    def test_exponential_global_convolution(self):
        init_fn, apply_fn = neural_xc.exponential_global_convolution(
            num_channels=2,
            grids=jnp.linspace(-1, 1, 5),
            minval=0.1,
            maxval=2.)

        output_shape, init_params = init_fn(random.PRNGKey(0),
                                            input_shape=(-1, 5, 1))
        output = apply_fn(init_params, jnp.array(np.random.rand(1, 5, 1)))

        self.assertEqual(output_shape, (-1, 5, 2))
        self.assertLen(init_params, 1)
        self.assertEqual(init_params[0].shape, (2, ))
        self.assertEqual(output.shape, (1, 5, 2))
Example #15
def test_broadcast_kde_cdf_shape(n_samples):

    bw = 0.1
    precision = 10

    x = objax.random.normal((n_samples,), generator=generator)
    lb, ub = get_domain_extension(x, 10)
    support = np.linspace(lb, ub, precision)

    factor = normalization_factor(x, bw)

    quantiles = broadcast_kde_cdf(support, x, factor)

    # checks
    chex.assert_shape(quantiles, (precision,))
Example #16
def test_fwd_back():
    # Run a system forwards then backwards,
    # and check that we end up in the same place.
    D = 10
    t0 = 0.1
    t1 = 2.2
    y0 = np.linspace(0.1, 0.9, D)

    def f(y, t):
        return -np.sqrt(t) - y + 0.1 - np.mean((y + 0.2)**2)

    ys = odeint(f, y0, np.array([t0, t1]), atol=1e-8, rtol=1e-8)
    rys = odeint(f, ys[-1], np.array([t1, t0]), atol=1e-8, rtol=1e-8)

    assert np.allclose(y0, rys[-1])
Example #17
def generate_dataset(n_i, n_b, n_cx, n_ct, n_dx, n_dt):
    x_i = jnp.linspace(*domain[:, 0], n_i).reshape((-1, 1))
    t_i = jnp.zeros_like(x_i)
    u_i = u0_fn(x_i, t_i)
    v_i = v0_fn(x_i, t_i)

    x_l = jnp.ones((n_b, 1)) * domain[0, 0]
    x_r = jnp.ones((n_b, 1)) * domain[1, 0]
    t_b = jnp.linspace(*domain[:, 1], n_b).reshape((-1, 1))
    u_l = ul_fn(x_l, t_b)
    u_r = ur_fn(x_r, t_b)
    v_l = vl_fn(x_l, t_b)
    v_r = vr_fn(x_r, t_b)

    x_c = jnp.linspace(*domain[:, 0], n_cx).reshape((-1, 1))
    t_c = jnp.linspace(*domain[:, 1], n_ct).reshape((-1, 1))
    xt_c = tensor_grid([x_c, t_c])

    data = loadmat(data_file)
    u_d, v_d = data["u_snapshots"], data["v_snapshots"]
    x_d, t_d = data["x"], data["t"].T
    u_d = u_d[1:-1:(len(x_d) // n_dx), 1:-1:(len(t_d) // n_dt)].reshape((-1, 1))
    v_d = v_d[1:-1:(len(x_d) // n_dx), 1:-1:(len(t_d) // n_dt)].reshape((-1, 1))
    x_d = x_d[1:-1:(len(x_d) // n_dx)]
    t_d = t_d[1:-1:(len(t_d) // n_dt)]
    xt_d = tensor_grid([x_d, t_d])

    dirichlet = dataset_Dirichlet(jnp.vstack([x_i, x_l, x_r, xt_d[:, 0:1]]),
                                  jnp.vstack([t_i, t_b, t_b, xt_d[:, 1:2]]),
                                  jnp.vstack([u_i, u_l, u_r, u_d]),
                                  jnp.vstack([v_i, v_l, v_r, v_d]))
    collocation = dataset_Collocation(jnp.vstack([xt_c[:, 0:1], xt_d[:, 0:1]]),
                                      jnp.vstack([xt_c[:, 1:2], xt_d[:, 1:2]]))
    return dirichlet, collocation
Example #18
  def test_complex_odeint(self):
    # https://github.com/google/jax/issues/3986

    def dy_dt(y, t, alpha):
      return alpha * y

    def f(y0, ts, alpha):
      return odeint(dy_dt, y0, ts, alpha).real

    alpha = 3 + 4j
    y0 = 1 + 2j
    ts = jnp.linspace(0., 1., 11)
    tol = 1e-1 if jtu.num_float_bits(np.float64) == 32 else 1e-3

    jtu.check_grads(f, (y0, ts, alpha), modes=["rev"], order=2, atol=tol, rtol=tol)
Example #19
  def test_get_atomic_chain_potential_exponential_coulomb(self):
    potential = utils.get_atomic_chain_potential(
        grids=jnp.linspace(-10, 10, 201),
        locations=jnp.array([0., 1.]),
        nuclear_charges=jnp.array([2, 1]),
        interaction_fn=utils.exponential_coulomb)
    # -2 * 1.071295 * jnp.exp(-np.abs(10) / 2.385345) - 1.071295 * jnp.exp(
    #     -np.abs(11) / 2.385345) = -0.04302427
    self.assertAlmostEqual(float(potential[0]), -0.04302427)
    # -2 * 1.071295 * jnp.exp(-np.abs(0) / 2.385345) - 1.071295 * jnp.exp(
    #     -np.abs(1) / 2.385345) = -2.84702559
    self.assertAlmostEqual(float(potential[100]), -2.84702559)
    # -2 * 1.071295 * jnp.exp(-np.abs(10) / 2.385345) - 1.071295 * jnp.exp(
    #     -np.abs(9) / 2.385345) = -0.05699946
    self.assertAlmostEqual(float(potential[200]), -0.05699946)
Example #20
  def test_shape(self, num_classes):
    resolutions = [8, 4]
    channels = [1, 2]
    batch_size = 2
    model = models.CNNClassifier(num_classes,
                                 resolutions,
                                 channels,
                                 axis_name=None)
    resolution = resolutions[0]
    shape = [batch_size, resolution, resolution, 1, channels[0]]
    inputs = jnp.linspace(-1, 1, np.prod(shape)).reshape(shape)
    params = model.init(_JAX_RANDOM_KEY, inputs, train=False)
    outputs = model.apply(params, inputs, train=False)

    self.assertEqual(outputs.shape, (batch_size, num_classes))
Example #21
  def test_get_xc_potential(self):
    grids = jnp.linspace(-5, 5, 10001)
    # We use the form of the 3d LDA exchange functional as an example, so the
    # correlation contribution is 0.
    # exchange energy = -0.73855 \int n^(4 / 3) dx
    # The exchange potential should be -0.73855 * (4 / 3) n^(1 / 3),
    # obtained by taking the functional derivative of the exchange energy.
    xc_energy_density_fn = lambda density: -0.73855 * density ** (1 / 3)
    density = jnp.exp(-(grids - 1) ** 2)
    np.testing.assert_allclose(
        scf.get_xc_potential(
            density,
            xc_energy_density_fn=xc_energy_density_fn,
            grids=grids),
        -0.73855 * (4 / 3) * density ** (1 / 3))
Example #22
def piecewise_constant_pdf(key, bins, weights, num_samples, randomized):
  """Piecewise-Constant PDF sampling.

  Args:
    key: jnp.ndarray(float32), [2,], random number generator.
    bins: jnp.ndarray(float32), [batch_size, num_bins + 1].
    weights: jnp.ndarray(float32), [batch_size, num_bins].
    num_samples: int, the number of samples.
    randomized: bool, use randomized samples.

  Returns:
    z_samples: jnp.ndarray(float32), [batch_size, num_samples].
  """
  eps = 1e-5

  # Get pdf
  weights += eps  # prevent nans
  pdf = weights / weights.sum(axis=-1, keepdims=True)
  cdf = jnp.cumsum(pdf, axis=-1)
  cdf = jnp.concatenate([jnp.zeros(list(cdf.shape[:-1]) + [1]), cdf], axis=-1)

  # Take uniform samples
  if randomized:
    u = random.uniform(key, list(cdf.shape[:-1]) + [num_samples])
  else:
    u = jnp.linspace(0., 1., num_samples)
    u = jnp.broadcast_to(u, list(cdf.shape[:-1]) + [num_samples])

  # Invert CDF. This takes advantage of the fact that `bins` is sorted.
  mask = (u[Ellipsis, None, :] >= cdf[Ellipsis, :, None])

  def minmax(x):
    x0 = jnp.max(jnp.where(mask, x[Ellipsis, None], x[Ellipsis, :1, None]), -2)
    x1 = jnp.min(jnp.where(~mask, x[Ellipsis, None], x[Ellipsis, -1:, None]), -2)
    x0 = jnp.minimum(x0, x[Ellipsis, -2:-1])
    x1 = jnp.maximum(x1, x[Ellipsis, 1:2])
    return x0, x1

  bins_g0, bins_g1 = minmax(bins)
  cdf_g0, cdf_g1 = minmax(cdf)

  denom = (cdf_g1 - cdf_g0)
  denom = jnp.where(denom < eps, 1., denom)
  t = (u - cdf_g0) / denom
  z_samples = bins_g0 + t * (bins_g1 - bins_g0)

  # Prevent gradient from backprop-ing through samples
  return lax.stop_gradient(z_samples)
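A minimal sampling sketch, assuming jax is available as in the function above:

from jax import random
import jax.numpy as jnp

key = random.PRNGKey(0)
bins = jnp.linspace(0., 1., 9)[None, :]   # [1, num_bins + 1]
weights = jnp.ones((1, 8))                # a uniform histogram
z = piecewise_constant_pdf(key, bins, weights, num_samples=16, randomized=False)
assert z.shape == (1, 16)
assert bool((z >= 0.).all()) and bool((z <= 1.).all())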
Example #23
def plot_loss(ax, paths=None):
    x, y = np.meshgrid(np.arange(xmin, xmax + xstep, xstep),
                       np.arange(ymin, ymax + ystep, ystep))
    z = x + y
    ax.contourf(x,
                y,
                z,
                alpha=0.15,
                levels=np.linspace(0., 10., 100),
                cmap=plt.cm.jet,
                antialiased=True,
                extend='both')
    path_lines = []
    path_points = []
    if paths is not None:
        for path, alpha in zip(paths, ALPHAS):
            pathx, pathy = path
            ax.quiver(pathx[:-1],
                      pathy[:-1],
                      pathx[1:] - pathx[:-1],
                      pathy[1:] - pathy[:-1],
                      scale_units='xy',
                      angles='xy',
                      scale=1,
                      color='gray',
                      alpha=0.75)

            line, = ax.plot([], [],
                            '',
                            label=alpha,
                            lw=2,
                            alpha=0.5,
                            color='darkviolet')
            point, = ax.plot([], [],
                             'o',
                             markersize=12,
                             alpha=0.7,
                             color='darkviolet')
            path_lines.append(line)
            path_points.append(point)

    ax.set_xlabel('$J_x$')
    ax.set_ylabel('$J_y$')

    ax.set_xlim((xmin, xmax))
    ax.set_ylim((ymin, ymax))

    return path_lines, path_points
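plot_loss reads several module-level names that are not shown here. A hypothetical setup, purely to make the snippet self-contained; all values are illustrative assumptions:

import numpy as np
import matplotlib.pyplot as plt

# Hypothetical module-level configuration assumed by plot_loss.
xmin, xmax, xstep = -5.0, 5.0, 0.1
ymin, ymax, ystep = -5.0, 5.0, 0.1
ALPHAS = (0.1, 0.5, 0.9)   # one legend label per optimization path

fig, ax = plt.subplots()
path_lines, path_points = plot_loss(ax, paths=None)
plt.show()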
Example #24
def make_networks(
    spec: specs.EnvironmentSpec,
    policy_layer_sizes: Sequence[int] = (300, 200),
    critic_layer_sizes: Sequence[int] = (400, 300),
    vmin: float = -150.,
    vmax: float = 150.,
    num_atoms: int = 51,
) -> D4PGNetworks:
    """Creates networks used by the agent."""

    action_spec = spec.actions

    num_dimensions = np.prod(action_spec.shape, dtype=int)
    critic_atoms = jnp.linspace(vmin, vmax, num_atoms)

    def _actor_fn(obs):
        network = hk.Sequential([
            utils.batch_concat,
            networks_lib.LayerNormMLP(policy_layer_sizes, activate_final=True),
            networks_lib.NearZeroInitializedLinear(num_dimensions),
            networks_lib.TanhToSpec(action_spec),
        ])
        return network(obs)

    def _critic_fn(obs, action):
        network = hk.Sequential([
            utils.batch_concat,
            networks_lib.LayerNormMLP(
                layer_sizes=[*critic_layer_sizes, num_atoms]),
        ])
        value = network([obs, action])
        return value, critic_atoms

    policy = hk.without_apply_rng(hk.transform(_actor_fn))
    critic = hk.without_apply_rng(hk.transform(_critic_fn))

    # Create dummy observations and actions to create network parameters.
    dummy_action = utils.zeros_like(spec.actions)
    dummy_obs = utils.zeros_like(spec.observations)
    dummy_action = utils.add_batch_dim(dummy_action)
    dummy_obs = utils.add_batch_dim(dummy_obs)

    return D4PGNetworks(
        policy_network=networks_lib.FeedForwardNetwork(
            lambda rng: policy.init(rng, dummy_obs), policy.apply),
        critic_network=networks_lib.FeedForwardNetwork(
            lambda rng: critic.init(rng, dummy_obs, dummy_action),
            critic.apply))
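A construction sketch for make_networks; the EnvironmentSpec fields follow dm_env/acme conventions, and the shapes here are illustrative assumptions:

import numpy as np
from acme import specs

spec = specs.EnvironmentSpec(
    observations=specs.Array(shape=(8,), dtype=np.float32),
    actions=specs.BoundedArray(shape=(2,), dtype=np.float32,
                               minimum=-1.0, maximum=1.0),
    rewards=specs.Array(shape=(), dtype=np.float32),
    discounts=specs.BoundedArray(shape=(), dtype=np.float32,
                                 minimum=0.0, maximum=1.0))
networks = make_networks(spec, num_atoms=51)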
Example #25
  def test_get_hartree_energy(self, interaction_fn):
    grids = jnp.linspace(-5, 5, 11)
    dx = utils.get_dx(grids)
    density = utils.gaussian(grids=grids, center=1., sigma=1.)

    # Compute the expected Hartree energy by nested for loops.
    expected_hartree_energy = 0.
    for x_0, n_0 in zip(grids, density):
      for x_1, n_1 in zip(grids, density):
        expected_hartree_energy += 0.5 * n_0 * n_1 * interaction_fn(
            x_0 - x_1) * dx ** 2

    self.assertAlmostEqual(
        float(scf.get_hartree_energy(
            density=density, grids=grids, interaction_fn=interaction_fn)),
        float(expected_hartree_energy))
Example #26
  def test_swsft_forward_spins_channels_matches_swsft_forward(self):
    transformer = _get_transformer()
    resolution = 16
    n_channels = 2
    spins = (0, 1)
    shape = (resolution, resolution, len(spins), n_channels)
    sphere_set = jnp.linspace(-1, 1, np.prod(shape)).reshape(shape)
    coefficients = transformer.swsft_forward_spins_channels(sphere_set, spins)
    for channel in range(n_channels):
      for spin in spins:
        # Slices must match swsft_forward().
        sliced = transformer.swsft_forward(
            sphere_set[Ellipsis, spin, channel], spin)
        self.assertAllClose(coefficients[Ellipsis, spin, channel], sliced)
Example #27
def test_conjugate():
    key = jr.PRNGKey(123)
    kern = to_spectral(RBF(), 10)
    posterior = Prior(kernel=kern) * Gaussian()
    x = jnp.linspace(-1.0, 1.0, 20).reshape(-1, 1)
    y = jnp.sin(x)
    params = initialise(key, posterior)
    config = get_defaults()
    unconstrainer, constrainer = build_all_transforms(params.keys(), config)
    params = unconstrainer(params)
    mll = marginal_ll(posterior, transform=constrainer)
    assert isinstance(mll, Callable)
    neg_mll = marginal_ll(posterior, transform=constrainer, negative=True)
    assert neg_mll(params, x, y) == jnp.array(-1.0) * mll(params, x, y)
    nmll = neg_mll(params, x, y)
    assert nmll.shape == ()
Example #28
def set_axhlines(ax, ys):
    """Paint len(ys) segmented horizontal lines onto plot given by ax."""
    try:
        ns = len(ys)
    except TypeError:
        ys = [ys]
        ns = len(ys)
    grid = np.linspace(0, 1, num=ns + 1)

    for i, y in enumerate(ys):
        ax.axhline(y,
                   xmin=grid[i],
                   xmax=grid[i + 1],
                   color="k",
                   linestyle="--",
                   linewidth=2.5)
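A short usage sketch for set_axhlines, assuming the usual numpy/matplotlib imports:

import numpy as np
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot(np.linspace(0, 1, 50), np.sin(np.linspace(0, 10, 50)))
set_axhlines(ax, [0.5, -0.5, 0.0])   # three segmented horizontal lines
set_axhlines(ax, 0.25)               # a scalar also works via the TypeError branch
plt.show()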
Example #29
  def test_get_hartree_potential(self, interaction_fn):
    grids = jnp.linspace(-5, 5, 11)
    dx = utils.get_dx(grids)
    density = utils.gaussian(grids=grids, center=1., sigma=1.)

    # Compute the expected Hartree energy by nested for loops.
    expected_hartree_potential = np.zeros_like(grids)
    for i, x_0 in enumerate(grids):
      for x_1, n_1 in zip(grids, density):
        expected_hartree_potential[i] += np.sum(
            n_1 * interaction_fn(x_0 - x_1)) * dx

    np.testing.assert_allclose(
        scf.get_hartree_potential(
            density=density, grids=grids, interaction_fn=interaction_fn),
        expected_hartree_potential)
Example #30
  def test_get_xc_potential_hartree(self):
    grids = jnp.linspace(-5, 5, 10001)
    density = utils.gaussian(grids=grids, center=1., sigma=1.)
    def half_hartree_potential(density):
      return 0.5 * scf.get_hartree_potential(
          density=density,
          grids=grids,
          interaction_fn=utils.exponential_coulomb)

    np.testing.assert_allclose(
        scf.get_xc_potential(
            density=density,
            xc_energy_density_fn=half_hartree_potential,
            grids=grids),
        scf.get_hartree_potential(
            density, grids=grids, interaction_fn=utils.exponential_coulomb))