def test_normal_sampling():
    """Check the empirical moments of samples drawn from a `Normal`."""
    for mean in [0, 1]:
        dist = Normal(mean, 3 * B.eye(np.int32, 200))

        # Sample without noise.
        samples = dist.sample(2000)
        approx(B.mean(samples), mean, atol=5e-2)
        approx(B.std(samples) ** 2, 3, atol=5e-2)

        # Sample with noise
        samples = dist.sample(2000, noise=2)
        approx(B.mean(samples), mean, atol=5e-2)
        approx(B.std(samples) ** 2, 5, atol=5e-2)
def test_normal_sampling():
    """Check sample moments of a `Normal` and reproducibility under a seeded
    random state."""
    for mean in [0, 1]:
        dist = Normal(mean, 3 * B.eye(np.int32, 200))

        # Sample without noise.
        samples = dist.sample(2000)
        approx(B.mean(samples), mean, atol=5e-2)
        approx(B.std(samples) ** 2, 3, atol=5e-2)

        # Sample with noise
        samples = dist.sample(2000, noise=2)
        approx(B.mean(samples), mean, atol=5e-2)
        approx(B.std(samples) ** 2, 5, atol=5e-2)

        # Sampling with an explicit random state must be reproducible: the
        # same seed must yield the same sample, and the state is threaded
        # through the return value.
        state, sample1 = dist.sample(B.create_random_state(B.dtype(dist), seed=0))
        state, sample2 = dist.sample(B.create_random_state(B.dtype(dist), seed=0))
        assert isinstance(state, B.RandomState)
        approx(sample1, sample2)
def test_normalise():
    """Check the weight bookkeeping and the output statistics of `Normalise`."""
    layer = Normalise(epsilon=0)
    x = B.randn(10, 5, 3)

    # Check number of weights and width.
    assert layer.num_weights(10) == 0
    assert layer.width == 10

    # Check initialisation and width.
    layer.initialise(3, None)
    assert layer.width == 3

    # Check correctness
    out = layer(x)
    approx(B.std(out, axis=2), B.ones(10, 5), rtol=1e-4)
    approx(B.mean(out, axis=2), B.zeros(10, 5), atol=1e-4)
def summarise_samples(x, samples, db=False):
    """Summarise samples.

    Args:
        x (vector): Inputs of samples.
        samples (tensor): Samples, with the first dimension corresponding
            to different samples.
        db (bool, optional): Convert to decibels.

    Returns:
        :class:`collections.namedtuple`: Named tuple containing various
            statistics of the samples.
    """
    x, samples = B.to_numpy(x, samples)

    # Pick three random samples to return alongside the summary statistics.
    random_inds = np.random.permutation(B.shape(samples)[0])[:3]

    def maybe_db(z):
        # Convert to decibels only when requested.
        return 10 * np.log10(z) if db else z

    # Reverse all dimensions.
    perm = tuple(reversed(range(B.rank(samples))))

    return collect(
        x=B.to_numpy(x),
        mean=maybe_db(B.mean(samples, axis=0)),
        var=maybe_db(B.std(samples, axis=0)) ** 2,
        err_68_lower=maybe_db(B.quantile(samples, 0.32, axis=0)),
        err_68_upper=maybe_db(B.quantile(samples, 1 - 0.32, axis=0)),
        err_95_lower=maybe_db(B.quantile(samples, 0.025, axis=0)),
        err_95_upper=maybe_db(B.quantile(samples, 1 - 0.025, axis=0)),
        err_99_lower=maybe_db(B.quantile(samples, 0.0015, axis=0)),
        err_99_upper=maybe_db(B.quantile(samples, 1 - 0.0015, axis=0)),
        samples=maybe_db(B.transpose(samples, perm=perm)[..., random_inds]),
        all_samples=maybe_db(B.transpose(samples, perm=perm)),
    )
def test_sample_noisy(construct_oilmm, x):
    """A noisy sample from a heavily noise-amplified OILMM should have high
    variance."""
    oilmm = construct_oilmm(noise_amplification=1000)
    sample = oilmm.sample(x, latent=False)

    # Test that sample has high variance.
    assert B.std(sample) > 10
def test_sample_noiseless(construct_oilmm, x):
    """A latent (noiseless) sample should be unaffected by the amplified
    observation noise and thus have low variance."""
    oilmm = construct_oilmm(noise_amplification=1000)
    sample = oilmm.sample(x, latent=True)

    # Test that sample has low variance.
    assert B.std(sample) < 4
import lab as B
import numpy as np

from experiments.experiment import run, setup
from wbml.data.mauna_loa import load

# Experiment script: fit GPCM models to the last `n` observations of the
# GP-detrended Mauna Loa CO2 series.
args, wd = setup("mauna_loa")

n = 200
data = load(detrend_method="gp")
t = np.array(data.index)[-n:]
t = t - t[0]  # Shift time so the series starts at zero.
y = np.array(data["ppm_detrended"])[-n:]

# Normalise to zero mean and unity variance.
y -= B.mean(y)
y /= B.std(y)

# Setup GPCM models.
noise = 0.05
window = 5
scale = 1 / 12

# NOTE(review): the original `run(...)` call was unterminated (missing closing
# parenthesis); closed here with the keyword arguments that were visible.
run(
    args=args,
    wd=wd,
    noise=noise,
    window=window,
    scale=scale,
    fix_window_scale=True,
    t=t,
    y=y,
)
def __call__(self, x):
    """Normalise `x` over its last axis to zero mean and unit standard
    deviation, with `self.epsilon` added to the denominator for numerical
    stability."""
    # assumes x is rank 3, normalised over axis 2 — TODO confirm with callers
    location = B.mean(x, axis=2)[:, :, None]
    spread = B.std(x, axis=2)[:, :, None]
    return (x - location) / (spread + self.epsilon)