def testSamples(self):
    """Checks that sampled values follow an Ornstein-Uhlenbeck process.

    Under the OU update `x' = (1 - theta) * x + sigma * noise`, the
    residuals `x_next - (1 - theta) * x` are i.i.d. with zero mean and
    variance `sigma ** 2`; this verifies both sample statistics.
    """
    # A large draw count keeps the sample statistics close to the
    # population values, so the tolerance-based asserts stay non-flaky.
    num_samples = 1000
    damping, stddev = 0.1, 0.2
    ou_var = common.ornstein_uhlenbeck_process(
        tf.zeros([10]), damping=damping, stddev=stddev)
    draws = np.ndarray([num_samples, 10])
    self.evaluate(tf.compat.v1.global_variables_initializer())
    for step in range(num_samples):
        draws[step] = self.evaluate(ou_var)
    # Vectorized form of the per-step residual x_next - (1 - theta) * x.
    residuals = draws[1:] - (1 - damping) * draws[:-1]
    flat = residuals.ravel()
    sample_mean, sample_var = flat.mean(), flat.var()
    # Sample statistics only match the population values to one or two
    # decimal places, so the comparisons are deliberately loose.
    self.assertAlmostEqual(sample_mean, 0.0, places=1)
    self.assertAlmostEqual(sample_var, stddev * stddev, places=2)
def testMultipleSamples(self):
    """Checks that two independent process variables yield distinct samples."""
    damping, stddev = 0.1, 0.2
    first = common.ornstein_uhlenbeck_process(
        tf.zeros([10]), damping=damping, stddev=stddev)
    second = common.ornstein_uhlenbeck_process(
        tf.zeros([10]), damping=damping, stddev=stddev)
    draws = np.ndarray([100, 10, 2])
    self.evaluate(tf.compat.v1.global_variables_initializer())
    for step in range(100):
        draws[step, :, 0], draws[step, :, 1] = self.evaluate([first, second])
    # If the two variables shared their noise, the paths would coincide
    # and the mean absolute gap would be exactly zero.
    gap = np.absolute(draws[:, :, 0] - draws[:, :, 1]).mean()
    self.assertGreater(gap, 0.0)