def prepare(self, autobuild=True):
    """Build a Foo model carrying three 2x2 random DataHolders.

    :param autobuild: when True, compile the model before returning.
    :return: tuple of (model, numpy RandomState used to draw the data).
    """
    rng = np.random.RandomState()
    with gpflow.defer_build():
        model = Foo()
        model.X = gpflow.DataHolder(rng.randn(2, 2))
        model.Y = gpflow.DataHolder(rng.randn(2, 2))
        model.Z = gpflow.DataHolder(rng.randn(2, 2))
    if autobuild:
        model.compile()
    return model, rng
def test_str(self):
    """Pretty-printed tables of Param, DataHolder and Parameterized match expectations."""
    with self.test_context():

        def check_str(obj, template):
            # `p.name` is looked up at call time on purpose: attaching `p`
            # to a Parameterized may rename it, so do not hoist this.
            expected = [tok for tok in template.format(name=p.name).split(' ') if tok]
            actual = [tok for tok in str(obj).split(' ') if tok]
            print(expected)
            print(actual)
            self.assertEqual(expected, actual)

        p_str = (' class prior transform trainable shape '
                 'fixed_shape value\n{name} Parameter None (none)'
                 ' True () True 1.0')
        p = gpflow.Param(1., name="short")
        check_str(p, p_str)

        d_str = (' class shape fixed_shape value'
                 '\n{name} DataHolder () False 1.0')
        d = gpflow.DataHolder(1., name="short")
        check_str(d, d_str)

        params_str = (' class prior transform trainable shape'
                      ' fixed_shape value\n{name}/p Parameter None'
                      ' (none) True () True 1.0')
        params = gpflow.Parameterized(name="short")
        params.p = p
        params.d = d
        check_str(params, params_str)
def test_fixed_shape(self):
    """A DataHolder's shape is flexible until fix_shape(), then mismatched assigns fail.

    Also fixes a tautological assertion: the original asserted
    ``assert_allclose(1., 1.)`` which can never fail; it now checks the
    stored value of the holder instead.
    """
    with self.test_context():
        p = gpflow.DataHolder(1.)
        # Was: assert_allclose(1., 1.) — a no-op; verify the holder's value.
        assert_allclose(p.read_value(), 1.)
        self.assertFalse(p.fixed_shape)
        self.assertAllEqual(p.shape, ())

        # While the shape is not fixed, assigning a differently-shaped
        # value silently reshapes the holder.
        value = [10., 10.]
        p.assign(value)
        assert_allclose(p.read_value(), value)
        self.assertFalse(p.fixed_shape)
        self.assertAllEqual(p.shape, (2, ))

        # After fix_shape() the current shape is locked in.
        p.fix_shape()
        assert_allclose(p.read_value(), value)
        self.assertTrue(p.fixed_shape)
        self.assertAllEqual(p.shape, (2, ))

        # Same-shaped assignment still works…
        p.assign(np.zeros(p.shape))
        value = np.zeros(p.shape)

        # …but any shape mismatch raises, even with force=True, and the
        # stored value is left untouched.
        with self.assertRaises(ValueError):
            p.assign([1.], force=True)
        assert_allclose(p.read_value(), value)
        with self.assertRaises(ValueError):
            p.assign(1., force=True)
        assert_allclose(p.read_value(), value)
        with self.assertRaises(ValueError):
            p.assign(np.zeros((3, 3)), force=True)
        assert_allclose(p.read_value(), value)
def __init__(self, obs_weight, X, Y, kern, likelihood, feat=None,
             mean_function=None, num_latent=None, q_diag=False, whiten=True,
             minibatch_size=None, Z=None, num_data=None, **kwargs):
    """SVGP variant carrying a per-observation weight alongside each data point.

    :param obs_weight: array of weights, one per row of X/Y.
    :param minibatch_size: if given, both the data (via the parent) and
        obs_weight are minibatched with the same size.
    Remaining arguments are forwarded to the parent SVGP constructor.
    """
    # Bug fix: the parent was previously always given minibatch_size=None,
    # so X/Y stayed full-size while obs_weight was minibatched — their batch
    # dimensions could never line up. Forward the actual size instead.
    # NOTE(review): the obs_weight Minibatch uses seed=0; confirm the parent
    # seeds its X/Y minibatches identically so rows stay aligned.
    super(WeightedSVGP, self).__init__(
        X, Y, kern, likelihood, feat=feat, mean_function=mean_function,
        num_latent=num_latent, q_diag=q_diag, whiten=whiten,
        minibatch_size=minibatch_size, Z=Z, num_data=num_data, **kwargs)
    if minibatch_size is None:
        self.obs_weight = gp.DataHolder(obs_weight)
    else:
        self.obs_weight = gp.Minibatch(
            obs_weight, batch_size=minibatch_size, seed=0)
def prepare(self):
    """Build a Foo model with an integer-typed 2x2 DataHolder; return (model, rng)."""
    model = Foo(autobuild=False)
    rng = np.random.RandomState()
    model.X = gpflow.DataHolder(
        rng.randint(0, 10, (2, 2)),
        dtype=gpflow.settings.int_type)
    model.compile()
    return model, rng
def create_layout():
    """Return a small Parameterized tree: p.{a,b}, p.c.{d,e} for layout tests."""
    root = gpflow.Parameterized(name='p')
    root.a = gpflow.Param(10.)
    root.b = gpflow.Param(11.)
    child = gpflow.Parameterized()
    child.d = gpflow.Param(12., fix_shape=False)
    child.e = gpflow.DataHolder(13.)
    root.c = child
    return root
def test_is_built(self):
    """is_built rejects a None graph; coherence check fails on a foreign graph."""
    with self.test_context():
        holder = gpflow.DataHolder(1.0)
        with self.assertRaises(ValueError):
            holder.is_built(None)
        # A brand-new graph is not the one the holder belongs to.
        with self.assertRaises(gpflow.GPflowError):
            holder.is_built_coherence(tf.Graph())
def test_create_dataholder(self):
    """DataHolder construction infers shape and dtype; TF inputs imply a fixed shape."""
    with self.test_context():

        def verify(holder, shape, dtype, fixed_shape):
            # Common assertions for every construction case below.
            self.assertAllEqual(holder.shape, shape)
            self.assertEqual(holder.dtype, dtype)
            if fixed_shape:
                self.assertTrue(holder.fixed_shape)
            else:
                self.assertFalse(holder.fixed_shape)
            self.assertFalse(holder.trainable)

        size = 10
        # numpy array, default dtype.
        verify(gpflow.DataHolder(np.ones((size, ))),
               (size, ), np.float64, False)
        # numpy array with an explicit dtype.
        verify(gpflow.DataHolder(np.ones((size, )),
                                 dtype=gpflow.settings.float_type),
               (size, ), gpflow.settings.float_type, False)
        # python scalars.
        verify(gpflow.DataHolder(1), (), np.int32, False)
        verify(gpflow.DataHolder(1.0), (), np.float64, False)
        # python list.
        verify(gpflow.DataHolder([1.] * size), (size, ), np.float64, False)
        # fix_shape requested explicitly.
        verify(gpflow.DataHolder(1.0, fix_shape=True), (), np.float64, True)
        # TF variable and tensor inputs always come with a fixed shape.
        var = tf.get_variable('dataholder', shape=(), trainable=False)
        verify(gpflow.DataHolder(var), (), np.float32, True)
        verify(gpflow.DataHolder(var + 1), (), np.float32, True)
def __init__(self, X, Y, ms, a, b, kern_list):
    """GP model with one Matern kernel per input dimension on fixed intervals.

    :param X: inputs, one column per kernel; column i must lie strictly
        inside the open interval (a[i], b[i]).
    :param Y: targets.
    :param ms: array of frequencies for the Fourier features.
    :param a, b: per-dimension interval bounds (arrays, one entry per kernel).
    :param kern_list: one Matern12/32/52 kernel per input column.
    """
    assert X.shape[1] == len(kern_list)
    assert a.size == len(kern_list)
    assert b.size == len(kern_list)
    for kern in kern_list:
        assert isinstance(kern, (gpflow.kernels.Matern12,
                                 gpflow.kernels.Matern32,
                                 gpflow.kernels.Matern52))
    likelihood = gpflow.likelihoods.Gaussian()
    mean_function = gpflow.mean_functions.Zero()
    gpflow.models.GPModel.__init__(self, X, Y, None, likelihood,
                                   mean_function)
    self.num_data = X.shape[0]
    self.num_latent = Y.shape[1]
    self.a = a
    self.b = b
    self.ms = ms
    self.kerns = gpflow.ParamList(kern_list)

    # Pre-compute static quantities, chunking the data to bound memory use.
    self.tr_YTY = gpflow.DataHolder(np.sum(np.square(Y)))
    Mtotal = (2 * self.ms.size - 1) * X.shape[1]
    self.KufY = np.zeros((Mtotal, 1))
    self.KufKfu = np.zeros((Mtotal, Mtotal))
    for start in range(0, X.shape[0], 10000):
        Xchunk = X[start:start + 10000]
        Ychunk = Y[start:start + 10000]
        Kuf_chunk = np.empty((0, Xchunk.shape[0]))
        KufY_chunk = np.empty((0, Ychunk.shape[1]))
        # Bug fix: the inner loop previously reused `i`, shadowing the
        # chunk index above — harmless in Python only by accident; a
        # distinct name removes the trap.
        for dim, (ai, bi) in enumerate(zip(self.a, self.b)):
            assert np.all(Xchunk[:, dim] > ai)
            assert np.all(Xchunk[:, dim] < bi)
            Kuf = make_Kuf_np(Xchunk[:, dim:dim + 1], ai, bi, self.ms)
            KufY_chunk = np.vstack((KufY_chunk, np.dot(Kuf, Ychunk)))
            Kuf_chunk = np.vstack((Kuf_chunk, Kuf))
        self.KufKfu += np.dot(Kuf_chunk, Kuf_chunk.T)
        self.KufY += KufY_chunk
    self.KufY = gpflow.DataHolder(self.KufY)
    self.KufKfu = gpflow.DataHolder(self.KufKfu)
def __init__(self, weights, X, Y, Z, kernels, likelihood,
             num_outputs=None,
             # NOTE(review): mutable default — one Zero() instance is shared
             # by all WeightedDGP models; left unchanged to preserve the
             # existing (possibly relied-upon) sharing semantics.
             mean_function=gp.mean_functions.Zero(),  # final-layer mean function
             minibatch_size=128,
             **kwargs):
    """DGP variant carrying a per-observation weight alongside each data point.

    :param weights: array of weights, one per row of X/Y.
    :param minibatch_size: batch size for the weights; previously hard-coded
        to 128 inside the body (which made the non-minibatch branch dead
        code). Now a keyword parameter with the same default, so existing
        callers see identical behaviour while 0/None disables minibatching.
    Remaining arguments are forwarded to the parent DGP constructor.
    """
    super(WeightedDGP, self).__init__(
        X, Y, Z, kernels, likelihood,
        num_outputs=num_outputs,
        mean_function=mean_function,
        **kwargs)
    if minibatch_size:
        self.weights = gp.Minibatch(weights, minibatch_size, seed=0)
    else:
        self.weights = gp.DataHolder(weights)
def test_failed_creation(self):
    """DataHolder must reject every value it cannot convert to a tensor."""
    with self.test_context():
        # Trailing comma is deliberate: a tuple wrapping a variable is one
        # of the invalid inputs under test.
        bad_tuple = tf.get_variable('dataholder', shape=(1, )),
        values = [
            bad_tuple,
            [1, [1, [1]]],
            None,
            "test",
            object(),
        ]
        for value in values:
            with self.assertRaises(ValueError, msg='Value {}'.format(value)):
                # Bug fix: the original passed `tensor` here for every
                # iteration, so the other five bad values were never
                # actually exercised.
                gpflow.DataHolder(value)
def test_str(self):
    """Auto-generated (default) names appear in the printed representations."""
    with self.test_context():
        expected_param = (' class prior transform trainable shape '
                          'fixed_shape value\nParameter Parameter None (none)'
                          ' True () True 1.0')
        param = gpflow.Param(1.)
        self.assertEqual(expected_param.format('Parameter'), str(param))

        expected_holder = (' class shape fixed_shape value'
                           '\nDataHolder DataHolder () False 1.0')
        holder = gpflow.DataHolder(1.)
        self.assertEqual(expected_holder, str(holder))

        expected_params = (' class prior transform trainable shape'
                           ' fixed_shape value\nParameterized/p Parameter None'
                           ' (none) True () True 1.0')
        container = gpflow.Parameterized()
        container.p = param
        container.d = holder
        self.assertEqual(expected_params, str(container))
def __init__(self):
    """Expose two scalar DataHolders, `a` and `b`, each initialised to 2.0."""
    super(OriginAllDataholders, self).__init__()
    for attr in ('a', 'b'):
        setattr(self, attr, gpflow.DataHolder(np.array(2.)))
def __init__(self):
    """Extend DumbModel with a one-element DataHolder attribute `b`."""
    DumbModel.__init__(self)
    initial = np.array([3.])
    self.b = gpflow.DataHolder(initial)
def as_data_holder(input):
    """Return *input* unchanged if it already is a gpflow.DataHolder, else wrap it.

    NOTE(review): the parameter shadows the builtin `input`; renaming it
    would break keyword callers, so it is left as-is.
    """
    if isinstance(input, gpflow.DataHolder):
        return input
    return gpflow.DataHolder(input)