def increment_assert(i):
    """Create a fresh Param and check that its auto-assigned index suffix equals *i*."""
    fresh = gpflow.Param(1)
    suffix = fresh.index.split("-")[-1]
    assert suffix == i
def __init__(self, state_dim, control_dim):
    """Zero-initialised linear map with a state offset and a control offset.

    Creates three trainable Params: W (control_dim x state_dim),
    t (1 x state_dim) and b (1 x control_dim), all zeros.
    """
    gpflow.Parameterized.__init__(self)
    # Build the initial values first, then wrap each in a Param
    # (Param creation order kept as-is: W, t, b).
    w_init = np.zeros((control_dim, state_dim))
    t_init = np.zeros((1, state_dim))
    b_init = np.zeros((1, control_dim))
    self.W = gpflow.Param(w_init)
    self.t = gpflow.Param(t_init)
    self.b = gpflow.Param(b_init)
def setUp(self):
    """Build one standalone Param plus a Parameterized container holding two Params."""
    with self.test_context():
        self.p = gpflow.Param(1.0)
        container = gpflow.params.Parameterized()
        container.p = gpflow.Param(1.0)
        container.b = gpflow.Param(1.0)
        self.m = container
def test_assign_tensor(self):
    """A Param wrapping an externally created tensor must reject direct assignment."""
    with self.test_context():
        backing = tf.get_variable('a', shape=())
        wrapped = gpflow.Param(backing)
        with self.assertRaises(GPflowError):
            wrapped.assign(10)
def test_existing_tensor(self):
    """Creating a Param whose backing variable name is already taken must fail."""
    with self.test_context():
        # Occupy the graph name the Param would want to use.
        _ = tf.get_variable('param/unconstrained', shape=())
        with self.assertRaises(GPflowError):
            gpflow.Param(1.0, name='param')
def __init__(self, inv_link=tf.exp, w=1., b=0., name=None):
    """Likelihood with a trainable slope *w* and intercept *b*.

    The inverse link defaults to tf.exp and is forwarded to the base class.
    """
    super().__init__(inv_link, name=name)
    slope = gp.Param(w)
    intercept = gp.Param(b)
    self.w = slope
    self.b = intercept
def __init__(self, X, Y, kernel):
    """Small GP model container: data as Params, a kernel, and a Gaussian likelihood."""
    gpflow.Parameterized.__init__(self)
    # Data become (trainable) Params; the kernel is stored as-is.
    self.X = gpflow.Param(X)
    self.Y = gpflow.Param(Y)
    self.kern = kernel
    self.likelihood = gpflow.likelihoods.Gaussian()
def __init__(self):
    """One-dimensional kernel with two positive-constrained parameters.

    alpha starts at 1.0 and beta at 0.5; both are kept positive by the
    standard positive transform.
    """
    super().__init__(input_dim=1, active_dims=[0])
    positive = gpflow.transforms.positive
    self.alpha = gpflow.Param(1.0, transform=positive)
    self.beta = gpflow.Param(0.5, transform=positive)
def __init__(self):
    """Model holding a single scalar parameter a, initialised to 3."""
    gpflow.models.Model.__init__(self)
    initial_a = 3.
    self.a = gpflow.Param(initial_a)
def __init__(self, Y_N, Y_B, ts_N, ts_B, n_latent, ss,
             neural_kernel=KernelKronecker_Neural,
             conv_scheme=KernelHRFConvDownsized_RBF,
             kern_tX=None, mean_tX=None,
             kern_XN=None, mean_XN=None,
             kern_XB=None, mean_XB=None, name=None):
    """Joint model over neural (Y_N) and behavioral (Y_B) observations.

    Builds default kernels/mean functions for any not supplied, aligns the two
    time grids (the denser of ts_N/ts_B becomes the common grid), initialises
    the latent trajectory X by PCA of the (interpolated) neural data, and
    attaches one Gaussian likelihood per model component.

    NOTE(review): assumes ts_N and ts_B are column vectors of time stamps and
    Y_N is (time, feature) flattenable as (ss, time) — confirm against callers.
    """
    # Fill in defaults for any component the caller did not provide.
    if kern_tX is None:
        kern_tX = gpflow.kernels.RBF(input_dim=1)
    if mean_tX is None:
        mean_tX = gpflow.mean_functions.Zero(output_dim=n_latent)
    if kern_XN is None:
        kern_XN = neural_kernel(input_dim_n=n_latent, ts_N=ts_N, ts_B=ts_B,
                                ss=ss, kernel_temporal=conv_scheme)
    if mean_XN is None:
        mean_XN = gpflow.mean_functions.Zero(output_dim=Y_N.shape[1])
    if kern_XB is None:
        # kern_XB = gpflow.kernels.RBF(input_dim = n_latent)
        kern_XB = gpflow.kernels.Matern12(input_dim=n_latent, ARD=True)
    if mean_XB is None:
        mean_XB = gpflow.mean_functions.Zero(output_dim=Y_B.shape[1])
    super().__init__(name=name)

    def cubic_interpolation(ts_sparse, Y_N, ts_dense, ss):
        # Cubic interpolation of each feature column from the sparse grid
        # onto the dense grid.
        from scipy import interpolate
        yn_new = np.zeros((ts_dense.shape[0], ss.shape[0]))
        yn_array = Y_N.reshape(ss.shape[0], ts_sparse.shape[0]).T
        for i in range(ss.shape[0]):
            temp = interpolate.interp1d(np.squeeze(ts_sparse),
                                        yn_array[:, i], kind='cubic')
            yn_new[:, i] = temp(np.squeeze(ts_dense))
        return yn_new

    def downsizing_scheme_nearest(ts_sparse, ts_dense):
        # Selection matrix M (dense x sparse): each sparse time stamp picks
        # its nearest neighbour on the dense grid.
        M = np.zeros((ts_dense.shape[0], ts_sparse.shape[0]))
        ts = np.squeeze(ts_dense)
        for i in range(ts_sparse.shape[0]):
            argmin_idx = np.argmin(np.abs(ts - ts_sparse[i, 0]))
            M[argmin_idx, i] = 1
        return M

    def HRF_filter(ts_dense):
        # Canonical double-gamma HRF evaluated on the first 30 time units
        # of the dense grid.
        ts = np.squeeze(ts_dense)
        unit_ts = ts[ts <= 30]

        def HRFunit(t):
            from scipy.special import gamma
            a1 = 6  # b1=1
            a2 = 16  # b2=1
            c = 1. / 6
            part1 = t**(a1 - 1) * np.exp(-t) / gamma(a1)
            part2 = t**(a2 - 1) * np.exp(-t) / gamma(a2)
            return part1 - c * part2

        hrf = HRFunit(unit_ts)
        return (hrf)

    # Pick the denser of the two time grids as the model's common grid.
    # NOTE(review): there is no else branch — if len(ts_N) == len(ts_B),
    # self.ts / self.ts_np (and Y_N_interp / M below) are never set and
    # later attribute accesses will fail; confirm whether equal lengths
    # can occur.
    if len(ts_N) > len(ts_B):
        print("Neural: Dense / Behavioral: Sparse")
        self.ts = tf.constant(ts_N.copy())
        self.ts_np = ts_N.copy()
    elif len(ts_N) < len(ts_B):
        print("Neural: Sparse / Behavioral: Dense")
        self.ts = tf.constant(ts_B.copy())
        self.ts_np = ts_B.copy()
        # NOTE(review): placement reconstructed — these appear to belong to
        # the sparse-neural branch (interpolating ts_N data onto ts_B);
        # confirm against the original indentation.
        self.Y_N_interp = cubic_interpolation(ts_N, Y_N, ts_B, ss)
        self.M = downsizing_scheme_nearest(ts_N, ts_B)
    # Data
    self.Y_N = tf.constant(Y_N.copy())
    self.Y_B = tf.constant(Y_B.copy())
    self.ts_N = tf.constant(ts_N.copy())
    self.ts_B = tf.constant(ts_B.copy())
    self.ss = tf.constant(ss.copy())
    self.n_Nsample = Y_N.shape[0]
    self.n_Nfeature = Y_N.shape[1]
    self.n_Bsample = Y_B.shape[0]
    self.n_Bfeature = Y_B.shape[1]
    # latent dynamics kernel + downsizing scheme
    self.kern_tX = kern_tX
    self.mean_tX = mean_tX
    self.n_latent = n_latent
    # PCA initialisation of the latent trajectory (computed twice: once kept
    # as a plain array, once wrapped in a trainable Param).
    self.N_pca = gpflow.models.gplvm.PCA_reduce(self.Y_N_interp, n_latent)
    self.X = gpflow.Param(
        gpflow.models.gplvm.PCA_reduce(self.Y_N_interp, n_latent))
    # Neural data kernel
    self.kern_XN = kern_XN
    self.mean_XN = mean_XN
    self.hrf = tf.constant(HRF_filter(self.ts_np))
    # Behavioral data kernel
    self.kern_XB = kern_XB
    self.mean_XB = mean_XB
    # Likelihood
    self.likelihood_tX = gpflow.likelihoods.Gaussian()
    self.likelihood_XN = gpflow.likelihoods.Gaussian()
    self.likelihood_XB = gpflow.likelihoods.Gaussian(
    )  # Can differ according to the model you rely on.
def __init__(self):
    """One-dimensional kernel with a single positive variance parameter."""
    super().__init__(input_dim=1, active_dims=[0])
    initial_variance = 1.0
    self.variance = gpflow.Param(initial_variance,
                                 transform=gpflow.transforms.positive)
def __init__(self):
    """Model whose single parameter b is initialised from a 0-d numpy array."""
    super().__init__()
    self.b = gpflow.Param(np.array(3.))
def __init__(self):
    """Origin fixture holding two scalar parameters: a=1 and b=2."""
    super().__init__()
    self.a = gpflow.Param(1.)
    self.b = gpflow.Param(2.)
def __init__(self):
    """Model with ten normally distributed values drawn under a fixed seed."""
    gpflow.models.Model.__init__(self)
    seeded = np.random.RandomState(0)
    self.x = gpflow.Param(seeded.randn(10))
def test_len(self):
    """len() of a ParamList equals the number of wrapped Params."""
    with self.test_context():
        scalar = gpflow.Param(1.2)
        vector = gpflow.Param(np.array([3.4, 5.6], settings.float_type))
        plist = gpflow.ParamList([scalar, vector])
        self.assertTrue(len(plist) == 2)
def __init__(self, state_dim, control_dim, max_action=None):
    """Linear controller with randomly initialised weights and bias.

    max_action is stored unchanged (None means unbounded).
    """
    gpflow.Parameterized.__init__(self)
    # Random uniform [0, 1) initial values; Param creation order kept: W, b.
    w_init = np.random.rand(control_dim, state_dim)
    b_init = np.random.rand(1, control_dim)
    self.W = gpflow.Param(w_init)
    self.b = gpflow.Param(b_init)
    self.max_action = max_action
def __init__(self, latent_dim, Y, transitions, T_latent=None, inputs=None,
             emissions=None, px1_mu=None, px1_cov=None, Xmu=None, Xchol=None,
             n_samples=100, batch_size=None, seed=None, name=None):
    """Multi-sequence variant: Y is a list of sequences, one posterior per sequence.

    Sets up per-sequence lengths, the observation ParamList, the variational
    posterior mean (X) and Cholesky factors (Xchol), and the Cholesky of the
    initial-state prior covariance (px1_cov_chol).

    NOTE(review): the super() call hard-codes T_latent/inputs/px1_cov/Xmu/Xchol
    to None instead of forwarding the arguments — presumably the base class
    handles only the single-sequence case and these are re-handled below;
    confirm against the base-class __init__.
    """
    super().__init__(latent_dim, Y[0], transitions, T_latent=None,
                     inputs=None, emissions=emissions, px1_mu=px1_mu,
                     px1_cov=None, Xmu=None, Xchol=None,
                     n_samples=n_samples, seed=seed, name=name)
    # Per-sequence lengths; T_latent defaults to the observed lengths.
    self.T = [Y_s.shape[0] for Y_s in Y]
    self.T_latent = T_latent or self.T
    self.n_seq = len(self.T)
    self.T_tf = tf.constant(self.T, dtype=gp.settings.int_type)
    self.T_latent_tf = tf.constant(self.T_latent, dtype=gp.settings.int_type)
    self.sum_T = float(sum(self.T))
    self.sum_T_latent = float(sum(self.T_latent))
    self.batch_size = batch_size
    # Observations and (optional) control inputs are fixed, not trained.
    self.Y = gp.ParamList(Y, trainable=False)
    self.inputs = None if inputs is None else gp.ParamList(inputs,
                                                           trainable=False)
    # Variational posterior mean over latents: zeros unless provided.
    _Xmu = [np.zeros((T_s, self.latent_dim))
            for T_s in self.T_latent] if Xmu is None else Xmu
    self.X = gp.ParamList(_Xmu)
    # Posterior Cholesky factors: identity unless provided. A 1-D xc is
    # treated as a diagonal (no transform); otherwise a lower-triangular
    # transform of matching batch shape is applied.
    _Xchol = [np.eye(T_s * self.latent_dim)
              for T_s in self.T_latent] if Xchol is None else Xchol
    xc_tr = lambda xc: None if xc.ndim == 1 else gtf.LowerTriangular(
        xc.shape[-1], num_matrices=1 if xc.ndim == 2 else xc.shape[0],
        squeeze=xc.ndim == 2)
    self.Xchol = gp.ParamList(
        [gp.Param(xc, transform=xc_tr(xc)) for xc in _Xchol])
    # Initial-state prior covariance: store its Cholesky (or sqrt of the
    # diagonal), untrainable. Three cases: per-sequence list, shared array,
    # or default identity.
    self.multi_diag_px1_cov = False
    if isinstance(px1_cov, list):  # different prior for each sequence
        _x1_cov = np.stack(px1_cov)
        _x1_cov = np.sqrt(
            _x1_cov) if _x1_cov.ndim == 2 else np.linalg.cholesky(_x1_cov)
        _transform = None if _x1_cov.ndim == 2 else gtf.LowerTriangular(
            self.latent_dim, num_matrices=self.n_seq)
        self.multi_diag_px1_cov = _x1_cov.ndim == 2
    elif isinstance(px1_cov, np.ndarray):  # same prior for each sequence
        assert px1_cov.ndim < 3
        _x1_cov = np.sqrt(
            px1_cov) if px1_cov.ndim == 1 else np.linalg.cholesky(px1_cov)
        _transform = None if px1_cov.ndim == 1 else gtf.LowerTriangular(
            self.latent_dim, squeeze=True)
    else:
        _x1_cov = np.eye(self.latent_dim)
        _transform = gtf.LowerTriangular(self.latent_dim, squeeze=True)
    self.px1_cov_chol = gp.Param(_x1_cov, trainable=False,
                                 transform=_transform)
def __init__(self):
    """Quadratic fixture: two seeded normal draws as the optimisation variable."""
    super().__init__()
    self.x = gpflow.Param(np.random.RandomState(0).randn(2),
                          dtype=gpflow.settings.np_float)
def __init__(self, state_dim, control_dim, W=None, b=None, e=None):
    """Linear controller with optional explicit initial values.

    Fix: the W and b arguments were accepted but silently ignored — the
    parameters were always randomly initialised. They are now used as the
    initial values when supplied; the default (None) behaviour is unchanged
    (random uniform initialisation).

    :param state_dim: dimensionality of the state.
    :param control_dim: dimensionality of the control output.
    :param W: optional (control_dim, state_dim) initial weight matrix.
    :param b: optional (1, control_dim) initial bias.
    :param e: stored unchanged on the instance.
    """
    gpflow.Parameterized.__init__(self)
    if W is None:
        W = np.random.rand(control_dim, state_dim)
    if b is None:
        b = np.random.rand(1, control_dim)
    self.W = gpflow.Param(W)
    self.b = gpflow.Param(b)
    self.e = e
def __init__(self, **kwargs):
    """Gauss fixture: a three-element zero vector as its only parameter."""
    super().__init__(**kwargs)
    self.x = gpflow.Param(np.zeros(3))
def test_fail_scenarios(self):
    """Exercise the error paths of Param: bad trainable values, tensor/
    trainability mismatches, bad sessions, and invalid constructor input."""
    with self.test_context() as session:
        p = gpflow.Param(1.0)
        # Non-bool values must be rejected by trainability setters and
        # is_built.
        values = ['', 'test', 1., object(), None]
        for v in values:
            def value_error(value):
                return self.assertRaises(
                    ValueError, msg='Raised at "{}"'.format(value))
            with value_error(v):
                p.set_trainable(v)
            with value_error(v):
                p.trainable = v
            with value_error(v):
                p.is_built(v)
        # A Param wrapping an external tensor must reject a tensor of the
        # opposite trainability.
        tensor = tf.get_variable('test', shape=())
        tensor_non_trainable = tf.get_variable('test_non_trainable',
                                               shape=(), trainable=False)
        p = gpflow.Param(tensor)
        p_non_trainable = gpflow.Param(1.0, trainable=False)
        with self.assertRaises(GPflowError):
            p_non_trainable._check_tensor_trainable(tensor)
        with self.assertRaises(GPflowError):
            p._check_tensor_trainable(tensor_non_trainable)
        # read_value: no session is a GPflowError; a non-session object is
        # a ValueError.
        with self.assertRaises(GPflowError):
            p.read_value(session=None)
        for v in ['', 'non-empty', 1.0, object()]:
            with self.assertRaises(ValueError):
                p.read_value(session=v)
        # Trainability of a tensor-backed Param cannot be changed either way.
        with self.assertRaises(GPflowError):
            p.set_trainable(False)
        with self.assertRaises(GPflowError):
            p.trainable = False
        with self.assertRaises(GPflowError):
            p.set_trainable(True)
        with self.assertRaises(GPflowError):
            p.trainable = True
        # anchor: non-session values raise ValueError; an uninitialised
        # session raises FailedPreconditionError.
        values = ['', 'test', 1., object()]
        for v in values:
            with self.assertRaises(ValueError,
                                   msg='Raised at "{}"'.format(v)):
                p.anchor(v)
        with self.assertRaises(tf.errors.FailedPreconditionError):
            p.anchor(session)
        # Constructor: the trainable flag must agree with the wrapped
        # tensor's trainability.
        with self.assertRaises(ValueError):
            tensor = tf.get_variable('test1', shape=(), trainable=False)
            gpflow.Param(tensor)
        with self.assertRaises(ValueError):
            tensor = tf.get_variable('test2', shape=())
            gpflow.Param(tensor, trainable=False)
def param(session_tf):
    """Fixture: a plain scalar Param with value 10."""
    scalar = gpflow.Param(10.)
    return scalar
def test_standard_name(self):
    """An unnamed Param gets the default name and an index-prefixed hidden name."""
    # The next Param created will take the current global index + 1.
    expected_index = gpflow.core.parentable.Parentable._read_index() + 1
    with self.test_context():
        p = gpflow.Param(1)
        self.assertEqual(p.name, 'Parameter')
        self.assertEqual(p.hidden_name,
                         '{}/Parameter'.format(expected_index))
def params_tree(session_tf):
    """Fixture: a Parameterized root holding one scalar Param named 'a'."""
    root = gpflow.Parameterized()
    root.a = gpflow.Param(1.)
    return root
def __init__(self):
    """Model containing a ParamList of two scalar Params (1 and 12)."""
    gpflow.models.Model.__init__(self)
    first = gpflow.Param(1.)
    second = gpflow.Param(12.)
    self.param_list = gpflow.ParamList([first, second])
def __init__(self, add_to_inits=None, add_to_trainables=None, name=None):
    """Fixture model: a 10x10 random Param plus caller-supplied variable lists.

    Fix: the defaults for add_to_inits / add_to_trainables were mutable
    lists ([]), shared across every instance constructed with the default —
    any mutation of self.init_vars / self.trainable_vars would leak between
    instances. None sentinels preserve the effective default ([]) without
    that sharing.

    :param add_to_inits: variables to include in initialisation (default: none).
    :param add_to_trainables: variables to include as trainables (default: none).
    :param name: forwarded to the base class.
    """
    super().__init__(name=name)
    data = np.random.randn(10, 10)
    self.a = gpflow.Param(data, dtype=gpflow.settings.float_type)
    self.init_vars = [] if add_to_inits is None else add_to_inits
    self.trainable_vars = [] if add_to_trainables is None else add_to_trainables