def build_column_stds(shape, settings, name):
    N, K = shape

    if settings.gaussian_auto_ard:
        prec_dim = K
        logstd_mean = Gaussian(mean=0.0, std=1.0, shape=(1,),
                               dtype=np.float32,
                               name="%s_logstd_mean" % name)
        logstd_logstd = Gaussian(mean=0.0, std=1.0, shape=(1,),
                                 dtype=np.float32,
                                 name="%s_logstd_logstd" % name)
        logstd_std = DeterministicTransform(logstd_logstd, transforms.Exp,
                                            name="%s_logstd_std" % name)

        # model column stds as drawn from a lognormal distribution with
        # inferred mean and std. This allows for ARD if we infer a high
        # variance on the column stds, but cheaply specializes to the case
        # where all column variances are the same.
        logstd = Gaussian(mean=logstd_mean, std=logstd_std, shape=(prec_dim,),
                          dtype=np.float32, name="%s_logstd" % name)
        std = DeterministicTransform(logstd, transforms.Exp,
                                     name="%s_std" % name)
    else:
        std = settings.constant_gaussian_std
    return std
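# Sketch (not part of the original file): a minimal settings object that is
# sufficient to drive build_column_stds. The attribute names match those read
# above; using SimpleNamespace as the container is an assumption about how
# settings objects are constructed elsewhere in the codebase.
def _demo_column_stds():
    from types import SimpleNamespace
    settings = SimpleNamespace(gaussian_auto_ard=True,
                               constant_gaussian_std=1.0)
    # returns a DeterministicTransform RV when ARD is enabled,
    # or the constant std otherwise
    return build_column_stds(shape=(100, 3), settings=settings, name="A")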
def gaussian_lowrank_model():
    A = Gaussian(mean=0.0, std=1.0, shape=(100, 3), name="A")
    B = Gaussian(mean=0.0, std=1.0, shape=(100, 3), name="B")
    C = NoisyGaussianMatrixProduct(A=A, B=B, std=0.1, name="C")

    sampled_C = C.sample(seed=0)
    C.observe(sampled_C)

    jm = Model(C)
    return jm
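# Usage sketch (not in the original file): the same build-then-fit pattern
# applies to the other synthetic-data model builders below. Model.train() is
# an assumption based on the library's examples; this file itself only shows
# Model construction.
def _demo_lowrank():
    jm = gaussian_lowrank_model()
    jm.train(steps=1000)  # assumed API: fit the variational posterior
    return jm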
def build_gaussian(shape, settings, name, local=False):
    N, K = shape
    col_stds = build_column_stds(shape, settings, name)
    G = Gaussian(mean=0.0, std=col_stds, name=name, shape=shape, local=local)
    return G
def build_vae(d_z=2, d_hidden=256, d_x=784, N=100, total_N=60000):
    # MODEL
    z = Gaussian(mean=0, std=1.0, shape=(N, d_z), name="z", local=True)
    X = neural_bernoulli(z, d_hidden=d_hidden, d_out=d_x, name="X", local=True)

    # OBSERVED DATA
    x_placeholder = X.observe_placeholder()

    # VARIATIONAL MODEL
    q_z = neural_gaussian(X=x_placeholder, d_hidden=d_hidden, d_out=d_z,
                          name="q_z")
    z.attach_q(q_z)

    jm = Model(X, minibatch_ratio=total_N / float(N))
    return jm, x_placeholder
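# Hypothetical minibatch-training sketch for the VAE. Unlike autoencoder()
# below, which bakes one fixed MNIST batch into the graph, build_vae() returns
# a placeholder so each step can feed a fresh minibatch; the minibatch_ratio
# rescales the local terms of the ELBO to the full dataset size. The feeding
# mechanism shown in the comment is an assumption about the Model API, not
# confirmed by this file.
def _demo_vae(next_minibatch):
    jm, x_ph = build_vae(N=100, total_N=60000)
    # each optimization step would feed a fresh (100, 784) batch, e.g.
    # jm.train(steps=..., feed_dict={x_ph: next_minibatch()})
    return jm, x_ph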
def gaussian_randomwalk_model():
    A = Gaussian(mean=0.0, std=1.0, shape=(100, 2), name="A")
    C = NoisyCumulativeSum(A=A, std=0.1, name="C")

    sampled_C = C.sample(seed=0)
    C.observe(sampled_C)

    jm = Model(C)
    return jm
def autoencoder():
    d_z = 2
    d_hidden = 256
    d_x = 28 * 28
    N = 100

    from util import get_mnist
    Xdata, ydata = get_mnist()
    Xbatch = tf.constant(np.float32(Xdata[0:N]))

    z = Gaussian(mean=0, std=1.0, shape=(N, d_z), name="z")
    X = neural_bernoulli(z, d_hidden=d_hidden, d_out=d_x, name="X")
    X.observe(Xbatch)

    q_z = neural_gaussian(X=Xbatch, d_hidden=d_hidden, d_out=d_z, name="q_z")
    z.attach_q(q_z)

    jm = Model(X)
    return jm
def sparsity():
    G1 = Gaussian(mean=0, std=1.0, shape=(100, 10), name="G1")
    expG1 = UnaryTransform(G1, Exp, name="expG1")
    X = MultiplicativeGaussianNoise(expG1, 1.0, name="X")

    sampled_X = X.sample(seed=0)
    X.observe(sampled_X)

    jm = Model(X)
    return jm
def neural_gaussian(X, d_hidden, d_out, shape=None, name=None, **kwargs):
    # the encoder emits means and stds stacked along a leading dimension,
    # so augment the requested shape with a leading 2
    augmented_shape = (2,) + shape if shape is not None else None
    # the transform itself is left unnamed; only the resulting Gaussian
    # carries the user-facing name
    encoder = NeuralGaussianTransform(X, d_hidden, d_out,
                                      shape=augmented_shape, name=None,
                                      **kwargs)
    means, stds = unpackRV(encoder)
    shape = means.shape
    return Gaussian(mean=means, std=stds, shape=shape, name=name)
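# Shape sketch (not in the original file): for N input rows, the encoder's
# augmented output has shape (2, N, d_out); unpackRV splits it into means and
# stds, each (N, d_out), which parameterize the returned Gaussian. `x_batch`
# is a stand-in for any (N, d_x) observation tensor.
def _demo_neural_gaussian(x_batch):
    return neural_gaussian(X=x_batch, d_hidden=256, d_out=2, name="q_z")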
def gaussian_mean_model():
    mu = Gaussian(mean=0, std=10, shape=(1,), name="mu")
    X = Gaussian(mean=mu, std=1, shape=(100,), name="X")

    sampled_X = X.sample(seed=0)
    X.observe(sampled_X)

    jm = Model(X)
    return jm
def latent_feature_model():
    K = 3
    D = 10
    N = 100

    a, b = np.float32(1.0), np.float32(1.0)

    pi = BetaMatrix(alpha=a, beta=b, shape=(K,), name="pi")
    B = BernoulliMatrix(p=pi, shape=(N, K), name="B")
    G = Gaussian(mean=0.0, std=1.0, shape=(K, D), name="G")
    # bind the observed matrix to a distinct variable so it doesn't shadow
    # the dimension D above
    F = NoisyLatentFeatures(B=B, G=G, std=0.1, name="D")

    sampled_F = F.sample(seed=0)
    F.observe(sampled_F)

    jm = Model(F)
    return jm
def clustering_gmm_model(n_clusters=4,
                         cluster_center_std=5.0,
                         cluster_spread_std=2.0,
                         n_points=500,
                         dim=2):
    centers = Gaussian(mean=0.0, std=cluster_center_std,
                       shape=(n_clusters, dim), name="centers")
    weights = DirichletMatrix(alpha=1.0, shape=(n_clusters,), name="weights")
    X = GMMClustering(weights=weights, centers=centers,
                      std=cluster_spread_std, shape=(n_points, dim), name="X")

    sampled_X = X.sample(seed=0)
    X.observe(sampled_X)

    jm = Model(X)
    return jm
def _inference_networks(self, q_result):
    batch_users, n_traits = self.input_shapes["A"]
    n_items, n_traits2 = self.input_shapes["B"]
    assert n_traits == n_traits2

    observed_ratings = q_result._sampled
    mask = self.mask
    means, stds, weights = build_trait_network(
        observed_ratings, mask, n_traits=n_traits,
        weights=self.inference_weights)
    self.inference_weights = weights

    q_A = Gaussian(mean=means, std=stds, shape=(batch_users, n_traits),
                   name="q_neural_" + self.inputs_random["A"].name)
    return {"A": q_A}
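# Assumed contract for build_trait_network (defined elsewhere, not in this
# file): it maps the observed ratings matrix and its binary observation mask,
# both (batch_users, n_items), to per-user posterior means and stds of shape
# (batch_users, n_traits), and returns the network weights so they can be
# reused across minibatches, amortizing inference for the trait matrix A.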
def default_q(self):
    return Gaussian(shape=self.shape, name="q_" + self.name)