"W2", learnable=False) #V = DeterministicVariable(np.random.normal(0., 1., (100, h_size)), "V", learnable=False) f = lambda z, W: z + BF.tanh(BF.matmul(W, z)) F = lambda z, W1, W2: f(f(z, W1), W2) measurement_noise = 2. #1.5 z = [ NormalVariable(np.zeros((h_size, 1)), np.ones((h_size, 1)), "z0", learnable=False) ] img = [ DeterministicVariable(decoder(BF.reshape(z[0], (h_size, 1, 1))), "img0", learnable=False) ] x = [ NormalVariable(img[0], measurement_noise * np.ones( (3, image_size, image_size)), "x0", learnable=False) ] T = 10 t_cond = lambda t: True driving_noise = 0.05 for t in range(1, T):
# NOTE(review): second collapsed fragment of a (similar) state-space experiment,
# here wrapped in a repetition loop 'for rep in range(N_rep):'.  Differences from
# the fragment above that are visible in this text: measurement_noise = 0.5
# (vs 2.), and t_cond only observes early/late steps (t < 3 or t > T - 3),
# i.e. the middle of the trajectory is unobserved — presumably to test
# smoothing/interpolation of the latent dynamics; TODO confirm against the
# original script.  The line ends mid-call — the trailing
# 'x.append(NormalVariable(img[-1], measurement_noise*np.ones(...),' is never
# closed — so this is NOT valid Python as-is and is truncated at the right edge.
# N_rep, decoder and image_size are defined outside this fragment.
images2 = [] images3 = [] for rep in range(N_rep): h_size = 120 W1 = DeterministicVariable(np.random.normal(0., 0.2, (h_size, h_size)), "W1", learnable=False) W2 = DeterministicVariable(np.random.normal(0., 0.2, (h_size, h_size)), "W2", learnable=False) #V = DeterministicVariable(np.random.normal(0., 1., (100, h_size)), "V", learnable=False) f = lambda z, W: z + BF.tanh(BF.matmul(W, z)) F = lambda z, W1, W2: f(f(z, W1), W2) measurement_noise = 0.5 #1.5 z = [NormalVariable(np.zeros((h_size, 1)), np.ones((h_size, 1)), "z0", learnable=False)] img = [DeterministicVariable(decoder(BF.reshape(z[0], (h_size, 1, 1))), "img0", learnable=False)] x = [NormalVariable(img[0], measurement_noise*np.ones((3, image_size, image_size)), "x0", learnable=False)] T = 10 t_cond = lambda t: t < 3 or t > T - 3 driving_noise = 0.05 for t in range(1, T): z.append(NormalVariable(F(z[-1], W1, W2), driving_noise*np.ones((h_size, 1)), "z{}".format(t), learnable=False)) img.append(DeterministicVariable(decoder(BF.reshape(z[-1], (h_size, 1, 1))), "img{}".format(t), learnable=False)) if t_cond(t): x.append(NormalVariable(img[-1], measurement_noise*np.ones((3, image_size, image_size)),
# NOTE(review): collapsed fragment of a Bayesian convolutional classifier —
# presumably a `brancher` particle-inference example, given the
# `wk_locations`/`num_particles` initialization at the tail; TODO confirm.
# Visible model: conv weights Wk ~ N(0, 10) of shape
# (out_channels, in_channels, 2, 2); z = spatial mean (axes 2, 3) of
# relu(conv2d(x, Wk, stride=1)); linear read-out Wl, b ~ N(0, 10) into
# num_classes logits; Categorical label variable k, observed on `labels`.
# The image_size = 28 plus num_classes suggests an MNIST-like task — not
# verifiable from this fragment.  The line ends mid-list-literal
# ('wl_locations = [') so it is NOT valid Python as-is and is truncated at the
# right edge.  x, labels, in_channels, num_classes and num_particles are
# defined outside this fragment.
out_channels = 10 image_size = 28 Wk = NormalVariable(loc=np.zeros((out_channels, in_channels, 2, 2)), scale=10 * np.ones( (out_channels, in_channels, 2, 2)), name="Wk") z = DeterministicVariable(BF.mean(BF.relu(BF.conv2d(x, Wk, stride=1)), (2, 3)), name="z") Wl = NormalVariable(loc=np.zeros((num_classes, out_channels)), scale=10 * np.ones((num_classes, out_channels)), name="Wl") b = NormalVariable(loc=np.zeros((num_classes, 1)), scale=10 * np.ones((num_classes, 1)), name="b") reshaped_z = BF.reshape(z, shape=(out_channels, 1)) k = CategoricalVariable(logits=BF.linear(reshaped_z, Wl, b), name="k") # Probabilistic model model = ProbabilisticModel([k]) # Observations k.observe(labels) # Variational model #num_particles = 2 #10 wk_locations = [ np.random.normal(0., 0.1, (out_channels, in_channels, 2, 2)) for _ in range(num_particles) ] wl_locations = [
name="Wk") z = Normal(BF.conv2d(x, Wk, padding=1), 1., name="z") num_samples = 6 z.get_sample(num_samples)["z"] num_classes = 10 Wl = Normal(loc=np.zeros( (num_classes, image_size * image_size * out_channels)), scale=1. * np.ones( (num_classes, image_size * image_size * out_channels)), name="Wl") b = Normal(loc=np.zeros((num_classes, 1)), scale=1. * np.ones((num_classes, 1)), name="b") reshaped_z = BF.reshape(z, shape=(image_size * image_size * out_channels, 1)) k = Categorical(logits=BF.linear(reshaped_z, Wl, b), name="k") k.observe(labels) from brancher.inference import MAP from brancher.inference import perform_inference from brancher.variables import ProbabilisticModel convolutional_model = ProbabilisticModel([k]) perform_inference(convolutional_model, inference_method=MAP(), number_iterations=1, optimizer="Adam", lr=0.0025)