def test_mean_field_matches_inpainting(cls):
    # Tests that running mean field to infer Y given X
    # gives the same result as running inpainting with
    # Y masked out and all of X observed.
    # This test uses the MoreConsistent InferenceProcedure.
    # For SuperWeightDoubling it will fail because mf initializes Y_hat
    # with double weights and do_inpainting initializes it with sigmoid(biases).

    batch_size = 5
    niter = 3
    nvis = 4
    nhid1 = 5
    nhid2 = 10
    classes = 3

    rng = np.random.RandomState([2012, 11, 3])

    vis = BinaryVector(nvis)
    vis.set_biases(rng.randn(nvis).astype(floatX))
    h1 = BinaryVectorMaxPool(detector_layer_dim=nhid1,
                             pool_size=1,
                             layer_name='h1',
                             irange=1.)
    h2 = BinaryVectorMaxPool(detector_layer_dim=nhid2,
                             pool_size=1,
                             layer_name='h2',
                             irange=1.)
    y = Softmax(n_classes=classes, irange=1., layer_name='y')

    dbm = SuperDBM(batch_size=batch_size,
                   inference_procedure=cls(),
                   niter=niter,
                   visible_layer=vis,
                   hidden_layers=[h1, h2, y])

    X = sharedX(rng.randn(batch_size, nvis))
    Y = sharedX(np.zeros((batch_size, classes)))
    drop_mask = sharedX(np.zeros((batch_size, nvis)))
    drop_mask_Y = sharedX(np.ones((batch_size,)))

    # Mean field inference of Y with all of X observed
    q_mf = dbm.mf(X)
    Y_hat_mf = q_mf[-1]

    # Inpainting with all of X observed and all of Y masked out
    V_hat_inpaint, Y_hat_inpaint = dbm.do_inpainting(V=X,
                                                     Y=Y,
                                                     drop_mask=drop_mask,
                                                     drop_mask_Y=drop_mask_Y)

    Y_hat_mf, Y_hat_inpaint = function([], [Y_hat_mf, Y_hat_inpaint])()

    if not np.allclose(Y_hat_mf, Y_hat_inpaint):
        print(Y_hat_mf)
        print(Y_hat_inpaint)
        assert False, cls
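
# Usage sketch (not part of the original tests): the function above is
# parameterized by an inference procedure class. Assuming the class named
# in its comments, MoreConsistent, is importable from the surrounding
# codebase, the check could be driven as:
#
#     test_mean_field_matches_inpainting(MoreConsistent)
#
# Per those same comments, passing SuperWeightDoubling instead is expected
# to trip the final assertion.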
def make_super_dbm(n_iter):
    return SuperDBM(batch_size = 100,
            visible_layer = GaussianConvolutionalVisLayer(rows = 32,
                                                          cols = 32,
                                                          channels = 3,
                                                          init_beta = 1.,
                                                          init_mu = 0.),
            hidden_layers = [
                ConvMaxPool(output_channels = 256,
                            kernel_rows = 6,
                            kernel_cols = 6,
                            pool_rows = 3,
                            pool_cols = 3,
                            irange = .05,
                            layer_name = 'h0',
                            init_bias = -3.),
                ConvMaxPool(output_channels = 128,
                            kernel_rows = 4,
                            kernel_cols = 4,
                            pool_rows = 3,
                            pool_cols = 3,
                            irange = .01,
                            layer_name = 'h1',
                            init_bias = -3.),
                #ConvMaxPool(output_channels = 3200,
                #            kernel_rows = 5,
                #            kernel_cols = 5,
                #            pool_rows = 1, # really inefficient way of getting a non-pooled sigmoid layer
                #            pool_cols = 1,
                #            irange = .01,
                #            layer_name = 'h2',
                #            init_bias = -1.),
                #ConvMaxPool(output_channels = 1600,
                #            kernel_rows = 3, # really inefficient way of getting a densely connected sigmoid layer
                #            kernel_cols = 3,
                #            pool_rows = 1,
                #            pool_cols = 1,
                #            irange = .01,
                #            layer_name = 'h3',
                #            init_bias = -1.),
                Softmax(n_classes = 10, irange = .01)
            ],
            niter = n_iter)
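
# Usage sketch (not part of the original script): n_iter only sets the
# number of mean field iterations (niter) the SuperDBM runs; every other
# hyperparameter is fixed for 32x32x3 input (e.g. CIFAR-10-sized images)
# with a 10-way Softmax output layer.
#
#     model = make_super_dbm(n_iter=6)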