def test_compute_mpe_path_dilated(self):
    grid_dims = [4, 4]
    input_channels = 2
    vars = spn.RawLeaf(num_vars=grid_dims[0] * grid_dims[1] * input_channels)
    convprod = spn.ConvProducts(vars, num_channels=32, padding='valid', strides=1,
                                spatial_dim_sizes=grid_dims, dilation_rate=2)
    valgen = spn.LogValue(inference_type=spn.InferenceType.MARGINAL)
    valgen.get_value(convprod)

    counts = np.stack(
        [np.arange(16) + 23 * i for i in range(4)]).reshape((1, 2, 2, 16)).astype(np.float32)
    var_counts = tf.reshape(
        convprod._compute_log_mpe_path(counts, valgen.values[vars])[0], (1, 4, 4, 2))
    with self.test_session() as sess:
        var_counts_out = sess.run(
            var_counts, feed_dict={vars: np.random.rand(1, 4 * 4 * 2)})
    # Smoke test: no ground truth is asserted here, this only exercises
    # the dilated MPE path op end to end
    print(var_counts_out)
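# A small sketch (not library code) of the dilated windows exercised above:
# with kernel_size 2, dilation_rate 2, stride 1 and 'valid' padding on a
# 4x4 grid, the output map is 2x2 and the window at (r, c) reads four
# inputs spaced two cells apart, which is why `counts` has shape
# (1, 2, 2, 16).
for r in range(2):
    for c in range(2):
        taps = [(r, c), (r, c + 2), (r + 2, c), (r + 2, c + 2)]
        assert all(0 <= i < 4 and 0 <= j < 4 for i, j in taps)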
def test_compute_mpe_path_padding(self):
    grid_dims = [2, 2]
    vars = spn.RawLeaf(num_vars=4)
    convprod = spn.ConvProducts(vars, num_channels=1, strides=1, kernel_size=2,
                                padding='full', spatial_dim_sizes=grid_dims)
    # 'full' padding on a 2x2 grid with a 2x2 kernel yields a 3x3 output map
    counts_feed = tf.constant(np.arange(18, dtype=np.float32).reshape((2, 9)))
    # Each input cell accumulates the counts of the four windows covering it
    truth = [[0 + 1 + 3 + 4, 1 + 2 + 4 + 5, 3 + 4 + 6 + 7, 4 + 5 + 7 + 8],
             [9 + 10 + 12 + 13, 10 + 11 + 13 + 14,
              12 + 13 + 15 + 16, 13 + 14 + 16 + 17]]
    counts_op = convprod._compute_mpe_path_common(counts_feed, tf.ones(shape=(2, 4)))
    with self.test_session() as sess:
        counts_out = sess.run(counts_op)
    self.assertAllClose(counts_out[0], truth)
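# A minimal numpy sketch (not library code) re-deriving `truth` above: with
# 'full' padding, input cell (i, j) of the 2x2 grid is covered by the four
# 2x2 windows at positions (i, j), (i, j + 1), (i + 1, j), (i + 1, j + 1)
# of the 3x3 output map, so it accumulates exactly those four counts.
import numpy as np

counts = np.arange(18, dtype=np.float32).reshape((2, 3, 3))
expected = np.zeros((2, 2, 2), dtype=np.float32)
for i in range(2):
    for j in range(2):
        expected[:, i, j] = counts[:, i:i + 2, j:j + 2].sum(axis=(1, 2))
assert expected.reshape((2, 4)).tolist() == [[8, 12, 20, 24],
                                             [44, 48, 56, 60]]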
def test_compute_log_value(self):
    grid_dims = [4, 4]
    input_channels = 2
    vars = spn.RawLeaf(num_vars=grid_dims[0] * grid_dims[1] * input_channels)
    convprod = spn.ConvProducts(vars, num_channels=32, padding='valid', strides=2,
                                spatial_dim_sizes=grid_dims)
    # Documents the expected kernel ordering: each tuple picks one input
    # channel per kernel position (row-major over the 2x2 kernel). With 2
    # input channels and a 2x2 kernel there are only 2 ** 4 == 16 distinct
    # kernels, so the requested 32 channels are capped at 16.
    connectivity = [(0, 0, 0, 0), (1, 0, 0, 0), (0, 1, 0, 0), (1, 1, 0, 0),
                    (0, 0, 1, 0), (1, 0, 1, 0), (0, 1, 1, 0), (1, 1, 1, 0),
                    (0, 0, 0, 1), (1, 0, 0, 1), (0, 1, 0, 1), (1, 1, 0, 1),
                    (0, 0, 1, 1), (1, 0, 1, 1), (0, 1, 1, 1), (1, 1, 1, 1)]
    feed = [[[1, 2], [1, 2], [2, 1], [2, 1]],
            [[1, 1], [1, 1], [1, 1], [1, 1]],
            [[3, 1], [1, 3], [3, 1], [1, 3]],
            [[2, 3], [2, 3], [3, 2], [3, 2]]]
    feed = np.exp(np.reshape(feed, (1, grid_dims[0] * grid_dims[1] * input_channels)))
    truth = [
        [[4, 6], [8, 10]],    # 0
        [[5, 5], [6, 8]],     # 1
        [[5, 5], [10, 12]],   # 2
        [[6, 4], [8, 10]],    # 3
        [[4, 6], [9, 9]],     # 4
        [[5, 5], [7, 7]],     # 5
        [[5, 5], [11, 11]],   # 6
        [[6, 4], [9, 9]],     # 7
        [[4, 6], [9, 9]],     # 8
        [[5, 5], [7, 7]],     # 9
        [[5, 5], [11, 11]],   # 10
        [[6, 4], [9, 9]],     # 11
        [[4, 6], [10, 8]],    # 12
        [[5, 5], [8, 6]],     # 13
        [[5, 5], [12, 10]],   # 14
        [[6, 4], [10, 8]]     # 15
    ]
    truth = np.transpose(truth, (1, 2, 0))
    logval_op = tf.reshape(convprod.get_log_value(spn.InferenceType.MARGINAL), (2, 2, 16))
    with self.test_session() as sess:
        logval_out = sess.run(logval_op, feed_dict={vars: feed})
    self.assertAllClose(logval_out, truth)
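# A minimal numpy sketch (not library code) re-deriving the `truth` table:
# output channel i picks input channel ((i >> p) & 1) at kernel position p
# (row-major over the 2x2 kernel), matching `connectivity` above, and each
# product simply sums the selected log-inputs within its stride-2 window.
import numpy as np

log_grid = np.asarray([[[1, 2], [1, 2], [2, 1], [2, 1]],
                       [[1, 1], [1, 1], [1, 1], [1, 1]],
                       [[3, 1], [1, 3], [3, 1], [1, 3]],
                       [[2, 3], [2, 3], [3, 2], [3, 2]]])
expected = np.zeros((2, 2, 16))
for i in range(16):
    for r in range(2):
        for c in range(2):
            window = log_grid[2 * r:2 * r + 2, 2 * c:2 * c + 2]
            expected[r, c, i] = sum(window[p // 2, p % 2, (i >> p) & 1]
                                    for p in range(4))
assert expected[0, 0, 0] == 4 and expected[1, 1, 15] == 8
# `expected` matches `truth` after its transpose to (rows, cols, channels).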
def test_compute_mpe_path(self):
    grid_dims = [4, 4]
    input_channels = 2
    vars = spn.RawLeaf(num_vars=grid_dims[0] * grid_dims[1] * input_channels)
    convprod = spn.ConvProducts(vars, num_channels=32, padding='valid', strides=2,
                                spatial_dim_sizes=grid_dims)
    valgen = spn.LogValue(inference_type=spn.InferenceType.MARGINAL)
    valgen.get_value(convprod)

    counts = np.stack([np.arange(16),
                       np.arange(16) + 1000,
                       np.arange(16) + 10000,
                       np.arange(16) + 100000]).reshape((1, 2, 2, 16)).astype(np.float32)
    var_counts = tf.reshape(
        convprod._compute_log_mpe_path(counts, valgen.values[vars])[0], (1, 4, 4, 2))

    # Counts that a single 2x2 window distributes over its eight
    # (position, channel) inputs; each input collects the counts of the
    # eight output channels that selected it
    truth_single_square = np.asarray(
        [[0 + 2 + 4 + 6 + 8 + 10 + 12 + 14, 1 + 3 + 5 + 7 + 9 + 11 + 13 + 15],
         [0 + 1 + 4 + 5 + 8 + 9 + 12 + 13, 2 + 3 + 6 + 7 + 10 + 11 + 14 + 15],
         [0 + 1 + 2 + 3 + 8 + 9 + 10 + 11, 4 + 5 + 6 + 7 + 12 + 13 + 14 + 15],
         [0 + 1 + 2 + 3 + 4 + 5 + 6 + 7, 8 + 9 + 10 + 11 + 12 + 13 + 14 + 15]]
    ).reshape((2, 2, 2))
    # Each input collects 8 counts, so a per-window offset of 1000 shows
    # up as 8000 in the corresponding block
    truth_top_squares = np.concatenate(
        [truth_single_square, truth_single_square + 8000], axis=1)
    truth_bottom_squares = np.concatenate(
        [truth_single_square + 80000, truth_single_square + 800000], axis=1)
    truth = np.concatenate(
        (truth_top_squares, truth_bottom_squares), axis=0).reshape((1, 4, 4, 2))
    with self.test_session() as sess:
        var_counts_out = sess.run(
            var_counts, feed_dict={vars: np.random.rand(1, 4 * 4 * 2)})
    self.assertAllClose(truth, var_counts_out)
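# A minimal numpy sketch (not library code) re-deriving
# `truth_single_square`, assuming the kernel ordering implied by the value
# test above: within one 2x2 window, output channel i routes its count to
# input channel ((i >> p) & 1) at kernel position p.
import numpy as np

window_counts = np.arange(16, dtype=np.float32)
single_square = np.zeros((2, 2, 2), dtype=np.float32)
for p in range(4):
    for i in range(16):
        single_square[p // 2, p % 2, (i >> p) & 1] += window_counts[i]
# `single_square` matches `truth_single_square` above, e.g.
assert single_square[0, 0, 0] == 0 + 2 + 4 + 6 + 8 + 10 + 12 + 14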
def test_compute_scope(self, dilate):
    grid_dims = [4, 4]
    input_channels = 2
    vars = spn.IndicatorLeaf(num_vars=grid_dims[0] * grid_dims[1], num_vals=input_channels)
    strides = 1 if dilate else 2
    dilation_rate = 2 if dilate else 1
    convprod = spn.ConvProducts(vars, num_channels=32, padding='valid', strides=strides,
                                spatial_dim_sizes=grid_dims, dilation_rate=dilation_rate)
    conv_prod_scope = convprod.get_scope()

    singular_scopes = np.asarray(
        [spn.Scope(vars, i) for i in range(grid_dims[0] * grid_dims[1])]).reshape((4, 4))
    if dilate:
        # A dilated 2x2 kernel reads every other cell, so each output scope
        # is one of the four interleaved sub-grids
        scope_truth = [
            spn.Scope.merge_scopes(singular_scopes[0::2, 0::2].ravel().tolist()),
            spn.Scope.merge_scopes(singular_scopes[0::2, 1::2].ravel().tolist()),
            spn.Scope.merge_scopes(singular_scopes[1::2, 0::2].ravel().tolist()),
            spn.Scope.merge_scopes(singular_scopes[1::2, 1::2].ravel().tolist())]
    else:
        # A stride-2 kernel reads contiguous non-overlapping 2x2 blocks
        scope_truth = [
            spn.Scope.merge_scopes(singular_scopes[0:2, 0:2].ravel().tolist()),
            spn.Scope.merge_scopes(singular_scopes[0:2, 2:4].ravel().tolist()),
            spn.Scope.merge_scopes(singular_scopes[2:4, 0:2].ravel().tolist()),
            spn.Scope.merge_scopes(singular_scopes[2:4, 2:4].ravel().tolist())]
    # Every spatial position holds 16 product nodes sharing the same scope
    scope_truth = np.repeat(scope_truth, 16).tolist()

    self.assertAllEqual(conv_prod_scope, scope_truth)
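# A tiny sketch (not library code) of the two partitions compared above:
# dilation interleaves the grid while striding tiles it, but both split
# the 16 variables into four disjoint scopes of four variables each, as a
# valid decomposition requires.
import numpy as np

idx = np.arange(16).reshape((4, 4))
dilated = [idx[0::2, 0::2], idx[0::2, 1::2], idx[1::2, 0::2], idx[1::2, 1::2]]
strided = [idx[0:2, 0:2], idx[0:2, 2:4], idx[2:4, 0:2], idx[2:4, 2:4]]
for parts in (dilated, strided):
    assert sorted(np.concatenate([p.ravel() for p in parts])) == list(range(16))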
def test_generate_sparse_connections(self):
    grid_dims = [4, 4]
    input_channels = 2
    vars = spn.RawLeaf(num_vars=grid_dims[0] * grid_dims[1] * input_channels)
    convprod = spn.ConvProducts(vars, num_channels=32, strides=2, padding='valid',
                                spatial_dim_sizes=grid_dims)

    connections = convprod.generate_sparse_kernels(32)
    connection_tuples = [
        tuple(c) for c in connections.reshape((-1, convprod._num_channels)).transpose()]
    # No two output channels may share the same kernel
    self.assertEqual(len(set(connection_tuples)), len(connection_tuples))
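# A quick sketch (not library code) of why all kernels can be unique here:
# with 2 input channels and a 2x2 kernel there are only 2 ** 4 == 16
# distinct sparse kernels, so the requested 32 channels are presumably
# capped at 16 (as the value tests above suggest) and can be served by
# enumerating every channel assignment exactly once.
import itertools

all_kernels = list(itertools.product(range(2), repeat=4))
assert len(all_kernels) == len(set(all_kernels)) == 16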
def test_compute_value_padding(self):
    grid_dims = [2, 2]
    vars = spn.RawLeaf(num_vars=4)
    convprod = spn.ConvProducts(vars, num_channels=1, strides=1, kernel_size=2,
                                padding='full', spatial_dim_sizes=grid_dims)
    value_op = convprod.get_log_value()
    # Feed exp(i) so that the log-values come out as the integers 0..15
    var_feed = np.exp(np.arange(16, dtype=np.float32).reshape((4, 4)))
    truth = [[0, 1, 1, 2, 6, 4, 2, 5, 3],
             [4, 9, 5, 10, 22, 12, 6, 13, 7],
             [8, 17, 9, 18, 38, 20, 10, 21, 11],
             [12, 25, 13, 26, 54, 28, 14, 29, 15]]
    with self.test_session() as sess:
        value_out = sess.run(value_op, feed_dict={vars: var_feed})
    self.assertAllClose(value_out, truth)
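# A minimal numpy sketch (not library code) re-deriving `truth`: each row
# of the feed is a flattened 2x2 grid of log-values, 'full' padding embeds
# it in a zero-padded grid (log 1 = 0 outside the image), and output
# (p, q) of the 3x3 map sums the 2x2 window anchored at (p - 1, q - 1).
import numpy as np

log_grid = np.arange(16, dtype=np.float32).reshape((4, 2, 2))
padded = np.pad(log_grid, ((0, 0), (1, 1), (1, 1)), mode='constant')
expected = np.zeros((4, 3, 3), dtype=np.float32)
for p in range(3):
    for q in range(3):
        expected[:, p, q] = padded[:, p:p + 2, q:q + 2].sum(axis=(1, 2))
assert expected.reshape((4, 9))[0].tolist() == [0, 1, 1, 2, 6, 4, 2, 5, 3]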
def build_test_spn(self, batch_size, dilate, indicator_cols, indicator_rows,
                   kernel_size, stride):
    num_vars = indicator_rows * indicator_cols
    indicator_leaf = spn.RawLeaf(num_vars=num_vars * 4)

    # Identity connections: output channel i only reads input channel i,
    # which should make ConvProducts behave like a depthwise convolution
    dense_connections = np.zeros((kernel_size, kernel_size, 4, 4), dtype=np.float32)
    for i in range(4):
        dense_connections[:, :, i, i] = 1.0

    grid_dims = [indicator_rows, indicator_cols]
    conv_prod = spn.ConvProducts(indicator_leaf, num_channels=4,
                                 dense_connections=dense_connections, strides=stride,
                                 dilation_rate=dilate, spatial_dim_sizes=grid_dims,
                                 kernel_size=kernel_size)
    depthwise = spn.ConvProductsDepthwise(indicator_leaf, spatial_dim_sizes=grid_dims,
                                          strides=stride, dilation_rate=dilate,
                                          kernel_size=kernel_size)
    feed_dict = {indicator_leaf: np.random.rand(batch_size, num_vars * 4)}
    return conv_prod, depthwise, feed_dict, indicator_leaf
# In[3]:

tf.reset_default_graph()

# Leaf nodes
normal_leafs = spn.NormalLeaf(num_components=num_leaf_components, num_vars=num_vars,
                              trainable_scale=False, trainable_loc=True,
                              scale_init=scale_init)

# Twice non-overlapping convolutions
x = spn.ConvProducts(normal_leafs, num_channels=32, padding='valid', kernel_size=2,
                     strides=2, spatial_dim_sizes=[28, 28])
x = spn.LocalSums(x, num_channels=32)
x = spn.ConvProductsDepthwise(x, padding='valid', kernel_size=2, strides=2)
x = spn.LocalSums(x, num_channels=32)

# Make a wicker stack
stack_size = int(np.ceil(np.log2(28 // 4)))
for i in range(stack_size):
    dilation_rate = 2 ** i
    x = spn.ConvProductsDepthwise(x, padding='full', kernel_size=2, strides=1,
                                  dilation_rate=dilation_rate)
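# In[ ]:

# A quick sanity check (a sketch, not library code) of the stack depth
# chosen above: the two stride-2 convolutions shrink the 28x28 grid to
# 7x7, and the dilated kernel-2 layers with rates 1, 2, 4 grow the
# receptive field to 1 + 1 + 2 + 4 = 8 >= 7, enough to cover the grid.
spatial = 28 // 2 // 2                        # 7
stack_size = int(np.ceil(np.log2(spatial)))   # 3
receptive_field = 1 + sum(2 ** i for i in range(stack_size))
assert receptive_field >= spatial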
def test_compute_dense_gen_two_spatial_decomps_v2(self, node_type, input_dist):
    input_channels = 2
    grid_dims = [32, 32]
    num_vars = grid_dims[0] * grid_dims[1]
    vars = spn.IndicatorLeaf(num_vars=num_vars, num_vals=input_channels)

    convert_after = False
    if input_dist == spn.DenseSPNGenerator.InputDist.RAW and \
            node_type in [spn.DenseSPNGenerator.NodeType.BLOCK,
                          spn.DenseSPNGenerator.NodeType.LAYER]:
        node_type = spn.DenseSPNGenerator.NodeType.SINGLE
        convert_after = True

    # First decomposition
    convprod_dilate0 = spn.ConvProducts(vars, spatial_dim_sizes=grid_dims,
                                        num_channels=16, padding='valid',
                                        dilation_rate=2, strides=1, kernel_size=2)
    convprod_dilate1 = spn.ConvProducts(convprod_dilate0, spatial_dim_sizes=[30, 30],
                                        num_channels=512, padding='valid',
                                        dilation_rate=1, strides=4, kernel_size=2)
    convsum_dilate = spn.ConvSums(convprod_dilate1, num_channels=2,
                                  spatial_dim_sizes=[8, 8])

    # Second decomposition
    convprod_stride0 = spn.ConvProducts(vars, spatial_dim_sizes=grid_dims,
                                        num_channels=16, padding='valid',
                                        dilation_rate=1, strides=2, kernel_size=2)
    convprod_stride1 = spn.ConvProducts(convprod_stride0, spatial_dim_sizes=[16, 16],
                                        num_channels=512, padding='valid',
                                        dilation_rate=1, strides=2, kernel_size=2)
    convsum_stride = spn.ConvSums(convprod_stride1, num_channels=2,
                                  spatial_dim_sizes=[8, 8])

    # First decomposition level 2
    convprod_dilate0_l2 = spn.ConvProducts(convsum_stride, convsum_dilate,
                                           spatial_dim_sizes=[8, 8], num_channels=512,
                                           padding='valid', dilation_rate=2,
                                           strides=1, kernel_size=2)
    convprod_dilate1_l2 = spn.ConvProducts(convprod_dilate0_l2, spatial_dim_sizes=[6, 6],
                                           num_channels=512, padding='valid',
                                           dilation_rate=1, kernel_size=2, strides=4)
    convsum_dilate_l2 = spn.ConvSums(convprod_dilate1_l2, num_channels=2,
                                     spatial_dim_sizes=[4, 4])

    # Second decomposition level 2
    convprod_stride0_l2 = spn.ConvProducts(convsum_stride, convsum_dilate,
                                           spatial_dim_sizes=[8, 8], num_channels=512,
                                           padding='valid', dilation_rate=1,
                                           strides=2, kernel_size=2)
    convprod_stride1_l2 = spn.ConvProducts(convprod_stride0_l2, spatial_dim_sizes=[4, 4],
                                           num_channels=512, padding='valid',
                                           dilation_rate=1, strides=2, kernel_size=2)
    convsum_stride_l2 = spn.ConvSums(convprod_stride1_l2, num_channels=2,
                                     spatial_dim_sizes=[4, 4])

    dense_gen = spn.DenseSPNGenerator(num_mixtures=2, num_decomps=1, num_subsets=2,
                                      node_type=node_type, input_dist=input_dist)
    root = dense_gen.generate(convsum_stride_l2, convsum_dilate_l2)
    if convert_after:
        root = dense_gen.convert_to_layer_nodes(root)

    # Assert valid
    self.assertTrue(root.is_valid())

    # Setup the remaining Ops
    spn.generate_weights(root)
    init = spn.initialize_weights(root)
    value_op = tf.squeeze(root.get_log_value())

    with self.test_session() as sess:
        sess.run(init)
        # Feeding -1 marginalizes every indicator variable, so the
        # log-value at the root of a valid, normalized SPN must be 0
        value_out = sess.run(value_op, {vars: -np.ones((1, num_vars), dtype=np.int32)})
    self.assertAllClose(value_out, 0.0)