def _sparse_conv2d_custom_with_mask(x, w, block_params, strides, ind_init, bin_init):
    """Sparse conv 2d with mask.

    Wraps `sparse_conv2d_custom`, packaging the precomputed active block
    indices and bin counts into a `ReduceMask`. Always runs with
    `use_var=True` and `transpose=True`.
    """
    reduce_mask = ReduceMask(active_block_indices=ind_init, bin_counts=bin_init)
    return sparse_conv2d_custom(
        x, w, reduce_mask, block_params, strides, use_var=True, transpose=True)
def _test_sparse_conv2d_custom_with_mask(self, mask, bsize, ksize, strides, padding, y_exp, use_var=True, transpose=False):
    """Checks sparse_conv2d_custom output against an expected dense map.

    Runs the custom sparse convolution on an all-ones input restricted to
    `mask`, then compares the result with `y_exp` (zeros in `y_exp` are
    patched with the pass-through input values before comparison).
    """
    # Currently we don't care about VALID convolution.
    assert padding == 'SAME', 'We do not support VALID conv at the moment.'
    mask_t = tf.constant(mask)
    block_params = calc_block_params(
        list(mask.shape) + [ksize[2]], bsize, ksize, strides, padding)
    indices = convert_mask_to_indices_custom(mask_t, block_params, 0.)
    x_np = np.ones([1, mask.shape[1], mask.shape[2], 1], dtype=np.float32)
    x = tf.constant(x_np)
    if use_var:
        x = tf.Variable(x)
    kernel = tf.constant(np.ones(ksize, dtype=np.float32))
    y = sparse_conv2d_custom(
        x, kernel, indices, block_params, strides,
        use_var=use_var, transpose=transpose)
    # Manually paste the input tensor in the expected output: wherever
    # y_exp is zero, the op is expected to pass the input value through.
    passthrough = (y_exp == 0).astype(np.float32)
    y_exp = passthrough * x_np[:, :y_exp.shape[1], :y_exp.shape[2], :] + y_exp
    with self.test_session() as sess:
        if use_var:
            sess.run(tf.variables_initializer([x]))
        y_act = y.eval()
        self.assertEqual(y_act.size, y_exp.size)
        np.testing.assert_array_equal(y_act.reshape(y_exp.shape), y_exp)
def _test_sparse_conv2d_gradient(self, mask, bsize, ksize, strides, padding, transpose=False):
    """Measures gradient quality of sparse_conv2d_custom w.r.t. x and w.

    Computes the gradient angle and absolute error (numeric vs. symbolic)
    for both the input tensor and the filter, and prints a small table.
    """
    # Currently we don't care about VALID convolution.
    assert padding == 'SAME', 'We do not support VALID conv at the moment.'
    use_var = False
    mask_ = tf.constant(mask)
    blk_params = calc_block_params(
        list(mask.shape) + [ksize[2]], bsize, ksize, strides, padding)
    ind = convert_mask_to_indices_custom(mask_, blk_params, 0.)
    ReduceMask = namedtuple('ReduceMask', ['active_block_indices', 'bin_counts'])
    # NOTE(review): the index shape is hard-coded for the masks used by the
    # callers of this helper — confirm [27, 3] matches any new test mask.
    ind.active_block_indices.set_shape([27, 3])
    ind.bin_counts.set_shape([1])
    # Freeze the indices into non-trainable variables so the gradient checker
    # sees a fixed sparsity pattern.
    ind_var = tf.Variable(ind.active_block_indices, trainable=False)
    bin_var = tf.Variable(ind.bin_counts, trainable=False)
    ind_fixed = ReduceMask(active_block_indices=ind_var, bin_counts=bin_var)
    rnd = np.random.RandomState(0)
    xval = rnd.uniform(
        -0.1, 0.1,
        [mask.shape[0], mask.shape[1], mask.shape[2], ksize[2]]).astype(np.float32)
    x = tf.constant(xval)
    wval = rnd.uniform(-1, 1, ksize).astype(np.float32)
    w = tf.constant(wval)
    y = sparse_conv2d_custom(
        x, w, ind_fixed, blk_params, strides, use_var=use_var, transpose=transpose)
    print('')
    print('-' * 55)
    print('Sparse Conv Layer')
    print('{:30s} {:>10s} {:>10s}'.format('name', 'grad angle', 'abs err'))
    with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        yval = y.eval()
        err = compute_gradient_angle(x, xval.shape, y, yval.shape, x_init_value=xval)
        err2 = compute_gradient_abs_error(x, xval.shape, y, yval.shape, x_init_value=xval)
        print('{:30s} {:>10.3f} {:>10.3f}'.format('x', err, err2))
        err = compute_gradient_angle(w, wval.shape, y, yval.shape, x_init_value=wval)
        # BUG FIX: this previously assigned to `err`, clobbering the angle
        # measurement and printing x's stale abs error for the 'w' row.
        err2 = compute_gradient_abs_error(w, wval.shape, y, yval.shape, x_init_value=wval)
        print('{:30s} {:>10.3f} {:>10.3f}'.format('w', err, err2))