def test_max_pooling2d(make_image_data):
    """Test the MaxPool2D layer: output values, shapes, and zero KL penalty.

    Parameters
    ----------
    make_image_data : fixture
        Yields ``(x, _, X)`` where ``x`` is the raw image array and ``X`` is
        the corresponding tensor fed through the layer.
        NOTE(review): assumed shape is (n_samples, batch, height, width,
        channels) based on the axis arithmetic below — confirm with fixture.
    """
    x, _, X = make_image_data

    # Downsample by 2x in both spatial dimensions.
    max_pool = ab.MaxPool2D(pool_size=(2, 2), strides=(2, 2))
    F, KL = max_pool(X)

    tc = tf.test.TestCase()
    with tc.test_session():
        f = F.eval()
        # Evaluate X once — each .eval() call re-runs the graph.
        X_shape = X.eval().shape

        # The first 2x2 window's max must match across all batches.
        assert np.all(np.max(x[:, :2, :2, :], axis=(1, 2)) == f[0, :, 0, 0, :])

        # n_samples and batch size remain unchanged.
        assert f.shape[:2] == X_shape[:2]

        # Spatial dimensions are downsampled by 2x.
        assert 2 * f.shape[2] == X_shape[2]
        assert 2 * f.shape[3] == X_shape[3]

        # Number of channels remains unchanged.
        assert f.shape[-1] == X_shape[-1]

        # Max pooling has no parameters, so it contributes no KL penalty.
        assert KL == 0
# Training hyperparameters.
n_epochs = 50
batch_size = 100
# device_count={'GPU': 0} hides all GPUs from TensorFlow — run on CPU only.
config = tf.ConfigProto(device_count={'GPU': 0})
reg = 0.1        # l2 regularization strength applied to weight layers below
l_samples = 5    # samples drawn per forward pass during learning
p_samples = 5    # NOTE(review): presumably samples used at prediction time — confirm with main()

# Network architecture: a LeNet-style Bayesian conv net.
# Shape comments track (samples, batch, ...) through each layer.
net = ab.stack(
    ab.InputLayer(name='X', n_samples=l_samples),             # LSAMPLES, BATCH_SIZE, 28*28
    ab.Conv2D(filters=32, kernel_size=(5, 5), l2_reg=reg),    # LSAMPLES, BATCH_SIZE, 28, 28, 32
    ab.Activation(h=tf.nn.relu),
    ab.MaxPool2D(pool_size=(2, 2), strides=(2, 2)),           # LSAMPLES, BATCH_SIZE, 14, 14, 32
    ab.Conv2D(filters=64, kernel_size=(5, 5), l2_reg=reg),    # LSAMPLES, BATCH_SIZE, 14, 14, 64
    ab.Activation(h=tf.nn.relu),
    ab.MaxPool2D(pool_size=(2, 2), strides=(2, 2)),           # LSAMPLES, BATCH_SIZE, 7, 7, 64
    ab.Flatten(),                                             # LSAMPLES, BATCH_SIZE, 7*7*64
    ab.Dense(output_dim=1024, l2_reg=reg),                    # LSAMPLES, BATCH_SIZE, 1024
    ab.Activation(h=tf.nn.relu),
    ab.DropOut(0.5),
    ab.Dense(output_dim=10, l2_reg=reg),                      # LSAMPLES, BATCH_SIZE, 10
)


def main():