Example #1
0
 def test_lenet(self):
     """LeNet trained with Adam should converge on MNIST in five epochs."""
     training_opts = dict(initial_learning_rate=1e-3,
                          batch_size=32,
                          epochs=5)
     MNIST_must_converge('lenet', lenet.LeNet,
                         optimizers.AdamOptimizer,
                         **training_opts)
Example #2
0
 def test_resnet_must_converge_on_MNIST(self):
     """ResNet trained with Adam should converge on MNIST within three epochs."""
     run_config = {
         "batch_size": 32,
         "epochs": 3,
         "initial_learning_rate": 0.001,
     }
     MNIST_must_converge("Resnet", resnet.ResNet,
                         optimizers.AdamOptimizer,
                         **run_config)
Example #3
0
    def test_lenet(self):
        """LeNet should converge on MNIST using plain momentum."""
        # Model class is passed directly; no intermediate binding needed.
        MNIST_must_converge(lenet.LeNet,
                            optimizers.MomentumOptimizer,
                            batch_size=100,
                            epochs=90)
Example #4
0
 def test_single_layer(self):
     """A bare MultiLayerPerceptron should converge on MNIST with RMSProp."""
     MNIST_must_converge('mlpx1',
                         mlp.MultiLayerPerceptron,
                         optimizers.RMSPropOptimizer,
                         initial_learning_rate=0.1,
                         batch_size=128,
                         epochs=epochs)
Example #5
0
 def test_vgg_must_converge_on_MNIST(self):
     """VGG16 with Adam should converge on MNIST in three epochs.

     Summaries and the debug session are explicitly disabled to keep the
     run lightweight.
     """
     settings = dict(
         batch_size=16,
         epochs=3,
         initial_learning_rate=1e-3,
         summaries=False,
         use_debug_session=False,
     )
     MNIST_must_converge("vgg16", vgg.VGG16,
                         optimizers.AdamOptimizer,
                         **settings)
Example #6
0
    def test_mlp_2048_2048_Momentum(self):
        """Three dropout-regularized 2048-unit MLP layers should converge
        on MNIST with momentum."""
        build_model = partial(mlp.MultiLayerPerceptron,
                              hidden_layers=[2048, 2048, 2048],
                              dropout=[0.2, 0.5, 0.5])

        MNIST_must_converge(build_model,
                            optimizers.MomentumOptimizer,
                            batch_size=100,
                            epochs=90)
Example #7
0
    def xxx_test_mlp_2048_2048_No_Dropout_Gradient(self):
        """Disabled (xxx_ prefix): dropout-free 2048x3 MLP with plain
        gradient descent."""
        layer_sizes = [2048, 2048, 2048]
        build_model = partial(mlp.MultiLayerPerceptron,
                              hidden_layers=layer_sizes,
                              dropout=[])

        MNIST_must_converge(build_model,
                            optimizers.GradientDescentOptimizer,
                            batch_size=100,
                            epochs=90)
Example #8
0
    def xxx_test_mlp_layer_with_dropout(self):
        """Disabled (xxx_ prefix): two 1024-unit layers with dropout,
        trained by gradient descent."""
        build_model = partial(mlp.MultiLayerPerceptron,
                              hidden_layers=[1024, 1024],
                              dropout=[0.2, 0.5])

        MNIST_must_converge(build_model,
                            optimizers.GradientDescentOptimizer,
                            batch_size=100,
                            epochs=90)
Example #9
0
    def test_mlp_2048_2048_momentum(self):
        """Three dropout-regularized 2048-unit layers should converge on
        MNIST with RMSProp."""
        make_net = partial(mlp.MultiLayerPerceptron,
                           hidden_layers=[2048, 2048, 2048],
                           dropout=[0.2, 0.5, 0.5])

        MNIST_must_converge('mlpx2048x2048x2048', make_net,
                            optimizers.RMSPropOptimizer,
                            initial_learning_rate=0.1,
                            batch_size=128,
                            epochs=epochs)
Example #10
0
    def test_mlp_2048_2048_no_dropout_gradient(self):
        """A dropout-free 2048x3 MLP should converge on MNIST with RMSProp."""
        make_net = partial(mlp.MultiLayerPerceptron,
                           hidden_layers=[2048, 2048, 2048],
                           dropout=[])

        MNIST_must_converge('mlpx2048x2048x2048xNoDropout', make_net,
                            optimizers.RMSPropOptimizer,
                            initial_learning_rate=0.1,
                            batch_size=32,
                            epochs=epochs)
Example #11
0
    def test_mlp_layer_with_dropout(self):
        """Two tanh-activated 1024-unit layers with dropout should converge
        on MNIST via gradient descent."""
        make_net = partial(mlp.MultiLayerPerceptron,
                           hidden_layers=[1024, 1024],
                           dropout=[0.2, 0.5],
                           activation_fn="tanh")

        MNIST_must_converge('mlpx1024x1024', make_net,
                            optimizers.GradientDescentOptimizer,
                            initial_learning_rate=0.1,
                            batch_size=128,
                            epochs=epochs)
Example #12
0
 def xxx_test_single_layer(self):
     """Disabled (xxx_ prefix): default MLP trained by plain gradient descent."""
     MNIST_must_converge(mlp.MultiLayerPerceptron,
                         optimizers.GradientDescentOptimizer,
                         batch_size=100,
                         epochs=90)