def network():

    HalfPadConvolution = partial(layers.Convolution, padding='half')

    return layers.join(
        layers.Input((3, 224, 224)),

        HalfPadConvolution((20, 5, 5), name='conv1_1') > layers.Relu(),
        HalfPadConvolution((20, 5, 5), name='conv1_2') > layers.Relu(),
        layers.MaxPooling((2, 2)),

        HalfPadConvolution((60, 5, 5), name='conv2_1') > layers.Relu(),
        HalfPadConvolution((60, 5, 5), name='conv2_2') > layers.Relu(),
        layers.MaxPooling((2, 2)),

        HalfPadConvolution((120, 5, 5), name='conv3_1') > layers.Relu(),
        HalfPadConvolution((120, 5, 5), name='conv3_2') > layers.Relu(),
        HalfPadConvolution((150, 5, 5), name='conv3_3') > layers.Relu(),
        HalfPadConvolution((150, 5, 5), name='conv3_4') > layers.Relu(),
        layers.MaxPooling((2, 2)),

        HalfPadConvolution((128, 5, 5), name='conv4_1') > layers.Relu(),
        HalfPadConvolution((128, 5, 5), name='conv4_2') > layers.Relu(),
        HalfPadConvolution((512, 3, 3), name='conv4_3') > layers.Relu(),
        HalfPadConvolution((512, 3, 3), name='conv4_4') > layers.Relu(),
        layers.MaxPooling((2, 2)),

        layers.Reshape(),

        layers.Linear(2000, name='dense_1') > layers.Relu(),
        layers.Dropout(0.5),
        layers.Linear(1000, name='dense_2') > layers.Relu(),
        layers.Dropout(0.5),
        layers.Linear(1000, name='dense_3') > layers.Softmax(),
    )
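The function above only builds the layer graph. A minimal, hedged sketch of how such a connection is typically handed to a NeuPy optimizer, following the `algorithms.Momentum` usage shown in later examples on this page; `x_train` and `y_train` are placeholders for prepared training data:

from neupy import algorithms

# Wrap the connection returned by network() in a training algorithm.
optimizer = algorithms.Momentum(
    network(),
    error='categorical_crossentropy',
    step=0.01,
    verbose=True,
)
optimizer.train(x_train, y_train, epochs=10)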
Example No. 2
    def test_multi_outputs_propagation(self):
        network = layers.join(
            layers.Input(4),
            layers.parallel(
                layers.Linear(2),
                layers.Linear(3),
                layers.Linear(4),
            ))
        x = asfloat(np.random.random((7, 4)))
        out1, out2, out3 = self.eval(network.output(x))

        self.assertEqual((7, 2), out1.shape)
        self.assertEqual((7, 3), out2.shape)
        self.assertEqual((7, 4), out3.shape)
Example No. 3
    def test_one_to_many_parallel_connection_output(self):
        input_connection = layers.Input(4)
        parallel_connections = layers.parallel(
            layers.Linear(11),
            layers.Linear(12),
            layers.Linear(13),
        )
        layers.join(input_connection, parallel_connections)

        input_value = asfloat(np.random.random((10, 4)))
        actual_output = self.eval(parallel_connections.output(input_value))

        self.assertEqual(actual_output[0].shape, (10, 11))
        self.assertEqual(actual_output[1].shape, (10, 12))
        self.assertEqual(actual_output[2].shape, (10, 13))
Example No. 4
    def test_invalid_weight_shape(self):
        network = layers.join(
            layers.Input(5),
            layers.Linear(4, weight=np.ones((3, 3))),
        )
        with self.assertRaisesRegexp(ValueError, "Cannot create variable"):
            network.create_variables()

        variable = tf.Variable(np.ones((3, 3)), dtype=tf.float32)
        network = layers.join(
            layers.Input(5),
            layers.Linear(4, weight=variable),
        )
        with self.assertRaisesRegexp(ValueError, "Cannot create variable"):
            network.create_variables()
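For contrast, a weight whose shape matches the layer is accepted; a small hedged sketch using the same `Input(5) > Linear(4)` pair, where the expected weight shape follows the `(input features, output features)` convention visible in the storage tests below:

network = layers.join(
    layers.Input(5),
    # Weight shape (5, 4): 5 input features, 4 output units.
    layers.Linear(4, weight=np.ones((5, 4))),
)
network.create_variables()  # expected to succeed, no ValueError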
Example No. 5
    def test_linear_layer_without_bias(self):
        input_layer = layers.Input(10)
        output_layer = layers.Linear(2, weight=init.Constant(0.1), bias=None)
        connection = input_layer > output_layer

        self.assertEqual(output_layer.bias_shape, None)

        input_value = asfloat(np.ones((1, 10)))
        actual_output = self.eval(connection.output(input_value))
        expected_output = np.ones((1, 2))

        np.testing.assert_array_almost_equal(expected_output, actual_output)

        with self.assertRaises(TypeError):
            layers.Linear(2, weight=None)
Example No. 6
    def test_one_to_many_parallel_network_output(self):
        one_to_many = layers.join(
            layers.Input(4),
            layers.parallel(
                layers.Linear(11),
                layers.Linear(12),
                layers.Linear(13),
            ),
        )

        input_value = asfloat(np.random.random((10, 4)))
        actual_output = self.eval(one_to_many.output(input_value))

        self.assertEqual(actual_output[0].shape, (10, 11))
        self.assertEqual(actual_output[1].shape, (10, 12))
        self.assertEqual(actual_output[2].shape, (10, 13))
Example No. 7
    def test_compilation_multiple_inputs(self):
        input_matrix = asfloat(np.ones((7, 10)))
        expected_output = np.ones((7, 5))

        network = layers.join(
            [[layers.Input(10)], [layers.Input(10)]],
            layers.Elementwise(),
            layers.Linear(5, weight=init.Constant(0.1), bias=None),
        )

        # Generated input variables
        predict = network.compile()
        actual_output = predict(input_matrix * 0.7, input_matrix * 0.3)
        np.testing.assert_array_almost_equal(actual_output, expected_output)

        # Pre-defined input variables
        input_variable_1 = T.matrix('x1')
        input_variable_2 = T.matrix('x2')

        predict = network.compile(input_variable_1, input_variable_2)
        actual_output = predict(input_matrix * 0.7, input_matrix * 0.3)
        np.testing.assert_array_almost_equal(actual_output, expected_output)
Example No. 8
    def test_storage_load_dict_invalid_number_of_parameters(self):
        network = layers.join(
            layers.Input(3),
            layers.Relu(4, name='relu'),
            layers.Linear(5, name='linear') > layers.Relu(),
            layers.Softmax(6, name='softmax'),
        )
        data = {
            'metadata': {},  # avoided for simplicity
            'graph': {},  # avoided for simplicity
            # Input layer was avoided on purpose
            'layers': [{
                'name': 'name-1',
                'class_name': 'Relu',
                'configs': {},
                'parameters': {
                    'weight': {
                        'trainable': True,
                        'value': np.ones((3, 4))
                    },
                    'bias': {
                        'trainable': True,
                        'value': np.ones((4, ))
                    },
                }
            }]
        }

        with self.assertRaises(ParameterLoaderError):
            storage.load_dict(network, data, ignore_missing=False)
Example No. 9
    def test_invalid_input_shape(self):
        error_message = ("Input shape expected to have 2 "
                         r"dimensions, got 3 instead. Shape: \(\?, 10, 3\)")
        with self.assertRaisesRegexp(LayerConnectionError, error_message):
            layers.join(
                layers.Input((10, 3)),
                layers.Linear(10),
            )
Example No. 10
    def test_many_to_many_parallel_connection_output(self):
        connection = layers.parallel(
            layers.Input(1) > layers.Linear(11),
            layers.Input(2) > layers.Linear(12),
            layers.Input(3) > layers.Linear(13),
        )

        input_value_1 = asfloat(np.random.random((10, 1)))
        input_value_2 = asfloat(np.random.random((20, 2)))
        input_value_3 = asfloat(np.random.random((30, 3)))

        actual_output = self.eval(
            connection.output(input_value_1, input_value_2, input_value_3))

        self.assertEqual(actual_output[0].shape, (10, 11))
        self.assertEqual(actual_output[1].shape, (20, 12))
        self.assertEqual(actual_output[2].shape, (30, 13))
Example No. 11
    def Initialize_Connection(self):
        neuronsLayers = self.NeuronsInEveryLayer
        activationFsLayers = self.ActivationFunctionInEveryLayer
        connectionInit = [layerNeupy.Input(neuronsLayers[0])]
        for i in range(1, len(self.NeuronsInEveryLayer)):
            if activationFsLayers[i] == 'logsig':
                connectionInit.append(layerNeupy.Sigmoid(neuronsLayers[i]))
            elif activationFsLayers[i] == 'tansig':
                connectionInit.append(layerNeupy.Tanh(neuronsLayers[i]))
            else:
                connectionInit.append(layerNeupy.Linear(neuronsLayers[i]))
        return connectionInit
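The method above returns a plain list of layers. A hedged, illustrative sketch (not part of the original class) of joining that list into a single connection with the join API used elsewhere on this page:

# Inside the same class -- illustrative only:
connection_layers = self.Initialize_Connection()
network = layerNeupy.join(*connection_layers)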
Example No. 12
    def test_compilation_multiple_outputs(self):
        input_matrix = asfloat(np.ones((7, 10)))
        expected_output_1 = np.ones((7, 5))
        expected_output_2 = np.ones((7, 2))

        network = layers.join(
            layers.Input(10),
            [[layers.Linear(5, weight=init.Constant(0.1), bias=None)],
             [layers.Linear(2, weight=init.Constant(0.1), bias=None)]])
        predict = network.compile()

        actual_output_1, actual_output_2 = predict(input_matrix)

        np.testing.assert_array_almost_equal(actual_output_1,
                                             expected_output_1)

        np.testing.assert_array_almost_equal(actual_output_2,
                                             expected_output_2)
Example No. 13
    def test_storage_load_dict_using_wrong_names(self):
        connection = layers.join(
            layers.Input(3),
            layers.Relu(4, name='relu'),
            layers.Linear(5, name='linear') > layers.Relu(),
            layers.Softmax(6, name='softmax'),
        )

        storage.load_dict(connection, {
            'metadata': {},  # avoided for simplicity
            'graph': {},  # avoided for simplicity
            # Input layer was avoided on purpose
            'layers': [{
                'name': 'name-1',
                'class_name': 'Relu',
                'input_shape': (3,),
                'output_shape': (4,),
                'configs': {},
                'parameters': {
                    'weight': {'trainable': True, 'value': np.ones((3, 4))},
                    'bias': {'trainable': True, 'value': np.ones((4,))},
                }
            }, {
                'name': 'name-2',
                'class_name': 'Relu',
                'input_shape': (4,),
                'output_shape': (5,),
                'configs': {},
                'parameters': {
                    'weight': {'trainable': True, 'value': np.ones((4, 5))},
                    'bias': {'trainable': True, 'value': np.ones((5,))},
                }
            }, {
                'name': 'name-3',
                'class_name': 'Softmax',
                'input_shape': (5,),
                'output_shape': (6,),
                'configs': {},
                'parameters': {
                    'weight': {'trainable': True, 'value': np.ones((5, 6))},
                    'bias': {'trainable': True, 'value': np.ones((6,))},
                }
            }]
        }, load_by='order', skip_validation=False)

        relu = connection.layer('relu')
        self.assertEqual(12, np.sum(self.eval(relu.weight)))
        self.assertEqual(4, np.sum(self.eval(relu.bias)))

        linear = connection.layer('linear')
        self.assertEqual(20, np.sum(self.eval(linear.weight)))
        self.assertEqual(5, np.sum(self.eval(linear.bias)))

        softmax = connection.layer('softmax')
        self.assertEqual(30, np.sum(self.eval(softmax.weight)))
        self.assertEqual(6, np.sum(self.eval(softmax.bias)))
Example No. 14
    def test_linear_layer_without_bias(self):
        input_layer = layers.Input(10)
        output_layer = layers.Linear(2, weight=0.1, bias=None)
        network = layers.join(input_layer, output_layer)

        input_value = asfloat(np.ones((1, 10)))
        actual_output = self.eval(network.output(input_value))
        expected_output = np.ones((1, 2))

        np.testing.assert_array_almost_equal(expected_output, actual_output)
Example No. 15
def build_net(n_input, activation=layers.Sigmoid, sizes=(3, 3)):
    net = layers.Input(n_input)
    for size in sizes:
        net = net > activation(size)
    net = net > layers.Linear(1)

    conj = neual.ConjugateGradient(connection=net,
                                   step=0.005,
                                   addons=[neual.LinearSearch],
                                   show_epoch=25)
    return conj
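A hedged usage sketch for the factory above; `n_input` and the training arrays are placeholders:

# Build a small network and train it with the returned optimizer.
conj = build_net(n_input=10, sizes=(5, 5))
conj.train(x_train, y_train, epochs=100)
y_predicted = conj.predict(x_test)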
Example No. 16
    def initialize(self):
        self.network = algorithms.Momentum(
            [
                layers.Input(20),
                layers.Linear(20, weight=init.Uniform(-0.5, 0.5)),
                layers.LeakyRelu(15, weight=init.Uniform(-0.5, 0.5)),
                layers.LeakyRelu(15, weight=init.Uniform(-0.5, 0.5)),
                layers.LeakyRelu(12, weight=init.Uniform(-0.5, 0.5)),
                layers.Linear(9, weight=init.Uniform(-0.5, 0.5)),
            ],

            error='categorical_crossentropy',
            step=0.01,
            verbose=False,
            shuffle_data=True,

            momentum=0.99,
            nesterov=True,

        )
        self.network.architecture()
Example No. 17
    def test_unknown_feature_during_weight_init(self):
        network = layers.join(
            layers.Input(None),
            layers.Linear(10, name='linear'),
        )

        message = ("Cannot create variables for the layer `linear`, "
                   "because number of input features is unknown. "
                   "Input shape: \(\?, \?\)")
        with self.assertRaisesRegexp(WeightInitializationError, message):
            network.create_variables()

        with self.assertRaisesRegexp(WeightInitializationError, message):
            network.outputs
Example No. 18
    def test_predict_different_inputs(self):
        for bp_algorithm_class in self.bp_algorithms:
            network = bp_algorithm_class(
                [
                    layers.Linear(
                        size=2, bias=np.zeros(1), weight=np.zeros((2, 1))),
                    layers.Output(1),
                ],
                verbose=False,
            )
            self.assertInvalidVectorPred(network,
                                         input_vector=np.array([0, 0]),
                                         target=0,
                                         is_feature1d=False)
Example No. 19
    def test_simple_connection_compilation(self):
        input_matrix = asfloat(np.ones((7, 10)))
        expected_output = np.ones((7, 5))

        network = layers.join(
            layers.Input(10),
            layers.Linear(5, weight=init.Constant(0.1), bias=None))

        # Generated input variables
        predict = network.compile()
        actual_output = predict(input_matrix)
        np.testing.assert_array_almost_equal(actual_output, expected_output)

        # Pre-defined input variables
        input_variable = T.matrix('x')
        predict = network.compile(input_variable)
        actual_output = predict(input_matrix)
        np.testing.assert_array_almost_equal(actual_output, expected_output)
Example No. 20
	def model_network(self, algorithm='LevenbergMarquardt', model=None, opt=None):

		model = self.decode_model(model)
		if model is None:
			model = [
				[1, 'hidden', 15, 'Linear'],
				[2, 'hidden', 10, 'Linear'],
				[3, 'output', self.output_classes, 'Elu']
			]
			# [Input(4), Elu(1)]
			# [Input(4), Elu(6), Elu(1)] EP: 100
		layer_model = [layers.Input(self.input_features)]
		for layer in model:
			if layer[3] == 'Linear':
				layer_model.append(layers.Linear(layer[2]))
			if layer[3] == 'Relu':
				layer_model.append(layers.Relu(layer[2]))
			if layer[3] == 'Sigmoid':
				layer_model.append(layers.Sigmoid(layer[2]))
			if layer[3] == 'HardSigmoid':
				layer_model.append(layers.HardSigmoid(layer[2]))
			if layer[3] == 'Step':
				layer_model.append(layers.Step(layer[2]))
			if layer[3] == 'Tanh':
				layer_model.append(layers.Tanh(layer[2]))
			if layer[3] == 'Softplus':
				layer_model.append(layers.Softplus(layer[2]))
			if layer[3] == 'Softmax':
				layer_model.append(layers.Softmax(layer[2]))
			if layer[3] == 'Elu':
				layer_model.append(layers.Elu(layer[2]))
			if layer[3] == 'PRelu':
				layer_model.append(layers.PRelu(layer[2]))
			if layer[3] == 'LeakyRelu':
				layer_model.append(layers.LeakyRelu(layer[2]))

		print('Layer model: ' + str(layer_model))

		self.layers = layer_model
		self.select_algorithm(algorithm, options=opt)
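The long `if` chain above just maps an activation name to the matching layer class. A more compact sketch of the same mapping (a hypothetical refactor, not part of the original code); unknown names are skipped, as in the original:

layer_name_to_class = {
    name: getattr(layers, name)
    for name in ('Linear', 'Relu', 'Sigmoid', 'HardSigmoid', 'Step', 'Tanh',
                 'Softplus', 'Softmax', 'Elu', 'PRelu', 'LeakyRelu')
}

layer_model = [layers.Input(self.input_features)]
for layer in model:
    # layer[3] holds the activation name, layer[2] the layer size.
    if layer[3] in layer_name_to_class:
        layer_model.append(layer_name_to_class[layer[3]](layer[2]))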
Example No. 21
    def go(self):
        raw = self.datafile.read().splitlines()

        data = self._prepare_data(raw[::2])
        target = self._prepare_target(raw[1::2])
        print(len(data))
        print(len(target))

        environment.reproducible()

        x_train, x_test, y_train, y_test = train_test_split(data,
                                                            target,
                                                            train_size=0.85)

        print(x_train[0])
        connections = [
            layers.Input(100),
            layers.Linear(200),
            layers.Sigmoid(150),
            layers.Sigmoid(5),
        ]

        cgnet = algorithms.ConjugateGradient(
            connection=connections,
            search_method='golden',
            show_epoch=25,
            verbose=True,
            addons=[algorithms.LinearSearch],
        )

        cgnet.train(x_train, y_train, x_test, y_test, epochs=100)
        plots.error_plot(cgnet)

        y_predict = cgnet.predict(x_test).round(1)
        error = rmsle(y_test, y_predict)
        print(error)

        with open('lib/net/base_searcher.pickle', 'wb') as f:
            pickle.dump(cgnet, f)
Example No. 22
    def initialize(self):
        self.network = algorithms.Momentum(
            [
                layers.Input(20),
                layers.Relu(30, weight=init.Uniform(-1, 1)),
                layers.Tanh(40, weight=init.Uniform(-1, 1)),
                # layers.Embedding(40, 1),
                # layers.GRU(40),
                layers.Relu(25, weight=init.Uniform(-1, 1)),
                layers.Linear(9, weight=init.Uniform(-1, 1)),
            ],

            error='categorical_crossentropy',
            step=0.01,
            verbose=False,
            shuffle_data=True,

            momentum=0.99,
            nesterov=True,

        )
        self.network.architecture()
Example No. 23
def vgg19():
    """
    VGG19 network architecture with randomly initialized parameters.
    Pretrained parameters can be loaded using the ``neupy.storage``
    module.

    VGG19 was originally built to solve the image classification
    problem posed by the ImageNet competition, where the goal is to
    build a model that classifies an image into one of 1,000
    categories, including animals, objects, vehicles and so on.

    VGG19 has roughly 143 million parameters.

    Examples
    --------
    >>> from neupy import architectures
    >>> vgg19 = architectures.vgg19()
    >>> vgg19
    (3, 224, 224) -> [... 44 layers ...] -> 1000
    >>>
    >>> from neupy import algorithms
    >>> network = algorithms.Momentum(vgg19)

    See Also
    --------
    :architecture:`vgg16` : VGG16 network
    :architecture:`squeezenet` : SqueezeNet network
    :architecture:`alexnet` : AlexNet network
    :architecture:`resnet50` : ResNet50 network

    References
    ----------
    Very Deep Convolutional Networks for Large-Scale Image Recognition.
    https://arxiv.org/abs/1409.1556
    """
    HalfPadConvolution = partial(layers.Convolution, padding='half')

    return layers.join(
        layers.Input((3, 224, 224)),
        HalfPadConvolution((64, 3, 3), name='conv1_1') > layers.Relu(),
        HalfPadConvolution((64, 3, 3), name='conv1_2') > layers.Relu(),
        layers.MaxPooling((2, 2)),
        HalfPadConvolution((128, 3, 3), name='conv2_1') > layers.Relu(),
        HalfPadConvolution((128, 3, 3), name='conv2_2') > layers.Relu(),
        layers.MaxPooling((2, 2)),
        HalfPadConvolution((256, 3, 3), name='conv3_1') > layers.Relu(),
        HalfPadConvolution((256, 3, 3), name='conv3_2') > layers.Relu(),
        HalfPadConvolution((256, 3, 3), name='conv3_3') > layers.Relu(),
        HalfPadConvolution((256, 3, 3), name='conv3_4') > layers.Relu(),
        layers.MaxPooling((2, 2)),
        HalfPadConvolution((512, 3, 3), name='conv4_1') > layers.Relu(),
        HalfPadConvolution((512, 3, 3), name='conv4_2') > layers.Relu(),
        HalfPadConvolution((512, 3, 3), name='conv4_3') > layers.Relu(),
        HalfPadConvolution((512, 3, 3), name='conv4_4') > layers.Relu(),
        layers.MaxPooling((2, 2)),
        HalfPadConvolution((512, 3, 3), name='conv5_1') > layers.Relu(),
        HalfPadConvolution((512, 3, 3), name='conv5_2') > layers.Relu(),
        HalfPadConvolution((512, 3, 3), name='conv5_3') > layers.Relu(),
        HalfPadConvolution((512, 3, 3), name='conv5_4') > layers.Relu(),
        layers.MaxPooling((2, 2)),
        layers.Reshape(),
        layers.Linear(4096, name='dense_1') > layers.Relu(),
        layers.Dropout(0.5),
        layers.Linear(4096, name='dense_2') > layers.Relu(),
        layers.Dropout(0.5),
        layers.Linear(1000, name='dense_3') > layers.Softmax(),
    )
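The docstring notes that parameters can be loaded through the ``neupy.storage`` module. A minimal, hedged sketch of that step; the weights file name below is a placeholder, not a file shipped with the library:

from neupy import architectures, storage

vgg19_network = architectures.vgg19()
# 'vgg19.hdf5' is a placeholder path to a previously saved parameter file;
# storage.load fills the network's parameters in place.
storage.load(vgg19_network, 'vgg19.hdf5')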
Example No. 24
    def test_exception(self):
        with self.assertRaises(TypeError):
            layers.Linear(2, weight=None)
Example No. 25
from neupy.algorithms import LevenbergMarquardt
import LABS.ZeroLab.E_Function as dataset5
import matplotlib.pyplot as plt
from neupy import layers

if __name__ == '__main__':
    (x_train, y_train), (x_test, y_test) = dataset5.load_data(train_size=6000,
                                                              show=False)

    model = LevenbergMarquardt(
        [
            layers.Input(1),
            layers.Sigmoid(20),
            layers.Sigmoid(10),
            layers.Linear(1),
        ],
        error='mse',
        mu=0.1,
        verbose=True,
    )
    model.architecture()

    model.train(x_train, y_train, x_test, y_test, epochs=50)

    y_pred = model.predict(x_test)

    plt.plot(x_test, y_test, '.b')
    plt.plot(x_test, y_pred, '.r')

    plt.show()
Example No. 26
    crossentropy_loss = T.sum(
        T.nnet.binary_crossentropy(predicted, expected),
        axis=1
    )
    kl_loss = -0.5 * T.sum(
        1 + 2 * log_var - T.square(mean) - T.exp(2 * log_var),
        axis=1
    )

    return (crossentropy_loss + kl_loss).mean()


# Construct Variational Autoencoder
encoder = layers.Input(784) > layers.Tanh(500)

mu = layers.Linear(2, name='mu')
sigma = layers.Linear(2, name='sigma')
sampler = [mu, sigma] > GaussianSample()

decoder = layers.Tanh(500) > layers.Sigmoid(784)

# Train network
network = algorithms.RMSProp(
    encoder > sampler > decoder,

    error=vae_loss,
    batch_size=128,
    shuffle_data=True,
    step=0.001,
    verbose=True,
Example No. 27
    def test_linear_layer(self):
        layer = layers.Linear(1)
        self.assertEqual(layer.activation_function(1), 1)
Example No. 28
        action='store_true',
        help='load pretrained network from file and play without training',
    )
    args = parser.parse_args()
    network = algorithms.RMSProp(
        [
            layers.Input(4),
            layers.Relu(64),
            layers.Relu(48),
            layers.Relu(32),
            layers.Relu(64) > layers.Dropout(0.2),

            # Expecting two different actions:
            # 1. Move left
            # 2. Move right
            layers.Linear(2),
        ],
        step=0.0005,
        error='rmse',
        batch_size='full',
        verbose=False)

    env = gym.make('CartPole-v0')
    env.seed(0)  # To make results reproducible for the gym

    memory_size = 1000  # Number of samples stored in the memory
    memory = deque(maxlen=memory_size)

    if args.use_pretrained:
        if not os.path.exists(CARTPOLE_WEIGHTS):
            raise OSError("Cannot find file with pretrained weights "
Example No. 29
    x_test -= mean
    x_test /= std

    return x_train, x_test, y_train, y_test


network = algorithms.Adadelta(
    [
        layers.Input((1, 28, 28)),
        layers.Convolution((32, 3, 3)) > layers.BatchNorm() > layers.Relu(),
        layers.Convolution((48, 3, 3)) > layers.BatchNorm() > layers.Relu(),
        layers.MaxPooling((2, 2)),
        layers.Convolution((64, 3, 3)) > layers.BatchNorm() > layers.Relu(),
        layers.MaxPooling((2, 2)),
        layers.Reshape(),
        layers.Linear(1024) > layers.BatchNorm() > layers.Relu(),
        layers.Softmax(10),
    ],

    # Using categorical cross-entropy as a loss function
    error='categorical_crossentropy',

    # Min-batch size
    batch_size=128,

    # Learning rate. We can allow high values
    # since we are using Batch Normalization
    step=1.0,

    # Shows information about algorithm and
    # training progress in terminal
Example No. 30
def vgg16():
    """
    VGG16 network architecture with randomly initialized parameters.
    Pretrained parameters can be loaded using the ``neupy.storage``
    module.

    VGG16 was originally built to solve the image classification
    problem posed by the ImageNet competition, where the goal is to
    build a model that classifies an image into one of 1,000
    categories, including animals, objects, vehicles and so on.

    VGG16 has roughly 138 million parameters.

    Examples
    --------
    >>> from neupy import architectures
    >>> vgg16 = architectures.vgg16()
    >>> vgg16
    (?, 224, 224, 3) -> [... 41 layers ...] -> (?, 1000)

    >>>
    >>> from neupy import algorithms
    >>> optimizer = algorithms.Momentum(vgg16, verbose=True)

    See Also
    --------
    :architecture:`vgg19` : VGG19 network
    :architecture:`squeezenet` : SqueezeNet network
    :architecture:`resnet50` : ResNet50 network

    References
    ----------
    Very Deep Convolutional Networks for Large-Scale Image Recognition.
    https://arxiv.org/abs/1409.1556
    """
    SamePadConv = layers.Convolution.define(padding='SAME')

    return layers.join(
        layers.Input((224, 224, 3)),
        SamePadConv((3, 3, 64), name='conv1_1') >> layers.Relu(),
        SamePadConv((3, 3, 64), name='conv1_2') >> layers.Relu(),
        layers.MaxPooling((2, 2)),
        SamePadConv((3, 3, 128), name='conv2_1') >> layers.Relu(),
        SamePadConv((3, 3, 128), name='conv2_2') >> layers.Relu(),
        layers.MaxPooling((2, 2)),
        SamePadConv((3, 3, 256), name='conv3_1') >> layers.Relu(),
        SamePadConv((3, 3, 256), name='conv3_2') >> layers.Relu(),
        SamePadConv((3, 3, 256), name='conv3_3') >> layers.Relu(),
        layers.MaxPooling((2, 2)),
        SamePadConv((3, 3, 512), name='conv4_1') >> layers.Relu(),
        SamePadConv((3, 3, 512), name='conv4_2') >> layers.Relu(),
        SamePadConv((3, 3, 512), name='conv4_3') >> layers.Relu(),
        layers.MaxPooling((2, 2)),
        SamePadConv((3, 3, 512), name='conv5_1') >> layers.Relu(),
        SamePadConv((3, 3, 512), name='conv5_2') >> layers.Relu(),
        SamePadConv((3, 3, 512), name='conv5_3') >> layers.Relu(),
        layers.MaxPooling((2, 2)),
        layers.Reshape(),
        layers.Linear(4096, name='dense_1') >> layers.Relu(),
        layers.Dropout(0.5),
        layers.Linear(4096, name='dense_2') >> layers.Relu(),
        layers.Dropout(0.5),
        layers.Linear(1000, name='dense_3') >> layers.Softmax(),
    )