Example #1

from functools import partial

from neupy import layers


def vgg19():
    """
    VGG19 network architecture with random parameters. Parameters
    can be loaded using the ``neupy.storage`` module.

    VGG19 was originally built to solve an image classification problem
    and was used in the ImageNet competition. The goal of the competition
    is to build a model that classifies an image into one of 1,000
    categories. Categories include animals, objects, vehicles and so on.

    VGG19 has roughly 143 million parameters.

    Examples
    --------
    >>> from neupy import architectures
    >>> vgg19 = architectures.vgg19()
    >>> vgg19
    (?, 224, 224, 3) -> [... 47 layers ...] -> (?, 1000)
    >>>
    >>> from neupy import algorithms
    >>> optimizer = algorithms.Momentum(vgg19)

    See Also
    --------
    :architecture:`vgg16` : VGG16 network
    :architecture:`squeezenet` : SqueezeNet network
    :architecture:`resnet50` : ResNet50 network

    References
    ----------
    Very Deep Convolutional Networks for Large-Scale Image Recognition.
    https://arxiv.org/abs/1409.1556
    """
    HalfPadConvolution = partial(layers.Convolution, padding='SAME')

    return layers.join(
        layers.Input((224, 224, 3)),
        HalfPadConvolution((3, 3, 64), name='conv1_1') > layers.Relu(),
        HalfPadConvolution((3, 3, 64), name='conv1_2') > layers.Relu(),
        layers.MaxPooling((2, 2)),
        HalfPadConvolution((3, 3, 128), name='conv2_1') > layers.Relu(),
        HalfPadConvolution((3, 3, 128), name='conv2_2') > layers.Relu(),
        layers.MaxPooling((2, 2)),
        HalfPadConvolution((3, 3, 256), name='conv3_1') > layers.Relu(),
        HalfPadConvolution((3, 3, 256), name='conv3_2') > layers.Relu(),
        HalfPadConvolution((3, 3, 256), name='conv3_3') > layers.Relu(),
        HalfPadConvolution((3, 3, 256), name='conv3_4') > layers.Relu(),
        layers.MaxPooling((2, 2)),
        HalfPadConvolution((3, 3, 512), name='conv4_1') > layers.Relu(),
        HalfPadConvolution((3, 3, 512), name='conv4_2') > layers.Relu(),
        HalfPadConvolution((3, 3, 512), name='conv4_3') > layers.Relu(),
        HalfPadConvolution((3, 3, 512), name='conv4_4') > layers.Relu(),
        layers.MaxPooling((2, 2)),
        HalfPadConvolution((3, 3, 512), name='conv5_1') > layers.Relu(),
        HalfPadConvolution((3, 3, 512), name='conv5_2') > layers.Relu(),
        HalfPadConvolution((3, 3, 512), name='conv5_3') > layers.Relu(),
        HalfPadConvolution((3, 3, 512), name='conv5_4') > layers.Relu(),
        layers.MaxPooling((2, 2)),
        layers.Reshape(),
        layers.Linear(4096, name='dense_1') > layers.Relu(),
        layers.Dropout(0.5),
        layers.Linear(4096, name='dense_2') > layers.Relu(),
        layers.Dropout(0.5),
        layers.Linear(1000, name='dense_3') > layers.Softmax(),
    )
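
The docstring mentions that pretrained parameters can be loaded through
``neupy.storage``. A minimal sketch of that step, assuming the weights were
saved earlier to an HDF5 file (the ``vgg19.hdf5`` file name is hypothetical):

from neupy import storage

# Build the network defined above and load previously saved parameters;
# the file name is an assumption and depends on where the weights were stored.
network = vgg19()
storage.load(network, 'vgg19.hdf5')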
Example #2
x_labeled, x_unlabeled, y_labeled, y_unlabeled = train_test_split(
    data.astype(np.float32),
    target.astype(np.float32),
    test_size=(1 - n_labeled / n_samples))

x_labeled_4d = x_labeled.reshape((n_labeled, 1, 28, 28))
x_unlabeled_4d = x_unlabeled.reshape((n_unlabeled, 1, 28, 28))

encoder = layers.join(
    layers.Input((1, 28, 28)),
    layers.Convolution((16, 3, 3)) > layers.Relu(),
    layers.Convolution((16, 3, 3)) > layers.Relu(),
    layers.MaxPooling((2, 2)),
    layers.Convolution((32, 3, 3)) > layers.Relu(),
    layers.MaxPooling((2, 2)),
    layers.Reshape(),
    layers.Relu(256),
    layers.Relu(128),
)

decoder = layers.join(
    layers.Relu(256),
    layers.Relu(32 * 5 * 5),
    layers.Reshape((32, 5, 5)),
    layers.Upscale((2, 2)),
    layers.Convolution((16, 3, 3), padding='full') > layers.Relu(),
    layers.Upscale((2, 2)),
    layers.Convolution((16, 3, 3), padding='full') > layers.Relu(),
    layers.Convolution((1, 3, 3), padding='full') > layers.Sigmoid(),
    layers.Reshape(),
)
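
The snippet defines the two halves separately; a rough sketch of how they
could be combined into a single autoencoder and pre-trained on the unlabeled
images (the optimizer settings below are assumptions, not part of the
original example):

from neupy import algorithms

# Join encoder and decoder into one network: (1, 28, 28) -> ... -> 784.
conv_autoencoder = layers.join(encoder, decoder)

# The final Reshape flattens the output, so the flat image matrix serves
# as the reconstruction target; step and epochs are illustrative only.
optimizer = algorithms.Momentum(conv_autoencoder, step=0.1, verbose=True)
optimizer.train(x_unlabeled_4d, x_unlabeled, epochs=10)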
Example #3
    def test_gru_connection_exceptions(self):
        network = layers.join(layers.GRU(10), layers.Reshape())

        with self.assertRaises(LayerConnectionError):
            layers.join(layers.Input(1), network)
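
The join is expected to fail because GRU works on sequences and therefore
needs a three-dimensional input (batch, time steps, features), which
``Input(1)`` cannot provide. A sketch of a connection that should be accepted
instead (the ``(10, 2)`` input shape is an arbitrary example):

from neupy import layers

# 10 time steps with 2 features per step; GRU(10) returns the final hidden
# state, which the trailing Reshape passes through unchanged.
network = layers.join(
    layers.Input((10, 2)),
    layers.GRU(10),
    layers.Reshape(),
)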
Example #4
network = algorithms.Momentum(
    [
        [
            [
                # 3 categorical inputs
                layers.Input(3),

                # Train an embedding matrix for the categorical inputs.
                # There are 18 unique categories in total (6 categories
                # for each of the 3 columns). The layer projects each
                # category into a 4-dimensional space, so its output
                # shape should be: (batch_size, 3, 4)
                layers.Embedding(n_unique_categories, 4),

                # Reshape (batch_size, 3, 4) to (batch_size, 12)
                layers.Reshape(),
            ],
            [
                # 17 numerical inputs
                layers.Input(17),
            ]
        ],

        # Concatenate (batch_size, 12) and (batch_size, 17)
        # into one matrix with shape (batch_size, 29)
        layers.Concatenate(),
        layers.Relu(128),
        layers.Relu(32) > layers.Dropout(0.5),
        layers.Sigmoid(1)
    ],
    step=0.2,
)
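
With the optimizer defined, each ``Input`` branch receives its own array
during training; a rough sketch, assuming neupy accepts one matrix per input
layer passed as a list (the ``x_train_categorical``, ``x_train_numeric`` and
``y_train`` names are hypothetical):

# x_train_categorical: integer matrix with shape (n_samples, 3)
# x_train_numeric: float matrix with shape (n_samples, 17)
# y_train: binary target with shape (n_samples, 1)
network.train(
    [x_train_categorical, x_train_numeric],
    y_train,
    epochs=100,  # illustrative value
)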
Example #5
    test_size=(1 - n_labeled / n_samples))

x_labeled_4d = x_labeled.reshape((n_labeled, 28, 28, 1))
x_unlabeled_4d = x_unlabeled.reshape((n_unlabeled, 28, 28, 1))

# We will reuse the features trained in the encoder as the first part of the
# future classifier. At first we pre-train them with unlabeled data, since we
# have a lot of it and we hope to learn some common features from it.
encoder = layers.join(
    layers.Input((28, 28, 1)),
    layers.Convolution((3, 3, 16)) > layers.Relu(),
    layers.Convolution((3, 3, 16)) > layers.Relu(),
    layers.MaxPooling((2, 2)),
    layers.Convolution((3, 3, 32)) > layers.Relu(),
    layers.MaxPooling((2, 2)),
    layers.Reshape(),
    layers.Relu(256),
    layers.Relu(128),
)

# Notice that every operation in the decoder reverts the corresponding change
# from the encoder. Upscale replaces MaxPooling, and the convolutions without
# padding are replaced with convolutions whose large padding increases the
# size of the image.
decoder = layers.join(
    layers.Relu(256),
    layers.Relu(32 * 5 * 5),
    layers.Reshape((5, 5, 32)),
    layers.Upscale((2, 2)),
    layers.Convolution((3, 3, 16), padding=2) > layers.Relu(),
    layers.Upscale((2, 2)),
    layers.Convolution((3, 3, 16), padding=2) > layers.Relu(),