Example #1
from tensorflow.keras.layers import (Input, Conv2D, MaxPooling2D,
                                     BatchNormalization, Flatten, Dense,
                                     Lambda)
from tensorflow.keras.models import Model


def getEncoder():
    # This returns a tensor
    inputs = Input(shape=(28, 28, 1))
    # Making a sample AlexNet Model Layer 1
    # A layer that consists of a set of “filters”. The filters take a subset of the input data at a time, but are
    # applied across the full input (by sweeping over the input). The operations performed by this layer are still
    # linear/matrix multiplications, but they go through an activation function at the output, which is usually a
    # non-linear operation.
    # We use 32 filters of size 4x4. The filter weights are initialized by
    # Keras itself; for the default initialization, see
    # https://datascience.stackexchange.com/questions/16463/what-is-are-the-default-filters-used-by-keras-convolution2d
    encoder = Conv2D(32, (4, 4), padding='same', activation='relu')(inputs)
    # A pooling layer down-samples the output of the prior layer, reducing the
    # number of operations required in all following layers while still
    # passing on the salient information from the previous layer.
    # Max pooling keeps only the maximum value within each pooling window.
    encoder = MaxPooling2D((4, 4), strides=(2, 2),
                           padding='same')(encoder)  # 14,14
    # Batch normalization rescales the previous layer's activations, much like
    # feature scaling at the input, which stabilizes training.
    encoder = BatchNormalization()(encoder)
    # Making a sample AlexNet Model Layer 2
    encoder = Conv2D(64, (2, 2), padding='same', activation='relu')(encoder)
    encoder = MaxPooling2D((4, 4), strides=(2, 2),
                           padding='same')(encoder)  # 7, 7
    encoder = BatchNormalization()(encoder)
    # Flatten the feature maps into a single vector; here the batch-norm
    # output is 7x7x64 = 3136 values.
    encoder = Flatten()(encoder)
    latentDimensions = 64
    # An explicit name lets us look the layer up directly; otherwise Keras
    # assigns an auto-generated name.
    zMeanLayer = Dense(latentDimensions, name='z_mean')(encoder)
    zLogLayer = Dense(latentDimensions, name='z_log_var')(encoder)
    # A Lambda layer wraps an arbitrary expression as a Keras layer. For
    # example, if you want to square a tensor at some point in the model, you
    # cannot insert the raw expression directly (the model only accepts
    # layers), so you wrap it in a Lambda to make it a valid layer.
    # Here the Lambda performs the reparameterization trick: its arguments are
    # the function to wrap, the function's output shape, and (in the trailing
    # call) the list of input tensors passed to that function.
    z = Lambda(sampling, output_shape=(latentDimensions, ),
               name='z')([zMeanLayer, zLogLayer])
    # Create the final encoder. A VAE encoder has two outputs, z_mean and
    # z_log_var, plus a third, z, sampled via the reparameterization trick.
    encoder = Model(inputs=inputs, outputs=[zMeanLayer, zLogLayer, z])
    encoder.summary()
    return encoder
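
The Lambda layer above wraps a `sampling` function that this snippet never defines. A minimal sketch of the standard VAE reparameterization trick it refers to, assuming the Keras backend is imported as `K` (the function body follows the usual convention and is not taken from the original):

from tensorflow.keras import backend as K


def sampling(args):
    # Reparameterization trick: z = z_mean + sigma * epsilon, with epsilon
    # drawn from a standard normal so gradients can flow through z_mean
    # and z_log_var.
    zMean, zLogVar = args
    batch = K.shape(zMean)[0]
    dim = K.int_shape(zMean)[1]
    epsilon = K.random_normal(shape=(batch, dim))
    return zMean + K.exp(0.5 * zLogVar) * epsilon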
Example #2
from tensorflow.keras.layers import (Input, Conv2D, BatchNormalization,
                                     MaxPool2D, Dropout, Flatten, Dense)
from tensorflow.keras.models import Model
from tensorflow.keras.utils import plot_model


def create_model(img_size=64):
    inputs = Input(shape=(img_size, img_size, 1))

    # Block 1: four 3x3 convolutions (32 filters), batch norm, 2x2 max
    # pooling, a 5x5 convolution, then dropout.
    model = Conv2D(filters=32,
                   kernel_size=(3, 3),
                   padding='SAME',
                   activation='relu')(inputs)
    model = Conv2D(filters=32,
                   kernel_size=(3, 3),
                   padding='SAME',
                   activation='relu')(model)
    model = Conv2D(filters=32,
                   kernel_size=(3, 3),
                   padding='SAME',
                   activation='relu')(model)
    model = Conv2D(filters=32,
                   kernel_size=(3, 3),
                   padding='SAME',
                   activation='relu')(model)
    model = BatchNormalization(momentum=0.15)(model)
    model = MaxPool2D(pool_size=(2, 2))(model)
    model = Conv2D(filters=32,
                   kernel_size=(5, 5),
                   padding='SAME',
                   activation='relu')(model)
    model = Dropout(rate=0.3)(model)

    # Block 2: the same pattern with 64 filters, adding batch norm after
    # the 5x5 convolution.
    model = Conv2D(filters=64,
                   kernel_size=(3, 3),
                   padding='SAME',
                   activation='relu')(model)
    model = Conv2D(filters=64,
                   kernel_size=(3, 3),
                   padding='SAME',
                   activation='relu')(model)
    model = Conv2D(filters=64,
                   kernel_size=(3, 3),
                   padding='SAME',
                   activation='relu')(model)
    model = Conv2D(filters=64,
                   kernel_size=(3, 3),
                   padding='SAME',
                   activation='relu')(model)
    model = BatchNormalization(momentum=0.15)(model)
    model = MaxPool2D(pool_size=(2, 2))(model)
    model = Conv2D(filters=64,
                   kernel_size=(5, 5),
                   padding='SAME',
                   activation='relu')(model)
    model = BatchNormalization(momentum=0.15)(model)
    model = Dropout(rate=0.3)(model)

    # Block 3: 128 filters.
    model = Conv2D(filters=128,
                   kernel_size=(3, 3),
                   padding='SAME',
                   activation='relu')(model)
    model = Conv2D(filters=128,
                   kernel_size=(3, 3),
                   padding='SAME',
                   activation='relu')(model)
    model = Conv2D(filters=128,
                   kernel_size=(3, 3),
                   padding='SAME',
                   activation='relu')(model)
    model = Conv2D(filters=128,
                   kernel_size=(3, 3),
                   padding='SAME',
                   activation='relu')(model)
    model = BatchNormalization(momentum=0.15)(model)
    model = MaxPool2D(pool_size=(2, 2))(model)
    model = Conv2D(filters=128,
                   kernel_size=(5, 5),
                   padding='SAME',
                   activation='relu')(model)
    model = BatchNormalization(momentum=0.15)(model)
    model = Dropout(rate=0.3)(model)

    # Block 4: 256 filters.
    model = Conv2D(filters=256,
                   kernel_size=(3, 3),
                   padding='SAME',
                   activation='relu')(model)
    model = Conv2D(filters=256,
                   kernel_size=(3, 3),
                   padding='SAME',
                   activation='relu')(model)
    model = Conv2D(filters=256,
                   kernel_size=(3, 3),
                   padding='SAME',
                   activation='relu')(model)
    model = Conv2D(filters=256,
                   kernel_size=(3, 3),
                   padding='SAME',
                   activation='relu')(model)
    model = BatchNormalization(momentum=0.15)(model)
    model = MaxPool2D(pool_size=(2, 2))(model)
    model = Conv2D(filters=256,
                   kernel_size=(5, 5),
                   padding='SAME',
                   activation='relu')(model)
    model = BatchNormalization(momentum=0.15)(model)
    model = Dropout(rate=0.3)(model)

    # Shared dense trunk feeding the three classification heads.
    model = Flatten()(model)
    model = Dense(1024, activation="relu")(model)
    model = Dropout(rate=0.3)(model)
    dense = Dense(512, activation="relu")(model)

    # Three softmax heads, one per target (168-, 11-, and 7-way).
    head_root = Dense(168, activation='softmax')(dense)
    head_vowel = Dense(11, activation='softmax')(dense)
    head_consonant = Dense(7, activation='softmax')(dense)

    model = Model(inputs=inputs,
                  outputs=[head_root, head_vowel, head_consonant])
    model.summary()

    plot_model(model, to_file='./results/model.png')
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
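
A hypothetical training call for the model above; the arrays `X`, `y_root`, `y_vowel`, and `y_consonant` are assumptions (not part of the original snippet) and must be one-hot encoded to widths 168, 11, and 7 to match the three heads:

# Hypothetical data: X is (num_samples, 64, 64, 1) grayscale images;
# one one-hot label array per output head.
model = create_model(img_size=64)
model.fit(X, [y_root, y_vowel, y_consonant],
          batch_size=64, epochs=10, validation_split=0.1)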
Example #3
tower_3 = Conv2D(48, (1, 1), activation='relu')(model)
tower_3 = Conv2D(128, (5, 5), padding='same', activation='relu')(tower_3)
tower_4 = MaxPooling2D((3, 3), strides=1, padding='same')(model)
tower_4 = Conv2D(128, (1, 1), activation='relu')(tower_4)
model = Concatenate(axis=-1)([tower_1, tower_2, tower_3, tower_4])
model = AveragePooling2D((1, 1), strides=1, padding='valid')(model)

# model = AveragePooling2D((7,7), strides=1, padding='same')(model)   # I had this wrong earlier

model = Flatten()(model)
model = Dropout(0.4)(model)
model = Dense(cifar_classes, activation='linear')(model)
output = Dense(cifar_classes, activation='softmax')(model)

model = Model(input, output)
model.summary()
"""## CIFAR-10

The CIFAR-10 dataset contains 60,000 color images in 10 classes, with 6,000 images in each class. The dataset is divided into 50,000 training images and 10,000 testing images. The classes are mutually exclusive and there is no overlap between them.
"""

(train_images_cifar,
 train_labels_cifar), (test_images_cifar,
                       test_labels_cifar) = datasets.cifar10.load_data()

# Normalize pixel values to be between 0 and 1
train_images_cifar, test_images_cifar = train_images_cifar / 255.0, test_images_cifar / 255.0

print('train_images_cifar:', train_images_cifar.shape)
print('train_labels_cifar:', train_labels_cifar.shape)
print('test_images_cifar:', test_images_cifar.shape)
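
The labels come back from `cifar10.load_data()` as integer class IDs of shape (N, 1). A minimal sketch of one-hot encoding them for the 10-way softmax head built above and training the model; the optimizer, epoch, and batch-size values are assumptions, not from the original:

from tensorflow.keras.utils import to_categorical

# One-hot encode the integer labels to match the softmax output.
train_labels_cifar = to_categorical(train_labels_cifar, num_classes=10)
test_labels_cifar = to_categorical(test_labels_cifar, num_classes=10)

# Assumes the model's Input layer matches the 32x32x3 CIFAR images.
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(train_images_cifar, train_labels_cifar,
          epochs=10, batch_size=64,
          validation_data=(test_images_cifar, test_labels_cifar))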