Example #1
# assumes tf.keras; the standalone keras package exposes the same names
from tensorflow.keras.layers import (Input, Conv2D, MaxPool2D, BatchNormalization,
                                     Dropout, Flatten, Dense, concatenate)
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam


def make_model(shape):
    input = Input(shape)
    cur_layer = Conv2D(3, (3, 3), activation='relu', padding='same')(input)
    cur_layer = Conv2D(3, (5, 5), activation='relu', padding='same')(cur_layer)
    cur_layer = MaxPool2D(pool_size=(3, 3), strides=(3, 3))(cur_layer)
    cur_layer = BatchNormalization()(cur_layer)
    cur_layer = Dropout(0.5)(cur_layer)
    cur_layer = Conv2D(16, (5, 5), activation='relu',
                       padding='same')(cur_layer)
    cur_layer = MaxPool2D(pool_size=(5, 5), strides=(5, 5))(cur_layer)
    cur_layer = BatchNormalization()(cur_layer)
    cur_layer = Dropout(0.5)(cur_layer)
    cur_layer = Flatten()(cur_layer)

    o1 = Dense(310, name='f')(cur_layer)

    o2 = concatenate([o1, cur_layer])
    o2 = Dense(3678, name='g')(o2)
    o3 = concatenate([o1, o2, cur_layer])
    o3 = Dense(32094, name='c')(o3)
    cur_layer = Model(inputs=input, outputs=[o1, o2, o3])
    opt = Adam(learning_rate=0.001, amsgrad=True)
    cur_layer.compile(optimizer=opt,
                      loss=[
                          'sparse_categorical_crossentropy',
                          'sparse_categorical_crossentropy',
                          'sparse_categorical_crossentropy'
                      ],
                      metrics=['accuracy'])
    return cur_layer
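A minimal smoke test for the model above (a sketch: the 15x15 input size and batch size are illustrative assumptions, chosen so that both pooling strides divide evenly):

import numpy as np

model = make_model((15, 15, 3))   # 15x15 -> 5x5 after the 3x3-stride pool -> 1x1 after the 5x5-stride pool
model.summary()
dummy = np.random.rand(2, 15, 15, 3).astype("float32")
preds_f, preds_g, preds_c = model.predict(dummy)  # one prediction array per output head ('f', 'g', 'c')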
Example #2
def generate_classifier(size=128, classes=4, learning_rate=0.01):
    x, y = size, size  # pixel_crop * number_of_crops, pixel_crop * number_of_crops
    i = Input(shape=(x, y, 3))
    m = Conv2D(64, kernel_size=(3, 3), padding="same", activation="relu")(i)
    m = BatchNormalization()(m)
    m = MaxPool2D()(m)
    m = Conv2D(64, kernel_size=(3, 3), padding="same", activation="relu")(m)
    m = BatchNormalization()(m)
    m = MaxPool2D()(m)
    m = Conv2D(128, kernel_size=(3, 3), padding="same", activation="relu")(m)
    m = BatchNormalization()(m)
    m = MaxPool2D()(m)
    m = Conv2D(256, kernel_size=(3, 3), padding="same", activation="relu")(m)
    m = BatchNormalization()(m)
    m = MaxPool2D()(m)
    m = Conv2D(512, kernel_size=(3, 3), padding="same", activation="relu")(m)
    m = BatchNormalization()(m)
    m = MaxPool2D()(m)
    m = Flatten()(m)
    m = Dense(512, activation="relu")(m)
    m = Dropout(0.3)(m)
    out1 = Dense(classes, activation="softmax", name="output_classes")(m)
    out2 = Dense(1, activation="sigmoid", name="output_score")(m)
    m = Model(i, [out1, out2])
    optimizer = Adam(learning_rate=learning_rate)
    m.compile(optimizer=optimizer,
              loss={
                  'output_score': 'mse',
                  'output_classes': 'categorical_crossentropy'
              },
              loss_weights={
                  'output_score': 5.,
                  'output_classes': 1.
              })
    return m
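A hedged usage sketch for generate_classifier: the random inputs and target shapes below are assumptions, keyed by the two named output heads so Keras can match losses to outputs:

import numpy as np

clf = generate_classifier(size=128, classes=4, learning_rate=0.01)
x = np.random.rand(16, 128, 128, 3).astype("float32")
y = {
    "output_classes": np.eye(4)[np.random.randint(0, 4, 16)],   # one-hot labels for categorical_crossentropy
    "output_score": np.random.rand(16, 1).astype("float32"),    # values in [0, 1] for the sigmoid/mse head
}
clf.fit(x, y, epochs=1, batch_size=8)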
Example #3
def get_model(img_size=64, loss_weights=[0.5, 0.25, 0.25], optimizer='adam'):
    inputs = Input(shape=(img_size, img_size, 1))

    model = Conv2D(filters=32, kernel_size=(3, 3), padding='SAME', activation='relu')(inputs)
    model = Conv2D(filters=32, kernel_size=(3, 3), padding='SAME', activation='relu')(model)
    model = Conv2D(filters=32, kernel_size=(3, 3), padding='SAME', activation='relu')(model)
    model = Conv2D(filters=32, kernel_size=(3, 3), padding='SAME', activation='relu')(model)
    model = BatchNormalization(momentum=0.15)(model)
    model = MaxPool2D(pool_size=(2, 2))(model)
    model = Conv2D(filters=32, kernel_size=(5, 5), padding='SAME', activation='relu')(model)
    model = Dropout(rate=0.3)(model)

    model = Conv2D(filters=64, kernel_size=(3, 3), padding='SAME', activation='relu')(model)
    model = Conv2D(filters=64, kernel_size=(3, 3), padding='SAME', activation='relu')(model)
    model = Conv2D(filters=64, kernel_size=(3, 3), padding='SAME', activation='relu')(model)
    model = Conv2D(filters=64, kernel_size=(3, 3), padding='SAME', activation='relu')(model)
    model = BatchNormalization(momentum=0.15)(model)
    model = MaxPool2D(pool_size=(2, 2))(model)
    model = Conv2D(filters=64, kernel_size=(5, 5), padding='SAME', activation='relu')(model)
    model = BatchNormalization(momentum=0.15)(model)
    model = Dropout(rate=0.3)(model)

    model = Conv2D(filters=128, kernel_size=(3, 3), padding='SAME', activation='relu')(model)
    model = Conv2D(filters=128, kernel_size=(3, 3), padding='SAME', activation='relu')(model)
    model = Conv2D(filters=128, kernel_size=(3, 3), padding='SAME', activation='relu')(model)
    model = Conv2D(filters=128, kernel_size=(3, 3), padding='SAME', activation='relu')(model)
    model = BatchNormalization(momentum=0.15)(model)
    model = MaxPool2D(pool_size=(2, 2))(model)
    model = Conv2D(filters=128, kernel_size=(5, 5), padding='SAME', activation='relu')(model)
    model = BatchNormalization(momentum=0.15)(model)
    model = Dropout(rate=0.3)(model)

    model = Conv2D(filters=256, kernel_size=(3, 3), padding='SAME', activation='relu')(model)
    model = Conv2D(filters=256, kernel_size=(3, 3), padding='SAME', activation='relu')(model)
    model = Conv2D(filters=256, kernel_size=(3, 3), padding='SAME', activation='relu')(model)
    model = Conv2D(filters=256, kernel_size=(3, 3), padding='SAME', activation='relu')(model)
    model = BatchNormalization(momentum=0.15)(model)
    model = MaxPool2D(pool_size=(2, 2))(model)
    model = Conv2D(filters=256, kernel_size=(5, 5), padding='SAME', activation='relu')(model)
    model = BatchNormalization(momentum=0.15)(model)
    model = Dropout(rate=0.3)(model)

    model = Flatten()(model)
    model = Dense(1024, activation="relu")(model)
    model = Dropout(rate=0.3)(model)
    dense = Dense(512, activation="relu")(model)

    head_root = Dense(168, activation='softmax', name='out_root')(dense)
    head_vowel = Dense(11, activation='softmax', name='out_vowel')(dense)
    head_consonant = Dense(7, activation='softmax', name='out_consonant')(dense)

    model = Model(inputs=inputs, outputs=[head_root, head_vowel, head_consonant])

    model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['acc'],
                  loss_weights=loss_weights)

    return model
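A hedged training sketch for get_model, feeding one-hot targets keyed by the three named heads (the random data and batch size are illustrative only):

import numpy as np

model = get_model(img_size=64)
x = np.random.rand(8, 64, 64, 1).astype("float32")
y = {
    "out_root": np.eye(168)[np.random.randint(0, 168, 8)],
    "out_vowel": np.eye(11)[np.random.randint(0, 11, 8)],
    "out_consonant": np.eye(7)[np.random.randint(0, 7, 8)],
}
model.fit(x, y, epochs=1, batch_size=4)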
Example #4
def create_model(img_size=64):
        inputs = Input(shape=(img_size, img_size, 1))

        model = Conv2D(filters=32,
                       kernel_size=(3, 3),
                       padding='SAME',
                       activation='relu',
                       input_shape=(img_size, img_size, 1))(inputs)
        model = Conv2D(filters=32,
                       kernel_size=(3, 3),
                       padding='SAME',
                       activation='relu')(model)
        model = Conv2D(filters=32,
                       kernel_size=(3, 3),
                       padding='SAME',
                       activation='relu')(model)
        model = Conv2D(filters=32,
                       kernel_size=(3, 3),
                       padding='SAME',
                       activation='relu')(model)
        model = BatchNormalization(momentum=0.15)(model)
        model = MaxPool2D(pool_size=(2, 2))(model)
        model = Conv2D(filters=32,
                       kernel_size=(5, 5),
                       padding='SAME',
                       activation='relu')(model)
        model = Dropout(rate=0.3)(model)

        model = Conv2D(filters=64,
                       kernel_size=(3, 3),
                       padding='SAME',
                       activation='relu')(model)
        model = Conv2D(filters=64,
                       kernel_size=(3, 3),
                       padding='SAME',
                       activation='relu')(model)
        model = Conv2D(filters=64,
                       kernel_size=(3, 3),
                       padding='SAME',
                       activation='relu')(model)
        model = Conv2D(filters=64,
                       kernel_size=(3, 3),
                       padding='SAME',
                       activation='relu')(model)
        model = BatchNormalization(momentum=0.15)(model)
        model = MaxPool2D(pool_size=(2, 2))(model)
        model = Conv2D(filters=64,
                       kernel_size=(5, 5),
                       padding='SAME',
                       activation='relu')(model)
        model = BatchNormalization(momentum=0.15)(model)
        model = Dropout(rate=0.3)(model)

        model = Conv2D(filters=128,
                       kernel_size=(3, 3),
                       padding='SAME',
                       activation='relu')(model)
        model = Conv2D(filters=128,
                       kernel_size=(3, 3),
                       padding='SAME',
                       activation='relu')(model)
        model = Conv2D(filters=128,
                       kernel_size=(3, 3),
                       padding='SAME',
                       activation='relu')(model)
        model = Conv2D(filters=128,
                       kernel_size=(3, 3),
                       padding='SAME',
                       activation='relu')(model)
        model = BatchNormalization(momentum=0.15)(model)
        model = MaxPool2D(pool_size=(2, 2))(model)
        model = Conv2D(filters=128,
                       kernel_size=(5, 5),
                       padding='SAME',
                       activation='relu')(model)
        model = BatchNormalization(momentum=0.15)(model)
        model = Dropout(rate=0.3)(model)

        model = Conv2D(filters=256,
                       kernel_size=(3, 3),
                       padding='SAME',
                       activation='relu')(model)
        model = Conv2D(filters=256,
                       kernel_size=(3, 3),
                       padding='SAME',
                       activation='relu')(model)
        model = Conv2D(filters=256,
                       kernel_size=(3, 3),
                       padding='SAME',
                       activation='relu')(model)
        model = Conv2D(filters=256,
                       kernel_size=(3, 3),
                       padding='SAME',
                       activation='relu')(model)
        model = BatchNormalization(momentum=0.15)(model)
        model = MaxPool2D(pool_size=(2, 2))(model)
        model = Conv2D(filters=256,
                       kernel_size=(5, 5),
                       padding='SAME',
                       activation='relu')(model)
        model = BatchNormalization(momentum=0.15)(model)
        model = Dropout(rate=0.3)(model)

        model = Flatten()(model)
        model = Dense(1024, activation="relu")(model)
        model = Dropout(rate=0.3)(model)
        dense = Dense(512, activation="relu")(model)

        head_root = Dense(168, activation='softmax')(dense)
        head_vowel = Dense(11, activation='softmax')(dense)
        head_consonant = Dense(7, activation='softmax')(dense)

        model = Model(inputs=inputs,
                      outputs=[head_root, head_vowel, head_consonant])
        model.summary()

        plot_model(model, to_file='./results/model.png')
        model.compile(optimizer='adam',
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])
        return model
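Example #3 and Example #4 repeat the same four-convolution stage at 32/64/128/256 filters; a hypothetical helper that expresses one stage (the name and signature are mine, assuming the same Keras layer imports as in Example #1):

def conv_stage(x, filters, bn_after_5x5=True, drop=0.3):
    # Four 3x3 convolutions, then BN + 2x2 max-pool, then a 5x5 convolution and dropout,
    # mirroring one block of get_model / create_model.
    for _ in range(4):
        x = Conv2D(filters, (3, 3), padding='same', activation='relu')(x)
    x = BatchNormalization(momentum=0.15)(x)
    x = MaxPool2D(pool_size=(2, 2))(x)
    x = Conv2D(filters, (5, 5), padding='same', activation='relu')(x)
    if bn_after_5x5:
        x = BatchNormalization(momentum=0.15)(x)
    return Dropout(drop)(x)

# e.g. x = conv_stage(x, 32, bn_after_5x5=False); x = conv_stage(x, 64); x = conv_stage(x, 128); x = conv_stage(x, 256)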
Example #5
plt.figure(figsize=(10, 10))
for i in range(25):  # assumed: the snippet begins mid-loop; the 5x5 preview grid is a reconstruction, not from the source
    plt.subplot(5, 5, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(train_images_cifar[i], cmap=plt.cm.binary)
    # The CIFAR labels happen to be arrays,
    # which is why you need the extra index
    plt.xlabel(class_names[train_labels_cifar[i][0]])
plt.show()

# Learning rate tested with 0.001, 0.005 and 0.01 over 300 epochs for Table VII

sgd = optimizers.SGD(learning_rate=0.005,
                     decay=1e-6,
                     momentum=0.9,
                     nesterov=True)

model.compile(
    optimizer=sgd,
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=['accuracy'])

# Batch size of 128, 256 & 512 tested for Modification 2
# Batch size set to 128 for Modification 3 and 4

history = model.fit(train_images_cifar,
                    train_labels_cifar,
                    batch_size=128,
                    epochs=200,
                    validation_data=(test_images_cifar, test_labels_cifar))

plt.plot(history.history['accuracy'], label='accuracy')
plt.plot(history.history['val_accuracy'], label='val_accuracy')
plt.plot(history.history['loss'], label='loss')
plt.plot(history.history['val_loss'], label='val_loss')
plt.legend()
plt.show()
Example #6
def create_model(image_size=64, weights_path='weights/bengalimodal.h5', pretrained=True):
    """
    Description
    -----------
    Creates model from https://www.kaggle.com/amanmishra4yearbtech/bengali-classification-quick-implementation


    Parameters
    ----------
    image_size: int
        Input image size (height and width) for the model.
    weights_path: str
        Path to the model's weights. Required if pretrained=True.
    pretrained: bool
        Whether to load pretrained weights.


    Returns
    -------
    model: tensorflow.keras.models.Model
        CNN model
    """

    inputs = Input(shape=(image_size, image_size, 1))

    model = Conv2D(filters=32, kernel_size=(3, 3), padding='SAME', activation='relu',
                   input_shape=(image_size, image_size, 1))(inputs)
    model = Conv2D(filters=32, kernel_size=(3, 3), padding='SAME', activation='relu')(model)
    model = BatchNormalization(momentum=0.15)(model)
    model = MaxPool2D(pool_size=(2, 2))(model)
    model = Conv2D(filters=32, kernel_size=(5, 5), padding='SAME', activation='relu')(model)
    model = Dropout(rate=0.25)(model)

    model = Conv2D(filters=64, kernel_size=(3, 3), padding='SAME', activation='relu')(model)
    model = Conv2D(filters=64, kernel_size=(3, 3), padding='SAME', activation='relu')(model)
    model = BatchNormalization(momentum=0.15)(model)
    model = MaxPool2D(pool_size=(2, 2))(model)
    model = Conv2D(filters=64, kernel_size=(5, 5), padding='SAME', activation='relu')(model)
    model = BatchNormalization(momentum=0.15)(model)
    model = Dropout(rate=0.25)(model)

    model = Conv2D(filters=128, kernel_size=(3, 3), padding='SAME', activation='relu')(model)
    model = Conv2D(filters=128, kernel_size=(3, 3), padding='SAME', activation='relu')(model)
    model = BatchNormalization(momentum=0.15)(model)
    model = MaxPool2D(pool_size=(2, 2))(model)
    model = Conv2D(filters=128, kernel_size=(5, 5), padding='SAME', activation='relu')(model)
    model = BatchNormalization(momentum=0.15)(model)
    model = Dropout(rate=0.2)(model)

    model = Conv2D(filters=256, kernel_size=(3, 3), padding='SAME', activation='relu')(model)
    model = Conv2D(filters=256, kernel_size=(3, 3), padding='SAME', activation='relu')(model)
    model = Conv2D(filters=256, kernel_size=(3, 3), padding='SAME', activation='relu')(model)
    model = BatchNormalization(momentum=0.15)(model)
    model = MaxPool2D(pool_size=(2, 2))(model)
    model = Conv2D(filters=256, kernel_size=(5, 5), padding='SAME', activation='relu')(model)
    model = BatchNormalization(momentum=0.20)(model)
    model = Dropout(rate=0.25)(model)

    model = Flatten()(model)
    model = Dense(512, activation="relu", name='dense_')(model)
    model = Dropout(rate=0.25)(model)
    dense = Dense(256, activation="relu", name='dense_1')(model)

    head_root = Dense(168, activation='softmax', name='dense_2')(dense)
    head_vowel = Dense(11, activation='softmax', name='dense_3')(dense)
    head_consonant = Dense(7, activation='softmax', name='dense_4')(dense)

    model = Model(inputs=inputs, outputs=[head_root, head_vowel, head_consonant])  # three output heads: root, vowel, consonant
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

    if pretrained:
        model.load_weights(weights_path)

    return model
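A short usage sketch (assumed: pretrained=False so no weights file is needed; the dummy batch is illustrative):

import numpy as np

model = create_model(image_size=64, pretrained=False)
x = np.random.rand(4, 64, 64, 1).astype("float32")
root_p, vowel_p, consonant_p = model.predict(x)
print(root_p.shape, vowel_p.shape, consonant_p.shape)  # (4, 168) (4, 11) (4, 7)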