Example #1
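The snippets below come from different projects and omit their imports. A minimal import block they would typically assume (tf.keras names; treat the exact module paths as an assumption, since each original project may import them differently):

# Assumed imports for the snippets below (tf.keras).
import numpy as np
from tensorflow import keras
from tensorflow.keras import models, regularizers
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import (
    Input, InputLayer, Conv1D, Conv2D, MaxPooling1D, MaxPooling2D, MaxPool2D,
    Dense, Dropout, Flatten, Activation, BatchNormalization, LSTM,
    TimeDistributed, Concatenate)
from tensorflow.keras.optimizers import Adam, Adamax
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.preprocessing.image import ImageDataGenerator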
def get_cnn(input_data, num_labels):
    # Plain stack of Conv2D layers followed by a softmax classifier.
    # input_data is a single sample, so input_data.shape gives (H, W, C);
    # LEARNING_RATE is a module-level constant defined elsewhere.
    model = Sequential()
    model.add(Conv2D(16, kernel_size=2, activation='relu', input_shape=input_data.shape))
    model.add(Conv2D(64, kernel_size=3, activation='relu'))
    model.add(Conv2D(128, kernel_size=3, activation='relu'))
    model.add(Flatten())
    model.add(Dense(num_labels, activation='softmax'))
    opt = Adamax(learning_rate=LEARNING_RATE)
    model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
    return model
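A hypothetical call, for reference (the sample shape and the LEARNING_RATE value are assumptions; the snippet defines neither):

LEARNING_RATE = 1e-3  # assumed value; the snippet reads it as a global
sample = np.zeros((28, 28, 1), dtype='float32')  # stand-in for one input sample
model = get_cnn(sample, num_labels=10)
model.summary()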
Example #2
def get_cnn_adv(input_data, num_labels):
    model = Sequential()
    div = 4
    # First kernel spans a quarter of the input height and a single column.
    model.add(Conv2D(64, kernel_size=(input_data.shape[0] // div, 1), activation='relu', input_shape=input_data.shape))
    model.add(Conv2D(128, kernel_size=(div, 8), activation='relu'))
    model.add(Flatten())
    model.add(Dense(500, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_labels, activation='softmax'))
    opt = Adamax(learning_rate=KeyConstants.ELR)  # ELR: learning rate held in an external constants class
    model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
    return model
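To see what the tall first kernel does, a hypothetical shape walk-through (the (128, 32, 1) input and the KeyConstants stand-in are assumptions):

class KeyConstants:  # stand-in for the project's external constants class
    ELR = 1e-3       # assumed learning-rate value

sample = np.zeros((128, 32, 1), dtype='float32')  # e.g. 128 bins x 32 frames
model = get_cnn_adv(sample, num_labels=5)
# The first Conv2D kernel is (128 // 4, 1) = (32, 1): each filter reads a
# quarter-height, one-column slice, giving a (97, 32, 64) feature map.
model.summary()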
Example #3
def get_stacked_cnn_lstm(input_data, num_labels):
    # CNN applied per time step, then an LSTM over the per-step feature vectors.
    # input_shape goes on the TimeDistributed wrapper, as (timesteps, H, W, C);
    # on the inner Conv2D it would be silently ignored.
    model = Sequential()
    model.add(TimeDistributed(Conv2D(16, kernel_size=3, activation='relu'),
                              input_shape=input_data.shape))
    model.add(TimeDistributed(Conv2D(64, kernel_size=5, activation='relu')))
    model.add(TimeDistributed(Flatten()))
    model.add(LSTM(units=2048))
    model.add(Dense(num_labels, activation='softmax'))
    opt = Adamax(learning_rate=LEARNING_RATE)

    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])
    return model
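get_stacked_cnn_lstm expects a single sample shaped (timesteps, height, width, channels). A hypothetical call (all values assumed):

LEARNING_RATE = 1e-3  # assumed, as above
sample = np.zeros((10, 32, 32, 1), dtype='float32')  # 10 frames of 32x32 grayscale
model = get_stacked_cnn_lstm(sample, num_labels=4)
model.summary()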
Example #4
def get_pen_cnn(input_data, num_labels):
    model = Sequential()
    model.add(Conv2D(64, kernel_size=(3, 3), activation='relu', input_shape=input_data.shape))
    model.add(MaxPooling2D(pool_size=2, strides=2))
    model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
    model.add(Flatten())
    model.add(Dense(500, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_labels, activation='softmax'))

    opt = Adam(learning_rate=0.0003)

    model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
    return model
Example #5
def build_network():
    network = models.Sequential()
    network.add(Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))
    network.add(MaxPooling2D((2, 2)))
    network.add(Conv2D(64, (3, 3), activation='relu'))
    network.add(MaxPooling2D((2, 2)))
    network.add(Conv2D(64, (3, 3), activation='relu'))
    network.add(Flatten())
    network.add(Dense(64, activation='relu'))
    # network.add(Dense(32, activation='relu'))
    network.add(Dense(10, activation='softmax'))

    network.compile(optimizer='adam',
                    loss='sparse_categorical_crossentropy',
                    metrics=['accuracy'])
    return network
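Because build_network compiles with sparse_categorical_crossentropy, it expects integer class ids rather than one-hot vectors. A hypothetical smoke test on MNIST-shaped random data:

network = build_network()
x = np.random.rand(32, 28, 28, 1).astype('float32')  # fake batch, assumed shapes
y = np.random.randint(0, 10, size=(32,))             # integer labels, not one-hot
network.fit(x, y, epochs=1, batch_size=8)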
Example #6
def create_model():
    model = Sequential()

    # (300, 300, 3) matches the generator's target_size below (RGB by default).
    model.add(Conv2D(32, (3, 3), input_shape=(300, 300, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(128, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    training_data_generator = ImageDataGenerator(
        rescale=1. / 255,
        shear_range=0.1,
        zoom_range=0.1,
        horizontal_flip=True)

    # class_mode must be "binary" to match the single sigmoid output and
    # binary_crossentropy loss; "categorical" would yield one-hot labels.
    # data_dir is expected to be defined at module level.
    training_generator = training_data_generator.flow_from_directory(
        data_dir,
        target_size=(300, 300),
        batch_size=5,
        class_mode="binary")

    model.fit(training_generator, epochs=3)

    return model
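A hypothetical call site; the directory layout is an assumption (one subfolder per class, two classes for the binary head):

data_dir = 'data/train'  # assumed layout: data/train/<class_name>/*.jpg
model = create_model()   # builds, trains for 3 epochs, and returns the model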
Example #7
    def _make_layers(self):
        # Create the model
        model = Sequential()

        model.add(
            Conv2D(32,
                   kernel_size=(3, 3),
                   activation='relu',
                   input_shape=(48, 48, 1)))
        model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(Flatten())
        model.add(Dense(1024, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(7, activation='softmax'))
        return model
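_make_layers only builds the 48x48, 7-class network; it never compiles it. A hedged helper showing one plausible compile step (the optimizer and learning rate are assumptions; the original class presumably compiles the model elsewhere):

def compile_emotion_model(model):
    # Hypothetical helper with assumed hyperparameters.
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(learning_rate=1e-4),
                  metrics=['accuracy'])
    return model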
Example #8
    datagen = ImageDataGenerator(rescale=1. / 255)
    img_width = 120
    img_height = 120
    batch_size = 10
    train_generator = datagen.flow_from_directory(directory=id_path,
                                                  classes=classes,
                                                  target_size=(img_width,
                                                               img_height),
                                                  batch_size=batch_size,
                                                  color_mode="grayscale",
                                                  class_mode="sparse")
    model = Sequential()
    model.add(InputLayer((120, 120, 1)))
    model.add(
        Conv2D(32, (3, 3),
               padding="same",
               activation="relu",
               kernel_initializer="he_uniform"))
    model.add(Conv2D(64, 3, padding="same", activation="relu"))
    model.add(Conv2D(128, 3, padding="same", activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPool2D(2, 2))
    model.add(Dropout(0.2))

    model.add(
        Conv2D(256,
               3,
               padding="same",
               activation="relu",
               kernel_initializer="he_uniform"))
    model.add(Conv2D(256, 3, padding="same", activation="relu"))
    model.add(BatchNormalization())
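The snippet breaks off before the classifier head. A hedged sketch of one way it could continue, consistent with class_mode="sparse" above (layer sizes and epoch count are assumptions):

    # Hypothetical continuation of the truncated snippet.
    model.add(MaxPool2D(2, 2))
    model.add(Flatten())
    model.add(Dense(256, activation="relu"))
    model.add(Dropout(0.5))
    model.add(Dense(len(classes), activation="softmax"))
    model.compile(loss="sparse_categorical_crossentropy",  # matches class_mode="sparse"
                  optimizer="adam",
                  metrics=["accuracy"])
    model.fit(train_generator, epochs=10)  # assumed epoch count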
Example #9
# Architecture driven by hyperparameter lists defined elsewhere: filters,
# rate_conv, bn_conv for the conv blocks; units, rate, bn for the dense head.
model = Sequential()
model.add(Input(shape=input_shape))

if use_conv == 1:
    x_train = x_train.reshape(len(x_train), img_rows * img_cols, 1)
    x_test = x_test.reshape(len(x_test), img_rows * img_cols, 1)

for k in range(3):
    if filters[k] > 0:
        if rate_conv[k] > 0:
            model.add(Dropout(rate_conv[k]))
        if use_conv == 1:
            model.add(Conv1D(filters[k], kernel_size=4, activation='relu'))
            model.add(MaxPooling1D(pool_size=3, strides=1, padding='same'))
        else:
            model.add(Conv2D(filters[k], kernel_size=(4, 4), activation='relu'))
            model.add(MaxPooling2D(pool_size=(3, 3), strides=1, padding='same'))
        if bn_conv[k]:
            model.add(BatchNormalization())

model.add(Flatten())

for k in range(3):
    if units[k] > 0:
        if rate[k] > 0:
            model.add(Dropout(rate[k]))
        model.add(Dense(units[k], activation='relu'))
        if bn[k]:
            model.add(BatchNormalization())

#model.add(Dropout(0.2))
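For reference, a hypothetical set of the hyperparameter lists the loops above consume (all values invented to illustrate the encoding; a 0 entry disables that block):

use_conv = 0                    # 0: Conv2D path, 1: flatten to 1D and use Conv1D
filters = [32, 64, 0]           # third conv block disabled
rate_conv = [0.0, 0.25, 0.0]    # dropout rate applied before each conv block
bn_conv = [True, True, False]   # batch norm after each conv block
units = [256, 0, 0]             # a single dense layer
rate = [0.3, 0.0, 0.0]
bn = [False, False, False]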
Example #10
x_train = np.array(x_train, dtype='float').reshape(
    (len(x_train), 64, 64, 1)) / 255.0
x_test = np.array(x_test, dtype='float').reshape(
    (len(x_test), 64, 64, 1)) / 255.0
y_train = keras.utils.to_categorical(y_train, num_classes=6, dtype='uint8')
y_test = keras.utils.to_categorical(y_test, num_classes=6, dtype='uint8')

model = Sequential()
# model.add(Flatten(input_shape=(64, 64, 1)))
# model.add(Dense(2048, input_shape=(4096, ) ,activation='relu'))
# model.add(Dense(512 ,activation='relu'))
# model.add(Dense(6 ,activation='softmax'))

model.add(
    Conv2D(4,
           kernel_size=(5, 5),
           strides=(1, 1),
           padding='same',
           activation='relu',
           input_shape=(64, 64, 1)))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dense(6, activation='softmax'))

model.summary()
# With one-hot targets and a softmax output, categorical_crossentropy is the
# usual loss; 'mse' will still train, just less effectively.
model.compile(optimizer='Adam', loss='mse', metrics=['accuracy'])

datagen = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1)

datagen.fit(x_train)

history = model.fit(datagen.flow(x_train, y_train, batch_size=128),
                    epochs=10)  # assumed epoch count; fit_generator is deprecated in favor of fit
Example #11
    def create(self):

        inputShape = self.inputShape
        outputShape = self.outputShape

        baseModel = MobileNetV2(
            include_top=False,  # weights=None,
            alpha=1.0,
            input_shape=(inputShape[0], inputShape[1], 3))

        # Activity regularization is currently disabled; the commented values
        # (0.001, 0.0001) were earlier experiments.
        l1 = 0
        l2 = 0

        baseNeurons = 128
        dropout = 0.25

        # 28x28 feature map from the backbone (for a 224x224 input).
        out28 = baseModel.get_layer('block_6_expand_BN').output

        out = Conv2D(baseNeurons, (1, 1),
                     padding='same',
                     activity_regularizer=regularizers.l1_l2(l1, l2))(out28)
        out = BatchNormalization()(out)
        out = Activation('relu')(out)

        out = Dropout(dropout)(out)

        out = Conv2D(baseNeurons * 2, (3, 3),
                     strides=(2, 2),
                     padding='same',
                     activity_regularizer=regularizers.l1_l2(l1, l2))(out)
        out = BatchNormalization()(out)
        out = Activation('relu')(out)

        out = Dropout(dropout)(out)

        out = Conv2D(baseNeurons, (1, 1),
                     padding='same',
                     activity_regularizer=regularizers.l1_l2(l1, l2))(out)
        out = BatchNormalization()(out)
        out = Activation('relu')(out)

        out = Dropout(dropout)(out)

        out = Conv2D(baseNeurons * 2, (3, 3),
                     strides=(2, 2),
                     padding='same',
                     activity_regularizer=regularizers.l1_l2(l1, l2))(out)
        out = BatchNormalization()(out)
        out72 = Activation('relu')(out)

        # 14x14 feature map from deeper in the backbone.
        out14 = baseModel.get_layer('block_13_expand_BN').output

        out = Conv2D(baseNeurons, (1, 1),
                     padding='same',
                     activity_regularizer=regularizers.l1_l2(l1, l2))(out14)
        out = BatchNormalization()(out)
        out = Activation('relu')(out)

        out = Conv2D(baseNeurons * 2, (3, 3),
                     strides=(2, 2),
                     padding='same',
                     activity_regularizer=regularizers.l1_l2(l1, l2))(out)
        out = BatchNormalization()(out)
        out7 = Activation('relu')(out)

        out = baseModel.get_layer('block_16_project_BN').output
        #         out = baseModel.get_layer('out_relu').output

        # out, out7, and out72 are all 7x7 by now; merge along the channel axis.
        out = Concatenate(axis=3)([out, out7, out72])

        out = Conv2D(baseNeurons * 1, (1, 1),
                     padding='valid',
                     activity_regularizer=regularizers.l1_l2(l1, l2))(out)
        out = BatchNormalization()(out)
        out = Activation('relu')(out)

        out = Dropout(dropout)(out)

        out = Conv2D(baseNeurons * 2, (3, 3),
                     padding='same',
                     activity_regularizer=regularizers.l1_l2(l1, l2))(out)
        out = BatchNormalization()(out)
        out = Activation('relu')(out)

        out = Dropout(dropout)(out)

        out = Conv2D(baseNeurons * 1, (1, 1),
                     padding='valid',
                     activity_regularizer=regularizers.l1_l2(l1, l2))(out)
        out = BatchNormalization()(out)
        out = Activation('relu')(out)

        out = Dropout(dropout)(out)

        out = Conv2D(baseNeurons * 2, (3, 3),
                     padding='same',
                     activity_regularizer=regularizers.l1_l2(l1, l2))(out)
        out = BatchNormalization()(out)
        out = Activation('relu')(out)

        out = Conv2D(5, (1, 1),
                     padding='valid',
                     activity_regularizer=regularizers.l1_l2(l1, l2))(out)

        model = Model(inputs=baseModel.input, outputs=out)

        # Freeze the MobileNetV2 backbone so only the new head trains.
        for layer in baseModel.layers:
            layer.trainable = False
        model.summary()
        #         baseModel.summary()
        return model
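Only the create method is shown. A hedged sketch of a minimal owner class and call site (the class name, input resolution, and compile settings are all assumptions, and it presumes create() is reachable at module scope):

class HeadBuilder:
    # Hypothetical owner of the create() method above.
    def __init__(self, inputShape, outputShape):
        self.inputShape = inputShape
        self.outputShape = outputShape

    create = create  # assumes create() is available at module scope

builder = HeadBuilder(inputShape=(224, 224, 3), outputShape=(7, 7, 5))
model = builder.create()
model.compile(optimizer=Adam(learning_rate=1e-4), loss='mse')  # assumed settings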