Example #1
def scratchVGG16_Model():
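    # Assumed imports, not shown in this snippet: keras; Conv2D, MaxPooling2D,
    # Flatten, Dense, Dropout from keras.layers; optimizers from keras;
    # VGG16 from keras.applications; and the project-local helper module.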
    data = helper.prepDataforCNN(numChannel=3, feat_norm=True)
    trainX = data["trainX"]
    valdX = data["valdX"]
    trainY = data["trainY"]
    valdY = data["valdY"]

    _, row, col, channel = trainX.shape
    digLen = 5  # number-of-digits classes, including length 0
    numDigits = 11
    epochs = 50
    batch_size = 64

    vgg16Model = VGG16(include_top=False, weights=None)
    vgg16Model.summary()
    ptInput = keras.Input(shape=(row, col, channel), name='vgg16Scratch')
    vgg16 = vgg16Model(ptInput)

    # Hand-built equivalent of the VGG16 convolutional base, kept for reference:
    # vgg16 = Conv2D(64,(3, 3), activation ='relu', padding='same')(input)
    # vgg16 = Conv2D(64,(3, 3), activation ='relu', padding='same')(vgg16)
    # vgg16 = MaxPooling2D(pool_size=(2, 2))(vgg16)
    #
    # vgg16 = Conv2D(128,(3, 3), activation ='relu', padding='same')(vgg16)
    # vgg16 = Conv2D(128,(3, 3), activation ='relu', padding='same')(vgg16)
    # vgg16 = MaxPooling2D(pool_size=(2, 2))(vgg16)
    #
    # vgg16 = Conv2D(256,(3, 3), activation ='relu', padding='same')(vgg16)
    # vgg16 = Conv2D(256,(3, 3), activation ='relu', padding='same')(vgg16)
    # vgg16 = MaxPooling2D(pool_size=(2, 2))(vgg16)
    #
    # vgg16 = Conv2D(512,(3, 3), activation ='relu', padding='same')(vgg16)
    # vgg16 = Conv2D(512,(3, 3), activation ='relu', padding='same')(vgg16)
    # vgg16 = Conv2D(512,(3, 3), activation ='relu', padding='same')(vgg16)
    # vgg16 = MaxPooling2D(pool_size=(2, 2))(vgg16)
    #
    # vgg16 = Conv2D(512,(3, 3), activation ='relu', padding='same')(vgg16)
    # vgg16 = Conv2D(512,(3, 3), activation ='relu', padding='same')(vgg16)
    # vgg16 = Conv2D(512,(3, 3), activation ='relu', padding='same')(vgg16)
    # vgg16 = MaxPooling2D(pool_size=(2, 2))(vgg16)

    vgg16 = Flatten()(vgg16)
    vgg16 = Dense(512, activation='relu')(vgg16)
    vgg16 = Dense(512, activation='relu')(vgg16)
    # vgg16 = Dense(1000, activation='relu')(vgg16)
    vgg16 = Dropout(0.5)(vgg16)

    # Six softmax heads: digit count ('num'), four per-position digit
    # classifiers ('dig1'-'dig4', 11 classes each), and a binary head ('nC').
    numd_SM = Dense(digLen, activation='softmax', name='num')(vgg16)
    dig1_SM = Dense(numDigits, activation='softmax', name='dig1')(vgg16)
    dig2_SM = Dense(numDigits, activation='softmax', name='dig2')(vgg16)
    dig3_SM = Dense(numDigits, activation='softmax', name='dig3')(vgg16)
    dig4_SM = Dense(numDigits, activation='softmax', name='dig4')(vgg16)
    numB_SM = Dense(2, activation='softmax', name='nC')(vgg16)
    out = [numd_SM, dig1_SM, dig2_SM, dig3_SM, dig4_SM, numB_SM]

    vgg16 = keras.Model(inputs=ptInput, outputs=out)

    callback = []
    optim = optimizers.Adam(lr=0.001,
                            beta_1=0.9,
                            beta_2=0.999,
                            epsilon=None,
                            decay=0.0,
                            amsgrad=True)

    checkpointer = keras.callbacks.ModelCheckpoint(
        filepath='saved_models/vgg16.classifier.hdf5',
        monitor='loss',
        save_best_only=True,
        verbose=2)
    reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='loss',
                                                  factor=0.1,
                                                  verbose=1,
                                                  patience=3,
                                                  cooldown=0,
                                                  min_lr=0.000001)
    # tb = keras.callbacks.TensorBoard(log_dir='logs', write_graph=True, write_images=True)
    es = keras.callbacks.EarlyStopping(monitor='val_loss',
                                       min_delta=0.00000001,
                                       patience=5,
                                       verbose=1,
                                       mode='auto')
    callback.append(es)
    callback.append(checkpointer)
    callback.append(reduce_lr)
    vgg16.summary()

    vgg16.compile(loss='sparse_categorical_crossentropy',
                  optimizer=optim,
                  metrics=['accuracy'])

    vgg16History = vgg16.fit(x=trainX,
                             y=trainY,
                             batch_size=batch_size,
                             epochs=epochs,
                             verbose=1,
                             shuffle=True,
                             validation_data=(valdX, valdY),
                             callbacks=callback)

    # list the metrics recorded in the training history
    print(vgg16History.history.keys())
    modName = 'vgg16_Scratch'
    createSaveMetricsPlot(vgg16History, modName, data, vgg16)
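
With six named softmax heads trained under sparse_categorical_crossentropy, trainY and valdY must each supply one integer-label array per output. A minimal sketch of the assumed label format, keyed by the output-layer names used above (prepDataforCNN's actual structure may differ):

import numpy as np

# Hypothetical labels for two images reading "23" and "7"; class 10 in the
# digit heads is assumed to mean "no digit at this position".
trainY = {
    'num':  np.array([2, 1]),    # number of digits in each image
    'dig1': np.array([2, 7]),    # first digit
    'dig2': np.array([3, 10]),   # second digit (absent for "7")
    'dig3': np.array([10, 10]),
    'dig4': np.array([10, 10]),
    'nC':   np.array([1, 1]),    # label for the binary 'nC' head
}
# Keras matches dict keys to output-layer names, so
# model.fit(x=trainX, y=trainY, ...) trains all six heads jointly.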
Example #2
def preTrainedVGG16_Model():
    data = helper.prepDataforCNN(numChannel=3, feat_norm=True)
    trainX = data["trainX"]
    valdX = data["valdX"]
    trainY = data["trainY"]
    valdY = data["valdY"]

    _, row, col, channel = trainX.shape
    digLen = 5
    numDigits = 11
    epochs = 50
    batch_size = 64
    optim = optimizers.Adam(lr=0.001,
                            beta_1=0.9,
                            beta_2=0.999,
                            epsilon=None,
                            decay=0.0,
                            amsgrad=True)

    # Convolutional base initialized with ImageNet weights; every layer
    # stays trainable here, so the whole network is fine-tuned end to end.
    preTrainModel = VGG16(include_top=False, weights='imagenet')
    preTrainModel.summary()
    ptInput = keras.Input(shape=(row, col, channel), name='inputVGGPreTrain')
    pt_vgg16 = preTrainModel(ptInput)

    Mout = Flatten(name='flatten')(pt_vgg16)
    Mout = Dense(1024, activation='relu', name='FC1_1024')(Mout)
    Mout = Dense(1024, activation='relu', name='FC2_1024')(Mout)
    # Mout = Dense(512,  activation='relu', name = 'FC2_1024')(Mout)
    # Mout = Dropout(0.5)(Mout)

    numd_SM = Dense(digLen, activation='softmax', name='num')(Mout)
    dig1_SM = Dense(numDigits, activation='softmax', name='dig1')(Mout)
    dig2_SM = Dense(numDigits, activation='softmax', name='dig2')(Mout)
    dig3_SM = Dense(numDigits, activation='softmax', name='dig3')(Mout)
    dig4_SM = Dense(numDigits, activation='softmax', name='dig4')(Mout)
    numB_SM = Dense(2, activation='softmax', name='nC')(Mout)
    out = [numd_SM, dig1_SM, dig2_SM, dig3_SM, dig4_SM, numB_SM]

    vggPreTrain = keras.Model(inputs=ptInput, outputs=out)

    vggPreTrain.compile(loss='sparse_categorical_crossentropy',
                        optimizer=optim,
                        metrics=['accuracy'])
    vggPreTrain.summary()

    callback = []
    checkpointer = keras.callbacks.ModelCheckpoint(
        filepath='saved_models/VGGPreTrained.classifier.hdf5',
        monitor='loss',
        save_best_only=True,
        verbose=2)

    reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='loss',
                                                  factor=0.1,
                                                  verbose=1,
                                                  patience=4,
                                                  cooldown=1,
                                                  min_lr=0.0001)
    es = keras.callbacks.EarlyStopping(monitor='loss',
                                       min_delta=0.000001,
                                       patience=5,
                                       verbose=1,
                                       mode='auto')
    callback.append(es)
    callback.append(checkpointer)
    callback.append(reduce_lr)

    vggHistory = vggPreTrain.fit(x=trainX,
                                 y=trainY,
                                 batch_size=batch_size,
                                 epochs=epochs,
                                 verbose=1,
                                 shuffle=True,
                                 validation_data=(valdX, valdY),
                                 callbacks=callback)

    # list the metrics recorded in the training history
    print(vggHistory.history.keys())
    modName = 'vgg16_PreTrain'
    createSaveMetricsPlot(vggHistory, modName, data, vggPreTrain)
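
This example fine-tunes every layer of the ImageNet-initialized base. A common variant freezes the convolutional base so that only the new dense heads learn; a minimal sketch against the same preTrainModel and optim as above (trainability must be changed before compiling, so recompile afterwards):

# Keep the ImageNet convolutional weights fixed.
for layer in preTrainModel.layers:
    layer.trainable = False

# Recompile so the trainability change takes effect.
vggPreTrain.compile(loss='sparse_categorical_crossentropy',
                    optimizer=optim,
                    metrics=['accuracy'])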
Example #3
def designedCNN_Model():
    data = helper.prepDataforCNN(numChannel=3, feat_norm=True)
    trainX = data["trainX"]
    valdX = data["valdX"]
    trainY = data["trainY"]
    valdY = data["valdY"]

    _, row, col, channel = trainX.shape
    digLen = 5  # including category 0
    numDigits = 11
    epochs = 75
    batch_size = 64

    optim = optimizers.Adam(lr=0.001,
                            beta_1=0.9,
                            beta_2=0.999,
                            epsilon=None,
                            decay=0.0,
                            amsgrad=True)
    # optim = optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=None, decay=0.0)
    # TF1-style GPU setup: grow GPU memory on demand instead of pre-allocating
    # it all, and hand the session to Keras (creating the session alone would
    # have no effect on Keras).
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    keras.backend.set_session(tf.Session(config=config))

    inputs = keras.Input(shape=(row, col, channel), name='customModel')
    M = Conv2D(16, (3, 3), activation='relu', padding='same',
               name='conv_16_1')(inputs)
    M = Conv2D(16, (3, 3), activation='relu', padding='same',
               name='conv_16_2')(M)
    M = BatchNormalization(axis=-1)(M)
    M = MaxPooling2D(pool_size=(2, 2))(M)

    M = Conv2D(32, (3, 3),
               activation='relu',
               padding='same',
               name='conv2_32_01')(M)
    M = Conv2D(32, (3, 3),
               activation='relu',
               padding='same',
               name='conv2_32_02')(M)
    M = BatchNormalization(axis=-1)(M)
    M = MaxPooling2D(pool_size=(2, 2))(M)
    M = Dropout(0.5)(M)

    M = Conv2D(48, (3, 3),
               activation='relu',
               padding='same',
               name='conv2_48_01')(M)
    M = Conv2D(48, (3, 3),
               activation='relu',
               padding='same',
               name='conv2_48_02')(M)
    M = BatchNormalization(axis=-1)(M)
    M = MaxPooling2D(pool_size=(2, 2))(M)

    M = Conv2D(64, (3, 3),
               activation='relu',
               padding='same',
               name='conv2_64_1')(M)
    M = Conv2D(64, (3, 3),
               activation='relu',
               padding='same',
               name='conv2_64_2')(M)
    M = Conv2D(64, (3, 3),
               activation='relu',
               padding='same',
               name='conv2_64_3')(M)
    M = BatchNormalization(axis=-1)(M)
    M = MaxPooling2D((2, 2), strides=1)(M)

    M = Conv2D(128,
               kernel_size=(5, 5),
               activation='relu',
               padding='same',
               name='conv2_128_1')(M)
    M = Conv2D(128,
               kernel_size=(5, 5),
               activation='relu',
               padding='same',
               name='conv2_128_2')(M)
    M = Conv2D(128,
               kernel_size=(5, 5),
               activation='relu',
               padding='same',
               name='conv2_128_3')(M)
    M = BatchNormalization(axis=-1)(M)
    M = MaxPooling2D(pool_size=(2, 2), strides=1)(M)

    M = Conv2D(256,
               kernel_size=(5, 5),
               activation='relu',
               padding='same',
               name='conv2_256_1')(M)
    M = Conv2D(256,
               kernel_size=(5, 5),
               activation='relu',
               padding='same',
               name='conv2_256_2')(M)
    M = BatchNormalization(axis=-1)(M)
    M = MaxPooling2D(pool_size=(2, 2), strides=1)(M)
    M = Dropout(0.5)(M)

    M = Conv2D(256, (5, 5),
               activation='relu',
               padding='same',
               name='conv256_1')(M)
    M = Conv2D(256, (5, 5),
               activation='relu',
               padding='same',
               name='conv256_2')(M)
    M = Conv2D(256, (5, 5),
               activation='relu',
               padding='same',
               name='conv256_3')(M)
    # kernel_regularizer=regularizers.l2(0.01),
    # activity_regularizer=regularizers.l1(0.01))(M)
    M = BatchNormalization(axis=-1)(M)
    M = MaxPooling2D((2, 2), strides=1)(M)

    M = Conv2D(512, (5, 5),
               activation='relu',
               padding='same',
               name='conv2_512_1')(M)
    M = Conv2D(512, (5, 5),
               activation='relu',
               padding='same',
               name='conv2_512_2')(M)
    M = BatchNormalization(axis=-1)(M)
    M = MaxPooling2D(pool_size=(2, 2), strides=1)(M)
    M = Dropout(0.25)(M)
    # M = keras.layers.BatchNormalization(axis=-1)(M)

    Mout = Flatten()(M)
    Mout = Dense(2048, activation='relu', name='FC1_2048')(Mout)
    Mout = Dense(1024, activation='relu', name='FC1_1024')(Mout)
    Mout = Dense(1024, activation='relu', name='FC2_1024')(Mout)
    # Mout = Dropout(0.5)(Mout)

    numd_SM = Dense(digLen, activation='softmax', name='num')(Mout)
    dig1_SM = Dense(numDigits, activation='softmax', name='dig1')(Mout)
    dig2_SM = Dense(numDigits, activation='softmax', name='dig2')(Mout)
    dig3_SM = Dense(numDigits, activation='softmax', name='dig3')(Mout)
    dig4_SM = Dense(numDigits, activation='softmax', name='dig4')(Mout)
    numB_SM = Dense(2, activation='softmax', name='nC')(Mout)
    out = [numd_SM, dig1_SM, dig2_SM, dig3_SM, dig4_SM, numB_SM]

    svhnModel = keras.Model(inputs=inputs, outputs=out)

    lr_metric = get_lr_metric(optim)  # reports the current learning rate as a metric
    svhnModel.compile(loss='sparse_categorical_crossentropy',
                      optimizer=optim,
                      metrics=['accuracy', lr_metric])
    reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                  factor=0.1,
                                                  verbose=1,
                                                  patience=2,
                                                  cooldown=1,
                                                  min_lr=0.00001)
    svhnModel.summary()

    callback = []
    checkpointer = keras.callbacks.ModelCheckpoint(
        filepath='saved_models/designedBGRClassifier.hdf5',
        monitor='loss',
        save_best_only=True,
        verbose=2)
    tb = keras.callbacks.TensorBoard(log_dir='logs',
                                     write_graph=True,
                                     batch_size=batch_size,
                                     write_images=True)
    es = keras.callbacks.EarlyStopping(
        monitor='loss',  #'dig1_loss',
        min_delta=0.000001,
        patience=5,
        verbose=1,
        mode='auto')
    callback.append(tb)
    callback.append(es)
    callback.append(checkpointer)
    callback.append(reduce_lr)

    # Alternative: fit on batches with real-time data augmentation via an
    # ImageDataGenerator (assumes `datagen` and augmented splits ctrain/ctrlab,
    # cvald/cvlab are prepared elsewhere):
    # svhnModel.fit_generator(datagen.flow(ctrain, ctrlab, batch_size=batch_size),
    #                         steps_per_epoch=len(ctrain) / batch_size,
    #                         epochs=epochs,
    #                         verbose=1,
    #                         validation_data=(cvald, cvlab),
    #                         callbacks=callback)

    designHist = svhnModel.fit(x=trainX,
                               y=trainY,
                               batch_size=batch_size,
                               epochs=epochs,
                               verbose=1,
                               shuffle=True,
                               validation_data=(valdX, valdY),
                               callbacks=callback)

    # list the metrics recorded in the training history
    print(designHist.history.keys())
    modName = 'customDesign'
    createSaveMetricsPlot(designHist, modName, data, svhnModel)
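
get_lr_metric is called above but not defined in this snippet. It is presumably the common helper that exposes the optimizer's learning rate as a Keras metric, so that ReduceLROnPlateau steps appear in the per-epoch logs; a sketch under that assumption:

def get_lr_metric(optimizer):
    # Wrap the optimizer's learning-rate variable as a Keras metric function.
    def lr(y_true, y_pred):
        return optimizer.lr
    return lr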