Example #1
reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                              factor=0.1,
                              patience=REDUCE_LR_PATIENCE,
                              verbose=0,
                              mode='auto',
                              epsilon=0.0001,
                              cooldown=0,
                              min_lr=0)

# Create a LossHistory instance
history = LossHistory()

# compile
model.compile(optimizer=Adam(lr=LEARNING_RATE),
              loss='binary_crossentropy',
              metrics=['accuracy'])

# fit
model.fit_generator(train_generator,
                    steps_per_epoch=train_generator.samples // BATCH_SIZE,
                    epochs=EPOCHS,
                    validation_data=valid_generator,
                    validation_steps=valid_generator.samples // BATCH_SIZE,
                    callbacks=[check_point, early_stopping, history])

# Plot the loss and accuracy curves (per batch and per epoch)
history.loss_plot('batch',
                  os.path.join(RESULT_PATH, 'inception_v1_loss_batch.png'))
history.acc_plot('batch',
                 os.path.join(RESULT_PATH, 'inception_v1_acc_batch.png'))
history.loss_plot('epoch',
                  os.path.join(RESULT_PATH, 'inception_v1_loss_epoch.png'))
history.acc_plot('epoch',
                 os.path.join(RESULT_PATH, 'inception_v1_acc_epoch.png'))
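
All four examples rely on a custom LossHistory callback whose definition is not shown on this page. A minimal sketch consistent with how it is called here (every internal detail below is an assumption, not the original class) might be:

# Hedged sketch of the assumed LossHistory callback: it records loss and
# accuracy per batch and per epoch and saves a matplotlib plot to save_path.
import matplotlib.pyplot as plt
from keras.callbacks import Callback

class LossHistory(Callback):
    def on_train_begin(self, logs=None):
        self.losses = {'batch': [], 'epoch': []}
        self.accuracy = {'batch': [], 'epoch': []}

    def on_batch_end(self, batch, logs=None):
        logs = logs or {}
        self.losses['batch'].append(logs.get('loss'))
        self.accuracy['batch'].append(logs.get('acc'))

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        self.losses['epoch'].append(logs.get('loss'))
        self.accuracy['epoch'].append(logs.get('acc'))

    def _plot(self, data, x_type, ylabel, save_path):
        plt.figure()
        plt.plot(data[x_type])
        plt.xlabel(x_type)
        plt.ylabel(ylabel)
        plt.savefig(save_path)
        plt.close()

    def loss_plot(self, x_type, save_path):
        self._plot(self.losses, x_type, 'loss', save_path)

    def acc_plot(self, x_type, save_path):
        self._plot(self.accuracy, x_type, 'accuracy', save_path)
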
Example #2
def mergeFinetuneModel():

    X_train = []
    X_valid = []

    filenames = [
        os.path.join(OUTPUT_PATH, 'inceptionv3-finetune-output.hdf5'),
        os.path.join(OUTPUT_PATH, 'resnet50-finetune-output.hdf5'),
        os.path.join(OUTPUT_PATH, 'xception-finetune-output.hdf5'),
        os.path.join(OUTPUT_PATH, 'vgg16-finetune-output.hdf5')
    ]

    for filename in filenames:
        with h5py.File(filename, 'r') as h:
            X_train.append(np.array(h['X_train']))
            X_valid.append(np.array(h['X_val']))
            y_train = np.array(h['y_train'])
            y_valid = np.array(h['y_val'])
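            # note: y_train/y_valid are overwritten on each pass; the labels
            # are assumed identical across the four feature files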

    for x in X_train:
        print(x.shape)

    for x in X_valid:
        print(x.shape)

    X_train = np.concatenate(X_train, axis=1)
    X_valid = np.concatenate(X_valid, axis=1)

    # check
    print('X_train shape:', X_train.shape)
    print('X_valid shape:', X_valid.shape)
    print('y_train shape:', y_train.shape)
    print('y_valid shape:', y_valid.shape)

    X_train, y_train = shuffle(X_train, y_train)
    y_train = to_categorical(y_train)
    X_valid, y_valid = shuffle(X_valid, y_valid)
    y_valid = to_categorical(y_valid)

    print('X_train shape:', X_train.shape)
    print('X_valid shape:', X_valid.shape)
    print('y_train shape:', y_train.shape)
    print('y_valid shape:', y_valid.shape)

    inputs = Input(X_train.shape[1:])
    x = Dense(2048, activation='relu')(inputs)
    x = Dropout(DROP_RATE)(x)
    x = Dense(1024, activation='relu')(x)
    predictions = Dense(CLASS_NUM, activation='softmax')(x)
    model = Model(inputs, predictions)
    check_point = ModelCheckpoint(filepath=os.path.join(
        MODEL_PATH, 'merge-model-01.hdf5'),
                                  verbose=1,
                                  save_best_only=True)
    early_stopping = EarlyStopping(monitor='val_loss',
                                   min_delta=0.0001,
                                   patience=EARLY_STOPPING_PATIENCE,
                                   verbose=1,
                                   mode='auto')
    # Create a LossHistory instance
    history = LossHistory()

    model.compile(loss='binary_crossentropy',
                  optimizer=Adam(lr=LEARNING_RATE),
                  metrics=['accuracy'])
    model.fit(X_train,
              y_train,
              epochs=EPOCHS,
              batch_size=BATCH_SIZE,
              validation_data=(X_valid, y_valid),
              callbacks=[early_stopping, check_point, history])

    # Plot the loss and accuracy curves (per batch and per epoch)
    history.loss_plot('batch',
                      os.path.join(RESULT_PATH, 'merge_all_loss_batch.png'))
    history.acc_plot('batch',
                     os.path.join(RESULT_PATH, 'merge_all_acc_batch.png'))
    history.loss_plot('epoch',
                      os.path.join(RESULT_PATH, 'merge_all_loss_epoch.png'))
    history.acc_plot('epoch',
                     os.path.join(RESULT_PATH, 'merge_all_acc_epoch.png'))
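
The four *-finetune-output.hdf5 files consumed above come from a feature-extraction step that is not shown. A plausible writer, assuming each fine-tuned network has already produced numpy feature arrays, could look like this (the dataset names match the reads in mergeFinetuneModel; everything else is an assumption):

# Hedged sketch: write bottleneck features in the layout mergeFinetuneModel
# expects. The arrays themselves come from an assumed extraction step.
import h5py

def save_features(filename, X_train, y_train, X_val, y_val):
    with h5py.File(filename, 'w') as h:
        h.create_dataset('X_train', data=X_train)
        h.create_dataset('y_train', data=y_train)
        h.create_dataset('X_val', data=X_val)
        h.create_dataset('y_val', data=y_val)
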
Example #3
def create_model():

    train_gen = ImageDataGenerator()
    valid_gen = ImageDataGenerator()
    train_generator = train_gen.flow_from_directory(TRAIN_DATA_PATH,
                                                    IMAGE_SIZE,
                                                    shuffle=True,
                                                    batch_size=BATCH_SIZE,
                                                    color_mode='grayscale')
    valid_generator = valid_gen.flow_from_directory(VALID_DATA_PATH,
                                                    IMAGE_SIZE,
                                                    batch_size=BATCH_SIZE,
                                                    color_mode='grayscale')

    inputs = Input((*IMAGE_SIZE, 1))
    x_input = Lambda(my_preprocess)(inputs)

    # block1
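    # (blocks 1-4 are VGG-style stacks of 3x3 convolutions;
    #  blocks 1-3 each end in 2x2 max pooling)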
    x = Conv2D(64, (3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               name='block1_conv1')(x_input)
    x = Conv2D(64, (3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               name='block1_conv2')(x)
    x = MaxPooling2D((2, 2),
                     strides=(2, 2),
                     padding='same',
                     name='block1_pool')(x)

    # block2
    x = Conv2D(128, (3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               name='block2_conv1')(x)
    x = Conv2D(128, (3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               name='block2_conv2')(x)
    x = MaxPooling2D((2, 2),
                     strides=(2, 2),
                     padding='same',
                     name='block2_pool')(x)

    # block3
    x = Conv2D(256, (3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               name='block3_conv1')(x)
    x = Conv2D(256, (3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               name='block3_conv2')(x)
    x = Conv2D(256, (3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               name='block3_conv3')(x)
    x = MaxPooling2D((2, 2),
                     strides=(2, 2),
                     padding='same',
                     name='block3_pool')(x)

    # side1
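    # (Xception-style auxiliary branch: separable convolutions with batch
    #  normalization, reduced to a vector by global average pooling)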
    x_side1 = SeparableConv2D(512, (3, 3),
                              padding='same',
                              use_bias=False,
                              name='side1_sepconv1')(x)
    x_side1 = BatchNormalization(name='side1_bn1')(x_side1)
    x_side1 = Activation('relu', name='side1_act1')(x_side1)
    x_side1 = SeparableConv2D(512, (3, 3),
                              padding='same',
                              use_bias=False,
                              name='side1_sepconv2')(x_side1)
    x_side1 = BatchNormalization(name='side1_bn2')(x_side1)
    x_side1 = Activation('relu', name='side1_act2')(x_side1)
    x_side1 = MaxPooling2D((2, 2),
                           strides=(2, 2),
                           padding='same',
                           name='side1_pool')(x_side1)
    x_side1 = SeparableConv2D(728, (3, 3),
                              padding='same',
                              use_bias=False,
                              name='side1_sepconv3')(x_side1)
    x_side1 = BatchNormalization(name='side1_bn3')(x_side1)
    x_side1 = Activation('relu', name='side1_act3')(x_side1)
    x_side1 = SeparableConv2D(728, (3, 3),
                              padding='same',
                              activation='relu',
                              name='side1_sepconv4')(x_side1)
    x_side1 = GlobalAveragePooling2D(name='side1_gap')(x_side1)

    # side2
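    # (Inception-style auxiliary branch: three parallel conv paths
    #  concatenated, applied twice, then global average pooling)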
    x_side2_1_1 = Conv2D(256, (1, 1),
                         strides=(1, 1),
                         padding='same',
                         activation='relu',
                         name='side2_1_conv1')(x)
    x_side2_1_2 = Conv2D(256, (1, 1),
                         strides=(1, 1),
                         padding='same',
                         activation='relu',
                         name='side2_2_conv1')(x)
    x_side2_1_2 = Conv2D(256, (3, 3),
                         strides=(1, 1),
                         padding='same',
                         activation='relu',
                         name='side2_2_conv2')(x_side2_1_2)
    x_side2_1_3 = Conv2D(256, (3, 3),
                         strides=(1, 1),
                         padding='same',
                         activation='relu',
                         name='side2_3_conv1')(x)
    x_side2_1_3 = Conv2D(256, (1, 1),
                         strides=(1, 1),
                         padding='same',
                         activation='relu',
                         name='side2_3_conv2')(x_side2_1_3)
    x_side2_1 = keras.layers.concatenate(
        [x_side2_1_1, x_side2_1_2, x_side2_1_3])

    x_side2_2_1 = Conv2D(256, (1, 1),
                         strides=(1, 1),
                         padding='same',
                         activation='relu',
                         name='side3_1_conv1')(x_side2_1)
    x_side2_2_2 = Conv2D(256, (1, 1),
                         strides=(1, 1),
                         padding='same',
                         activation='relu',
                         name='side3_2_conv1')(x_side2_1)
    x_side2_2_2 = Conv2D(256, (3, 3),
                         strides=(1, 1),
                         padding='same',
                         activation='relu',
                         name='side3_2_conv2')(x_side2_2_2)
    x_side2_2_3 = Conv2D(256, (3, 3),
                         strides=(1, 1),
                         padding='same',
                         activation='relu',
                         name='side3_3_conv1')(x_side2_1)
    x_side2_2_3 = Conv2D(256, (1, 1),
                         strides=(1, 1),
                         padding='same',
                         activation='relu',
                         name='side3_3_conv2')(x_side2_2_3)

    x_side2_2 = keras.layers.concatenate(
        [x_side2_2_1, x_side2_2_2, x_side2_2_3])
    x_side2 = GlobalAveragePooling2D(name='side2_gap')(x_side2_2)

    # block4
    x = Conv2D(512, (3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               name='block4_conv1')(x)
    x = Conv2D(512, (3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               name='block4_conv2')(x)
    x = Conv2D(512, (3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               name='block4_conv3')(x)

    x = GlobalAveragePooling2D(name='gap')(x)

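    # fuse the main trunk with both auxiliary branches into one feature vector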
    x = keras.layers.concatenate([x, x_side1, x_side2])

    x = Dropout(DROP_RATE, name='dropout1')(x)
    predictions = Dense(CLASS_NUM, activation='softmax', name='dense1')(x)
    model = Model(inputs=inputs, outputs=predictions)
    model.summary()
    plot_model(model,
               to_file=os.path.join(RESULT_PATH, 'my_model.png'),
               show_shapes=True)

    check_point = ModelCheckpoint(monitor='val_loss',
                                  filepath=os.path.join(
                                      MODEL_PATH, MODEL_NAME),
                                  verbose=1,
                                  save_best_only=True,
                                  save_weights_only=False,
                                  mode='auto')

    # early stopping
    early_stopping = EarlyStopping(monitor='val_loss',
                                   patience=EARLY_STOPPING_PATIENCE,
                                   verbose=0,
                                   mode='auto')

    # reduce lr
    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.1,
                                  patience=REDUCE_LR_PATIENCE,
                                  verbose=0,
                                  mode='auto',
                                  epsilon=0.0001,
                                  cooldown=0,
                                  min_lr=0)

    # Create a LossHistory instance
    history = LossHistory()

    # compile
    model.compile(optimizer=Adam(lr=LEARNING_RATE),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    # fit
    model.fit_generator(train_generator,
                        steps_per_epoch=train_generator.samples // BATCH_SIZE,
                        epochs=EPOCHS,
                        validation_data=valid_generator,
                        validation_steps=valid_generator.samples // BATCH_SIZE,
                        callbacks=[check_point, early_stopping, history])

    # Plot the loss and accuracy curves (per batch and per epoch)
    history.loss_plot('batch', os.path.join(RESULT_PATH, 'my_loss_batch.png'))
    history.acc_plot('batch', os.path.join(RESULT_PATH, 'my_acc_batch.png'))
    history.loss_plot('epoch', os.path.join(RESULT_PATH, 'my_loss_epoch.png'))
    history.acc_plot('epoch', os.path.join(RESULT_PATH, 'my_acc_epoch.png'))
    K.clear_session()
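
Examples #3 and #4 wrap the input tensor in Lambda(my_preprocess), but my_preprocess itself is not shown. For 8-bit grayscale input a plain rescale is a common choice; the body below is an assumed placeholder, not the original code:

# Hypothetical my_preprocess: rescale 8-bit pixel values to [-1, 1].
def my_preprocess(x):
    return x / 127.5 - 1.0
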
Example #4
def finetuneModel():
    train_gen = ImageDataGenerator()
    valid_gen = ImageDataGenerator()
    train_generator = train_gen.flow_from_directory(TRAIN_DATA_PATH,
                                                    IMAGE_SIZE,
                                                    shuffle=True,
                                                    batch_size=BATCH_SIZE,
                                                    color_mode='grayscale')
    valid_generator = valid_gen.flow_from_directory(VALID_DATA_PATH,
                                                    IMAGE_SIZE,
                                                    batch_size=BATCH_SIZE,
                                                    color_mode='grayscale')
    inputs = Input((*IMAGE_SIZE, 1))
    x = Lambda(my_preprocess)(inputs)
    base_model = InceptionV3(input_tensor=x, weights=None, include_top=False)
    x = GlobalAveragePooling2D(name='my_global_average_pooling_layer_1')(
        base_model.output)
    x = Dropout(DROP_RATE, name='my_dropout_layer_1')(x)
    predictions = Dense(CLASS_NUM,
                        activation='softmax',
                        name='my_dense_layer_1')(x)
    model = Model(base_model.input, predictions)
    plot_model(model,
               to_file=os.path.join(RESULT_PATH, 'inceptionv3.png'),
               show_shapes=True)

    # freeze the first INCEPTIONV3_NO_TRAINABLE_LAYERS layers, train the rest
    for layer in model.layers[:INCEPTIONV3_NO_TRAINABLE_LAYERS]:
        layer.trainable = False
    for layer in model.layers[INCEPTIONV3_NO_TRAINABLE_LAYERS:]:
        layer.trainable = True
    model.summary()

    # check: print each layer's index, name, and trainable flag
    for i, layer in enumerate(model.layers):
        print('{}: {}, {}'.format(i, layer.name, layer.trainable))
    print('=' * 100)
    layers = zip(range(len(model.layers)), [x.name for x in model.layers])
    for layer_num, layer_name in layers:
        print('{}: {}'.format(layer_num + 1, layer_name))

    # check point
    check_point = ModelCheckpoint(monitor='val_loss',
                                  filepath=os.path.join(
                                      MODEL_PATH, MODEL_NAME),
                                  verbose=1,
                                  save_best_only=True,
                                  save_weights_only=False,
                                  mode='auto')

    # early stopping
    early_stopping = EarlyStopping(monitor='val_loss',
                                   patience=EARLY_STOPPING_PATIENCE,
                                   verbose=0,
                                   mode='auto')

    # Create a LossHistory instance
    history = LossHistory()

    # compile
    model.compile(optimizer=Adam(lr=LEARNING_RATE),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    # fit
    model.fit_generator(train_generator,
                        steps_per_epoch=train_generator.samples // BATCH_SIZE,
                        epochs=EPOCHS,
                        validation_data=valid_generator,
                        validation_steps=valid_generator.samples // BATCH_SIZE,
                        callbacks=[check_point, early_stopping, history])

    # Plot the loss and accuracy curves (per batch and per epoch)
    history.loss_plot('batch',
                      os.path.join(RESULT_PATH, 'inceptionv3_loss_batch.png'))
    history.acc_plot('batch',
                     os.path.join(RESULT_PATH, 'inceptionv3_acc_batch.png'))
    history.loss_plot('epoch',
                      os.path.join(RESULT_PATH, 'inceptionv3_loss_epoch.png'))
    history.acc_plot('epoch',
                     os.path.join(RESULT_PATH, 'inceptionv3_acc_epoch.png'))
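
All four examples reference module-level constants defined elsewhere in the repository. The names below are the ones actually used on this page; every value is an illustrative placeholder, not taken from the source:

# Assumed configuration block; all values are placeholders.
TRAIN_DATA_PATH = 'data/train'
VALID_DATA_PATH = 'data/valid'
OUTPUT_PATH = 'output'
MODEL_PATH = 'models'
RESULT_PATH = 'results'
MODEL_NAME = 'model-01.hdf5'
IMAGE_SIZE = (224, 224)
BATCH_SIZE = 32
EPOCHS = 50
LEARNING_RATE = 1e-4
DROP_RATE = 0.5
CLASS_NUM = 2
EARLY_STOPPING_PATIENCE = 5
REDUCE_LR_PATIENCE = 3
INCEPTIONV3_NO_TRAINABLE_LAYERS = 249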