# Example 1: VGG16 backbone augmented with self-attention (non-local) and
# channel-attention (squeeze-and-excitation) branches.
import gc

import numpy as np
import keras
from keras import backend as K
from keras.applications.vgg16 import VGG16
from keras.callbacks import (CSVLogger, EarlyStopping, ModelCheckpoint,
                             ReduceLROnPlateau, TensorBoard)
from keras.layers import (BatchNormalization, Dense, Flatten,
                          GlobalAveragePooling2D, Input, concatenate)
from keras.models import Model
from keras.optimizers import SGD, Adam
from sklearn.metrics import confusion_matrix

# Project-local modules (implementations not shown here).
import channel_attention
import non_local
import tools


def Network_config(class_num=4,
                   epoch=200,
                   initial_epoch=0,
                   batch_size=32,
                   train_data=None,
                   train_label=None,
                   test_data=None,
                   test_label=None,
                   fold=0):
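    """Build and train the attention-augmented VGG16 classifier for one fold.

    The VGG16 backbone stays frozen; only the attention branches and the new
    dense head are trained. The best weights (by validation accuracy) are
    checkpointed, and a confusion matrix for the test fold is appended to
    confusion_matrix.txt.
    """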
    adam = Adam(lr=0.005,
                beta_1=0.9,
                beta_2=0.999,
                epsilon=1e-08,
                decay=0.0009)
    sgd = SGD(lr=0.001, momentum=0.9, decay=0.0, nesterov=False)  # defined but unused; Adam is used below

    input_tensor = Input(shape=(224, 224, 3))

    # backbone: ImageNet-pretrained VGG16 convolutional base, classifier head removed
    base_model = VGG16(input_tensor=input_tensor,
                       weights='imagenet',
                       include_top=False)
    base_output = base_model.output

    # self-attention branch
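    # Embedded-Gaussian non-local attention (Wang et al., 2018): each spatial
    # position i produces y_i = sum_j softmax_j(theta(x_i)^T phi(x_j)) g(x_j),
    # so every position can attend to every other position in the feature map.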
    x = non_local.non_local_block(base_output,
                                  intermediate_dim=None,
                                  compression=2,
                                  mode='embedded',
                                  add_residual=False)
    x = BatchNormalization()(x)

    # channel-attention branch
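    # Squeeze-and-excitation (Hu et al., 2018): global-average-pool each channel,
    # pass the descriptor through a 512 -> 512/ratio -> 512 bottleneck ending in
    # a sigmoid, and rescale the input channels by the resulting gates.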
    y = channel_attention.squeeze_excitation_layer(base_output,
                                                   512,
                                                   ratio=4,
                                                   concate=False)
    y = BatchNormalization()(y)

    # fuse backbone, self-attention, and channel-attention features along the channel axis
    x = concatenate([base_output, x], axis=3)
    x = concatenate([x, y], axis=3)

    # pooling head: concatenate globally average-pooled features with the flattened feature map
    gap = GlobalAveragePooling2D()(x)
    x = Flatten()(x)
    x = concatenate([gap, x])
    x = Dense(512, activation='relu')(x)
    x = BatchNormalization()(x)
    x = Dense(512, activation='relu')(x)
    x = BatchNormalization()(x)
    predict = Dense(class_num, activation='softmax')(x)
    model = Model(inputs=input_tensor, outputs=predict)

    # freeze the pretrained backbone so only the attention branches and head train
    for layer in base_model.layers:
        layer.trainable = False

    # (debug) list the model's layer names
    for layer in model.layers:
        print(layer.name)

    model.compile(optimizer=adam,
                  loss='categorical_crossentropy',
                  metrics=[keras.metrics.categorical_accuracy])
    model.summary()

    tools.create_directory('./final/')
    weights_file = ('./final/' + str(fold) +
                    '-weights.{epoch:02d}-{categorical_accuracy:.4f}'
                    '-{val_loss:.4f}-{val_categorical_accuracy:.4f}.h5')
    csv_file = './final/record.csv'
    lr_reducer = ReduceLROnPlateau(monitor='categorical_accuracy',
                                   factor=0.2,
                                   cooldown=0,
                                   patience=2,
                                   min_lr=0.5e-6)
    early_stopper = EarlyStopping(monitor='val_categorical_accuracy',
                                  min_delta=1e-4,
                                  patience=30)

    model_checkpoint = ModelCheckpoint(weights_file,
                                       monitor='val_categorical_accuracy',
                                       save_best_only=True,
                                       verbose=1,
                                       save_weights_only=True,
                                       mode='max')
    # note: with histogram_freq=0, the write_grads/write_images flags have no effect
    tensorboard = TensorBoard(log_dir='./logs/',
                              histogram_freq=0,
                              batch_size=8,
                              write_graph=True,
                              write_grads=True,
                              write_images=True,
                              embeddings_freq=0,
                              embeddings_layer_names=None,
                              embeddings_metadata=None)
    CSV_record = CSVLogger(csv_file, separator=',', append=True)

    callbacks = [
        lr_reducer, early_stopper, model_checkpoint, tensorboard, CSV_record
    ]
    gc.disable()  # cyclic GC stays off for the whole training run (re-enabled below)
    model.fit_generator(
        generator=tools.batch_generator(np.array(train_data),
                                        np.array(train_label), batch_size,
                                        True, class_num, True),
        steps_per_epoch=int(len(train_label) / batch_size) - 1,
        max_queue_size=20,  # renamed from max_q_size in newer Keras releases
        initial_epoch=initial_epoch,
        epochs=epoch,
        verbose=1,
        callbacks=callbacks,
        validation_data=tools.batch_generator(np.array(test_data),
                                              np.array(test_label), batch_size,
                                              True, class_num, False),
        validation_steps=int(len(test_label) / batch_size) - 1,
        # 'auto' was accepted by older Keras releases; newer ones expect a dict
        # mapping class indices to weights
        class_weight='auto')

    # confusion matrix over the test set
    all_y_pred = []
    all_y_true = []
    for test_data_batch, test_label_batch in tools.batch_generator_confusion_matrix(
            np.array(test_data), np.array(test_label), batch_size, True,
            class_num):
        y_pred = model.predict(test_data_batch, batch_size)
        all_y_pred.extend(np.argmax(y_pred, axis=1).tolist())
        all_y_true.extend(np.argmax(test_label_batch, axis=1).tolist())
    confusion = confusion_matrix(y_true=all_y_true, y_pred=all_y_pred)
    print(confusion)
    with open('confusion_matrix.txt', 'a+') as f:
        f.write(str(all_y_true) + '\n')
        f.write(str(all_y_pred) + '\n')
        f.write(str(confusion) + '\n')
    gc.enable()
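
# `channel_attention.squeeze_excitation_layer` above is project-local and its
# implementation is not shown. As a reference point only, a minimal
# squeeze-and-excitation block (Hu et al., 2018) matching the call signature
# used above could look like the sketch below; this is an assumption, not the
# authors' code.
from keras.layers import Reshape, multiply


def squeeze_excitation_sketch(x, out_dim, ratio=4, concate=False):
    squeeze = GlobalAveragePooling2D()(x)                     # (batch, channels)
    excite = Dense(out_dim // ratio, activation='relu')(squeeze)
    excite = Dense(out_dim, activation='sigmoid')(excite)     # per-channel gates
    excite = Reshape((1, 1, out_dim))(excite)
    scaled = multiply([x, excite])                            # rescale channels
    return concatenate([x, scaled], axis=3) if concate else scaled
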
# Example 2: plain VGG16 baseline with a dense head (no attention branches).
def Network_config(class_num=4,
                   epoch=200,
                   initial_epoch=0,
                   batch_size=32,
                   train_data=None,
                   train_label=None,
                   test_data=None,
                   test_label=None,
                   fold=0):
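    """Plain VGG16 baseline: frozen backbone with a two-layer dense classification head."""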
    adam = Adam(lr=0.005, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    sgd = SGD(lr=0.001, momentum=0.9, decay=0.0, nesterov=False)  # defined but unused
    K.set_learning_phase(1)  # force training-phase behaviour globally (affects BN/Dropout)
    base_model = VGG16(input_tensor=Input(shape=(224, 224, 3)),
                       weights='imagenet',
                       include_top=False)

    x = base_model.output
    x = Flatten()(x)
    x = Dense(512, activation='relu')(x)
    x = BatchNormalization()(x)
    x = Dense(512, activation='relu')(x)
    x = BatchNormalization()(x)
    predictions = Dense(class_num, activation='softmax')(x)

    # this is the model we will train
    model = Model(inputs=base_model.input, outputs=predictions)
    for layer in base_model.layers:
        layer.trainable = False
        # VGG16 has no layers named 'bn*', so this branch never fires; it looks
        # like a leftover from a batch-norm backbone such as ResNet
        if layer.name.startswith('bn'):
            layer.call(layer.input, training=False)

    model.compile(optimizer=adam,
                  loss='categorical_crossentropy',
                  metrics=[keras.metrics.categorical_accuracy])

    tools.create_directory('./tmpvgg/')
    weights_file = ('./tmpvgg/' + str(fold) +
                    '-weights.{epoch:02d}-{categorical_accuracy:.4f}'
                    '-{val_loss:.4f}-{val_categorical_accuracy:.4f}.h5')
    csv_file = './tmpvgg/record.csv'
    lr_reducer = ReduceLROnPlateau(monitor='categorical_accuracy',
                                   factor=0.5,
                                   cooldown=0,
                                   patience=5,
                                   min_lr=0.5e-6)
    early_stopper = EarlyStopping(monitor='val_categorical_accuracy',
                                  min_delta=1e-4,
                                  patience=50)

    model_checkpoint = ModelCheckpoint(weights_file,
                                       monitor='val_categorical_accuracy',
                                       save_best_only=True,
                                       verbose=2,
                                       save_weights_only=True,
                                       mode='max')
    tensorboard = TensorBoard(log_dir='./logs/',
                              histogram_freq=0,
                              batch_size=8,
                              write_graph=True,
                              write_grads=True,
                              write_images=True,
                              embeddings_freq=0,
                              embeddings_layer_names=None,
                              embeddings_metadata=None)
    CSV_record = CSVLogger(csv_file, separator=',', append=True)

    callbacks = [
        lr_reducer, early_stopper, model_checkpoint, tensorboard, CSV_record
    ]
    gc.disable()
    model.fit_generator(
        generator=tools.batch_generator(np.array(train_data),
                                        np.array(train_label), batch_size,
                                        True, class_num),
        steps_per_epoch=int(len(train_label) / batch_size) - 1,
        max_queue_size=50,  # renamed from max_q_size in newer Keras releases
        initial_epoch=initial_epoch,
        epochs=epoch,
        verbose=1,
        callbacks=callbacks,
        validation_data=tools.batch_generator(np.array(test_data),
                                              np.array(test_label), batch_size,
                                              True, class_num),
        validation_steps=int(len(test_label) / batch_size) - 1,
        class_weight='auto')  # see the note on class_weight in Example 1

    # confusion matrix over the test set
    all_y_pred = []
    all_y_true = []
    for test_data_batch, test_label_batch in tools.batch_generator_confusion_matrix(
            np.array(test_data), np.array(test_label), batch_size, True,
            class_num):
        y_pred = model.predict(test_data_batch, batch_size)
        all_y_pred.extend(np.argmax(y_pred, axis=1).tolist())
        all_y_true.extend(np.argmax(test_label_batch, axis=1).tolist())
    confusion = confusion_matrix(y_true=all_y_true, y_pred=all_y_pred)
    print(confusion)
    with open('confusion_matrix.txt', 'a+') as f:
        f.write(str(all_y_true) + '\n')
        f.write(str(all_y_pred) + '\n')
        f.write(str(confusion) + '\n')
    gc.enable()
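
# Both examples lean on a project-local `tools` module whose generators are not
# shown. The sketch below is one plausible reading of the batch_generator
# interface inferred from the call sites; the flag meanings (shuffle, one-hot
# encoding, augmentation) are guesses, not the authors' actual implementation.
def batch_generator_sketch(data, labels, batch_size, shuffle, class_num,
                           augment=False):
    n = len(labels)
    while True:  # Keras fit_generator expects an endless generator
        order = np.random.permutation(n) if shuffle else np.arange(n)
        for start in range(0, n - batch_size + 1, batch_size):
            batch = order[start:start + batch_size]
            x = data[batch].astype('float32')
            # (an augmentation step would plug in here when augment=True)
            y = keras.utils.to_categorical(labels[batch], class_num)
            yield x, y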