Example #1
def transfer_model(model_name, input_shape, classes_nr):
    new_input = Input(shape=(input_shape[0], input_shape[1], 3))

    if model_name == "vgg16":
        model = VGG16(include_top=False, input_tensor=new_input)
    elif model_name == "densenet121":
        model = DenseNet121(include_top=False, input_tensor=new_input)
    elif model_name == "inceptionv3":
        model = InceptionV3(include_top=False, input_tensor=new_input)
    elif model_name == "mobilenet":
        model = MobileNet(include_top=False, input_tensor=new_input)
    elif model_name == "resnet101":
        model = ResNet101(include_top=False, input_tensor=new_input)
    elif model_name == "xception":
        model = Xception(include_top=False, input_tensor=new_input)
    else:
        raise ValueError("Unknown model_name: {}".format(model_name))

    for layer in model.layers:
        layer.trainable = False
    flat1 = layers.Flatten()(model.layers[-1].output)
    class1 = layers.Dense(1024, activation='relu')(flat1)
    drop1 = layers.Dropout(0.2)(class1)
    class2 = layers.Dense(256, activation='relu')(drop1)
    output = layers.Dense(classes_nr, activation='softmax')(class2)
    model = Model(inputs=model.inputs, outputs=output)
    return model
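
The snippet assumes its imports from elsewhere in the module; a minimal usage sketch, assuming the names come from tf.keras (the 10-class, 224x224 setup is illustrative):

import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.applications import (VGG16, DenseNet121, InceptionV3,
                                           MobileNet, ResNet101, Xception)

model = transfer_model("densenet121", input_shape=(224, 224), classes_nr=10)
model.compile(optimizer="adam",
              loss="categorical_crossentropy",
              metrics=["accuracy"])
model.summary()
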
def init_model(model_name):
    if model_name == "VGG19":  # initialize VGG19
        return VGG19(include_top=True, weights='imagenet')
    if model_name == "VGG16":
        return tf.keras.applications.VGG16(include_top=True,
                                           weights='imagenet')
    if model_name == "ResNet50":
        return ResNet50(include_top=True, weights="imagenet")
    if model_name == "DenseNet201":
        return DenseNet201(include_top=True, weights="imagenet")
    if model_name == "DenseNet121":
        return DenseNet121(include_top=True, weights="imagenet")
    if model_name == "InceptionResNetV2":
        return InceptionResNetV2(include_top=True, weights="imagenet")
    raise ValueError("Unknown model_name: {}".format(model_name))
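
These are full (include_top=True) ImageNet classifiers, so they can predict directly. A short inference sketch; note that preprocess_input is architecture-specific (the VGG16 variant is shown, matching the call below), and 'cat.jpg' is an illustrative path:

import numpy as np
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.vgg16 import preprocess_input, decode_predictions

model = init_model("VGG16")
img = image.load_img("cat.jpg", target_size=(224, 224))  # illustrative image path
x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
preds = model.predict(x)
print(decode_predictions(preds, top=3)[0])  # top-3 ImageNet labels
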
Example #3
def get_densenet_weights(save_dir: Path = Path('densenet_121')) -> str:
    """Download pre-trained ImageNet weights for the DenseNet model.

    Args:
        save_dir: Path to where the checkpoint must be downloaded.

    Returns: Path to the saved checkpoint file (saver.save returns a string).

    """
    g = tf.Graph()
    with tf.Session(graph=g) as sess:
        keras_backend.set_session(sess)
        save_dir.mkdir(parents=True, exist_ok=True)
        _ = DenseNet121(weights='imagenet')
        saver = tf.train.Saver()
        return saver.save(sess, Path(save_dir, "densenet_model.ckpt").as_posix())
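
Usage is a single call (the snippet targets the TF1 graph/session API):

from pathlib import Path

ckpt_path = get_densenet_weights(Path('densenet_121'))
print('Checkpoint written to:', ckpt_path)
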
Example #4
    def __init__(self, model, info):
        self.info = info

        possible_model = {
            "Small": self.getSmallModel(),
            "MobileNetV2": MobileNetV2(),
            "Resnet50": ResNet50(weights=None, classes=self.info.features['label'].num_classes),
            "Resnet200": ResNet152V2(weights=None, include_top=False),
            "VGG": VGG16(weights=None, include_top=False, classes=self.info.features['label'].num_classes),
            "DenseNet": DenseNet121(),
            "Cifarnet": self.cifarnet(),
            "Inception": InceptionV3(),
            "CNN": self.getCNN()
        }

        assert model in possible_model, "Selected model not available"
        self.model = possible_model[model]
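
Note that the dict above instantiates every backbone eagerly before the assert runs. A lazy variant (a sketch with the same keys) constructs only the selected model:

        possible_model = {
            "MobileNetV2": lambda: MobileNetV2(),
            "DenseNet": lambda: DenseNet121(),
            # ... wrap the remaining entries the same way
        }
        assert model in possible_model, "Selected model not available"
        self.model = possible_model[model]()  # built only on demand
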
Example #5
    def create_model(self):
        """
        https://github.com/tensorflow/tensorflow/issues/14356

        """

        input_shape = (self.config.tfr_image_height,
                       self.config.tfr_image_width,
                       self.config.tfr_image_channels)

        ## VGG16
        if self.config.model_name == 'vgg16':
            base_model = VGG16(weights='imagenet',
                               include_top=False,
                               input_tensor=self.features,
                               input_shape=input_shape)

        ## Xception
        elif self.config.model_name == 'xception':
            base_model = Xception(weights='imagenet',
                                  include_top=False,
                                  input_tensor=self.features,
                                  input_shape=input_shape)

        ## Resnet50
        elif self.config.model_name == 'resnet50':
            base_model = ResNet50(weights='imagenet',
                                  include_top=False,
                                  input_tensor=self.features,
                                  input_shape=input_shape)
            logits = self.model_top_resnet50(base_model)

        ## InceptionResNetV2
        elif self.config.model_name == 'inception_resnet_v2':
            base_model = InceptionResNetV2(weights='imagenet',
                                           include_top=False,
                                           input_tensor=self.features,
                                           input_shape=input_shape)

        ## Densenet121
        elif self.config.model_name == 'densenet121':
            base_model = DenseNet121(weights='imagenet',
                                     include_top=False,
                                     input_tensor=self.features,
                                     input_shape=input_shape)
            logits = self.model_top_densenet121(base_model)

        ## Densenet169
        elif self.config.model_name == 'densenet169':
            base_model = DenseNet169(weights='imagenet',
                                     include_top=False,
                                     input_tensor=self.features,
                                     input_shape=input_shape)
            logits = self.model_top_densenet121(base_model)

        ## Densenet201
        elif self.config.model_name == 'densenet201':
            base_model = DenseNet201(weights='imagenet',
                                     include_top=False,
                                     input_tensor=self.features,
                                     input_shape=input_shape)
            logits = self.model_top_densenet121(base_model)

        else:
            logging.error('Unknown model_name {}'.format(self.config.model_name))
            exit(1)

        # Note: only the resnet50 and densenet branches above define `logits`;
        # the other branches would need an analogous model_top_* call.
        return logits
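
model_top_resnet50 and model_top_densenet121 are not shown in this excerpt. A plausible head, consistent with how the branches above use them (tf is assumed imported; num_classes is an assumed config field):

    def model_top_densenet121(self, base_model):
        # Hypothetical head: global pooling followed by a linear logits layer.
        x = tf.keras.layers.GlobalAveragePooling2D()(base_model.output)
        logits = tf.keras.layers.Dense(self.config.num_classes)(x)  # assumed field
        return logits
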
def main(args):

    dataset = args.dataset
    data_type = args.data_type

    # data dir, image size and batch size
    DATA_DIR = 'data'
    TRAIN_DIR = os.path.join(DATA_DIR, 'train')
    VALID_DIR = os.path.join(DATA_DIR, 'valid')
    SIZE = (224, 224)
    BATCH_SIZE = 4

    # remove any previous data split
    try:
        shutil.rmtree(DATA_DIR)
    except FileNotFoundError:
        pass

    train_ratio = 0.8  # 80% data for training, the rest for testing

    # get the list of filenames and corresponding list of labels for training and validation
    train_filenames = []
    train_labels = []
    val_filenames = []
    val_labels = []

    # read files into label and frame lists
    with open('../data/labels/' + dataset + '_' + data_type +
              '_label.csv') as f:
        frames_labels = [(line.strip().split(',')[0],
                          line.strip().split(',')[1]) for line in f]

    # re-organize data by labels
    file_dir = '../data/' + dataset + '/' + data_type + '/jpg/'
    file_format = '.jpeg'
    dict_frame = {}  # key: label value; value: list of image file paths with that label
    for fr_lb in frames_labels:
        fr, lb = fr_lb
        if (lb not in dict_frame):
            dict_frame[lb] = []
        dict_frame[lb].append(file_dir + fr + file_format)

    random.seed()  # using current time as the seed
    # generate filenames and labels for training and validation dataset
    for lb in dict_frame:
        # pick random indices for training data for lb in dict_frame
        train_index = random.sample(range(0, len(dict_frame[lb])),
                                    int(train_ratio * len(dict_frame[lb])))
        for index in range(len(dict_frame[lb])):
            # training data
            if (index in train_index):
                train_filenames.append(dict_frame[lb][index])
                train_labels.append(int(lb) - 1)
            # validation data
            else:
                val_filenames.append(dict_frame[lb][index])
                val_labels.append(int(lb) - 1)

    assert set(train_labels) == set(
        val_labels), "Train and val labels don't correspond:\n{}\n{}".format(
            set(train_labels), set(val_labels))

    # create new dir data/train/label_x and data/valid/label_x
    for label in set(train_labels):
        os.makedirs(os.path.join(TRAIN_DIR, str(label)), exist_ok=True)
        os.makedirs(os.path.join(VALID_DIR, str(label)), exist_ok=True)

    # copy files
    for tr_file, label in zip(train_filenames, train_labels):
        shutil.copy2(tr_file, os.path.join(TRAIN_DIR, str(label)))
    for val_file, label in zip(val_filenames, val_labels):
        shutil.copy2(val_file, os.path.join(VALID_DIR, str(label)))

    # train models
    num_train_samples = sum([len(files) for r, d, files in os.walk(TRAIN_DIR)])
    num_valid_samples = sum([len(files) for r, d, files in os.walk(VALID_DIR)])

    num_train_steps = math.floor(num_train_samples / BATCH_SIZE)
    num_valid_steps = math.floor(num_valid_samples / BATCH_SIZE)

    # flip augmentation on the training data; validation data is left unmodified
    gen = image.ImageDataGenerator(horizontal_flip=True,
                                   vertical_flip=True)
    val_gen = image.ImageDataGenerator()

    batches = gen.flow_from_directory(TRAIN_DIR,
                                      target_size=SIZE,
                                      class_mode='categorical',
                                      shuffle=True,
                                      batch_size=BATCH_SIZE)
    val_batches = val_gen.flow_from_directory(VALID_DIR,
                                              target_size=SIZE,
                                              class_mode='categorical',
                                              shuffle=True,
                                              batch_size=BATCH_SIZE)

    model = DenseNet121()

    classes = list(iter(batches.class_indices))
    # Take the output just below the 1000-way ImageNet head
    # (pop() on model.layers does not alter a tf.keras functional model).
    last = model.layers[-2].output
    for layer in model.layers:
        layer.trainable = False
    x = Dense(len(classes), activation="softmax")(last)
    finetuned_model = Model(model.input, x)
    finetuned_model.compile(optimizer=Adam(lr=0.0001),
                            loss='categorical_crossentropy',
                            metrics=['accuracy'])
    for c in batches.class_indices:
        classes[batches.class_indices[c]] = c
    finetuned_model.classes = classes

    early_stopping = EarlyStopping(patience=450)
    checkpointer = ModelCheckpoint('./densenet_model/densenet_121_best.h5',
                                   verbose=1,
                                   save_best_only=True)

    finetuned_model.fit_generator(batches,
                                  steps_per_epoch=num_train_steps,
                                  epochs=450,
                                  callbacks=[early_stopping, checkpointer],
                                  validation_data=val_batches,
                                  validation_steps=num_valid_steps)
    finetuned_model.save('./densenet_model/densenet_121_final.h5')
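
main expects an args object carrying dataset and data_type; a minimal driver sketch:

if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', required=True)
    parser.add_argument('--data_type', required=True)
    main(parser.parse_args())
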
import tensorflow as tf
from tensorflow.python import keras
from tensorflow.python.keras.applications.vgg16 import VGG16
from tensorflow.python.keras.applications.vgg19 import VGG19
from tensorflow.python.keras.applications.densenet import DenseNet121
from tensorflow.python.keras.applications.densenet import DenseNet201
from tensorflow.python.keras.applications.resnet50 import ResNet50
from tensorflow.python.keras.applications.inception_resnet_v2 import InceptionResNetV2
from tensorflow.python.keras.models import Model

# Print the summary of each model

print("=== VGG 16 ===")
VGG16().summary()

print("=== VGG 19 ===")
VGG19().summary()

print("=== ResNet50 ===")
ResNet50().summary()

print("=== DenseNet121 ===")
DenseNet121().summary()

print("=== DenseNet201 ===")
DenseNet201().summary()

print("=== InceptionResNetV2 ===")
InceptionResNetV2().summary()
import os
import pandas as pd
from tensorflow.python.keras.applications.densenet import DenseNet121, preprocess_input
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Model

model_weights_file = './traincpkmodel/1/weights.h5'  # model weights file

image_dimension = 224  # image size

class_names = 'Atelectasis,Cardiomegaly,Consolidation,Edema,Effusion,\
Emphysema,Fibrosis,Hernia,Infiltration,Mass,Nodule,Pleural_Thickening,\
Pneumonia,Pneumothorax'.split(",")  # class names

# Build the model
base_DenseNet121_model = DenseNet121(include_top=False, weights=None, pooling="avg")
m_output = base_DenseNet121_model.output
predictions = Dense(len(class_names), activation="sigmoid", name="predictions")(m_output)  # fully connected layer predicting the classes
final_conv_layer = base_DenseNet121_model.get_layer("bn")  # the model's final convolutional layer
model = Model(inputs=base_DenseNet121_model.input, outputs=[predictions, final_conv_layer.output],
              name='myDenseNet121')  # one input, two outputs
if not os.path.exists(model_weights_file):
    raise FileNotFoundError("No model weights found at: " + model_weights_file)
# Load the model weights
model.load_weights(model_weights_file)
class_weights = model.layers[-1].get_weights()[0]  # weights of the final Dense layer


def get_output_layer(model, layer_name):
    # Look up a layer by name.
    layer_dict = dict([(layer.name, layer) for layer in model.layers])
    return layer_dict[layer_name]
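
class_weights and final_conv_layer are the usual ingredients of class activation maps (CAM). A minimal sketch of how they combine, assuming img_batch is a preprocessed batch of shape (1, image_dimension, image_dimension, 3):

import numpy as np

def compute_cam(model, class_weights, img_batch, class_idx):
    # The model above returns (predictions, conv feature maps).
    _, conv_outputs = model.predict(img_batch)
    conv = conv_outputs[0]  # (H, W, C) feature maps of the first image
    # Weight each feature channel by that class's weight in the final Dense layer.
    return np.dot(conv, class_weights[:, class_idx])  # (H, W) activation map
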
Example #9
def predict_densenet121():

    # DenseNet data generators

    test_batches = ImageDataGenerator(
        preprocessing_function= \
            applications.densenet.preprocess_input).flow_from_directory(
        test2_path,
        target_size=(image_size, image_size),
        batch_size=test_batch_size,
        shuffle=False)

    input_tensor = Input(shape=(224, 224, 3))

    # Load the pre-trained model
    model = DenseNet121(input_tensor=input_tensor,
                        weights='imagenet',
                        include_top=False)

    # add a global spatial average pooling layer
    x = model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(1024, activation='relu')(x)
    predictions = Dense(7, activation='softmax')(x)

    # this is the model we will train
    model = Model(inputs=model.input, outputs=predictions)

    model.load_weights('modelDenseNet121.h5')

    test_batches.reset()

    test_labels = test_batches.classes

    # Make predictions
    predictions = model.predict_generator(test_batches,
                                          steps=test_steps,
                                          verbose=1)

    # Declare a function for plotting the confusion matrix
    def plot_confusion_matrix(cm,
                              classes,
                              normalize=False,
                              title='Confusion matrix',
                              cmap=plt.cm.Blues):
        """
        This function prints and plots the confusion matrix.
        Normalization can be applied by setting `normalize=True`.
        """
        if normalize:
            cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
            print("Normalized confusion matrix")
        else:
            print('Confusion matrix, without normalization')

        print(cm)

        plt.imshow(cm, interpolation='nearest', cmap=cmap)
        plt.title(title)
        plt.colorbar()
        tick_marks = np.arange(len(classes))
        plt.xticks(tick_marks, classes, rotation=45)
        plt.yticks(tick_marks, classes)

        fmt = '.2f' if normalize else 'd'
        thresh = cm.max() / 2.
        for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
            plt.text(j,
                     i,
                     format(cm[i, j], fmt),
                     horizontalalignment="center",
                     color="white" if cm[i, j] > thresh else "black")

        plt.ylabel('True label')
        plt.xlabel('Predicted label')
        plt.tight_layout()
        plt.savefig('confusion_matrix_DenseNet121.png')

    cm = confusion_matrix(test_labels, predictions.argmax(axis=1))

    cm_plot_labels = ['akiec', 'bcc', 'bkl', 'df', 'mel', 'nv', 'vasc']

    plot_confusion_matrix(cm, cm_plot_labels)

    recall = np.diag(cm) / np.sum(cm, axis=1)
    precision = np.diag(cm) / np.sum(cm, axis=0)

    print("Mean recall " + str(np.mean(recall).item()))
    print("Mean precision " + str(np.mean(precision).item()))

    print("Mean recall " + str(
        recall_score(
            test_labels, predictions.argmax(axis=1), average='weighted')))
    print("Balanced Accuracy " +
          str(balanced_accuracy_score(test_labels, predictions.argmax(
              axis=1))))
    print("Mean Precision " + str(
        precision_score(
            test_labels, predictions.argmax(axis=1), average='weighted')))
    print("Mean f1 score " + str(
        f1_score(test_labels, predictions.argmax(axis=1), average='weighted')))

    file = open("DenseNet121_results_last", "w+")
    file.write("Mean recall " + str(np.mean(recall).item()) + "\n")
    file.write("Mean precision " + str(np.mean(precision).item()) + "\n")
    file.write("Mean recall " + str(
        recall_score(
            test_labels, predictions.argmax(axis=1), average='weighted')) +
               "\n")
    file.write(
        "Balanced Accuracy " +
        str(balanced_accuracy_score(test_labels, predictions.argmax(axis=1))) +
        "\n")
    file.write("Mean Precision " + str(
        precision_score(
            test_labels, predictions.argmax(axis=1), average='weighted')) +
               "\n")
    file.write("Mean f1 score " + str(
        f1_score(test_labels, predictions.argmax(
            axis=1), average='weighted')) + "\n")

    file.close()

    predicted_class_indices = np.argmax(predictions, axis=1)

    labels = train_batches.class_indices
    labels = dict((v, k) for k, v in labels.items())
    predictions = [labels[k] for k in predicted_class_indices]

    filenames = test_batches.filenames
    results = pd.DataFrame({
        "Filename": filenames,
        "DenseNet121_Predictions": predictions
    })
    results.to_csv("test_prediction_densenet121.csv", index=False)
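
This function, like the companion training function later in the listing, reads several module-level names that the excerpt does not show; a plausible configuration block (all values illustrative):

test2_path = 'data/test'   # assumed directory layout
image_size = 224
test_batch_size = 10
test_steps = 100           # typically ceil(num_test_images / test_batch_size)
# train_batches must be the training generator built elsewhere;
# its class_indices are reused above to name the predicted classes.
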
Example #10
def buil_bcnn(all_trainable=False,
              size_height=224,
              size_width=224,
              no_class=100,
              no_last_layer_backbone=-9,
              name_optimizer='sgd',
              learning_rate=1.0,
              decay_learning_rate=0.0,
              decay_weight_rate=0.0,
              name_initializer='glorot_normal',
              name_activation='softmax',
              name_loss='categorical_crossentropy'):
    '''Build Bilinear CNN.

    Detector and extractor share the same DenseNet121 backbone.

    Args:
        all_trainable: whether backbone layers are trainable.
        size_height: default 224.
        size_width: default 224.
        no_class: number of prediction classes.
        no_last_layer_backbone: index of the backbone layer whose output is pooled.
        name_optimizer: optimizer method.
        learning_rate: learning rate.
        decay_learning_rate: learning rate decay.
        decay_weight_rate: l2 normalization decay rate.
        name_initializer: initializer method.
        name_activation: activation method.
        name_loss: loss function.

    Returns:
        Bilinear CNN model.
    '''
    ##########################
    # Load pre-trained model #
    ##########################

    # Load model
    input_tensor = Input(shape=[size_height, size_width, 3])
    pre_train_model = DenseNet121(input_tensor=input_tensor,
                                  include_top=False,
                                  weights='imagenet')

    # Pre-trained weights
    for layer in pre_train_model.layers:
        layer.trainable = all_trainable

    ######################
    # Combine two models #
    ######################

    # Extract features from the detector
    model_detector = pre_train_model
    output_detector = model_detector.layers[no_last_layer_backbone].output
    shape_detector = model_detector.layers[no_last_layer_backbone].output_shape

    # Extract features from extractor
    model_extractor = pre_train_model
    output_extractor = model_extractor.layers[no_last_layer_backbone].output
    shape_extractor = model_extractor.layers[
        no_last_layer_backbone].output_shape

    # Reshape tensor to (minibatch_size, total_pixels, filter_size)
    output_detector = Reshape(
        [shape_detector[1] * shape_detector[2],
         shape_detector[-1]])(output_detector)
    output_extractor = Reshape(
        [shape_extractor[1] * shape_extractor[2],
         shape_extractor[-1]])(output_extractor)

    # Outer-products
    x = Lambda(_outer_product)([output_detector, output_extractor])
    # Reshape tensor to (minibatch_size, filter_size_detector*filter_size_extractor)
    x = Reshape([shape_detector[-1] * shape_extractor[-1]])(x)
    # Signed square-root
    x = Lambda(_signed_sqrt)(x)
    # L2 normalization
    x = Lambda(_l2_normalize)(x)

    ################################
    # Attach fully-connected layer #
    ################################

    if name_initializer is not None:
        # e.g. 'glorot_normal' -> glorot_normal(); the named initializer
        # class must be importable in this module's namespace.
        name_initializer = eval(name_initializer + '()')

    # FC layer
    x = Dense(units=no_class,
              kernel_initializer=name_initializer,
              kernel_regularizer=l2(decay_weight_rate))(x)
    output_tensor = Activation(name_activation)(x)

    #################
    # Compile model #
    #################

    model_bcnn = Model(inputs=[input_tensor], outputs=[output_tensor])

    # Optimizer
    if name_optimizer == 'adam':
        optimizer = Adam(lr=learning_rate, decay=decay_learning_rate)
    elif name_optimizer == 'rmsprop':
        optimizer = RMSprop(lr=learning_rate, decay=decay_learning_rate)
    elif name_optimizer == 'sgd':
        optimizer = SGD(lr=learning_rate,
                        decay=decay_learning_rate,
                        momentum=0.9,
                        nesterov=False)
    else:
        raise RuntimeError('Optimizer should be one of Adam, RMSprop and SGD.')

    # Compile
    model_bcnn.compile(loss=name_loss,
                       optimizer=optimizer,
                       metrics=['accuracy'])

    # print('-------- Model summary --------')
    # print(model_bcnn.summary())
    # print('------------------------------')

    return model_bcnn
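
_outer_product, _signed_sqrt and _l2_normalize are referenced but not defined in this excerpt; a sketch consistent with the bilinear-pooling steps commented above (Keras backend ops):

from tensorflow.keras import backend as K

def _outer_product(x):
    # Batched outer product over spatial positions:
    # (B, P, C1) x (B, P, C2) -> (B, C1, C2), averaged over P.
    return K.batch_dot(x[0], x[1], axes=[1, 1]) / K.cast(K.shape(x[0])[1], 'float32')

def _signed_sqrt(x):
    # Signed square root, the standard bilinear-CNN normalization.
    return K.sign(x) * K.sqrt(K.abs(x) + 1e-9)

def _l2_normalize(x, axis=-1):
    return K.l2_normalize(x, axis=axis)
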
Example #11
def train_top_model_densenet121():



    # Set up the DenseNet data generators
    train_batches = ImageDataGenerator(
        preprocessing_function= \
            applications.densenet.preprocess_input).flow_from_directory(
        train_path,
        target_size=(image_size, image_size),
        batch_size=train_batch_size)

    valid_batches = ImageDataGenerator(
        preprocessing_function= \
            applications.densenet.preprocess_input).flow_from_directory(
        valid_path,
        target_size=(image_size, image_size),
        batch_size=val_batch_size)

    test_batches = ImageDataGenerator(
        preprocessing_function= \
            applications.densenet.preprocess_input).flow_from_directory(
        test_path,
        target_size=(image_size, image_size),
        batch_size=test_batch_size,
        shuffle=False)

    input_tensor = Input(shape=(224, 224, 3))

    # Load the pre-trained model
    model = DenseNet121(input_tensor=input_tensor,
                        weights='imagenet',
                        include_top=False)

    # add a global spatial average pooling layer
    x = model.output
    x = GlobalAveragePooling2D()(x)
    # add relu layer
    x = Dense(1024, activation='relu')(x)
    # and a softmax layer for 7 classes
    predictions = Dense(7, activation='softmax')(x)

    # this is the model we will train
    model = Model(inputs=model.input, outputs=predictions)


    def top_3_accuracy(y_true, y_pred):
        return top_k_categorical_accuracy(y_true, y_pred, k=3)

    def top_2_accuracy(y_true, y_pred):
        return top_k_categorical_accuracy(y_true, y_pred, k=2)


    model.compile(optimizer=optimizers.SGD(lr=0.001, momentum=0.9,
                                           decay=0.0, nesterov=False),
                  loss="categorical_crossentropy",
                  metrics=[categorical_accuracy, top_2_accuracy, top_3_accuracy])

    print(model.summary())

    # Declare a checkpoint to save the best version of the model
    checkpoint = ModelCheckpoint("modelDenseNet121.h5", monitor='val_categorical_accuracy', verbose=1,
                                 save_best_only=True, mode='max')

    # Reduce the learning rate as the learning stagnates
    reduce_lr = ReduceLROnPlateau(monitor='val_categorical_accuracy', factor=0.5, patience=2,
                                  verbose=1, mode='max', min_lr=0.00001)

    early_stopping = EarlyStopping(monitor='val_categorical_accuracy', patience=10, verbose=1, mode='max')

    callbacks_list = [checkpoint, reduce_lr, early_stopping]

    history = model.fit_generator(train_batches,
                                  # class_weight=class_weights,
                                  epochs=epochs,
                                  shuffle=True,
                                  validation_data=valid_batches,
                                  steps_per_epoch=train_steps,
                                  validation_steps=val_steps,
                                  verbose=1,
                                  callbacks=callbacks_list)


    # Evaluation of the best epoch
    model.load_weights('modelDenseNet121.h5')

    val_loss, val_cat_acc, val_top_2_acc, val_top_3_acc = \
        model.evaluate_generator(valid_batches, steps=val_steps)

    print('val_loss:', val_loss)
    print('val_cat_acc:', val_cat_acc)
    print('val_top_2_acc:', val_top_2_acc)
    print('val_top_3_acc:', val_top_3_acc)
    def init_model(self, mode="resnet50"):
        """
        Initialize the model.
        :param mode: model type
        :return: model name (checkpoint filename template) and base model
        """
        if mode == "resnet50v2":
            from tensorflow.keras.applications.resnet_v2 import ResNet50V2
            model_name = 'rotnet_v3_resnet50v2_{epoch:02d}_{val_acc:.4f}.hdf5'
            base_model = ResNet50V2(weights='imagenet',
                                    include_top=False,
                                    input_shape=self.input_shape)
        elif mode == "resnet50":
            from tensorflow.keras.applications.resnet import ResNet50
            model_name = 'rotnet_v3_resnet50_{epoch:02d}_{val_acc:.4f}.hdf5'
            base_model = ResNet50(weights='imagenet',
                                  include_top=False,
                                  input_shape=self.input_shape)
        elif mode == "mobilenetv2":
            from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2
            model_name = 'rotnet_v3_mobilenetv2_{epoch:02d}_{val_acc:.4f}.hdf5'
            base_model = MobileNetV2(weights='imagenet',
                                     include_top=False,
                                     input_shape=self.input_shape)
        elif mode == "densenet121":
            from tensorflow.keras.applications.densenet import DenseNet121
            model_name = 'rotnet_v3_densenet121_{epoch:02d}_{val_acc:.4f}.hdf5'
            base_model = DenseNet121(weights='imagenet',
                                     include_top=False,
                                     input_shape=self.input_shape)
        else:
            raise Exception("[Exception] mode {} is not supported!".format(mode))

        # keep all backbone layers trainable
        for layer in base_model.layers:
            layer.trainable = True

        x = base_model.output
        # if mode == "mobilenetv2":
        #     x = Dense(128, activation="relu")(x)
        x = Flatten()(x)

        if self.is_hw_ratio:  # whether to use the width/height ratio
            x1 = base_model.output
            x1 = Flatten()(x1)
            input_ratio = Input(shape=(1, ), name='ratio')
            x2 = Dense(1, activation='relu')(input_ratio)
            x = concatenate([x1, x2])

        final_output = Dense(self.nb_classes,
                             activation='softmax',
                             name='fc360')(x)
        model = Model(inputs=base_model.input, outputs=final_output)

        # model.summary()

        # Optimizer
        if self.nb_classes == 360:
            metrics = ["acc", angle_error]
        else:
            metrics = ["acc"]

        model.compile(loss='categorical_crossentropy',
                      optimizer=SGD(lr=0.0004, momentum=0.9),
                      metrics=metrics)
        if self.model_path:
            model.load_weights(self.model_path)
            print('[Info] Loaded model weights from: {}'.format(self.model_path))

        return model_name, model
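
The returned model_name is a checkpoint filename template with epoch/val_acc placeholders. A sketch of wiring it into training from inside the class (train_data and val_data are assumed generators; monitor='val_acc' matches the "acc" metric above):

        from tensorflow.keras.callbacks import ModelCheckpoint

        model_name, model = self.init_model(mode="densenet121")
        checkpoint = ModelCheckpoint(model_name,  # placeholders filled per epoch
                                     monitor='val_acc',
                                     save_best_only=True)
        model.fit(train_data, validation_data=val_data,
                  epochs=50, callbacks=[checkpoint])
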
Example #13
def train():
    current_dir = os.path.dirname(os.path.realpath(__file__))

    train_dir = os.path.join(current_dir, 'train')
    validation_dir = os.path.join(current_dir, 'validation')

    train_paper_dir = os.path.join(train_dir, 'paper')
    train_rock_dir = os.path.join(train_dir, 'rock')
    train_scissors_dir = os.path.join(train_dir, 'scissors')

    validation_paper_dir = os.path.join(validation_dir, 'paper')
    validation_rock_dir = os.path.join(validation_dir, 'rock')
    validation_scissors_dir = os.path.join(validation_dir, 'scissors')

    num_paper_tr = len(os.listdir(train_paper_dir))
    num_rock_tr = len(os.listdir(train_rock_dir))
    num_scissors_tr = len(os.listdir(train_scissors_dir))

    num_paper_val = len(os.listdir(validation_paper_dir))
    num_rock_val = len(os.listdir(validation_rock_dir))
    num_scissors_val = len(os.listdir(validation_scissors_dir))

    total_train = num_paper_tr + num_rock_tr + num_scissors_tr
    total_val = num_paper_val + num_rock_val + num_scissors_val

    print('total train:', total_train)
    print('total validation:', total_val)

    # Augmentation, when enabled, belongs on the training generator;
    # the validation data is only rescaled.
    train_image_generator = ImageDataGenerator(
        rescale=1. / 255)  # optionally: horizontal_flip=True, rotation_range=45, zoom_range=0.5
    validation_image_generator = ImageDataGenerator(rescale=1. / 255)

    train_data_gen = train_image_generator.flow_from_directory(
        batch_size=batch_size,
        directory=train_dir,
        shuffle=True,
        target_size=(IMG_HEIGHT, IMG_WIDTH),
        class_mode='categorical')

    val_data_gen = validation_image_generator.flow_from_directory(
        batch_size=batch_size,
        directory=validation_dir,
        shuffle=True,
        target_size=(IMG_HEIGHT, IMG_WIDTH),
        class_mode='categorical')

    densenet = DenseNet121(include_top=False,
                           weights='imagenet',
                           input_shape=(256, 256, 3))  # `classes` is ignored when include_top=False
    densenet.trainable = True
    model = Sequential()
    model.add(densenet)
    model.add(MaxPool2D())
    model.add(Flatten())
    model.add(Dense(3, activation='softmax'))

    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    model.summary()

    history = model.fit(train_data_gen,
                        steps_per_epoch=math.ceil(total_train / batch_size),
                        epochs=epochs,
                        validation_data=val_data_gen,
                        validation_steps=math.ceil(total_val / batch_size))

    acc = history.history['accuracy']
    val_acc = history.history['val_accuracy']

    loss = history.history['loss']
    val_loss = history.history['val_loss']

    epochs_range = range(epochs)

    plt.figure(figsize=(8, 8))
    plt.subplot(1, 2, 1)
    plt.plot(epochs_range, acc, label='Training Accuracy')
    plt.plot(epochs_range, val_acc, label='Validation Accuracy')
    plt.legend(loc='lower right')
    plt.title('Training and Validation Accuracy')

    plt.subplot(1, 2, 2)
    plt.plot(epochs_range, loss, label='Training Loss')
    plt.plot(epochs_range, val_loss, label='Validation Loss')
    plt.legend(loc='upper right')
    plt.title('Training and Validation Loss')
    plt.show()

    model_path = os.path.join(current_dir, 'model')
    if not os.path.exists(model_path):
        os.makedirs(model_path)

    model.save(os.path.join(model_path, 'model.h5'))
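
train() reads a few module-level constants not shown in the excerpt; a plausible configuration (values illustrative, image sizes matching the DenseNet input_shape above):

batch_size = 32
epochs = 15
IMG_HEIGHT = 256
IMG_WIDTH = 256
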