Example #1
from keras.applications import DenseNet121, ResNet50
from keras.layers import Dense, Flatten, Input
from keras.models import Model


def build_model(config):
    """
    Build an image model according to `config`.

    Returns: a Keras model with the specified weights.
    """

    input_size = config['input_size']
    input_shape = (input_size, input_size, 3)

    if config.get('pretrain'):
        # The pretrained weights are used at the canonical 224x224 input size.
        assert config['input_size'] == 224
        weights = 'imagenet'
    else:
        weights = None

    assert config['image_model'] in ['densenet', 'resnet', 'linear']
    if config['image_model'] == 'densenet':
        print("Using Densenet.")
        base_model = DenseNet121(input_shape=input_shape,
                                 weights=weights,
                                 include_top=False,
                                 pooling='avg')
        image_input = base_model.input
        layer = base_model.output
    elif config['image_model'] == 'resnet':
        print("Using Resnet.")
        base_model = ResNet50(input_shape=input_shape,
                              weights=weights,
                              include_top=False,
                              pooling='avg')
        image_input = base_model.input
        layer = base_model.output
    elif config['image_model'] == 'linear':
        print("Using linear model.")
        image_input = Input(shape=input_shape)
        layer = Flatten()(image_input)

    if config.get('freeze') and config['image_model'] != 'linear':
        # Freeze the convolutional base so only the new head is trained.
        # The loop variable must not reuse `layer`, which still holds the
        # output tensor consumed below.
        for base_layer in base_model.layers:
            base_layer.trainable = False
            print("Freezing {}".format(base_layer))

    # 14 sigmoid outputs, one per label (multi-label classification).
    predictions = Dense(14, activation='sigmoid')(layer)
    model = Model(inputs=image_input, outputs=predictions)
    return model
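
For reference, a minimal usage sketch, not part of the original example: the config keys match the ones read above, while the concrete values, optimizer, and loss are illustrative assumptions.

config = {
    'image_model': 'densenet',  # one of 'densenet', 'resnet', 'linear'
    'input_size': 224,
    'pretrain': True,           # load ImageNet weights (requires 224)
    'freeze': True,             # train only the classification head
}
model = build_model(config)
# Binary cross-entropy pairs with the 14 sigmoid outputs (multi-label).
model.compile(optimizer='adam', loss='binary_crossentropy',
              metrics=['accuracy'])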
Example #2
import gc
import json
import math
import time

import numpy as np
from keras import backend as K
from keras import optimizers
from keras.applications import VGG16
from keras.layers import Dense, Dropout, Flatten
from keras.models import Model, Sequential
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import to_categorical
from sklearn.model_selection import StratifiedKFold


def fine_tune(data, labels, best_top_model, type_top, layers_fine_tune):
    # `batch_size`, `epochs` and `enum_models` are assumed to be module-level
    # globals, as in the original code.
    folds = list(StratifiedKFold(n_splits=5, shuffle=True).split(data, labels))

    for j, (train_idx, val_idx) in enumerate(folds):

        print('\nFold ', j)
        x_train = data[train_idx]
        y_train = labels[train_idx]
        x_valid = data[val_idx]
        y_valid = labels[val_idx]

        num_classes = len(np.unique(labels))

        # Convolutional base: ImageNet weights, no classifier head.
        base_model = VGG16(weights='imagenet', include_top=False,
                           input_shape=x_train.shape[1:])

        top_model = Sequential()

        if type_top == enum_models.typeA:
            layer = Flatten(name='flatten',
                            input_shape=base_model.output_shape[1:])
            top_model.add(layer)
            layer = Dense(4096, activation='relu', name='fc1')
            top_model.add(layer)
            layer = Dense(4096, activation='relu', name='fc2')
            top_model.add(layer)
            layer = Dense(num_classes,
                          activation='softmax',
                          name='predictions')
            top_model.add(layer)

        elif type_top == enum_models.typeB:
            layer = Flatten(name='flatten',
                            input_shape=base_model.output_shape[1:])
            top_model.add(layer)
            layer = Dense(256, activation='relu', name='fc1')
            top_model.add(layer)
            layer = Dropout(0.5, name='dropout')
            top_model.add(layer)
            layer = Dense(num_classes,
                          activation='softmax',
                          name='predictions')
            top_model.add(layer)

        elif type_top == enum_models.typeC:
            layer = Flatten(name='flatten',
                            input_shape=base_model.output_shape[1:])
            top_model.add(layer)
            layer = Dense(4096, activation='relu', name='fc1')
            top_model.add(layer)
            layer = Dense(4096, activation='relu', name='fc2')
            top_model.add(layer)
            layer = Dropout(0.5, name='dropout')
            top_model.add(layer)
            layer = Dense(num_classes,
                          activation='softmax',
                          name='predictions')
            top_model.add(layer)

        top_model.load_weights(best_top_model)
        # Stack the classifier head on top of the convolutional base.
        model = Model(inputs=base_model.input,
                      outputs=top_model(base_model.output))

        # Freeze the first `layers_fine_tune` layers; the rest are fine-tuned.
        for layer in model.layers[:layers_fine_tune]:
            layer.trainable = False

        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
                      metrics=['accuracy'])

        train_labels = to_categorical(y_train, num_classes=num_classes)
        validation_labels = to_categorical(y_valid, num_classes=num_classes)

        # prepare data augmentation configuration
        train_datagen = ImageDataGenerator(rescale=1. / 255,
                                           shear_range=0.2,
                                           zoom_range=0.2,
                                           horizontal_flip=True)
        train_generator = train_datagen.flow(x_train,
                                             train_labels,
                                             shuffle=False,
                                             batch_size=batch_size)

        # constants
        nb_train_samples = len(train_idx)
        predict_size_train = int(math.ceil(nb_train_samples / batch_size))

        test_datagen = ImageDataGenerator(rescale=1. / 255)
        validation_generator = test_datagen.flow(x_valid,
                                                 validation_labels,
                                                 shuffle=False,
                                                 batch_size=batch_size)
        nb_validation_samples = len(val_idx)
        predict_size_validation = int(
            math.ceil(nb_validation_samples / batch_size))

        t = time.process_time()  # CPU time; use time.time() for wall-clock
        historyGenerated = model.fit_generator(
            train_generator,
            steps_per_epoch=predict_size_train,
            epochs=epochs,
            validation_data=validation_generator,
            validation_steps=predict_size_validation)

        training_time = time.process_time() - t

        # Save this one: the fine-tuned model is the one we will use.
        model.save('fine_tune/final_model_exported_fold_' + str(j) + '.h5')

        contentJson = json.dumps(historyGenerated.history,
                                 indent=4,
                                 sort_keys=True)
        with open('fine_tune/fine_tune_history_fold_' + str(j) + '.json',
                  'w') as f:
            f.write(contentJson)
        ############################### test ###############################
        # The validation fold is reused here as the test set.
        test_datagen = ImageDataGenerator(rescale=1. / 255)
        generator_test = test_datagen.flow(x_valid,
                                           validation_labels,
                                           shuffle=False,
                                           batch_size=batch_size)

        nb_test_samples = nb_validation_samples
        test_loss, test_accuracy = model.evaluate_generator(
            generator_test,
            steps=int(math.ceil(nb_test_samples / batch_size)))

        print("[INFO] TEST accuracy: {:.2f}%".format(test_accuracy * 100))
        print("[INFO] Test loss: {}".format(test_loss))

        contentJson = json.dumps({
            'test_accuracy': test_accuracy,
            'test_loss': test_loss,
            'training_time': training_time,
            'model': 'fine_tune/final_model_exported_fold_' + str(j) + '.h5'
        })
        with open('fine_tune/fine_tune_test_fold_' + str(j) + '.json',
                  'w') as f:
            f.write(contentJson)

        # Free memory between folds.
        del model
        del top_model
        del x_train
        del y_train
        del x_valid
        del y_valid
        K.clear_session()
        gc.collect()
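
A minimal sketch of a call site, not part of the original example. The random arrays stand in for a real image dataset; `best_top_model` would point at weights saved from an earlier run of the classifier head, and `batch_size`, `epochs`, and `enum_models` are the module-level globals the function body relies on.

import numpy as np

# Illustrative stand-in data: 100 RGB images at VGG16's 224x224 size, 3 classes.
data = np.random.rand(100, 224, 224, 3).astype('float32')
labels = np.random.randint(0, 3, size=100)

batch_size = 16  # assumed module-level globals, read inside fine_tune()
epochs = 5

fine_tune(data, labels,
          best_top_model='top_model_weights.h5',  # hypothetical weights file
          type_top=enum_models.typeB,
          layers_fine_tune=15)  # freeze the first 15 layers of the stack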