def tune(lr=0.0001, class_weight=None):
    """Fine-tune the classifier built on top of the pre-trained base.

    Loads the model with the previously trained top-model weights and
    trains it with SGD on augmented training images, monitoring
    validation loss for early stopping and best-weights checkpointing.
    Saves the training history and class list, then cleans up.

    Args:
        lr: learning rate for the SGD optimizer.
        class_weight: optional per-class weight mapping passed to Keras.
    """
    model = load_model(nb_class=len(config.classes), weights_path=config.get_top_model_weights_path())

    optimizer = SGD(lr=lr, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(
        loss='categorical_crossentropy',
        optimizer=optimizer,
        metrics=['accuracy'])

    # Augment only the training stream; validation images stay untouched.
    augmenting_datagen = ImageDataGenerator(
        rotation_range=30.,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True)
    util.apply_mean(augmenting_datagen)

    plain_datagen = ImageDataGenerator()
    util.apply_mean(plain_datagen)

    train_flow = augmenting_datagen.flow_from_directory(
        config.train_dir,
        target_size=config.img_size,
        classes=config.classes)
    validation_flow = plain_datagen.flow_from_directory(
        config.validation_dir,
        target_size=config.img_size,
        classes=config.classes)

    callbacks = [
        EarlyStopping(verbose=1, patience=30, monitor='val_loss'),
        ModelCheckpoint(config.get_fine_tuned_weights_path(checkpoint=True),
                        save_best_only=True,
                        save_weights_only=True,
                        monitor='val_loss'),
    ]

    # NOTE(review): fine_tuning_nb_epoch is presumably a module-level
    # constant defined outside this chunk — confirm it exists.
    history = model.fit_generator(
        train_flow,
        samples_per_epoch=config.nb_train_samples,
        nb_epoch=fine_tuning_nb_epoch,
        validation_data=validation_flow,
        nb_val_samples=config.nb_validation_samples,
        callbacks=callbacks,
        class_weight=class_weight)

    util.save_history(history=history, prefix='fine-tuning')
    util.save_classes(config.classes)

    _cleanup()
# --- Example #2 (listing-site header and vote count removed; extraction artifact) ---
def _get_data_generators(train_datagen):
    """Build non-shuffling train/validation directory iterators.

    Applies the dataset mean to *train_datagen* and to a fresh plain
    generator for validation, then creates flows over the configured
    train and validation directories with shuffling disabled so that
    sample order matches the directory listing.

    Args:
        train_datagen: ImageDataGenerator used for the training flow.

    Returns:
        A ``(train_generator, validation_generator)`` tuple.
    """
    util.apply_mean(train_datagen)

    validation_datagen = ImageDataGenerator()
    util.apply_mean(validation_datagen)

    # Both flows share the same target size, class list, and fixed order.
    flow_kwargs = dict(
        target_size=config.img_size,
        classes=config.classes,
        shuffle=False)

    train_flow = train_datagen.flow_from_directory(config.train_dir, **flow_kwargs)
    validation_flow = validation_datagen.flow_from_directory(config.validation_dir, **flow_kwargs)
    return train_flow, validation_flow
def save_bottleneck_features():
    """Extract and persist VGG16 bottleneck features for both data splits.

    Runs the headless ImageNet-pretrained VGG16 over the train and
    validation directories (unshuffled, so feature order matches the
    directory listing and labels can be reconstructed later) and saves
    the resulting feature arrays to the paths configured in ``config``.
    """
    model = VGG16(weights='imagenet', include_top=False)

    datagen = ImageDataGenerator()
    util.apply_mean(datagen)

    def _extract_and_save(directory, nb_samples, out_path):
        # One pass of the frozen base over one split, dumped to disk.
        generator = datagen.flow_from_directory(directory,
                                                target_size=config.img_size,
                                                shuffle=False,
                                                classes=config.classes)
        features = model.predict_generator(generator, nb_samples)
        # Fix: open in BINARY mode — np.save writes binary data, so the
        # original text-mode open(..., 'w') raises TypeError on Python 3.
        # Passing the file object (not the path) keeps np.save from
        # appending a '.npy' suffix, matching the original output paths;
        # 'with' ensures the handle is closed (original leaked it).
        with open(out_path, 'wb') as f:
            np.save(f, features)

    _extract_and_save(config.train_dir, config.nb_train_samples, config.bf_train_path)
    _extract_and_save(config.validation_dir, config.nb_validation_samples, config.bf_valid_path)