Example #1
0
def main(data_path=args['data_path'], train_from=train_from):
    """Train a model, optionally running a learning-rate range search.

    Args:
        data_path: directory containing the training data (defaults to the
            CLI argument captured at import time).
        train_from: 'trained_weights' to resume from saved weights,
            'trained_model' to load a full saved model, anything else to
            build a fresh model.
    """
    train_gen, test_gen = create_data_generator(data_path)
    valid_x, valid_y = create_valid_data()

    # Build the model from the requested starting point.
    if train_from == 'trained_weights':
        model = load_model_from_trained_weights(imagedims=IMAGE_DIMS,
                                                nb_classes=len(train_gen.class_indices),
                                                weights=args['weight_path'],
                                                freeze_until=freeze_until)
    elif train_from == 'trained_model':
        model = load_model_from_trained_model()
    else:
        model = load_models(imagedims=IMAGE_DIMS, nb_classes=len(train_gen.class_indices))

    print('[INFO] compiling model...')
    model.compile(loss="categorical_crossentropy", optimizer=OPT, metrics=["accuracy"])

    checkpoint = ModelCheckpoint(filepath=args['save_model'], monitor='val_loss', verbose=0,
                                 save_best_only=True, save_weights_only=False,
                                 mode='auto', period=1)
    stop_early = EarlyStopping(monitor='val_loss', min_delta=.0, patience=40, verbose=0, mode='auto')

    # BUG FIX: `lr_finder` was previously left undefined (NameError at the
    # callbacks list below) when `lr_finder_from` matched neither mode.
    # Default it to None and only use it when a finder was built.
    lr_finder = None
    if lr_finder_from == 'large_range_search':
        # Exponential LR finder — use this for a LARGE range search.
        lr_finder = LRFinder(NUM_SAMPLES, BS, minimum_lr=1e-3, maximum_lr=10.,
                             lr_scale='exp',
                             validation_data=(valid_x, valid_y),  # use the validation data for losses
                             validation_sample_rate=5,
                             save_dir='weights/', verbose=True)
    elif lr_finder_from == 'close_range_search':
        # Linear LR finder — use this for a CLOSE range search.
        # BUG FIX: this branch is documented as LINEAR but passed
        # lr_scale='exp' (identical to the branch above); use 'linear'.
        lr_finder = LRFinder(NUM_SAMPLES, BS, minimum_lr=1e-5, maximum_lr=1e-2,
                             lr_scale='linear',
                             validation_data=(valid_x, valid_y),  # use the validation data for losses
                             validation_sample_rate=5,
                             save_dir='weights/', verbose=True)

    callbacks = [checkpoint, stop_early]
    if lr_finder is not None:
        callbacks.append(lr_finder)

    H = model.fit_generator(train_gen,
                            validation_data=(valid_x, valid_y),
                            epochs=EPOCHS,
                            callbacks=callbacks,
                            verbose=1)

    # Plot the LR search results only when a finder actually ran.
    if lr_finder is not None:
        lr_finder.plot_schedule(clip_beginning=10, clip_endding=5)
Example #2
0
    # NOTE(review): this span is the interior of a training function whose
    # `def` line is outside this view; `X_train`, `Y_train`, `X_test`,
    # `Y_test`, `model`, `batch_size`, `nb_epoch`, `lr_finder`, and
    # `model_checkpoint` are presumably defined by the enclosing scope —
    # confirm against the full file.
    print('Using real-time data augmentation.')
    # This will do preprocessing and realtime data augmentation:
    datagen = ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
        width_shift_range=0,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=0,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=True,  # randomly flip images
        vertical_flip=False)  # randomly flip images

    # Compute quantities required for featurewise normalization
    # (std, mean, and principal components if ZCA whitening is applied).
    # With all featurewise options disabled above, this is effectively a no-op
    # kept for safety should the flags be enabled later.
    datagen.fit(X_train)

    # Fit the model on the batches generated by datagen.flow().
    # steps_per_epoch covers the whole training set once per epoch.
    model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size, shuffle=True),
                        steps_per_epoch=X_train.shape[0] // batch_size,
                        validation_data=(X_test, Y_test),
                        epochs=nb_epoch, verbose=1,
                        callbacks=[lr_finder, model_checkpoint])

# Visualize the learning-rate schedule discovered during training,
# trimming the noisy first/last samples from the plot.
lr_finder.plot_schedule(clip_beginning=10, clip_endding=5)

# Score the trained model on the held-out test set and print one line
# per metric (loss, accuracy, ...).
scores = model.evaluate(X_test, Y_test, batch_size=batch_size)
for metric_name, score in zip(model.metrics_names, scores):
    print("%s : %0.4f" % (metric_name, score))
Example #3
0
        # NOTE(review): this span begins mid-call — the `ImageDataGenerator(`
        # opening (presumably assigned to `img_gen`) and the enclosing
        # function's `def` line are outside this view; `X_train`, `Y_train`,
        # `X_test`, `Y_test`, `VGG16_model`, `n_batch`, `n_epochs`, and `lrf`
        # come from that unseen scope — confirm against the full file.
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=True,  # set each sample mean to 0
        featurewise_std_normalization=
        False,  # divide inputs by std of the dataset
        samplewise_std_normalization=True,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        # randomly rotate images in the range (degrees, 0 to 180)
        rotation_range=90,
        # randomly shift images horizontally (fraction of total width)
        width_shift_range=0,
        # randomly shift images vertically (fraction of total height)
        height_shift_range=0,
        horizontal_flip=True,  # randomly flip images
        vertical_flip=True)  # randomly flip images
    # Per-sample normalization needs no dataset statistics, so fit() is
    # effectively a no-op here; kept in case featurewise options are enabled.
    img_gen.fit(X_train)

    # Train on augmented batches; steps_per_epoch covers the whole
    # training set once per epoch.
    VGG16_model.fit_generator(img_gen.flow(X_train,
                                           Y_train,
                                           batch_size=n_batch,
                                           shuffle=True),
                              steps_per_epoch=X_train.shape[0] // n_batch,
                              validation_data=(X_test, Y_test),
                              epochs=n_epochs,
                              verbose=1,
                              callbacks=[lrf])
# Plot the learning-rate schedule recorded by the LR finder, dropping
# the noisy samples at both ends of the run.
lrf.plot_schedule(clip_beginning=10, clip_endding=5)

# Evaluate the trained VGG16 model on the test set and report each
# metric on its own line.
scores = VGG16_model.evaluate(X_test, Y_test, batch_size=n_batch)
for metric_name, score in zip(VGG16_model.metrics_names, scores):
    print("%s : %0.4f" % (metric_name, score))