Example #1
0
# Training callbacks: checkpoint only the best weights, decay the learning
# rate when val_loss plateaus, stop early once it stalls, and log each epoch.
checkpointer = ModelCheckpoint(
    filepath="/tmp/fcn_vgg16_weights.h5", verbose=1, save_best_only=True)
lr_reducer = ReduceLROnPlateau(
    monitor='val_loss', factor=np.sqrt(0.1), cooldown=0, patience=5,
    min_lr=0.5e-6)
early_stopper = EarlyStopping(
    monitor='val_loss', min_delta=0.001, patience=100)
csv_logger = CSVLogger(
    'output/{}_fcn_vgg16.csv'.format(datetime.datetime.now().isoformat()))

# Data pipeline: one generator plus one image-set loader per split.
datagen = PascalVocGenerator(**init_args['pascal_voc_generator']['train'])
train_loader = ImageSetLoader(**init_args['image_set_loader']['train'])
val_loader = ImageSetLoader(**init_args['image_set_loader']['val'])

# FCN over a VGG16 backbone, 21 output channels, 500x500 RGB inputs.
fcn_vgg16 = FCN(basenet='vgg16', input_shape=(500, 500, 3), num_output=21)
fcn_vgg16.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['accuracy', 'categorical_accuracy'])

# Per-split flow kwargs: shallow copy of the shared settings, each with its
# own split loader attached.
flow_args = init_args['pascal_voc_generator']['flow_from_imageset']
train_flow_args = dict(flow_args, image_set_loader=train_loader)
val_flow_args = dict(flow_args, image_set_loader=val_loader)
Example #2
0
# Callbacks: LR decay on val_loss plateau, early stopping, NaN termination,
# and per-epoch CSV logging to a fixed temporary file.
lr_reducer = ReduceLROnPlateau(
    monitor='val_loss', factor=np.sqrt(0.1), cooldown=0, patience=10,
    min_lr=1e-12)
early_stopper = EarlyStopping(
    monitor='val_loss', min_delta=0.001, patience=30)
nan_terminator = TerminateOnNaN()
csv_logger = CSVLogger('output/tmp_fcn_vgg16.csv')

# Pascal VOC generator with fixed per-channel pixel mean/std normalization.
datagen = PascalVocGenerator(image_shape=[224, 224, 3],
                             image_resample=True,
                             pixelwise_center=True,
                             pixel_mean=[115.85100, 110.50989, 102.16182],
                             pixelwise_std_normalization=True,
                             pixel_std=[70.30930, 69.41244, 72.60676])

# Per-split image-set loaders driven by the shared YAML configuration.
train_loader = ImageSetLoader(**init_args['image_set_loader']['train'])
val_loader = ImageSetLoader(**init_args['image_set_loader']['val'])

# FCN with an ImageNet-pretrained, trainable encoder and L2 weight decay.
fcn_vgg16 = FCN(input_shape=(224, 224, 3),
                classes=21,
                weight_decay=3e-3,
                weights='imagenet',
                trainable_encoder=True)
optimizer = keras.optimizers.Adam(1e-4)

fcn_vgg16.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
Example #3
0
import keras
import keras.backend as K
from keras.models import load_model
from voc_generator import PascalVocGenerator, ImageSetLoader
from keras_fcn.layers import BilinearUpSampling2D

import yaml

# Load generator/loader configuration from YAML. safe_load restricts parsing
# to plain data types; a bare yaml.load(stream) with no Loader can construct
# arbitrary Python objects and is deprecated/removed in modern PyYAML.
with open("init_args.yml", 'r') as stream:
    try:
        init_args = yaml.safe_load(stream)
    except yaml.YAMLError as exc:
        print(exc)

# Pixel statistics here must match those used at training time.
_gen_args = dict(image_shape=[224, 224, 3],
                 image_resample=True,
                 pixelwise_center=True,
                 pixel_mean=[115.85100, 110.50989, 102.16182],
                 pixelwise_std_normalization=True,
                 pixel_std=[70.30930, 69.41244, 72.60676])
datagen = PascalVocGenerator(**_gen_args)
dataload = ImageSetLoader(**init_args['image_set_loader']['train'])

# Restore the trained model; the custom upsampling layer must be registered
# via custom_objects so Keras can deserialize it.
model = load_model('/tmp/fcn_vgg16_weights.h5',
                   custom_objects={'BilinearUpSampling2D': BilinearUpSampling2D})
print(model.summary())


# Sanity-check preprocessing on the first 10 training images: load each one,
# apply the generator's standardization, and fetch its segmentation map.
# NOTE(review): `X` and `label` are assigned but unused in the visible code —
# the loop body appears truncated here (likely continued with a predict step).
for fn in dataload.filenames[:10]:
    x = dataload.load_img(fn)
    x = datagen.standardize(x)
    print(x.min(), x.max())  # quick range check after standardization
    X = x[np.newaxis, ...]  # prepend a batch axis
    label = dataload.load_seg(fn)
Example #4
0
from keras.models import load_model
from voc_generator import PascalVocGenerator, ImageSetLoader
from keras_fcn.layers import CroppingLike2D
from keras_fcn.losses import (mean_categorical_crossentropy,
                              flatten_categorical_crossentropy)

import yaml

# Parse run configuration. safe_load limits YAML to plain data types;
# yaml.load without an explicit Loader is unsafe on untrusted input and is
# deprecated/removed in modern PyYAML.
with open("init_args.yml", 'r') as stream:
    try:
        init_args = yaml.safe_load(stream)
    except yaml.YAMLError as exc:
        print(exc)

# Generator configured with the training-time per-channel pixel statistics.
_gen_args = dict(image_shape=[224, 224, 3],
                 image_resample=True,
                 pixelwise_center=True,
                 pixel_mean=[115.85100, 110.50989, 102.16182],
                 pixelwise_std_normalization=True,
                 pixel_std=[70.30930, 69.41244, 72.60676])
datagen = PascalVocGenerator(**_gen_args)
dataload = ImageSetLoader(**init_args['image_set_loader']['train'])

# Rebuild the saved model. The custom cropping layer and the loss produced by
# the factory (closed over classes=21) must both be registered so Keras can
# deserialize the saved architecture.
custom = {
    'CroppingLike2D': CroppingLike2D,
    'flatten_categorical_crossentropy':
        flatten_categorical_crossentropy(classes=21),
}
model = load_model('/tmp/fcn_vgg16_weights.h5', custom_objects=custom)
print(model.summary())
Example #5
0
File: train.py  Project: scwsdhr/FCN
def main():
    """Train FCN-VGG16 on Pascal VOC data and save the resulting model."""
    # Callbacks: LR decay on val_loss plateau, early stopping, per-epoch CSV
    # logging, best-only checkpointing, and NaN termination.
    callbacks = [
        ReduceLROnPlateau(monitor='val_loss', factor=np.sqrt(0.1),
                          cooldown=0, patience=10, min_lr=1e-12),
        EarlyStopping(monitor='val_loss', min_delta=0.001, patience=30),
        CSVLogger('output/tmp_fcn_vgg16.csv'),
        ModelCheckpoint(filepath='output/fcn_vgg16_weights_tmp.h5',
                        verbose=1, save_best_only=True),
        TerminateOnNaN(),
    ]

    # Data generator with fixed per-channel pixel mean/std normalization.
    datagen = PascalVocGenerator(image_shape=[224, 224, 3],
                                 image_resample=True,
                                 pixelwise_center=True,
                                 pixel_mean=[115.85100, 110.50989, 102.16182],
                                 pixelwise_std_normalization=True,
                                 pixel_std=[70.30930, 69.41244, 72.60676])

    # One image-set loader per split.
    loaders = {split: ImageSetLoader(*arg_gen(split))
               for split in ('train', 'val')}

    # FCN over a frozen (non-trainable) ImageNet-pretrained encoder.
    fcn_vgg16 = model.fcn_vgg16(input_shape=(224, 224, 3),
                                classes=21,
                                weight_decay=3e-3,
                                weights='imagenet',
                                trainable_encoder=False)
    fcn_vgg16.compile(optimizer=Adam(1e-4),
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])

    def _flow(loader):
        # Both splits flow with identical settings, differing only in loader.
        return datagen.flow_from_imageset(class_mode='categorical',
                                          classes=21,
                                          batch_size=1,
                                          shuffle=True,
                                          image_set_loader=loader)

    fcn_vgg16.fit_generator(_flow(loaders['train']),
                            steps_per_epoch=1112,
                            epochs=40,
                            validation_data=_flow(loaders['val']),
                            validation_steps=1111,
                            verbose=1,
                            callbacks=callbacks)

    # Persist the full model (architecture + weights).
    fcn_vgg16.save('output/fcn_vgg16.h5')
Example #6
0
import datetime
import numpy as np
from keras.models import load_model
from voc_generator import PascalVocGenerator, ImageSetLoader

import yaml

# Read the shared run configuration. safe_load restricts YAML to plain data
# types; yaml.load with no Loader argument can instantiate arbitrary objects
# and is deprecated/removed in modern PyYAML.
with open("init_args.yml", 'r') as stream:
    try:
        init_args = yaml.safe_load(stream)
    except yaml.YAMLError as exc:
        print(exc)

# Test-split generator and loader from the shared configuration.
datagen = PascalVocGenerator(**init_args['pascal_voc_generator']['test'])
dataload = ImageSetLoader(**init_args['image_set_loader']['test'])

weights = 'weights.h5'
model = load_model(weights)

# Standardize each test image, run inference, and persist the prediction.
# NOTE(review): no batch axis is added before predict(), unlike the
# `x[np.newaxis, ...]` pattern seen elsewhere in this codebase — confirm that
# load_img/standardize already return a batched array.
for name in dataload.filenames:
    image = datagen.standardize(dataload.load_img(name))
    prediction = model.predict(image)
    dataload.save(image, prediction, name)