Example no. 1
def train(config):
    """Trains classification model.

    # Arguments
        config: ConfigInterface object.
    """
    # Import keras locally so the module can be imported without a
    # keras installation.
    from keras.callbacks import ModelCheckpoint
    from keras.callbacks import TensorBoard
    from keras.callbacks import LearningRateScheduler
    from openem_train.inception.inception import inception_model
    from openem_train.inception.inception_dataset import InceptionDataset
    from openem_train.util.utils import find_epoch

    # Create tensorboard and checkpoints directories.
    tensorboard_dir = config.tensorboard_dir('classify')
    os.makedirs(config.checkpoints_dir('classify'), exist_ok=True)
    os.makedirs(tensorboard_dir, exist_ok=True)

    # Build the inception model.
    model = inception_model(input_shape=(config.classify_height(),
                                         config.classify_width(), 3),
                            num_classes=config.num_classes())

    # If initial epoch is nonzero we load the model from checkpoints
    # directory.
    initial_epoch = config.classify_initial_epoch()
    if initial_epoch != 0:
        checkpoint = find_epoch(config.checkpoints_dir('classify'),
                                initial_epoch)
        model.load_weights(checkpoint)

    # Set up dataset interface.
    dataset = InceptionDataset(config)

    # Define learning rate schedule (piecewise-constant decay by epoch).
    def schedule(epoch):
        if epoch < 1:
            return 5e-4
        if epoch < 5:
            return 3e-4
        if epoch < 10:
            return 1e-4
        if epoch < 20:
            return 5e-5
        return 1e-5

    # Make all layers trainable.
    for layer in model.layers:
        layer.trainable = True

    # Set up callbacks.
    checkpoint_best = ModelCheckpoint(config.checkpoint_best('classify'),
                                      verbose=1,
                                      save_weights_only=False,
                                      save_best_only=True)

    checkpoint_periodic = ModelCheckpoint(
        config.checkpoint_periodic('classify'),
        verbose=1,
        save_weights_only=False,
        period=1)

    tensorboard = TensorBoard(tensorboard_dir,
                              histogram_freq=0,
                              write_graph=False,
                              write_images=True)

    lr_sched = LearningRateScheduler(schedule=schedule)

    # Determine steps per epoch.  Fall back to the number of training
    # batches when the config does not specify a step count.
    batch_size = config.classify_batch_size()
    steps_per_epoch = config.classify_steps_per_epoch()
    if not steps_per_epoch:
        steps_per_epoch = dataset.train_batches(batch_size)

    # Set up validation generator (only when validation is enabled).
    val_batch_size = config.classify_val_batch_size()
    validation_gen = None
    validation_steps = None
    if config.classify_do_validation():
        validation_gen = dataset.generate_test(batch_size=val_batch_size)
        validation_steps = dataset.test_batches(val_batch_size)

    # Fit the model.
    model.summary()
    model.fit_generator(
        dataset.generate(batch_size=batch_size),
        steps_per_epoch=steps_per_epoch,
        epochs=config.classify_num_epochs(),
        verbose=1,
        callbacks=[
            checkpoint_best, checkpoint_periodic, tensorboard, lr_sched
        ],
        validation_data=validation_gen,
        validation_steps=validation_steps,
        initial_epoch=initial_epoch)

    # Save the model.
    _save_model(config, model)
Example no. 2
def train(config):
    """Trains find ruler model.

    # Arguments
        config: ConfigInterface object.
    """
    # Keras is imported lazily inside the function.
    from keras.callbacks import ModelCheckpoint
    from keras.callbacks import TensorBoard
    from keras.callbacks import LearningRateScheduler
    from openem_train.unet.unet import unet_model
    from openem_train.unet.unet_dataset import UnetDataset
    from openem_train.util.utils import find_epoch

    # Make sure output directories for logs and checkpoints exist.
    log_dir = config.tensorboard_dir('find_ruler')
    os.makedirs(config.checkpoints_dir('find_ruler'), exist_ok=True)
    os.makedirs(log_dir, exist_ok=True)

    # Construct the unet model for ruler segmentation.
    input_shape = (config.find_ruler_height(), config.find_ruler_width(), 3)
    model = unet_model(input_shape=input_shape)

    # Resume from a saved checkpoint when an initial epoch is given.
    initial_epoch = config.find_ruler_initial_epoch()
    if initial_epoch != 0:
        weights_path = find_epoch(
            config.checkpoints_dir('find_ruler'), initial_epoch)
        model.load_weights(weights_path)

    # Dataset interface.
    dataset = UnetDataset(config)

    # Learning rate schedule: step decay keyed on epoch number.
    def schedule(epoch):
        boundaries = [(10, 1e-3), (25, 2e-4), (60, 1e-4), (80, 5e-5)]
        for limit, rate in boundaries:
            if epoch < limit:
                return rate
        return 2e-5

    # Unfreeze every layer.
    for layer in model.layers:
        layer.trainable = True

    # Callbacks: best-model checkpoint, periodic checkpoint, tensorboard
    # logging, and the learning rate scheduler above.
    cb_best = ModelCheckpoint(
        config.checkpoint_best('find_ruler'),
        verbose=1,
        save_weights_only=False,
        save_best_only=True)
    cb_periodic = ModelCheckpoint(
        config.checkpoint_periodic('find_ruler'),
        verbose=1,
        save_weights_only=False,
        period=1)
    cb_tensorboard = TensorBoard(
        log_dir,
        histogram_freq=0,
        write_graph=False,
        write_images=True)
    cb_lr = LearningRateScheduler(schedule=schedule)

    # Train.  NOTE(review): steps_per_epoch is hard-coded to 60 here,
    # unlike the other train functions which read it from config.
    val_batch_size = config.find_ruler_val_batch_size()
    model.summary()
    model.fit_generator(
        dataset.generate(batch_size=config.find_ruler_batch_size()),
        steps_per_epoch=60,
        epochs=config.find_ruler_num_epochs(),
        verbose=1,
        callbacks=[cb_best, cb_periodic, cb_tensorboard, cb_lr],
        validation_data=dataset.generate_validation(
            batch_size=val_batch_size),
        validation_steps=len(dataset.test_idx) // val_batch_size,
        initial_epoch=initial_epoch)

    # Persist the trained model.
    _save_model(config, model)
Example no. 3
def train(config):
    """Trains find ruler model.

    # Arguments
        config: ConfigInterface object.
    """
    # Keras imports are deferred to function scope.
    from keras.callbacks import ModelCheckpoint
    from keras.callbacks import TensorBoard
    from keras.callbacks import ReduceLROnPlateau
    from openem_train.unet.unet import unet_model
    from openem_train.unet.unet_dataset import UnetDataset
    from openem_train.util.utils import find_epoch

    # Ensure log and checkpoint directories exist.
    log_dir = config.tensorboard_dir('find_ruler')
    os.makedirs(config.checkpoints_dir('find_ruler'), exist_ok=True)
    os.makedirs(log_dir, exist_ok=True)

    # Build the unet model; channel count comes from config.
    shape = (
        config.find_ruler_height(),
        config.find_ruler_width(),
        config.find_ruler_num_channels(),
    )
    model = unet_model(input_shape=shape)

    # Resume from a checkpoint when a nonzero initial epoch is requested.
    initial_epoch = config.find_ruler_initial_epoch()
    if initial_epoch != 0:
        weights_path = find_epoch(
            config.checkpoints_dir('find_ruler'), initial_epoch)
        model.load_weights(weights_path)

    # Dataset interface.
    dataset = UnetDataset(config)

    # Unfreeze all layers.
    for layer in model.layers:
        layer.trainable = True

    # Callbacks: best/periodic checkpoints, tensorboard, and a
    # plateau-based learning rate reducer (instead of a fixed schedule).
    cb_best = ModelCheckpoint(
        config.checkpoint_best('find_ruler'),
        verbose=1,
        save_weights_only=False,
        save_best_only=True)
    cb_periodic = ModelCheckpoint(
        config.checkpoint_periodic('find_ruler'),
        verbose=1,
        save_weights_only=False,
        period=1)
    cb_tensorboard = TensorBoard(
        log_dir,
        histogram_freq=0,
        write_graph=False,
        write_images=True)
    cb_lr = ReduceLROnPlateau(
        monitor='val_loss',
        factor=0.2,
        patience=10,
        min_lr=0,
    )

    # Validation runs a tenth of the training steps, capped by the
    # number of available validation batches.
    val_batch_size = config.find_ruler_val_batch_size()
    val_steps = min(
        int(config.find_ruler_steps_per_epoch() / 10),
        len(dataset.test_idx) // val_batch_size)

    # Train.
    model.summary()
    model.fit_generator(
        dataset.generate(batch_size=config.find_ruler_batch_size()),
        steps_per_epoch=config.find_ruler_steps_per_epoch(),
        epochs=config.find_ruler_num_epochs(),
        verbose=1,
        callbacks=[cb_best, cb_periodic, cb_tensorboard, cb_lr],
        validation_data=dataset.generate_validation(
            batch_size=val_batch_size),
        validation_steps=val_steps,
        initial_epoch=initial_epoch)

    # Persist the trained model.
    _save_model(config, model)
Example no. 4
def train(config):
    """Trains detection model.

    # Arguments
        config: ConfigInterface object.
    """
    # Import keras locally so the module can be imported without a
    # keras installation.
    from keras.optimizers import Adam
    from keras.callbacks import ModelCheckpoint
    from keras.callbacks import TensorBoard
    from keras.applications.inception_v3 import preprocess_input
    from openem_train.ssd import ssd
    from openem_train.ssd.ssd_training import MultiboxLoss
    from openem_train.ssd.ssd_utils import BBoxUtility
    from openem_train.ssd.ssd_dataset import SSDDataset
    from openem_train.util.utils import find_epoch

    # Create tensorboard and checkpoints directories.
    tensorboard_dir = config.tensorboard_dir('detect')
    os.makedirs(config.checkpoints_dir('detect'), exist_ok=True)
    os.makedirs(tensorboard_dir, exist_ok=True)

    # Build the ssd model.
    model = ssd.ssd_model(
        input_shape=(config.detect_height(), config.detect_width(), 3),
        num_classes=config.num_classes())

    # If initial epoch is nonzero we load the model from checkpoints
    # directory.
    initial_epoch = config.detect_initial_epoch()
    if initial_epoch != 0:
        checkpoint = find_epoch(
            config.checkpoints_dir('detect'),
            initial_epoch
        )
        model.load_weights(checkpoint)

    # Make all layers trainable.
    for layer in model.layers:
        layer.trainable = True

    # Set up loss and optimizer.
    loss_obj = MultiboxLoss(
        config.num_classes(),
        neg_pos_ratio=2.0,
        pos_cost_multiplier=1.1)
    adam = Adam(lr=3e-5)

    # Compile the model.
    model.compile(loss=loss_obj.compute_loss, optimizer=adam)
    model.summary()

    # Collect prior boxes from the model's prior box layers.  Keras
    # get_layer raises ValueError for an unknown layer name (it never
    # returns None), so use try/except to skip layers that are not
    # present in this model.
    prior_box_names = [
        'conv4_3_norm_mbox_priorbox',
        'fc7_mbox_priorbox',
        'conv6_2_mbox_priorbox',
        'conv7_2_mbox_priorbox',
        'conv8_2_mbox_priorbox',
        'pool6_mbox_priorbox']
    priors = []
    for prior_box_name in prior_box_names:
        try:
            layer = model.get_layer(prior_box_name)
        except ValueError:
            continue
        priors.append(layer.prior_boxes)
    priors = np.vstack(priors)

    # Set up bounding box utility.
    bbox_util = BBoxUtility(config.num_classes(), priors)

    # Set up dataset interface.  Preprocessing is deliberately identity
    # here (raw pixels are fed to the model).
    dataset = SSDDataset(
        config,
        bbox_util=bbox_util,
        preproc=lambda x: x)

    # Set up keras callbacks.
    checkpoint_best = ModelCheckpoint(
        config.checkpoint_best('detect'),
        verbose=1,
        save_weights_only=False,
        save_best_only=True)

    checkpoint_periodic = ModelCheckpoint(
        config.checkpoint_periodic('detect'),
        verbose=1,
        save_weights_only=False,
        period=1)

    tensorboard = TensorBoard(
        tensorboard_dir,
        histogram_freq=0,
        write_graph=True,
        write_images=True)

    # Determine steps per epoch.  Fall back to the number of training
    # batches when the config does not specify a step count.
    batch_size = config.detect_batch_size()
    steps_per_epoch = config.detect_steps_per_epoch()
    if not steps_per_epoch:
        steps_per_epoch = dataset.nb_train_samples // batch_size

    # Set up validation generator (only when validation is enabled).
    val_batch_size = config.detect_val_batch_size()
    validation_gen = None
    validation_steps = None
    if config.detect_do_validation():
        validation_gen = dataset.generate_ssd(
            batch_size=val_batch_size,
            is_training=False
        )
        validation_steps = dataset.nb_test_samples // val_batch_size

    # Fit the model.
    model.fit_generator(
        dataset.generate_ssd(
            batch_size=batch_size,
            is_training=True),
        steps_per_epoch=steps_per_epoch,
        epochs=config.detect_num_epochs(),
        verbose=1,
        callbacks=[checkpoint_best, checkpoint_periodic, tensorboard],
        validation_data=validation_gen,
        validation_steps=validation_steps,
        initial_epoch=initial_epoch)

    # Save the model.
    _save_model(config, model)