Example 1
import numpy as np


def get_trained_model(args):
    """ Returns a model with loaded weights. """

    # get_frontend, add_context, add_softmax, has_context_module and the
    # input_width/input_height globals come from the surrounding project.
    model = get_frontend(input_width, input_height)

    if has_context_module:
        model = add_context(model)

    model = add_softmax(model)

    def load_tf_weights():
        """ Load pretrained weights converted from Caffe to TF. """

        # 'latin1' enables loading .npy files created with Python 2;
        # allow_pickle is required by NumPy >= 1.16.3 to load pickled objects.
        weights_data = np.load(args.weights_path,
                               encoding='latin1',
                               allow_pickle=True).item()

        for layer in model.layers:
            if layer.name in weights_data:
                layer_weights = weights_data[layer.name]
                layer.set_weights(
                    (layer_weights['weights'], layer_weights['biases']))

    def load_keras_weights():
        """ Load a Keras checkpoint. """
        model.load_weights(args.weights_path)

    if args.weights_path.endswith('.npy'):
        load_tf_weights()
    elif args.weights_path.endswith('.h5'):
        load_keras_weights()
    else:
        raise ValueError("Unknown weights format.")

    return model
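For reference, here is a minimal, illustrative way to drive get_trained_model; the argparse wiring is an assumption, not part of the original example. The helper only reads args.weights_path, whose extension (.npy vs .h5) selects the loading path.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--weights_path',
                    default='converted/dilation8_pascal_voc.npy')
args = parser.parse_args()

# A .npy path routes through load_tf_weights(); a .h5 path would use
# load_keras_weights() instead.
model = get_trained_model(args)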
Example 2
import os
import shutil

from keras import callbacks, optimizers


def train(train_list_fname='benchmark_RELEASE/dataset/train.txt',
          val_list_fname='benchmark_RELEASE/dataset/val.txt',
          img_root='benchmark_RELEASE/dataset/img',
          mask_root='benchmark_RELEASE/dataset/pngs',
          weights_path='converted/dilation8_pascal_voc.npy',
          batch_size=2,
          learning_rate=0.0001):

    # Create image generators for the training and validation sets. Validation has
    # no data augmentation.
    transformer_train = RandomTransformer(horizontal_flip=True,
                                          vertical_flip=True)
    datagen_train = SegmentationDataGenerator(transformer_train)

    transformer_val = RandomTransformer(horizontal_flip=False,
                                        vertical_flip=False)
    datagen_val = SegmentationDataGenerator(transformer_val)

    train_desc = 'lr{:.0e}-bs{:03d}'.format(learning_rate, batch_size)
    checkpoints_folder = 'trained/' + train_desc
    # Recreate the checkpoints folder from scratch if it already exists.
    try:
        os.makedirs(checkpoints_folder)
    except OSError:
        shutil.rmtree(checkpoints_folder, ignore_errors=True)
        os.makedirs(checkpoints_folder)

    model_checkpoint = callbacks.ModelCheckpoint(
        checkpoints_folder + '/ep{epoch:02d}-vl{val_loss:.4f}.hdf5',
        monitor='loss')
    tensorboard_cback = callbacks.TensorBoard(
        log_dir='{}/tboard'.format(checkpoints_folder),
        histogram_freq=0,
        write_graph=False,
        write_images=False)
    csv_log_cback = callbacks.CSVLogger(
        '{}/history.log'.format(checkpoints_folder))
    reduce_lr_cback = callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                  factor=0.2,
                                                  patience=5,
                                                  verbose=1,
                                                  min_lr=0.05 * learning_rate)

    model = add_softmax(get_frontend(500, 500))

    load_weights(model, weights_path)

    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer=optimizers.SGD(lr=learning_rate, momentum=0.9),
                  metrics=['accuracy'])

    # Build absolute image paths
    def build_abs_paths(basenames):
        img_fnames = [os.path.join(img_root, f) + '.jpg' for f in basenames]
        mask_fnames = [os.path.join(mask_root, f) + '.png' for f in basenames]
        return img_fnames, mask_fnames

    with open(train_list_fname) as f:
        train_basenames = [l.strip() for l in f]
    with open(val_list_fname) as f:
        val_basenames = [l.strip() for l in f][:500]

    train_img_fnames, train_mask_fnames = build_abs_paths(train_basenames)
    val_img_fnames, val_mask_fnames = build_abs_paths(val_basenames)

    def report_skipped(epoch, logs):
        # Append the number of images the generator skipped this epoch.
        with open('{}/skipped.txt'.format(checkpoints_folder), 'a') as f:
            f.write('{}\n'.format(datagen_train.skipped_count))

    skipped_report_cback = callbacks.LambdaCallback(on_epoch_end=report_skipped)

    # Keras 1 API: samples_per_epoch/nb_epoch/nb_val_samples count samples;
    # Keras 2 renames these steps_per_epoch/epochs/validation_steps
    # (cf. Example 3).
    model.fit_generator(
        datagen_train.flow_from_list(train_img_fnames,
                                     train_mask_fnames,
                                     shuffle=True,
                                     batch_size=batch_size,
                                     img_target_size=(500, 500),
                                     mask_target_size=(16, 16)),
        samples_per_epoch=len(train_basenames),
        nb_epoch=20,
        validation_data=datagen_val.flow_from_list(val_img_fnames,
                                                   val_mask_fnames,
                                                   batch_size=8,
                                                   img_target_size=(500, 500),
                                                   mask_target_size=(16, 16)),
        nb_val_samples=len(val_basenames),
        callbacks=[
            model_checkpoint,
            tensorboard_cback,
            csv_log_cback,
            reduce_lr_cback,
            skipped_report_cback,
        ])
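This train() calls a load_weights(model, weights_path) helper that is not shown in these listings. Below is a plausible sketch under that assumption, reusing the extension dispatch from Example 1; the real helper in the source project may differ.
import numpy as np


def load_weights(model, weights_path):
    """ Hypothetical helper mirroring Example 1's loading logic. """
    if weights_path.endswith('.npy'):
        # Converted Caffe weights stored as a pickled dict in a .npy file.
        weights_data = np.load(weights_path,
                               encoding='latin1',
                               allow_pickle=True).item()
        for layer in model.layers:
            if layer.name in weights_data:
                layer_weights = weights_data[layer.name]
                layer.set_weights(
                    (layer_weights['weights'], layer_weights['biases']))
    elif weights_path.endswith('.h5'):
        # Standard Keras checkpoint.
        model.load_weights(weights_path)
    else:
        raise ValueError("Unknown weights format.")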
Example 3
import os
import shutil
import time

from keras import callbacks, optimizers


def train():
    # Configuration comes from module-level globals set by the caller
    # (see the usage sketch after this example).
    global train_list_fname
    global val_list_fname
    global img_root
    global mask_root
    global weights_path
    global batch_size
    global learning_rate

    global modeltype  # declared but never used in this listing

    train_data_gen = SegmentationDataGenerator(
        RandomTransformer(horizontal_flip=True, vertical_flip=True))
    # Note: unlike Example 2, the validation generator here also applies
    # random flips.
    val_data_gen = SegmentationDataGenerator(
        RandomTransformer(horizontal_flip=True, vertical_flip=True))

    trained_log = '{}-lr{:.0e}-bs{:03d}'.format(
        time.strftime("%Y-%m-%d %H:%M"), learning_rate, batch_size)
    checkpoints_folder = 'trained_log/' + trained_log
    try:
        os.makedirs(checkpoints_folder)
    except OSError:
        shutil.rmtree(checkpoints_folder, ignore_errors=True)
        os.makedirs(checkpoints_folder)

    model_checkpoint = callbacks.ModelCheckpoint(
        checkpoints_folder + '/ep{epoch:02d}-vl{val_loss:.4f}.hdf5',
        monitor='loss')
    model_tensorboard = callbacks.TensorBoard(
        log_dir='{}/tboard'.format(checkpoints_folder),
        histogram_freq=0,
        write_graph=False,
        write_images=False)
    model_csvlogger = callbacks.CSVLogger(
        '{}/history.log'.format(checkpoints_folder))
    model_reducelr = callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                 factor=0.2,
                                                 patience=5,
                                                 verbose=1,
                                                 min_lr=0.05 * learning_rate)

    model = add_softmax(dilated_frontend(500, 500))

    # Weight loading is disabled in this example; the model trains from scratch.
    # load_weights(model, weights_path)

    model.compile(optimizer=optimizers.SGD(lr=learning_rate, momentum=0.9),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    with open(train_list_fname) as f:
        train_basenames = [l.strip() for l in f]
    with open(val_list_fname) as f:
        val_basenames = [l.strip() for l in f][:500]

    # build_abs_paths is assumed to be a module-level helper here (it is a
    # nested function in Example 2).
    train_img_fnames, train_mask_fnames = build_abs_paths(train_basenames)
    val_img_fnames, val_mask_fnames = build_abs_paths(val_basenames)

    def report_skipped(epoch, logs):
        # Append the number of images the generator skipped this epoch.
        with open('{}/skipped.txt'.format(checkpoints_folder), 'a') as f:
            f.write('{}\n'.format(train_data_gen.skipped_count))

    model_skipped = callbacks.LambdaCallback(on_epoch_end=report_skipped)

    model.fit_generator(
        train_data_gen.flow_from_list(train_img_fnames,
                                      train_mask_fnames,
                                      shuffle=True,
                                      batch_size=batch_size,
                                      img_target_size=(500, 500),
                                      mask_target_size=(16, 16)),
        # Integer division: Keras 2 expects an integer number of steps.
        steps_per_epoch=(len(train_basenames) // batch_size),
        epochs=50,
        validation_data=val_data_gen.flow_from_list(val_img_fnames,
                                                    val_mask_fnames,
                                                    batch_size=8,
                                                    img_target_size=(500, 500),
                                                    mask_target_size=(16, 16)),
        validation_steps=(len(val_basenames) // 8),
        callbacks=[
            model_checkpoint, model_tensorboard, model_csvlogger,
            model_reducelr, model_skipped
        ])
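Because this version of train() reads all configuration from module-level globals and calls a module-level build_abs_paths, a caller would need something like the sketch below. The values mirror Example 2's defaults; the modeltype value is purely hypothetical (it is declared global but never used in the listing).
import os

train_list_fname = 'benchmark_RELEASE/dataset/train.txt'
val_list_fname = 'benchmark_RELEASE/dataset/val.txt'
img_root = 'benchmark_RELEASE/dataset/img'
mask_root = 'benchmark_RELEASE/dataset/pngs'
weights_path = 'converted/dilation8_pascal_voc.npy'
batch_size = 2
learning_rate = 0.0001
modeltype = 'dilated'  # hypothetical; unused by the listing


def build_abs_paths(basenames):
    # Module-level version of the helper nested in Example 2's train().
    img_fnames = [os.path.join(img_root, f) + '.jpg' for f in basenames]
    mask_fnames = [os.path.join(mask_root, f) + '.png' for f in basenames]
    return img_fnames, mask_fnames


train()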