Example #1
import keras
from keras.optimizers import Adam

# Assumes x_train/y_train, x_vali/y_vali and the project-local
# bcnn() and generator() helpers are defined at module level.
def train_model(
        learning_rate=0.01,
        decay_learning_rate=1e-8,
        all_trainable=False,
        model_weights_path=None,
        no_class=197,
        batch_size=32,
        epoch=200):

    # Map image filename -> label for each split
    train_dict = dict(zip(x_train, y_train))
    vali_dict = dict(zip(x_vali, y_vali))

    dict_data = {'train': train_dict, 'valid': vali_dict}

    train_ge = generator(dict_data, no_class, is_train=True)
    valid_ge = generator(dict_data, no_class, is_train=False)

    model = bcnn(
        all_trainable=all_trainable,
        no_class=no_class)

    model.summary()

    if model_weights_path:
        model.load_weights(model_weights_path)

    # Loss and optimizer
    name_loss = 'categorical_crossentropy'
    optimizer = Adam(lr=learning_rate, decay=decay_learning_rate)
    model.compile(loss=name_loss, optimizer=optimizer, metrics=['accuracy'])

    checkpoint = keras.callbacks.ModelCheckpoint('./weights.hdf5',
                                                 monitor='acc',
                                                 verbose=1,
                                                 save_best_only=True,
                                                 save_weights_only=True,
                                                 mode='auto')

    # Train
    history = model.fit_generator(
        generator=train_ge,
        validation_data=valid_ge,
        epochs=epoch,
        steps_per_epoch=len(dict_data['train']) // batch_size,
        validation_steps=len(dict_data['valid']) // batch_size,
        callbacks=[checkpoint],
        verbose=1)

    model.save_weights('./new_model_weights.h5')

    return history
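
Example #1 relies on a project-local generator() helper that is not shown. The sketch below is one plausible minimal implementation, assuming the filename-to-label dicts built above; the image size, normalization, and loading details are assumptions rather than the original code.

import numpy as np
from keras.preprocessing.image import img_to_array, load_img
from keras.utils import to_categorical


def generator(dict_data, no_class, is_train=True, batch_size=32,
              target_size=(224, 224)):
    # Yield (images, one-hot labels) batches forever, as fit_generator expects.
    split = 'train' if is_train else 'valid'
    items = list(dict_data[split].items())
    while True:
        if is_train:
            np.random.shuffle(items)  # reshuffle between epochs
        for start in range(0, len(items) - batch_size + 1, batch_size):
            batch = items[start:start + batch_size]
            images = np.stack([
                img_to_array(load_img(path, target_size=target_size)) / 255.0
                for path, _ in batch])
            labels = to_categorical([label for _, label in batch],
                                    num_classes=no_class)
            yield images, labels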
Example #2
import os
import shutil

import tensorflow as tf
import keras.backend as K
from keras.callbacks import LearningRateScheduler
from keras.utils import multi_gpu_model

# EAST_model, EAST_DRN_model, AdamW, the Custom* callbacks, the loss
# functions and data_processor are project-local modules assumed to be
# imported alongside the FLAGS definition.


def main(argv=None):
    os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu_list
    gpus = FLAGS.gpu_list.split(',')  # GPU ids taken from the --gpu_list flag

    # create the checkpoint dir, wiping any contents left from a previous run
    if not os.path.exists(FLAGS.checkpoint_path):
        os.mkdir(FLAGS.checkpoint_path)
    else:
        shutil.rmtree(FLAGS.checkpoint_path)
        os.mkdir(FLAGS.checkpoint_path)

    train_data_generator = data_processor.generator(FLAGS)
    train_samples_count = data_processor.count_samples(FLAGS)

    val_data = data_processor.load_data(FLAGS)

    if len(gpus) <= 1:
        print('Training with 1 GPU')

        if FLAGS.drn:
            east = EAST_DRN_model(input_size=FLAGS.input_size)
        else:
            east = EAST_model(FLAGS.input_size)
            
        parallel_model = east.model
    else:
        print('Training with %d GPUs' % len(gpus))
        with tf.device("/cpu:0"):
            east = EAST_model(FLAGS.input_size)
        if FLAGS.restore_model != '':
            east.model.load_weights(FLAGS.restore_model)
        parallel_model = multi_gpu_model(east.model, gpus=len(gpus))

    score_map_loss_weight = K.variable(0.01, name='score_map_loss_weight')

    small_text_weight = K.variable(0., name='small_text_weight')

    lr_scheduler = LearningRateScheduler(lr_decay)
    ckpt = CustomModelCheckpoint(model=east.model,
                                 path=FLAGS.checkpoint_path + '/model-{epoch:02d}.h5',
                                 period=FLAGS.save_checkpoint_epochs,
                                 save_weights_only=True)
    tb = CustomTensorBoard(log_dir=FLAGS.checkpoint_path + '/train',
                           score_map_loss_weight=score_map_loss_weight,
                           small_text_weight=small_text_weight,
                           data_generator=train_data_generator,
                           write_graph=True)
    small_text_weight_callback = SmallTextWeight(small_text_weight)
    validation_evaluator = ValidationEvaluator(val_data, validation_log_dir=FLAGS.checkpoint_path + '/val')
    callbacks = [lr_scheduler, ckpt, tb, small_text_weight_callback, validation_evaluator]
    opt = AdamW(FLAGS.init_learning_rate)

    parallel_model.compile(loss=[dice_loss(east.overly_small_text_region_training_mask, east.text_region_boundary_training_mask, score_map_loss_weight, small_text_weight),
                                 rbox_loss(east.overly_small_text_region_training_mask, east.text_region_boundary_training_mask, small_text_weight, east.target_score_map)],
                           loss_weights=[1., 1.],
                           optimizer=opt)
    east.model.summary()

    model_json = east.model.to_json()
    with open(FLAGS.checkpoint_path + '/model.json', 'w') as json_file:
        json_file.write(model_json)

    history = parallel_model.fit_generator(train_data_generator,
                                           epochs=FLAGS.max_epochs,
                                           steps_per_epoch=train_samples_count // FLAGS.batch_size,
                                           workers=FLAGS.nb_workers,
                                           use_multiprocessing=True,
                                           callbacks=callbacks,
                                           verbose=1)
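
The LearningRateScheduler in Example #2 references an lr_decay function that is not shown. A minimal sketch of one plausible step-decay schedule follows; the 0.94 factor and 10-epoch interval are illustrative assumptions, not the repository's values.

def lr_decay(epoch):
    # step decay: multiply the base rate by 0.94 every 10 epochs (illustrative)
    return FLAGS.init_learning_rate * (0.94 ** (epoch // 10))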
Example #3
import os

import tensorflow as tf

# EAST_model, dice_loss, rbox_loss, train_step and data_processor are
# project-local modules assumed to be imported alongside the FLAGS definition.


def main(_):
  # check if checkpoint path exists
  if not os.path.exists(FLAGS.checkpoint_path):
    os.mkdir(FLAGS.checkpoint_path)

  train_data_generator = data_processor.generator(FLAGS)
  train_samples_count = data_processor.count_samples(FLAGS)
  print('total batches per epoch : {}'.format(train_samples_count // FLAGS.batch_size))

  east = EAST_model(FLAGS.input_size)
  east.model.summary()

  score_map_loss_weight = tf.Variable(0.01, name='score_map_loss_weight')
  small_text_weight = tf.Variable(0., name='small_text_weight')

  lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
    FLAGS.init_learning_rate,
    decay_steps=FLAGS.lr_decay_steps,
    decay_rate=FLAGS.lr_decay_rate,
    staircase=True)

  optimizer = tf.optimizers.Adam(lr_schedule)

  # set checkpoint manager
  ckpt = tf.train.Checkpoint(step=tf.Variable(0), model=east)
  ckpt_manager = tf.train.CheckpointManager(ckpt,
                                            directory=FLAGS.checkpoint_path,
                                            max_to_keep=5)
  latest_ckpt = tf.train.latest_checkpoint(FLAGS.checkpoint_path)

  # restore latest checkpoint
  if latest_ckpt:
    ckpt.restore(latest_ckpt)
    print('global_step : {}, checkpoint is restored!'.format(int(ckpt.step)))

  # set tensorboard summary writer
  summary_writer = tf.summary.create_file_writer(FLAGS.checkpoint_path + '/train')

  while int(ckpt.step) < (FLAGS.max_steps + 1):
    # load data
    [input_images, overly_small_text_region_training_masks, text_region_boundary_training_masks, score_maps], \
    [target_score_maps, target_geo_maps] = next(train_data_generator)

    # update parameter
    train_step(east,
               input_images,
               optimizer,
               overly_small_text_region_training_masks,
               text_region_boundary_training_masks,
               small_text_weight,
               target_score_maps,
               target_geo_maps,
               score_map_loss_weight
               )

    score_y_pred, geo_y_pred = east(input_images)
    _dice_loss = dice_loss(overly_small_text_region_training_masks, text_region_boundary_training_masks, score_map_loss_weight,
                           small_text_weight, target_score_maps, score_y_pred)
    _rbox_loss = rbox_loss(overly_small_text_region_training_masks, text_region_boundary_training_masks,
                           small_text_weight, target_score_maps, target_geo_maps, geo_y_pred)
    loss = _dice_loss + _rbox_loss

    print('Step {:06d}, dice_loss {:.4f}, rbox_loss {:.4f}, total_loss {:.4f}'.format(
        int(ckpt.step), float(_dice_loss), float(_rbox_loss), float(loss)))

    if ckpt.step % FLAGS.save_checkpoint_steps == 0:
      # save checkpoint
      ckpt_manager.save(checkpoint_number=ckpt.step)
      print('global_step : {}, checkpoint is saved!'.format(int(ckpt.step)))

      with summary_writer.as_default():
        tf.summary.scalar('loss', loss, step=int(ckpt.step))
        tf.summary.scalar('pred_score_map_loss', _dice_loss, step=int(ckpt.step))
        tf.summary.scalar('pred_geo_map_loss', _rbox_loss, step=int(ckpt.step))
        tf.summary.scalar('learning_rate', optimizer.lr(ckpt.step).numpy(), step=int(ckpt.step))
        tf.summary.scalar('small_text_weight', small_text_weight, step=int(ckpt.step))

        tf.summary.image("input_image", tf.cast((input_images + 1) * 127.5, tf.uint8), step=int(ckpt.step), max_outputs=3)
        tf.summary.image("overly_small_text_region_training_mask", tf.cast(overly_small_text_region_training_masks * 255, tf.uint8), step=int(ckpt.step), max_outputs=3)
        tf.summary.image("text_region_boundary_training_mask", tf.cast(text_region_boundary_training_masks * 255, tf.uint8), step=int(ckpt.step), max_outputs=3)
        tf.summary.image("score_map_target", tf.cast(target_score_maps * 255, tf.uint8), step=int(ckpt.step), max_outputs=3)
        tf.summary.image("score_map_pred", tf.cast(score_y_pred * 255, tf.uint8), step=int(ckpt.step), max_outputs=3)
        for i in range(4):
          tf.summary.image("geo_map_%d_target" % (i), tf.cast(tf.expand_dims(target_geo_maps[:, :, :, i], axis=3) / FLAGS.input_size * 255, tf.uint8), step=int(ckpt.step), max_outputs=3)
          tf.summary.image("geo_map_%d_pred" % (i), tf.cast(tf.expand_dims(geo_y_pred[:, :, :, i], axis=3) / FLAGS.input_size * 255, tf.uint8), step=int(ckpt.step), max_outputs=3)
        tf.summary.image("geo_map_4_target", tf.cast((tf.expand_dims(target_geo_maps[:, :, :, 4], axis=3) + 1) * 127.5, tf.uint8), step=int(ckpt.step), max_outputs=3)
        tf.summary.image("geo_map_4_pred", tf.cast((tf.expand_dims(geo_y_pred[:, :, :, 4], axis=3) + 1) * 127.5, tf.uint8), step=int(ckpt.step), max_outputs=3)

    ckpt.step.assign_add(1)
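
Example #3 calls a train_step helper that is not shown. The sketch below is a minimal version of such a step, assuming east behaves like a callable tf.keras.Model and reusing the dice_loss/rbox_loss signatures from the loop above; it is an illustration, not the original implementation.

@tf.function
def train_step(model, input_images, optimizer,
               overly_small_text_region_training_masks,
               text_region_boundary_training_masks,
               small_text_weight, target_score_maps, target_geo_maps,
               score_map_loss_weight):
  with tf.GradientTape() as tape:
    score_y_pred, geo_y_pred = model(input_images)
    loss = (dice_loss(overly_small_text_region_training_masks,
                      text_region_boundary_training_masks,
                      score_map_loss_weight, small_text_weight,
                      target_score_maps, score_y_pred)
            + rbox_loss(overly_small_text_region_training_masks,
                        text_region_boundary_training_masks,
                        small_text_weight, target_score_maps,
                        target_geo_maps, geo_y_pred))
  # backpropagate through all trainable weights of the model
  gradients = tape.gradient(loss, model.trainable_variables)
  optimizer.apply_gradients(zip(gradients, model.trainable_variables))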