Example #1
# Helper symbols (crnn_fn, serving_single_input, Params, TrainingParams,
# import_params_from_json) are assumed to be provided by the surrounding project.
import os

import tensorflow as tf


def export_model(model_directory: str, output_dir: str, gpu: str):

    # Pin TensorFlow to the requested GPU and cap its memory usage
    os.environ['CUDA_VISIBLE_DEVICES'] = gpu
    config_sess = tf.ConfigProto()
    config_sess.gpu_options.per_process_gpu_memory_fraction = 0.6

    # Import parameters from the json file
    params_json = import_params_from_json(
        json_filename=os.path.join(model_directory, 'config.json'))
    training_params = TrainingParams(**params_json['training_params'])
    parameters = Params(**params_json)

    model_params = {'Params': parameters, 'TrainingParams': training_params}

    # Config -- RunConfig.replace() returns a new instance rather than
    # mutating in place, so the result must be kept
    est_config = tf.estimator.RunConfig()
    est_config = est_config.replace(keep_checkpoint_max=10,
                                    save_checkpoints_steps=training_params.save_interval,
                                    session_config=config_sess,
                                    save_checkpoints_secs=None,
                                    save_summary_steps=1000)

    estimator = tf.estimator.Estimator(
        model_fn=crnn_fn,
        params=model_params,
        model_dir=model_directory,
        config=est_config,
    )

    estimator.export_savedmodel(output_dir,
                                serving_input_receiver_fn=serving_single_input(
                                    fixed_height=parameters.input_shape[0],
                                    min_width=10))
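
A minimal sketch of how this entry point might be invoked; the directory paths and GPU id are placeholders, not values from the original source, and model_directory must already contain the config.json written at training time:

# Hypothetical invocation -- all paths below are placeholders.
export_model(model_directory='./output/model',
             output_dir='./output/model/export',
             gpu='0')

Note that each call to export_savedmodel writes a new timestamped SavedModel subdirectory under output_dir.
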
Example #2
# Helper symbols (crnn_fn, data_loader, serving_single_input, Params,
# TrainingParams, distribution_gpus) are assumed to be provided by the
# surrounding project.
import json
import os
import string
from distutils.version import LooseVersion
from typing import List

import tensorflow as tf
from tqdm import trange


def run(csv_files_train: List[str], csv_files_eval: List[str],
        output_model_dir: str, training_params: dict, _config: dict):

    # Save config
    if not os.path.isdir(output_model_dir):
        os.makedirs(output_model_dir)
    else:
        assert _config.get('restore_model'), \
            '{0} already exists, so it cannot be used as the output directory. ' \
            'Set "restore_model=True" to continue training, or delete it: "rm -r {0}"'.format(output_model_dir)

    with open(os.path.join(output_model_dir, 'config.json'), 'w') as f:
        json.dump(_config, f, indent=4, sort_keys=True)

    parameters = Params(**_config)
    training_params = TrainingParams(**training_params)

    model_params = {'Params': parameters, 'TrainingParams': training_params}

    # Create export directory
    export_dir = os.path.join(output_model_dir, 'export')
    if not os.path.isdir(export_dir):
        os.makedirs(export_dir)

    # Check that the alphabet contains all the characters in the csv input files
    # (string.whitespace[1:] is every whitespace character except the plain space)
    discarded_chars = parameters.string_split_delimiter + \
        parameters.csv_delimiter + string.whitespace[1:]
    parameters.alphabet.check_input_file_alphabet(
        parameters.csv_files_train + parameters.csv_files_eval,
        discarded_chars=discarded_chars,
        csv_delimiter=parameters.csv_delimiter)

    config_sess = tf.ConfigProto()
    config_sess.gpu_options.per_process_gpu_memory_fraction = 0.8
    # config_sess.gpu_options.allow_growth = True

    # Config estimator -- RunConfig.replace() returns a new instance, so keep
    # the result; train_distribute is only available from TF 1.8 onwards
    est_config = tf.estimator.RunConfig()
    if LooseVersion(tf.__version__) < LooseVersion('1.8'):
        est_config = est_config.replace(
            keep_checkpoint_max=10,
            save_checkpoints_steps=training_params.save_interval,
            session_config=config_sess,
            save_checkpoints_secs=None,
            save_summary_steps=1000,
            model_dir=output_model_dir)
    else:
        est_config = est_config.replace(
            keep_checkpoint_max=10,
            save_checkpoints_steps=training_params.save_interval,
            session_config=config_sess,
            save_checkpoints_secs=None,
            save_summary_steps=1000,
            model_dir=output_model_dir,
            train_distribute=distribution_gpus(parameters.num_gpus))

    estimator = tf.estimator.Estimator(model_fn=crnn_fn,
                                       params=model_params,
                                       model_dir=output_model_dir,
                                       config=est_config)

    # Alternate training, export and evaluation every `evaluate_every_epoch` epochs
    for e in trange(0, training_params.n_epochs,
                    training_params.evaluate_every_epoch):

        estimator.train(input_fn=data_loader(
            csv_filename=csv_files_train,
            params=parameters,
            batch_size=training_params.train_batch_size,
            num_epochs=training_params.evaluate_every_epoch,
            data_augmentation=parameters.data_augmentation,
            image_summaries=True))

        estimator.export_savedmodel(
            export_dir,
            serving_input_receiver_fn=serving_single_input(
                fixed_height=parameters.input_shape[0], min_width=10))

        estimator.evaluate(
            input_fn=data_loader(csv_filename=csv_files_eval,
                                 params=parameters,
                                 batch_size=training_params.eval_batch_size,
                                 num_epochs=1))
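
The trailing _config argument matches the Sacred experiment framework's captured-function convention, in which the full configuration dict is injected automatically. A minimal sketch of how such a function is typically wired up; the experiment name, paths, and parameter values below are placeholder assumptions, not from the original source:

from sacred import Experiment

ex = Experiment('crnn_training')  # hypothetical experiment name

@ex.config
def default_config():
    # Local variables of a config function become the experiment's config.
    csv_files_train = ['./data/train.csv']   # placeholder paths
    csv_files_eval = ['./data/eval.csv']
    output_model_dir = './output/model'
    restore_model = False
    training_params = {'n_epochs': 50, 'evaluate_every_epoch': 5,
                       'train_batch_size': 64, 'eval_batch_size': 64,
                       'save_interval': 1000}

# Register run() as the entry point; Sacred fills its arguments by name from
# the config and injects _config itself.
ex.main(run)

if __name__ == '__main__':
    ex.run_commandline()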