Example #1
import os

import tensorflow as tf

# Params, TrainingParams, import_params_from_json, crnn_fn and
# serving_single_input are project-local helpers from the surrounding codebase.


def export_model(model_directory: str, output_dir: str, gpu: str):

    # Must be set before any session is created for TF to honor it
    os.environ['CUDA_VISIBLE_DEVICES'] = gpu
    config_sess = tf.ConfigProto()
    config_sess.gpu_options.per_process_gpu_memory_fraction = 0.6

    # Import parameters from the json file
    params_json = import_params_from_json(
        json_filename=os.path.join(model_directory, 'config.json'))
    training_params = TrainingParams(**params_json['training_params'])
    parameters = Params(**params_json)

    model_params = {'Params': parameters, 'TrainingParams': training_params}

    # Config: RunConfig.replace() returns a new object, so keep the result
    est_config = tf.estimator.RunConfig().replace(
        keep_checkpoint_max=10,
        save_checkpoints_steps=training_params.save_interval,
        session_config=config_sess,
        save_checkpoints_secs=None,
        save_summary_steps=1000)

    estimator = tf.estimator.Estimator(
        model_fn=crnn_fn,
        params=model_params,
        model_dir=model_directory,
        config=est_config,
    )

    estimator.export_savedmodel(output_dir,
                                serving_input_receiver_fn=serving_single_input(
                                    fixed_height=parameters.input_shape[0],
                                    min_width=10))
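
A minimal invocation sketch; the paths and GPU id below are hypothetical, and `model_directory` is assumed to contain the `config.json` written during training (see Example #5):

if __name__ == '__main__':
    # Hypothetical paths; point these at a trained model directory.
    export_model(model_directory='./output/crnn_model',
                 output_dir='./output/crnn_model/export',
                 gpu='0')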
Example #2
def default_config():
    csv_files_train = None
    csv_files_eval = None
    output_model_dir = None
    num_gpus = 1
    lookup_alphabet_file = ''
    input_shape = (32, 100)
    num_beam_paths = 2
    training_params = TrainingParams().to_dict()
    restore_model = False
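
Examples #2–#4 assign locals without returning them, which matches sacred's `@ex.config` pattern (the injected `_config` argument in Example #5 also points to sacred): the decorator captures the function's local variables as configuration entries. A minimal, self-contained sketch of that mechanism, assuming sacred is indeed the framework in use:

from sacred import Experiment

ex = Experiment('crnn_training')

@ex.config
def default_config():
    num_gpus = 1              # captured by sacred as a config entry
    input_shape = (32, 100)   # no return statement is needed

@ex.automain
def main(num_gpus, input_shape, _config):
    # Config entries are injected by name; _config holds the full dict.
    print(num_gpus, input_shape, dict(_config))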
Example #3
def default_config():
    csv_files_train = None
    csv_files_eval = None
    output_model_dir = None
    num_gpus = 1
    lookup_alphabet_file = ''
    input_shape = (32, 32)
    num_beam_paths = 2
    training_params = TrainingParams().to_dict()
    restore_model = True
    csv_delimiter = ';'
    string_split_delimiter = '|'
Example #4
def default_config():
    csv_files_train = None
    csv_files_eval = None
    output_model_dir = None
    num_gpus = 1
    lookup_alphabet_file = ''
    input_shape = (32, 100)
    num_beam_paths = 2
    training_params = TrainingParams().to_dict()
    restore_model = False
    csv_delimiter = ';'
    string_split_delimiter = '|'
    data_augmentation = True
    data_augmentation_max_rotation = 0.05
    input_data_n_parallel_calls = 4
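
Each variant seeds `training_params` with `TrainingParams().to_dict()`. The class itself is not shown on this page; a hypothetical stand-in consistent with the attributes these examples actually touch (`save_interval`, `n_epochs`, `evaluate_every_epoch`, `train_batch_size`, `eval_batch_size`) could look like this, with the default values being pure assumptions:

class TrainingParams:
    """Hypothetical stand-in; the real project class may differ."""

    def __init__(self, n_epochs: int = 50, train_batch_size: int = 64,
                 eval_batch_size: int = 128, save_interval: int = 1000,
                 evaluate_every_epoch: int = 5, **kwargs):
        self.n_epochs = n_epochs
        self.train_batch_size = train_batch_size
        self.eval_batch_size = eval_batch_size
        self.save_interval = save_interval
        self.evaluate_every_epoch = evaluate_every_epoch
        # Tolerate extra keys so TrainingParams(**json_dict) round-trips.
        for key, value in kwargs.items():
            setattr(self, key, value)

    def to_dict(self) -> dict:
        return dict(self.__dict__)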
Example #5
import json
import os
import string
from distutils.version import LooseVersion
from typing import List

import tensorflow as tf
from tqdm import trange

# Params, TrainingParams, crnn_fn, data_loader, serving_single_input and
# distribution_gpus are project-local helpers from the surrounding codebase.


def run(csv_files_train: List[str], csv_files_eval: List[str],
        output_model_dir: str, training_params: dict, _config):

    # Save config
    if not os.path.isdir(output_model_dir):
        os.makedirs(output_model_dir)
    else:
        assert _config.get('restore_model'), \
            '{0} already exists, you cannot use it as output directory. ' \
            'Set "restore_model=True" to continue training, or delete dir "rm -r {0}"'.format(output_model_dir)

    with open(os.path.join(output_model_dir, 'config.json'), 'w') as f:
        json.dump(_config, f, indent=4, sort_keys=True)

    parameters = Params(**_config)
    training_params = TrainingParams(**training_params)

    model_params = {'Params': parameters, 'TrainingParams': training_params}

    # Create export directory
    export_dir = os.path.join(output_model_dir, 'export')
    if not os.path.isdir(export_dir):
        os.makedirs(export_dir)

    # Check that the alphabet covers all chars in the csv input files.
    # string.whitespace[1:] drops the leading space, so ' ' stays in the
    # alphabet while tabs, newlines, etc. are discarded.
    discarded_chars = (parameters.string_split_delimiter
                       + parameters.csv_delimiter
                       + string.whitespace[1:])
    parameters.alphabet.check_input_file_alphabet(
        parameters.csv_files_train + parameters.csv_files_eval,
        discarded_chars=discarded_chars,
        csv_delimiter=parameters.csv_delimiter)

    config_sess = tf.ConfigProto()
    config_sess.gpu_options.per_process_gpu_memory_fraction = 0.8
    # config_sess.gpu_options.allow_growth = True

    # Config estimator: RunConfig.replace() returns a new object, so keep the
    # result; train_distribute is only available from TF 1.8 onwards.
    est_config = tf.estimator.RunConfig()
    if LooseVersion(tf.__version__) < LooseVersion('1.8'):
        est_config = est_config.replace(
            keep_checkpoint_max=10,
            save_checkpoints_steps=training_params.save_interval,
            session_config=config_sess,
            save_checkpoints_secs=None,
            save_summary_steps=1000,
            model_dir=output_model_dir)
    else:
        est_config = est_config.replace(
            keep_checkpoint_max=10,
            save_checkpoints_steps=training_params.save_interval,
            session_config=config_sess,
            save_checkpoints_secs=None,
            save_summary_steps=1000,
            model_dir=output_model_dir,
            train_distribute=distribution_gpus(parameters.num_gpus))

    estimator = tf.estimator.Estimator(model_fn=crnn_fn,
                                       params=model_params,
                                       model_dir=output_model_dir,
                                       config=est_config)

    # Alternate training and evaluation: train for `evaluate_every_epoch`
    # epochs, export a SavedModel, then run one evaluation pass.
    for _ in trange(0, training_params.n_epochs,
                    training_params.evaluate_every_epoch):

        estimator.train(input_fn=data_loader(
            csv_filename=csv_files_train,
            params=parameters,
            batch_size=training_params.train_batch_size,
            num_epochs=training_params.evaluate_every_epoch,
            data_augmentation=parameters.data_augmentation,
            image_summaries=True))

        estimator.export_savedmodel(
            export_dir,
            serving_input_receiver_fn=serving_single_input(
                fixed_height=parameters.input_shape[0], min_width=10))

        estimator.evaluate(
            input_fn=data_loader(csv_filename=csv_files_eval,
                                 params=parameters,
                                 batch_size=training_params.eval_batch_size,
                                 num_epochs=1))
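
Assuming the sacred setup sketched after Example #2, `run` would typically be registered as the experiment's main function and launched from the command line. A hypothetical wiring, reusing the `default_config` from Example #4:

from sacred import Experiment

ex = Experiment('crnn_training')
ex.config(default_config)   # register the config scope (decorator used as a call)
ex.main(run)                # register `run` as the main function

if __name__ == '__main__':
    # Accepts sacred CLI overrides, e.g. `python training.py with num_gpus=2`.
    ex.run_commandline()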