def create_train_runner(hparams, num_workers):
    """Create, initialize, and build a TPU train runner for one training job.

    Args:
        hparams: Hyperparameter object; must provide `num_examples_per_epoch`
            and `batch_size` (used to size iterations per loop), and whatever
            `TrainLowLevelRunner` / `DistributedPipeline` read from it.
        num_workers: Number of input-pipeline workers passed to
            `DistributedPipeline`.

    Returns:
        A fully initialized `low_level_runner.TrainLowLevelRunner` with the
        input pipeline attached and the model graph built.
    """
    params = {}
    # One "iteration" block per epoch: floor(examples / batch_size) steps.
    steps_per_epoch = int(hparams.num_examples_per_epoch / hparams.batch_size)
    # BUG FIX: the original returned here, leaving the initialization code
    # below unreachable (and referencing an unassigned `runner`). Bind the
    # runner to a local so the setup sequence actually runs.
    runner = low_level_runner.TrainLowLevelRunner(iterations=steps_per_epoch,
                                                  hparams=hparams,
                                                  per_host_v1=True)
    input_fn = DistributedPipeline(hparams, num_workers)
    runner.initialize(input_fn, params)
    # MLPerf compliance logging: mark the official start of the timed run.
    mlperf_log.gnmt_print(key=mlperf_log.RUN_START)
    runner.build_model(model_fn, params)
    return runner
def create_train_runner(hparams):
    """Return a TrainLowLevelRunner sized to run one epoch per iteration loop.

    Side effect: sets the target start-/end-of-sentence token ids on
    `hparams` (`tgt_sos_id = 1`, `tgt_eos_id = 2`) before constructing
    the runner.
    """
    # Fixed GNMT target-vocabulary convention for SOS/EOS token ids.
    hparams.tgt_sos_id = 1
    hparams.tgt_eos_id = 2
    iterations = int(hparams.num_examples_per_epoch / hparams.batch_size)
    return low_level_runner.TrainLowLevelRunner(
        iterations=iterations, hparams=hparams)
def create_train_runner(hparams, num_workers):
    """Construct a per-host-v1 TrainLowLevelRunner sized to one epoch.

    Args:
        hparams: Hyperparameter object providing `num_examples_per_epoch`
            and `batch_size`, plus whatever `TrainLowLevelRunner` reads.
        num_workers: Accepted for signature compatibility with callers;
            not consumed here — presumably used by a sibling variant that
            also builds the input pipeline (verify against call sites).

    Returns:
        An un-initialized `low_level_runner.TrainLowLevelRunner`.
    """
    # FIX: dropped the unused local `params = {}` — unlike the sibling
    # variant, this function never passes params anywhere.
    steps_per_epoch = int(hparams.num_examples_per_epoch / hparams.batch_size)
    return low_level_runner.TrainLowLevelRunner(iterations=steps_per_epoch,
                                                hparams=hparams,
                                                per_host_v1=True)