import logging
import os

import tensorflow as tf

# Project-local modules assumed by the variants below.
import inputs
import input_utils
import model
import utils


def run_experiment(params):
    """Testbed for running model training and evaluation."""
    dataset = inputs.download_data(params.train_path, params.eval_path)
    estimator = model.get_estimator(params)
    trial_id = _get_trial_id()
    model_dir = os.path.join(params.model_dir, trial_id)
    _train_and_evaluate(estimator, dataset, model_dir, params)
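# A minimal sketch of the _get_trial_id() helper used above. It assumes the
# common AI Platform convention of reading the hyperparameter-tuning trial
# number from the TF_CONFIG environment variable; the real helper may differ.
import json


def _get_trial_id():
    """Returns the tuning trial id, or '' when not running under a tuning job."""
    tf_config = json.loads(os.environ.get('TF_CONFIG', '{}'))
    return str(tf_config.get('task', {}).get('trial', ''))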
def run_training(params):
    """Initializes the estimator and runs train_and_evaluate."""
    estimator = model.get_estimator(params)

    train_input_fn = inputs.get_input_fn(
        params.train_path,
        shuffle=True,
        batch_size=params.batch_size,
        num_epochs=params.num_epochs,
    )
    train_spec = tf.estimator.TrainSpec(
        input_fn=train_input_fn,
        max_steps=params.max_steps,
    )

    eval_input_fn = inputs.get_input_fn(
        params.eval_path,
        shuffle=False,
        batch_size=params.batch_size,
    )
    exporter = tf.estimator.BestExporter(
        "export",
        inputs.get_serving_input_fn(params.export_format),
        exports_to_keep=1)
    eval_spec = tf.estimator.EvalSpec(
        input_fn=eval_input_fn,
        throttle_secs=1,
        steps=params.eval_steps,
        start_delay_secs=1,
        exporters=[exporter],
    )

    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
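# Example invocation of run_training() above, using argparse.Namespace to stand
# in for parsed command-line flags. Every path and value here is hypothetical.
from argparse import Namespace

example_params = Namespace(
    train_path='gs://my-bucket/data/train.csv',  # hypothetical path
    eval_path='gs://my-bucket/data/eval.csv',    # hypothetical path
    batch_size=64,
    num_epochs=None,   # None lets max_steps bound training instead of epochs
    max_steps=10000,
    eval_steps=100,
    export_format='json',
)
run_training(example_params)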
def run_experiment(arguments):
    """Testbed for running model training and evaluation."""
    logging.info('Arguments: %s', arguments)

    # Get estimator
    estimator = model.get_estimator(arguments)

    # Run training and evaluation
    _train_and_evaluate(estimator, arguments.job_dir)
def run_experiment(flags):
    """Testbed for running model training and evaluation."""
    # Get data for training and evaluation
    dataset = utils.read_df_from_bigquery(
        flags.input, num_samples=flags.num_samples)

    # Get model
    estimator = model.get_estimator(flags)

    # Run training and evaluation
    _train_and_evaluate(estimator, dataset, flags.job_dir)
def run_experiment(arguments):
    """Testbed for running model training and evaluation."""
    logging.info('Arguments: %s', arguments)

    # Get data for training and evaluation
    dataset = utils.read_df_from_gcs(arguments.input)

    # Get estimator
    estimator = model.get_estimator(arguments)

    # Run training and evaluation
    _train_and_evaluate(estimator, dataset, arguments.job_dir)
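# A minimal sketch of the _train_and_evaluate() helper that the DataFrame-based
# variants above call, assuming a scikit-learn style estimator and a DataFrame
# whose last column is the label. The split ratio, model filename, and use of
# joblib are hypothetical stand-ins for the project's real utilities.
import joblib
from sklearn.model_selection import train_test_split


def _train_and_evaluate(estimator, dataset, job_dir):
    """Fits the estimator, logs a validation score, and saves the model."""
    x = dataset.iloc[:, :-1]
    y = dataset.iloc[:, -1]
    x_train, x_val, y_train, y_val = train_test_split(x, y, test_size=0.2)

    estimator.fit(x_train, y_train)
    logging.info('Validation score: %f', estimator.score(x_val, y_val))

    # Persist the fitted model under the job directory.
    joblib.dump(estimator, os.path.join(job_dir, 'model.joblib'))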
def run_experiment(hparams):
    """Train and evaluate a tf.estimator model."""
    print(hparams)

    train_spec = tf.estimator.TrainSpec(
        input_fn=input_utils._get_train_input_fn(
            hparams.train_file, hparams.batch_size, hparams.num_epochs,
            tf.estimator.ModeKeys.TRAIN),
        max_steps=hparams.train_steps)

    # final_exporter = tf.estimator.FinalExporter(
    #     'final_exporter', input_utils.serving_input_fn)
    eval_spec = tf.estimator.EvalSpec(
        input_fn=input_utils._get_train_input_fn(
            hparams.eval_file, hparams.batch_size, hparams.num_epochs,
            tf.estimator.ModeKeys.EVAL),
        # exporters=[final_exporter],
    )

    # Checkpoints to save
    run_config = tf.estimator.RunConfig(
        model_dir=hparams.job_dir,
        save_checkpoints_steps=100,
        keep_checkpoint_max=200)
    # NOTE: run_config is built but never passed to the estimator below, so the
    # checkpoint settings above have no effect as written.
    estimator = model.get_estimator(hparams)
    return tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
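# A minimal sketch of what input_utils._get_train_input_fn() might look like,
# assuming CSV input files. The column names, record defaults, and label column
# are hypothetical; the real helper likely differs.
_CSV_COLUMNS = ['feature_a', 'feature_b', 'label']  # hypothetical schema
_CSV_DEFAULTS = [[0.0], [0.0], [0]]


def _get_train_input_fn(file_path, batch_size, num_epochs, mode):
    """Returns an input_fn that reads CSV records into (features, label) batches."""

    def _parse_line(line):
        fields = tf.io.decode_csv(line, record_defaults=_CSV_DEFAULTS)
        features = dict(zip(_CSV_COLUMNS, fields))
        label = features.pop('label')
        return features, label

    def input_fn():
        dataset = tf.data.TextLineDataset(file_path).map(_parse_line)
        if mode == tf.estimator.ModeKeys.TRAIN:
            # Shuffle and repeat only during training; eval reads one pass.
            dataset = dataset.shuffle(buffer_size=10000).repeat(num_epochs)
        return dataset.batch(batch_size)

    return input_fn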