def _experiment_fn(run_config, hparams):
    # num_epochs can control duration if train_steps isn't
    # passed to Experiment
    train_input = lambda: model.input_fn(
        hparams.train_files,
        num_epochs=hparams.num_epochs,
        batch_size=hparams.train_batch_size,
    )
    # Don't shuffle evaluation data
    eval_input = lambda: model.input_fn(
        hparams.eval_files,
        batch_size=hparams.eval_batch_size,
        shuffle=False)
    return tf.contrib.learn.Experiment(
        tf.estimator.Estimator(
            model.generate_model_fn(
                embedding_size=hparams.embedding_size,
                # Construct layer sizes with exponential decay
                hidden_units=[
                    max(2, int(hparams.first_layer_size *
                               hparams.scale_factor**i))
                    for i in range(hparams.num_layers)
                ],
                learning_rate=hparams.learning_rate),
            config=run_config),
        train_input_fn=train_input,
        eval_input_fn=eval_input,
        **experiment_args)
def run_experiment(hparams):
    # Create training and eval input functions
    train_input = lambda: model.generate_input_fn(
        hparams.train_files,
        num_epochs=hparams.num_epochs,
        batch_size=hparams.train_batch_size,
    )
    # Don't shuffle evaluation data
    eval_input = lambda: model.generate_input_fn(
        hparams.eval_files,
        batch_size=hparams.eval_batch_size,
        shuffle=False)

    # Define exporters for the EvalSpec
    exporters = []
    exporters.append(
        tf.estimator.FinalExporter('mnist', model.example_serving_input_fn))

    # Define TrainSpec and EvalSpec instances
    eval_spec = tf.estimator.EvalSpec(eval_input,
                                      steps=hparams.eval_steps,
                                      exporters=exporters,
                                      throttle_secs=60)
    train_spec = tf.estimator.TrainSpec(train_input,
                                        max_steps=hparams.max_steps)

    # Create estimator
    estimator = tf.estimator.Estimator(
        model.generate_model_fn(hparams),
        config=tf.estimator.RunConfig(model_dir=hparams.job_dir))

    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
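The FinalExporter above requires a serving input function. Here is a minimal sketch of what model.example_serving_input_fn could look like, assuming the exported model parses serialized tf.Example protos at serving time; the feature spec and the function body are hypothetical stand-ins, not the sample's actual code:

import tensorflow as tf

# Hypothetical feature spec; the real model defines its own columns.
FEATURE_SPEC = {'image': tf.FixedLenFeature([784], tf.float32)}

def example_serving_input_fn():
    # Accept a batch of serialized tf.Example protos at serving time.
    examples = tf.placeholder(shape=[None], dtype=tf.string)
    features = tf.parse_example(examples, FEATURE_SPEC)
    return tf.estimator.export.ServingInputReceiver(
        features, {'examples': examples})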
def run_experiment(hparams): """Run the training and evaluate using the high level API""" train_input = lambda: model.input_fn(hparams.train_files, num_epochs=hparams.num_epochs, batch_size=hparams.train_batch_size) # Don't shuffle evaluation data eval_input = lambda: model.input_fn( hparams.eval_files, batch_size=hparams.eval_batch_size, shuffle=False) train_spec = tf.estimator.TrainSpec(train_input, max_steps=hparams.train_steps) exporter = tf.estimator.FinalExporter( 'census', model.SERVING_FUNCTIONS[hparams.export_format]) eval_spec = tf.estimator.EvalSpec(eval_input, steps=hparams.eval_steps, exporters=[exporter], name='census-eval') model_fn = model.generate_model_fn( embedding_size=hparams.embedding_size, # Construct layers sizes with exponetial decay hidden_units=[ max(2, int(hparams.first_layer_size * hparams.scale_factor**i)) for i in range(hparams.num_layers) ], learning_rate=hparams.learning_rate) estimator = tf.estimator.Estimator(model_fn=model_fn, model_dir=hparams.job_dir) tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
def _experiment_fn(run_config, hparams):
    # num_epochs can control duration if train_steps isn't
    # passed to Experiment
    train_input = lambda: model.generate_input_fn(
        hparams.train_files,
        num_epochs=hparams.num_epochs,
        batch_size=hparams.train_batch_size,
    )
    # Don't shuffle evaluation data
    eval_input = lambda: model.generate_input_fn(
        hparams.eval_files,
        batch_size=hparams.eval_batch_size,
        shuffle=False
    )
    return tf.contrib.learn.Experiment(
        tf.estimator.Estimator(
            model.generate_model_fn(
                embedding_size=hparams.embedding_size,
                # Construct layer sizes with exponential decay
                hidden_units=[
                    max(2, int(hparams.first_layer_size *
                               hparams.scale_factor**i))
                    for i in range(hparams.num_layers)
                ],
                learning_rate=hparams.learning_rate
            ),
            config=run_config
        ),
        train_input_fn=train_input,
        eval_input_fn=eval_input,
        **experiment_args
    )
def _experimenter_fn(run_config, hparams):
    """Build an Experiment from a run configuration and hyperparameters.

    :param run_config: RunConfig passed to the Estimator.
    :param hparams: hyperparameters for the input and model functions.
    :return: a learn.Experiment wrapping the Estimator.
    """
    # Create the training input function.
    training_fn = lambda: generate_input_fn(
        hparams.train_files,
        epochs=hparams.epochs,
        batch_size=hparams.train_batch_size,
        mapping=hparams.mapping,
        shuffle=True,
        defaults=hparams.defaults,
        features=hparams.features,
    )
    # Create the evaluation input function; don't shuffle evaluation data.
    evaluating_fn = lambda: generate_input_fn(
        hparams.eval_files,
        batch_size=hparams.eval_batch_size,
        mapping=hparams.mapping,
        shuffle=False,
        defaults=hparams.defaults,
        features=hparams.features,
    )
    return learn.Experiment(
        tf.estimator.Estimator(
            generate_model_fn(
                learning_rate=hparams.learning_rate,
                hidden_units=hparams.hidden_units,
                dropout=hparams.dropout,
                weights=hparams.weights,
            ),
            config=run_config,
        ),
        train_input_fn=training_fn,
        eval_input_fn=evaluating_fn,
        **args
    )
def run_experiment(hparams):
    """Run training and evaluation using the high-level API."""
    train_input = lambda: model.input_fn(
        hparams.train_files,
        num_epochs=hparams.num_epochs,
        batch_size=hparams.train_batch_size
    )
    # Don't shuffle evaluation data
    eval_input = lambda: model.input_fn(
        hparams.eval_files,
        batch_size=hparams.eval_batch_size,
        shuffle=False
    )
    train_spec = tf.estimator.TrainSpec(train_input,
                                        max_steps=hparams.train_steps)
    exporter = tf.estimator.FinalExporter(
        'census', model.SERVING_FUNCTIONS[hparams.export_format])
    eval_spec = tf.estimator.EvalSpec(eval_input,
                                      steps=hparams.eval_steps,
                                      exporters=[exporter],
                                      name='census-eval')
    model_fn = model.generate_model_fn(
        embedding_size=hparams.embedding_size,
        # Construct layer sizes with exponential decay
        hidden_units=[
            max(2, int(hparams.first_layer_size * hparams.scale_factor**i))
            for i in range(hparams.num_layers)
        ],
        learning_rate=hparams.learning_rate)
    estimator = tf.estimator.Estimator(model_fn=model_fn,
                                       model_dir=hparams.job_dir)
    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
def _experiment_fn(run_config, hparams):
    # num_epochs can control duration if train_steps isn't
    # passed to Experiment
    train_input = lambda: model.generate_input_fn(
        [
            os.path.join(os.environ['PIPELINE_INPUT_PATH'], train_file)
            for train_file in hparams.train_files
        ],
        num_epochs=hparams.num_epochs,
        batch_size=hparams.train_batch_size,
    )
    # Don't shuffle evaluation data
    eval_input = lambda: model.generate_input_fn(
        [
            os.path.join(os.environ['PIPELINE_INPUT_PATH'], eval_file)
            for eval_file in hparams.eval_files
        ],
        batch_size=hparams.eval_batch_size,
        shuffle=False)
    return tf.contrib.learn.Experiment(
        tf.estimator.Estimator(
            model.generate_model_fn(
                embedding_size=hparams.embedding_size,
                # Construct layer sizes with exponential decay
                hidden_units=[
                    max(2, int(hparams.first_layer_size *
                               hparams.scale_factor**i))
                    for i in range(hparams.num_layers)
                ],
                learning_rate=hparams.learning_rate),
            config=run_config),
        train_input_fn=train_input,
        eval_input_fn=eval_input,
        # export_fn
        # checkpoint_and_export=True,
        **experiment_args)
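The _experiment_fn / _experimenter_fn variants above are designed to be handed to learn_runner, which invokes them with a RunConfig and HParams and then drives the returned Experiment. A minimal wiring sketch for TF 1.x; the hyperparameter values and file names are placeholders, and experiment_args is assumed to be defined in the enclosing scope:

import tensorflow as tf
from tensorflow.contrib.learn import learn_runner

# Placeholder values; a real job would parse these from command-line flags.
run_config = tf.contrib.learn.RunConfig(model_dir='/tmp/census-model')
hparams = tf.contrib.training.HParams(
    train_files=['train.csv'],
    eval_files=['eval.csv'],
    num_epochs=None,
    train_batch_size=40,
    eval_batch_size=40,
    embedding_size=8,
    first_layer_size=100,
    scale_factor=0.7,
    num_layers=4,
    learning_rate=0.003)
# learn_runner calls _experiment_fn(run_config, hparams) and runs the
# returned Experiment's train-and-evaluate loop.
learn_runner.run(experiment_fn=_experiment_fn,
                 run_config=run_config,
                 hparams=hparams)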