def experiment_fn_inner(run_config, hparams):
    """Build an Experiment for the captioning model selected by ``hparams``.

    Args:
        run_config: RunConfig forwarded to the Estimator.
        hparams: hyperparameters; must provide ``dataset``, ``bin_size``,
            ``train_steps``, ``eval_steps`` and ``steps_per_eval``.

    Returns:
        A configured tf.contrib.learn Experiment.

    Raises:
        ValueError: if ``hparams.dataset`` names an unknown dataset.
    """
    if hparams.dataset == "COCO":
        dataset = COCO("data/coco")
    elif hparams.dataset == "challenger.ai":
        dataset = ChallengerAI("data/challenger.ai")
    else:
        # FIX: was a bare `Exception`; ValueError is the conventional type
        # for an invalid argument value, and callers catching Exception
        # still catch it (backward compatible).
        raise ValueError("Unknown Dataset Name: '%s'." % hparams.dataset)

    # Vocabulary size is only known once the dataset has been loaded.
    hparams.add_hparam("vocab_size", len(dataset.word_to_idx))

    estimator = Estimator(model_fn=model_fn_inner, params=hparams,
                          config=run_config)

    # Hooks (re)initialize the dataset iterators at the start of each phase.
    train_init_hook = IteratorInitializerHook("train")
    val_init_hook = IteratorInitializerHook("val")

    experiment = Experiment(
        estimator=estimator,
        train_input_fn=dataset.get_tfrecords_input_fn("train", hparams.bin_size),
        eval_input_fn=dataset.get_tfrecords_input_fn("val", hparams.bin_size),
        train_steps=hparams.train_steps,
        eval_steps=hparams.eval_steps,
        train_steps_per_iteration=hparams.steps_per_eval,
        eval_hooks=[val_init_hook],
    )
    # Experiment's constructor has no train-hook kwarg; extend afterwards.
    experiment.extend_train_hooks([train_init_hook])
    return experiment
def experiment_fn(run_config, hparams):
    """Assemble an Experiment over the Cornell Movie-Dialogs corpus."""
    eval_initializer_hook = IteratorInitializerHook()
    estimator = Estimator(model_fn=model_fn, params=hparams, config=run_config)

    # Train/test splits differ only in the id files; vocab files are shared.
    def _make_dataset(split):
        base = config.PROCESSED_PATH
        return CornellMovieDataset(
            os.path.join(base, split + "_ids.enc"),
            os.path.join(base, split + "_ids.dec"),
            os.path.join(base, "vocab.enc"),
            os.path.join(base, "vocab.dec"),
        )

    train_dataset = _make_dataset("train")
    test_dataset = _make_dataset("test")

    return Experiment(
        estimator=estimator,
        train_input_fn=train_dataset.input_fn,
        eval_input_fn=test_dataset.input_fn,
        train_steps=hparams.train_steps,
        eval_hooks=[eval_initializer_hook],
        eval_steps=None,  # evaluate until the eval input is exhausted
        train_steps_per_iteration=hparams.steps_per_eval,
    )
def experiment_fn(run_config, hparams):
    """Create an Experiment with iterator-backed train/eval input fns.

    Args:
        run_config: base RunConfig; the checkpoint interval is applied to it.
        hparams: must provide train/eval dataset paths, vocab paths,
            ``batch_size``, ``num_steps`` and ``min_eval_frequency``.

    Returns:
        A configured Experiment.
    """
    # BUG FIX: RunConfig.replace() does not mutate in place — it returns a
    # NEW config. The original code discarded that return value (and only
    # called it after the estimator was already built), so
    # save_checkpoints_steps was never applied. Rebind first.
    run_config = run_config.replace(
        save_checkpoints_steps=hparams.min_eval_frequency)

    train_iter, train_input_hook = get_inputs(
        hparams.train_dataset_paths, hparams.vocab_paths, hparams.batch_size)
    eval_iter, eval_input_hook = get_inputs(
        hparams.eval_dataset_paths, hparams.vocab_paths, hparams.batch_size,
        mode=estimator.ModeKeys.EVAL)

    def train_input_fn():
        # Defer get_next() so the op is created in the Experiment's graph.
        return train_iter.get_next()

    def eval_input_fn():
        return eval_iter.get_next()

    exp_estimator = get_estimator(run_config, hparams)
    return Experiment(estimator=exp_estimator,
                      train_input_fn=train_input_fn,
                      eval_input_fn=eval_input_fn,
                      train_steps=hparams.num_steps,
                      min_eval_frequency=hparams.min_eval_frequency,
                      train_monitors=[train_input_hook],
                      eval_hooks=[eval_input_hook],
                      eval_steps=None)
def experiment_fn(run_config, hparams):
    """Minimal Experiment: default step budgets, factory-provided inputs."""
    train_fn, eval_fn = make_input_fns()
    return Experiment(
        estimator=Estimator(model_fn=model_fn, config=run_config,
                            params=hparams),
        train_input_fn=train_fn,
        eval_input_fn=eval_fn)
def expirement_fn(run_config, hparams):
    # NOTE(review): the name is a typo of "experiment_fn" but is the public
    # identifier callers use, so it is kept as-is.
    """Template Experiment around a DNNRegressor (``...`` are placeholders)."""
    regressor = DNNRegressor(..., config=run_config,
                             hidden_units=hparams['units'])
    experiment = Experiment(estimator=regressor,
                            train_input_fn=pandas_input_fn(...),
                            eval_input_fn=pandas_input_fn(...))
    return experiment
def experiment_fn(run_config, hparams):
    """Experiment wired entirely from the module-level ``config`` object."""
    return Experiment(
        estimator=Estimator(model_fn=config.model_fn,
                            config=run_config,
                            params=hparams),
        train_input_fn=config.input_fns['train'],
        eval_input_fn=config.input_fns['valid'])
def _experiment_fn(run_config, hparams):
    """Experiment built from the module-level pipeline/step settings."""
    est = model.build_estimator(run_config, hparams)
    train_fn = input_pipe.get_input_fn("train", **input_param)
    val_fn = input_pipe.get_input_fn("val", **input_param)
    return Experiment(
        est,
        train_input_fn=train_fn,
        eval_input_fn=val_fn,
        train_steps=exp_param['train_steps'],
        eval_steps=exp_param['eval_steps'],
        min_eval_frequency=exp_param['min_eval_frequency'])
def experiment_fn(run_config, hparams):
    """Experiment with fixed step budgets around ``create_model``."""
    est = create_model(config=run_config, hparams=hparams)
    experiment = Experiment(
        estimator=est,
        train_input_fn=train_input_fn,
        eval_input_fn=val_input_fn,
        train_steps=10000,               # total training steps
        eval_steps=200,                  # batches per evaluation pass
        train_steps_per_iteration=1000,  # evaluate every 1000 train steps
    )
    return experiment
def experiment_fn(output_dir):
    """Experiment whose model_fn is parameterized by the parsed CLI args."""
    est = Estimator(model_fn=model.make_model_fn(**args.__dict__),
                    model_dir=output_dir)
    return Experiment(
        est,
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        continuous_eval_throttle_secs=args.min_eval_seconds,
        min_eval_frequency=args.min_train_eval_rate,
        # Until Experiment moves to train_and_evaluate call internally
        local_eval_frequency=args.min_train_eval_rate)
def experiment_fn(output_dir):
    """Experiment whose model_fn receives the args namespace directly."""
    est = Estimator(model_fn=model.make_model_fn(args),
                    model_dir=output_dir)
    return Experiment(
        est,
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        continuous_eval_throttle_secs=args.min_eval_seconds,
        min_eval_frequency=args.min_train_eval_rate,
        # Until learn_runner is updated to use train_and_evaluate
        local_eval_frequency=args.min_train_eval_rate)
def experiment_fn(run_config, hparams):
    """Experiment with hard-coded step and evaluation budgets."""
    est = create_estimator(config=run_config, hparams=hparams)
    experiment = Experiment(
        estimator=est,
        train_input_fn=train_input_fn,
        eval_input_fn=val_input_fn,
        train_steps=10000,             # total training steps
        eval_delay_secs=1,             # start evaluating almost immediately
        eval_steps=15,                 # batches per evaluation pass
        train_steps_per_iteration=130,
    )
    return experiment
def _experiment_fn(output_dir):
    """Experiment over the GOOG time-series CSV train/validation splits."""
    train_csv = os.path.join(data_dir, 'GOOG_series_train.csv')
    valid_csv = os.path.join(data_dir, 'GOOG_series_validation.csv')
    return Experiment(
        model.build_estimator(output_dir),
        train_input_fn=model.get_input_fn(filename=train_csv,
                                          batch_size=train_batch_size),
        eval_input_fn=model.get_input_fn(filename=valid_csv,
                                         batch_size=eval_batch_size),
        train_steps=train_steps,
        eval_steps=eval_steps,
        **experiment_args
    )
def _experiment_fn(output_dir):
    """Experiment over TFRecord splits, exporting one saved model."""
    # Keep only the most recent export of the serving graph.
    export_strategy = saved_model_export_utils.make_export_strategy(
        models.serving_input_fn,
        default_output_alternative_key=None,
        exports_to_keep=1)
    return Experiment(
        models.build_estimator(output_dir),
        train_input_fn=models.get_input_fn(
            filename=os.path.join(data_dir, 'train.tfrecords')),
        eval_input_fn=models.get_input_fn(
            filename=os.path.join(data_dir, 'test.tfrecords')),
        export_strategies=[export_strategy],
        train_steps=train_steps,
        eval_steps=eval_steps,
        **experiment_args
    )
def _create_my_experiment(run_config, hparams):
    """Build an MNIST DNNClassifier Experiment.

    Args:
        run_config: accepted for the learn_runner signature (unused here —
            the estimator reads FLAGS.model_dir directly).
        hparams: must provide ``num_hidden_units``.

    Returns:
        An Experiment over the MNIST train/test input functions.
    """
    feature_columns = [tf.feature_column.numeric_column("x", shape=[28, 28])]
    # FIX: was a Python 2 `print` statement, a syntax error on Python 3;
    # the function-call form behaves identically for a single argument.
    print(feature_columns)
    mnist = mnist_data.read_data_sets(FLAGS.data_dir, one_hot=False)
    train_input_fn, train_input_hook = get_train_inputs(batch_size=128,
                                                        mnist_data=mnist)
    eval_input_fn, eval_input_hook = get_test_inputs(batch_size=128,
                                                     mnist_data=mnist)
    # NOTE(review): train_input_hook / eval_input_hook are created but never
    # registered with the Experiment — confirm whether they should be passed
    # as train_monitors / eval_hooks.
    return Experiment(
        estimator=dnn.DNNClassifier(
            hidden_units=hparams.num_hidden_units,
            feature_columns=feature_columns,
            model_dir=FLAGS.model_dir),
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn)
def _experiment_fn(output_dir):
    """Experiment for the configured model type with a saved-model export."""
    est = model.build_estimator(output_dir, model_type, learning_rate)
    exports = [
        saved_model_export_utils.make_export_strategy(
            model.serving_input_fn,
            default_output_alternative_key=None,
            exports_to_keep=1)
    ]
    return Experiment(
        est,
        train_input_fn=input_pipe.get_input_fn("train", data_dir,
                                               **input_pipe_settings),
        eval_input_fn=input_pipe.get_input_fn("eval", data_dir,
                                              **input_pipe_settings),
        train_steps=train_steps,
        eval_steps=eval_steps,
        export_strategies=exports,
        **other_experiment_args)
def experiment_fn(run_config, hparams):
    """Build an Experiment from the ModelInputs input-fn factory.

    Args:
        run_config: base RunConfig; the checkpoint interval is applied to it.
        hparams: must provide ``batch_size``, ``train_dataset_path``,
            ``num_steps`` and ``min_eval_frequency``.

    Returns:
        A configured Experiment.
    """
    # BUG FIX: RunConfig.replace() is non-mutating and returns a NEW config.
    # The original call discarded the result — and ran after the estimator
    # was already constructed — so save_checkpoints_steps was never applied.
    run_config = run_config.replace(
        save_checkpoints_steps=hparams.min_eval_frequency)

    input_fn_factory = ModelInputs(hparams.batch_size,
                                   hparams.train_dataset_path)
    train_input_fn, train_input_hook = input_fn_factory.get_inputs()
    eval_input_fn, eval_input_hook = input_fn_factory.get_inputs(
        mode=estimator.ModeKeys.EVAL)

    exp_estimator = get_estimator(run_config, hparams)
    return Experiment(estimator=exp_estimator,
                      train_input_fn=train_input_fn,
                      eval_input_fn=eval_input_fn,
                      train_steps=hparams.num_steps,
                      min_eval_frequency=hparams.min_eval_frequency,
                      train_monitors=[train_input_hook],
                      eval_hooks=[eval_input_hook],
                      eval_steps=1000)
def _experiment_fn(output_dir):
    """Experiment for the wide-and-deep style model with one export."""
    train_input = model.generate_input_fn(train_file,
                                          num_epochs=num_epochs,
                                          batch_size=train_batch_size)
    eval_input = model.generate_input_fn(eval_file,
                                         batch_size=eval_batch_size)
    export = saved_model_export_utils.make_export_strategy(
        model.serving_input_fn,
        default_output_alternative_key=None,
        exports_to_keep=1)
    est = model.build_estimator(job_dir,
                                embedding_size=embedding_size,
                                hidden_units=hidden_units)
    return Experiment(est,
                      train_input_fn=train_input,
                      eval_input_fn=eval_input,
                      export_strategies=[export],
                      **experiment_args)
def _experiment_fn(output_dir):
    """Experiment over the CSV dataset with metrics and saved-model export."""
    csv_path = os.path.join(data_dir, "train_data.csv")
    # NOTE(review): both training and evaluation read "train_data.csv" —
    # evaluation runs on the training split; confirm this is intentional.
    export = saved_model_export_utils.make_export_strategy(
        model.serving_input_fn,
        default_output_alternative_key=None,
        exports_to_keep=1)
    return Experiment(
        model.build_estimator(output_dir),
        train_input_fn=model.get_input_fn(filename=csv_path,
                                          batch_size=train_batch_size),
        eval_input_fn=model.get_input_fn(filename=csv_path,
                                         batch_size=eval_batch_size),
        export_strategies=[export],
        train_steps=train_steps,
        eval_metrics=model.get_eval_metrics(),
        eval_steps=eval_steps,
        **experiment_args)
def _experiment_fn(output_dir):
    """CSV-driven Experiment with eval metrics and one saved-model export."""
    make_input = model.generate_csv_input_fn
    train_input = make_input(train_data_paths,
                             num_epochs=num_epochs,
                             batch_size=train_batch_size)
    eval_input = make_input(eval_data_paths,
                            batch_size=eval_batch_size,
                            mode=tf.contrib.learn.ModeKeys.EVAL)
    export = saved_model_export_utils.make_export_strategy(
        model.serving_input_fn,
        default_output_alternative_key=None,
        exports_to_keep=1)
    return Experiment(
        model.build_estimator(output_dir, hidden_units=hidden_units),
        train_input_fn=train_input,
        eval_input_fn=eval_input,
        export_strategies=[export],
        eval_metrics=model.get_eval_metrics(),
        **experiment_args)
def experiment_fn(run_config, hparams):
    """Experiment fed by FeedFnHooks over pre-batched ``.npz`` shards."""
    batch_size = tf.flags.FLAGS.batch_size
    # Training data is sharded across 40 files; validation is one file.
    train_hook = FeedFnHook(path_fmt='output/batches/train-{}.npz',
                            splits=40,
                            batch_size=batch_size)
    val_hook = FeedFnHook(path_fmt='output/batches/val.npz',
                          splits=1,
                          batch_size=batch_size)
    return Experiment(
        estimator=Estimator(model_fn=model_fn,
                            config=run_config,
                            params=hparams),
        train_input_fn=input_fn,
        eval_input_fn=input_fn,
        train_monitors=[train_hook],
        eval_hooks=[val_hook]
    )
def _experiment_fn(output_dir):
    """Experiment selecting CSV or TFRecord input based on ``format``."""
    if format == 'csv':
        make_input = model.generate_csv_input_fn
    else:
        make_input = model.generate_tfrecord_input_fn
    train_input = make_input(train_data_paths,
                             num_epochs=num_epochs,
                             batch_size=train_batch_size)
    eval_input = make_input(eval_data_paths,
                            batch_size=eval_batch_size,
                            mode=tf.contrib.learn.ModeKeys.EVAL)
    est = model.build_estimator(output_dir,
                                nbuckets=nbuckets,
                                hidden_units=parse_to_int(hidden_units))
    export = saved_model_export_utils.make_export_strategy(
        model.serving_input_fn,
        default_output_alternative_key=None,
        exports_to_keep=1)
    return Experiment(
        est,
        train_input_fn=train_input,
        eval_input_fn=eval_input,
        export_strategies=[export],
        eval_metrics=model.get_eval_metrics(),
        # min_eval_frequency = 1000,  # change this to speed up training on large datasets
        **experiment_args)
def _experiment_fn(run_config, hparams):
    # Build an Experiment around a customer-supplied training script.
    # NOTE(review): this function reads `self`, `logger`, `_function` and
    # `experiment_params`-style state from an enclosing scope — presumably
    # a closure inside a method; confirm against the surrounding code.
    # Experiment kwargs the customer is allowed to override.
    valid_experiment_keys = [
        'eval_metrics', 'train_monitors', 'eval_hooks',
        'local_eval_frequency', 'eval_delay_secs',
        'continuous_eval_throttle_secs', 'min_eval_frequency',
        'delay_workers_by_global_step', 'train_steps_per_iteration'
    ]
    # Whitelist-filter the customer's params down to valid Experiment kwargs.
    experiment_params = {
        k: v
        for k, v in self.customer_params.items() if k in valid_experiment_keys
    }
    logger.info("creating Experiment:")
    logger.info(experiment_params)
    '''
    TensorFlow input functions (train_input_fn, and eval_input_fn) can return
    features and labels, or a function that returns features and labels
    Examples of valid input functions:
        def train_input_fn(training_dir, hyperparameters):
            ...
            return tf.estimator.inputs.numpy_input_fn(x={"x": train_data}, y=train_labels)
        def train_input_fn(training_dir, hyperparameters):
            ...
            return features, labels
    '''

    def _train_input_fn():
        """Prepare parameters for the train_input_fn and invoke it"""
        # Inspect the customer's declared signature and resolve each
        # parameter by name.  NOTE(review): inspect.getargspec is deprecated
        # and removed in Python 3.11 — getfullargspec is the replacement.
        declared_args = inspect.getargspec(
            self.customer_script.train_input_fn)
        invoke_args = {
            arg: self._resolve_value_for_training_input_fn_parameter(arg)
            for arg in declared_args.args
        }
        # _function(...) normalizes "returns an input_fn" vs. "returns
        # (features, labels)"; the trailing () invokes the normalized fn.
        return _function(
            self.customer_script.train_input_fn(**invoke_args))()

    def _eval_input_fn():
        # Same parameter-resolution scheme for the eval input function.
        declared_args = inspect.getargspec(
            self.customer_script.eval_input_fn)
        invoke_args = {
            arg: self._resolve_value_for_training_input_fn_parameter(arg)
            for arg in declared_args.args
        }
        return _function(
            self.customer_script.eval_input_fn(**invoke_args))()

    '''
    TensorFlow serving input functions (serving_input_fn) can return a
    ServingInputReceiver object or a function that a ServingInputReceiver
    Examples of valid serving input functions:
        def serving_input_fn(params):
            feature_spec = {INPUT_TENSOR_NAME: tf.FixedLenFeature(dtype=tf.float32, shape=[4])}
            return tf.estimator.export.build_parsing_serving_input_receiver_fn(feature_spec)
        def serving_input_fn(hyperpameters):
            inputs = {INPUT_TENSOR_NAME: tf.placeholder(tf.float32, [None, 32, 32, 3])}
            return tf.estimator.export.ServingInputReceiver(inputs, inputs)
    '''

    def _serving_input_fn():
        # The serving fn only receives hyperparameters (no data directory).
        return _function(
            self.customer_script.serving_input_fn(
                self.customer_params))()

    def _export_strategy():
        # Export one saved model only when the customer opted into saving;
        # otherwise warn and skip export (checkpoints remain available).
        if self.saves_training():
            return [
                saved_model_export_utils.make_export_strategy(
                    serving_input_fn=_serving_input_fn,
                    default_output_alternative_key=None,
                    exports_to_keep=1)
            ]
        logger.warn(
            "serving_input_fn not specified, model NOT saved, use checkpoints to reconstruct"
        )
        return None

    return Experiment(estimator=self._build_estimator(
        run_config=run_config, hparams=hparams),
                      train_input_fn=_train_input_fn,
                      eval_input_fn=_eval_input_fn,
                      export_strategies=_export_strategy(),
                      train_steps=self.train_steps,
                      eval_steps=self.eval_steps,
                      **experiment_params)
def _experiment_fn(output_dir):
    # Build an Experiment around a customer-supplied training script.
    # NOTE(review): reads `self`, `estimator`, `logging` and `_function`
    # from an enclosing scope — `estimator` in particular is NOT defined in
    # this function, so this is presumably a closure inside a method;
    # confirm against the surrounding code.
    # Experiment kwargs the customer is allowed to override.
    valid_experiment_keys = [
        'eval_metrics', 'train_monitors', 'eval_hooks',
        'local_eval_frequency', 'eval_delay_secs',
        'continuous_eval_throttle_secs', 'min_eval_frequency',
        'delay_workers_by_global_step', 'train_steps_per_iteration'
    ]
    # Whitelist-filter the customer's params down to valid Experiment kwargs.
    experiment_params = {
        k: v
        for k, v in self.customer_params.items() if k in valid_experiment_keys
    }
    logging.info("creating Experiment:")
    logging.info(experiment_params)
    '''
    TensorFlow input functions (train_input_fn, and eval_input_fn) can return
    features and labels, or a function that returns features and labels
    Examples of valid input functions:
        def train_input_fn(training_dir, hyperparameters):
            ...
            return tf.estimator.inputs.numpy_input_fn(x={"x": train_data}, y=train_labels)
        def train_input_fn(training_dir, hyperparameters):
            ...
            return features, labels
    '''

    def _train_input_fn():
        # Input fns receive the training data path and the hyperparameters;
        # _function(...) normalizes "returns an input_fn" vs. "returns
        # (features, labels)", and the trailing () invokes the result.
        return _function(
            self.customer_script.train_input_fn(
                self.training_path, self.customer_params))()

    def _eval_input_fn():
        # Evaluation reads from the same training path.
        return _function(
            self.customer_script.eval_input_fn(self.training_path,
                                               self.customer_params))()

    '''
    TensorFlow serving input functions (serving_input_fn) can return a
    ServingInputReceiver object or a function that a ServingInputReceiver
    Examples of valid serving input functions:
        def serving_input_fn(params):
            feature_spec = {INPUT_TENSOR_NAME: tf.FixedLenFeature(dtype=tf.float32, shape=[4])}
            return tf.estimator.export.build_parsing_serving_input_receiver_fn(feature_spec)
        def serving_input_fn(hyperpameters):
            inputs = {INPUT_TENSOR_NAME: tf.placeholder(tf.float32, [None, 32, 32, 3])}
            return tf.estimator.export.ServingInputReceiver(inputs, inputs)
    '''

    def _serving_input_fn():
        # The serving fn only receives hyperparameters (no data directory).
        return _function(
            self.customer_script.serving_input_fn(
                self.customer_params))()

    return Experiment(
        estimator=estimator,
        train_input_fn=_train_input_fn,
        eval_input_fn=_eval_input_fn,
        # Unconditionally export one saved model of the serving graph.
        export_strategies=[
            saved_model_export_utils.make_export_strategy(
                serving_input_fn=_serving_input_fn,
                default_output_alternative_key=None,
                exports_to_keep=1)
        ],
        train_steps=self.train_steps,
        eval_steps=self.eval_steps,
        **experiment_params)