def _experiment_fn(run_config, hparams):
    # Passing the RunConfig into the Estimator seems to be the only way to
    # stop CUDA out-of-memory errors.
    estimator = tf.estimator.Estimator(
        model_fn=model_fn,
        config=run_config,
        params=hparams,
    )
    export_strategy = learn.make_export_strategy(
        lambda: serving_input_fn(hparams),
        default_output_alternative_key=None,
    )
    return learn.Experiment(
        estimator=estimator,
        train_input_fn=lambda: train_input_fn(hparams),
        eval_input_fn=lambda: eval_input_fn(hparams),
        train_steps=10000,
        eval_steps=5,
        export_strategies=[export_strategy],
        min_eval_frequency=100,
    )

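# A minimal usage sketch (not part of the original snippet): experiment
# functions with the (run_config, hparams) signature were typically driven by
# learn_runner.run(). The model directory and hyperparameter values below are
# hypothetical.
from tensorflow.contrib.learn.python.learn import learn_runner

def run_sketch():
    run_config = tf.contrib.learn.RunConfig(model_dir='/tmp/model')
    hparams = tf.contrib.training.HParams(learning_rate=0.001)
    learn_runner.run(
        experiment_fn=_experiment_fn,
        run_config=run_config,
        schedule='train_and_evaluate',
        hparams=hparams)
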
def experiment_fn(output_dir):
    # Run the experiment.
    return tflearn.Experiment(
        tflearn.Estimator(
            model_fn=cnn_model,
            model_dir=output_dir,
            config=tf.contrib.learn.RunConfig(
                save_checkpoints_steps=CHECKPOINT_STEPS,
                save_checkpoints_secs=None,
                save_summary_steps=SUMMARY_STEPS)),
        train_input_fn=get_train(),
        eval_input_fn=get_valid(),
        eval_metrics={
            'acc': tflearn.MetricSpec(metric_fn=metrics.streaming_accuracy,
                                      prediction_key='class')
        },
        checkpoint_and_export=True,
        train_monitors=None,
        export_strategies=[
            saved_model_export_utils.make_export_strategy(
                serving_input_fn,
                default_output_alternative_key=None,
                exports_to_keep=1)
        ],
        train_steps=TRAIN_STEPS,
        eval_steps=EVAL_STEPS)

def experiment_fn(output_dir): """Function used in creating the Experiment object.""" hparams = model.create_hparams(hparams_overrides) tf.logging.info('Using tf %s', str(tf.__version__)) tf.logging.info('Using hyperparameters %s', hparams) time_crossed_features = [ cross.split(':') for cross in hparams.time_crossed_features if cross and cross != 'n/a' ] train_input_fn = model.get_input_fn( mode=tf.estimator.ModeKeys.TRAIN, input_files=[os.path.join(input_dir, 'train')], label_name=label_name, dedup=hparams.dedup, time_windows=hparams.time_windows, include_age=hparams.include_age, categorical_context_features=hparams.categorical_context_features, sequence_features=hparams.sequence_features, time_crossed_features=time_crossed_features, batch_size=hparams.batch_size) eval_input_fn = model.get_input_fn( mode=tf.estimator.ModeKeys.EVAL, input_files=[os.path.join(input_dir, 'validation')], label_name=label_name, dedup=hparams.dedup, time_windows=hparams.time_windows, include_age=hparams.include_age, categorical_context_features=hparams.categorical_context_features, sequence_features=hparams.sequence_features, time_crossed_features=time_crossed_features, # Fixing the batch size to get comparable evaluations. batch_size=32) serving_input_fn = model.get_serving_input_fn( dedup=hparams.dedup, time_windows=hparams.time_windows, include_age=hparams.include_age, categorical_context_features=hparams.categorical_context_features, sequence_features=hparams.sequence_features, time_crossed_features=time_crossed_features,) estimator = model.make_estimator(hparams, label_values.split(','), output_dir) return contrib_learn.Experiment( estimator=estimator, train_input_fn=train_input_fn, eval_input_fn=eval_input_fn, export_strategies=[ contrib_learn.utils.saved_model_export_utils.make_export_strategy( serving_input_fn, default_output_alternative_key=None, exports_to_keep=1) ], train_steps=num_train_steps, eval_steps=num_eval_steps, eval_delay_secs=0, continuous_eval_throttle_secs=60, **experiment_args)
def experiment_fn_with_params(output_dir, data, **kwargs):
    ITERATIONS = 10000
    # Load the training and eval data into memory.
    mnist = input_data.read_data_sets(data)
    return learn.Experiment(
        estimator=learn.Estimator(model_fn=conv_model,
                                  model_dir=output_dir,
                                  config=training_config),
        train_input_fn=lambda: train_data_input_fn(mnist),
        eval_input_fn=lambda: eval_data_input_fn(mnist),
        train_steps=ITERATIONS,
        eval_steps=1,
        export_strategies=export_strategy
    )

def experiment_fn(output_dir):
    return tflearn.Experiment(
        tflearn.LinearRegressor(feature_columns=feature_cols,
                                model_dir=output_dir),
        train_input_fn=get_train(),
        eval_input_fn=get_valid(),
        eval_metrics={
            'rmse': tflearn.MetricSpec(
                metric_fn=metrics.streaming_root_mean_squared_error)
        })

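# A minimal sketch (assumed wiring, not from the original source) of how
# output_dir-style experiment functions like the one above were launched;
# the output path is hypothetical:
from tensorflow.contrib.learn.python.learn import learn_runner
learn_runner.run(experiment_fn, output_dir='/tmp/trained_model')
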
def _experiment_fn(output_dir):
    return learn.Experiment(
        learn.Estimator(model_fn=model.make_model_fn(args),
                        model_dir=output_dir),
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        train_steps=args.max_steps,
        eval_metrics=model.METRICS,
        continuous_eval_throttle_secs=args.min_eval_seconds,
        min_eval_frequency=args.min_train_eval_rate,
        # Until learn_runner is updated to use train_and_evaluate.
        local_eval_frequency=args.min_train_eval_rate)

def experiment_fn(output_dir):
    ITERATIONS = 10000
    mnist = input_data.read_data_sets(tempfile.mkdtemp())
    return learn.Experiment(
        estimator=learn.Estimator(model_fn=conv_model,
                                  model_dir=output_dir,
                                  config=trainingConfig),
        train_input_fn=lambda: train_data_input_fn(mnist),
        eval_input_fn=lambda: eval_data_input_fn(mnist),
        train_steps=ITERATIONS,
        eval_steps=1,
        # Seconds between evals (?); deprecated, but learn_runner still
        # needs updating.
        local_eval_frequency=30,
        eval_metrics=evaluationMetrics
    )

def experiment_fn(output_dir):
    PADWORD = '[PAD]'
    MAX_DOCUMENT_LENGTH = 3
    titles = [
        'Biodegradable Bags Cause Outrage in Italy',
        'Tom Brady denies key points of ESPN Patriots article',
        'Aldi to open first Kingwood store',
        PADWORD
    ]
    labels = ['International', 'Sport', 'Business']
    TARGETS = tf.constant(labels)
    words = tf.sparse_tensor_to_dense(tf.string_split(titles),
                                      default_value=PADWORD)
    vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(
        MAX_DOCUMENT_LENGTH)
    vocab_processor.fit(titles)
    outfilename = "/Users/eliapalme/Newsriver/Newsriver-classifier/training/vocabfile.vcb"
    vocab_processor.save(outfilename)
    nwords = len(vocab_processor.vocabulary_)
    # Transform the documents using the vocabulary.
    XX = np.array(list(vocab_processor.fit_transform(titles)))
    # Make the targets numeric.
    table = tf.contrib.lookup.index_table_from_tensor(
        mapping=TARGETS, num_oov_buckets=1, default_value=-1)
    targetX = table.lookup(TARGETS)
    # Experiment expects input *functions* returning (features, labels),
    # not raw arrays/tensors as in the original version of this snippet.
    input_fn = lambda: (tf.constant(XX), targetX)
    return tflearn.Experiment(
        tflearn.Estimator(model_fn=cnn_model, model_dir=output_dir),
        train_input_fn=input_fn,
        eval_input_fn=input_fn,
        eval_metrics={
            'acc': tflearn.MetricSpec(metric_fn=metrics.streaming_accuracy,
                                      prediction_key='class')
        },
        export_strategies=[
            saved_model_export_utils.make_export_strategy(
                serving_input_fn,
                default_output_alternative_key=None,
                exports_to_keep=1)
        ],
        train_steps=TRAIN_STEPS)

def _experiment_fn(output_dir):
    runconfig = learn.RunConfig(gpu_memory_fraction=0.6)
    estimator = learn.Estimator(model_fn=cnn_maker.make_model(args.learning_rate),
                                model_dir=output_dir,
                                config=runconfig)
    return learn.Experiment(
        estimator,
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        train_steps=args.num_epochs,
        eval_metrics=cnn_maker.METRICS,  # TODO: add metrics
        continuous_eval_throttle_secs=args.min_eval_seconds,
        min_eval_frequency=args.min_train_eval_rate,
    )

def experiment_fn(output_dir):
    return tflearn.Experiment(
        tflearn.Estimator(model_fn=simple_rnn, model_dir=output_dir),
        train_input_fn=get_train(),
        eval_input_fn=get_valid(),
        eval_metrics={
            'rmse': tflearn.MetricSpec(
                metric_fn=metrics.streaming_root_mean_squared_error)
        },
        export_strategies=[
            saved_model_export_utils.make_export_strategy(
                serving_input_fn,
                default_output_alternative_key=None,
                exports_to_keep=1)
        ])

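# Several snippets here reference a serving_input_fn defined elsewhere. In the
# contrib era such a function returned an InputFnOps. A minimal sketch; the
# feature name 'input' and the N_INPUTS dimension are assumptions:
from tensorflow.contrib.learn.python.learn.utils import input_fn_utils

def serving_input_fn():
    feature_placeholders = {
        'input': tf.placeholder(tf.float32, [None, N_INPUTS])
    }
    return input_fn_utils.InputFnOps(
        dict(feature_placeholders),  # features fed to the model
        None,                        # no labels at serving time
        feature_placeholders)        # placeholders bound at serving time
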
def _experiment_fn(output_dir):
    return tflearn.Experiment(
        get_model(output_dir, nbuckets, hidden_units, learning_rate),
        train_input_fn=read_dataset(traindata,
                                    mode=tf.contrib.learn.ModeKeys.TRAIN,
                                    num_training_epochs=num_training_epochs,
                                    batch_size=batch_size),
        eval_input_fn=read_dataset(evaldata),
        export_strategies=[
            saved_model_export_utils.make_export_strategy(
                serving_input_fn,
                default_output_alternative_key=None,
                exports_to_keep=1)
        ],
        eval_metrics={
            'rmse': tflearn.MetricSpec(metric_fn=my_rmse,
                                       prediction_key='probabilities'),
            'training/hptuning/metric': tflearn.MetricSpec(
                metric_fn=my_rmse, prediction_key='probabilities')
        },
        min_eval_frequency=100,
        **args
    )

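# my_rmse is referenced above but not defined in this snippet. A plausible
# definition (an assumption, not recovered from the source) that satisfies the
# MetricSpec contract of returning a (value, update_op) pair:
def my_rmse(predictions, labels):
    return tf.contrib.metrics.streaming_root_mean_squared_error(
        predictions, labels)
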
def experiment_fn(output_dir):
    # Run the experiment.
    return tflearn.Experiment(
        tflearn.Estimator(model_fn=cnn_model, model_dir=output_dir),
        train_input_fn=get_train(),
        eval_input_fn=get_valid(),
        eval_metrics={
            'acc': tflearn.MetricSpec(metric_fn=metrics.streaming_accuracy,
                                      prediction_key='class')
        },
        export_strategies=[
            saved_model_export_utils.make_export_strategy(
                serving_input_fn,
                default_output_alternative_key=None,
                exports_to_keep=1)
        ])

def train_fn(output_dir):
    tf.logging.info('Inside train_fn')
    logs_path = 'logs'
    writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())
    return tflearn.Experiment(
        tflearn.Estimator(model_fn=nn_model, model_dir=output_dir),
        train_input_fn=get_train(),
        eval_input_fn=get_validate(),
        eval_metrics={
            'acc': tflearn.MetricSpec(metric_fn=metrics.streaming_accuracy,
                                      prediction_key='class')
        },
        export_strategies=None,
        train_steps=TRAIN_STEPS)

def experiment_fn(run_config, unused_hparams):
    """The tf.learn experiment_fn.

    Args:
      run_config: The run config to be passed to the KMeansClustering.
      unused_hparams: Hyperparameters; not applicable.

    Returns:
      A tf.contrib.learn.Experiment.
    """
    kmeans = contrib_learn.KMeansClustering(num_clusters=num_clusters,
                                            config=run_config)
    return contrib_learn.Experiment(estimator=kmeans,
                                    train_steps=train_steps,
                                    train_input_fn=input_fn,
                                    eval_steps=1,
                                    eval_input_fn=input_fn,
                                    min_eval_frequency=min_eval_frequency)

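# The input_fn above is defined elsewhere; for an unsupervised estimator such
# as KMeansClustering it returns features and no labels. A minimal sketch with
# synthetic data (the point count and dimensionality are assumptions):
def input_fn():
    points = np.random.rand(1000, 2).astype(np.float32)
    return tf.constant(points), None
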
def _experimenter_fn(run_config, hparams):
    """Builds the Experiment.

    :param run_config: RunConfig passed through to the Estimator.
    :param hparams: HParams holding input files, batch sizes, and model
        hyperparameters.
    :return: A learn.Experiment wired with training and evaluation inputs.
    """
    # Create the training input function.
    training_fn = lambda: generate_input_fn(
        hparams.train_files,
        epochs=hparams.epochs,
        batch_size=hparams.train_batch_size,
        mapping=hparams.mapping,
        shuffle=True,
        defaults=hparams.defaults,
        features=hparams.features,
    )
    # Create the evaluation input function.
    evaluating_fn = lambda: generate_input_fn(
        hparams.eval_files,
        batch_size=hparams.eval_batch_size,
        mapping=hparams.mapping,
        shuffle=False,
        defaults=hparams.defaults,
        features=hparams.features,
    )
    return learn.Experiment(
        tf.estimator.Estimator(
            generate_model_fn(
                learning_rate=hparams.learning_rate,
                hidden_units=hparams.hidden_units,
                dropout=hparams.dropout,
                weights=hparams.weights,
            ),
            config=run_config,
        ),
        train_input_fn=training_fn,
        eval_input_fn=evaluating_fn,
        **args
    )

def experiment_fn(output_dir):
    wide, deep = get_wide_deep()
    return tflearn.Experiment(
        tflearn.DNNLinearCombinedRegressor(model_dir=output_dir,
                                           linear_feature_columns=wide,
                                           dnn_feature_columns=deep,
                                           dnn_hidden_units=[64, 32]),
        train_input_fn=read_dataset('train'),
        eval_input_fn=read_dataset('eval'),
        eval_metrics={
            'rmse': tflearn.MetricSpec(
                metric_fn=metrics.streaming_root_mean_squared_error)
        },
        export_strategies=[
            saved_model_export_utils.make_export_strategy(
                serving_input_fn,
                default_output_alternative_key=None,
                exports_to_keep=1)
        ]
    )

def experiment_fn(output_dir):
    get_train = model.read_dataset(train_data_paths,
                                   mode=tf.contrib.learn.ModeKeys.TRAIN)
    get_valid = model.read_dataset(eval_data_paths,
                                   mode=tf.contrib.learn.ModeKeys.EVAL)
    # Run the experiment.
    return tflearn.Experiment(
        tflearn.Estimator(model_fn=model.simple_rnn, model_dir=output_dir),
        train_input_fn=get_train,
        eval_input_fn=get_valid,
        eval_metrics={
            'rmse': tflearn.MetricSpec(
                metric_fn=tf.contrib.metrics.streaming_root_mean_squared_error)
        },
        export_strategies=[
            saved_model_export_utils.make_export_strategy(
                model.serving_input_fn,
                default_output_alternative_key=None,
                exports_to_keep=1)
        ],
        **experiment_args)

def experiment_fn(output_dir):
    # Run the experiment.
    return tflearn.Experiment(
        tflearn.Estimator(model_fn=rnn_model, model_dir=output_dir),
        train_input_fn=get_input_fn(training_set),
        eval_input_fn=get_input_fn(test_set, num_epochs=1, shuffle=False),
        eval_steps=50,
        eval_metrics={
            'acc': tflearn.MetricSpec(metric_fn=metrics.streaming_accuracy,
                                      prediction_key='class')
        },
        export_strategies=[
            saved_model_export_utils.make_export_strategy(
                serving_input_fn,
                default_output_alternative_key=None,
                exports_to_keep=1)
        ],
        train_steps=TRAIN_STEPS)

def _experiment_fn(run_config, hparams): """Outputs `Experiment` object given `output_dir`. Args: run_config: `EstimatorConfig` object fo run configuration. hparams: `HParams` object that contains hyperparameters. Returns: `Experiment` object """ estimator = learn.Estimator( model_fn=model_fn, config=run_config, params=hparams) num_train_steps = 1 if FLAGS.oom_test else FLAGS.num_train_steps num_eval_steps = 1 if FLAGS.oom_test else FLAGS.num_eval_steps return learn.Experiment( estimator=estimator, train_input_fn=_get_train_input_fn(), eval_input_fn=_get_eval_input_fn(), train_steps=num_train_steps, eval_steps=num_eval_steps, eval_delay_secs=FLAGS.num_eval_delay_secs)
def _experiment_fn(output_dir):
    train_input = model.generate_input_fn(train_file,
                                          num_epochs=num_epochs,
                                          batch_size=train_batch_size)
    eval_input = model.generate_input_fn(eval_file,
                                         batch_size=eval_batch_size)
    return learn.Experiment(
        model.build_estimator(job_dir,
                              embedding_size=embedding_size,
                              hidden_units=hidden_units),
        train_input_fn=train_input,
        eval_input_fn=eval_input,
        eval_metrics={
            'training/hptuning/metric': learn.MetricSpec(
                metric_fn=metrics.streaming_accuracy,
                prediction_key='logits')
        },
        export_strategies=[
            saved_model_export_utils.make_export_strategy(
                model.serving_input_fn,
                default_output_alternative_key=None,
                exports_to_keep=1)
        ],
        **experiment_args)

def populate_experiment(run_config,
                        hparams,
                        pipeline_config_path,
                        train_steps=None,
                        eval_steps=None,
                        model_fn_creator=create_model_fn,
                        **kwargs):
    """Populates an `Experiment` object.

    EXPERIMENT CLASS IS DEPRECATED. Please switch to
    tf.estimator.train_and_evaluate. As an example, see model_main.py.

    Args:
      run_config: A `RunConfig`.
      hparams: A `HParams`.
      pipeline_config_path: A path to a pipeline config file.
      train_steps: Number of training steps. If None, the number of training
        steps is set from the `TrainConfig` proto.
      eval_steps: Number of evaluation steps per evaluation cycle. If None,
        the number of evaluation steps is set from the `EvalConfig` proto.
      model_fn_creator: A function that creates a `model_fn` for `Estimator`.
        Follows the signature:
        * Args:
          * `detection_model_fn`: Function that returns `DetectionModel`
            instance.
          * `configs`: Dictionary of pipeline config objects.
          * `hparams`: `HParams` object.
        * Returns: `model_fn` for `Estimator`.
      **kwargs: Additional keyword arguments for configuration override.

    Returns:
      An `Experiment` that defines all aspects of training, evaluation, and
      export.
    """
    tf.logging.warning('Experiment is being deprecated. Please use '
                       'tf.estimator.train_and_evaluate(). See model_main.py '
                       'for an example.')
    train_and_eval_dict = create_estimator_and_inputs(
        run_config,
        hparams,
        pipeline_config_path,
        train_steps=train_steps,
        eval_steps=eval_steps,
        model_fn_creator=model_fn_creator,
        save_final_config=True,
        **kwargs)
    estimator = train_and_eval_dict['estimator']
    train_input_fn = train_and_eval_dict['train_input_fn']
    eval_input_fns = train_and_eval_dict['eval_input_fns']
    predict_input_fn = train_and_eval_dict['predict_input_fn']
    train_steps = train_and_eval_dict['train_steps']
    export_strategies = [
        contrib_learn.utils.saved_model_export_utils.make_export_strategy(
            serving_input_fn=predict_input_fn)
    ]
    return contrib_learn.Experiment(
        estimator=estimator,
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fns[0],
        train_steps=train_steps,
        eval_steps=None,
        export_strategies=export_strategies,
        eval_delay_secs=120,
    )

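# The docstring above points at tf.estimator.train_and_evaluate() as the
# replacement for Experiment. A minimal migration sketch reusing the same
# train_and_eval_dict fields (assumed wiring, not the actual model_main.py):
def train_and_evaluate_sketch(train_and_eval_dict):
    estimator = train_and_eval_dict['estimator']
    train_spec = tf.estimator.TrainSpec(
        input_fn=train_and_eval_dict['train_input_fn'],
        max_steps=train_and_eval_dict['train_steps'])
    eval_spec = tf.estimator.EvalSpec(
        input_fn=train_and_eval_dict['eval_input_fns'][0],
        steps=None,
        exporters=tf.estimator.LatestExporter(
            name='exporter',
            serving_input_receiver_fn=train_and_eval_dict['predict_input_fn']))
    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
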
def input_fn(data_set):
    feature_cols = {k: tf.constant(data_set[k].values) for k in FEATURES}
    labels = tf.constant(data_set[LABEL].values)
    return feature_cols, labels

# Feature columns for the regressor. This definition is an assumption; the
# line was missing from the original snippet, which referenced feature_cols
# without defining it at module level.
feature_cols = [tf.contrib.layers.real_valued_column(k) for k in FEATURES]

run_config = tf.contrib.learn.RunConfig(save_checkpoints_secs=1)
regressor = tf.contrib.learn.DNNRegressor(feature_columns=feature_cols,
                                          hidden_units=[10, 10],
                                          model_dir="/tmp/boston_model",
                                          config=run_config)
experiment = learn.Experiment(estimator=regressor,
                              train_input_fn=lambda: input_fn(training_set),
                              eval_input_fn=lambda: input_fn(test_set),
                              train_steps=5000,
                              eval_steps=1)

# Parameter server: serve the std server and block.
if run_config.task_type and run_config.task_type == learn.TaskType.PS:
    print("Start PS on {} ...".format(run_config.master))
    experiment.run_std_server()

if run_config.is_chief:
    print("This is chief worker on {} ...".format(run_config.master))
    # Train with no startup delay.
    experiment.train(delay_secs=0)
    # Evaluate the model (after a one-second delay).
    ev = experiment.evaluate(delay_secs=1)
    loss_score = ev["loss"]

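# For the parameter-server/chief branches above to trigger, the contrib
# RunConfig parses the TF_CONFIG environment variable. A minimal sketch with
# placeholder hosts; the contrib-era task types were 'ps', 'master', and
# 'worker', and the exact keys here are assumptions:
import json
import os

os.environ['TF_CONFIG'] = json.dumps({
    'environment': 'cloud',
    'cluster': {
        'ps': ['localhost:2222'],
        'master': ['localhost:2223'],
        'worker': ['localhost:2224'],
    },
    'task': {'type': 'ps', 'index': 0},
})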