import argparse
import os

import tensorflow as tf
from tensorflow.contrib.learn.python.learn import learn_runner
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.contrib.training.python.training import hparam

import experiment   # local module: builds the Experiment
import parameters   # local module: argument definitions and HYPER_PARAMS
import serving      # local module: serving input functions


def main():
    # Parse command-line arguments and expose them as hyper-parameters
    args_parser = argparse.ArgumentParser()
    args = parameters.initialise_arguments(args_parser)
    parameters.HYPER_PARAMS = hparam.HParams(**args.__dict__)

    # Set python level verbosity
    tf.logging.set_verbosity(args.verbosity)

    # Set C++ Graph Execution level verbosity (integer division: INFO=20 -> '2')
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(
        tf.logging.__dict__[args.verbosity] // 10)

    # Directory to store output model and checkpoints
    output_dir = args.job_dir

    # Run the training job
    learn_runner.run(
        experiment.generate_experiment_fn(
            min_eval_frequency=args.min_eval_frequency,
            eval_delay_secs=args.eval_delay_secs,
            train_steps=args.train_steps,
            eval_steps=args.eval_steps,
            export_strategies=[
                saved_model_export_utils.make_export_strategy(
                    serving.SERVING_FUNCTIONS[args.export_format],
                    exports_to_keep=1,
                    default_output_alternative_key=None,
                )
            ]
        ),
        run_config=run_config.RunConfig(model_dir=output_dir),
        hparams=parameters.HYPER_PARAMS
    )
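# experiment.generate_experiment_fn above is a local project module and its
# implementation is not shown here. A minimal sketch of what it might look
# like, assuming the keyword arguments above are forwarded verbatim to
# tf.contrib.learn.Experiment; model.create_estimator and
# inputs.generate_input_fn are hypothetical helper names used for
# illustration only:

def generate_experiment_fn(**experiment_args):
    """Builds an experiment_fn compatible with learn_runner.run."""
    def _experiment_fn(run_config, hparams):
        return tf.contrib.learn.Experiment(
            model.create_estimator(run_config, hparams),       # hypothetical helper
            train_input_fn=inputs.generate_input_fn(hparams),  # hypothetical helper
            eval_input_fn=inputs.generate_input_fn(hparams),   # hypothetical helper
            **experiment_args
        )
    return _experiment_fn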
import argparse
import os
from datetime import datetime

import tensorflow as tf

import parameters   # local module: argument definitions and HYPER_PARAMS


def main():
    # Parse command-line arguments and expose them as hyper-parameters
    args_parser = argparse.ArgumentParser()
    args = parameters.initialise_arguments(args_parser)
    parameters.HYPER_PARAMS = args

    print('')
    print('Hyper-parameters:')
    print(parameters.HYPER_PARAMS)
    print('')

    # Set python level verbosity
    tf.logging.set_verbosity(args.verbosity)

    # Set C++ Graph Execution level verbosity (integer division: INFO=20 -> '2')
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(
        tf.logging.__dict__[args.verbosity] // 10)

    # Directory to store output model and checkpoints
    model_dir = args.job_dir

    # If reuse_job_dir is False, remove the job_dir if it exists
    print("Resume training:", args.reuse_job_dir)
    if not args.reuse_job_dir:
        if tf.gfile.Exists(args.job_dir):
            tf.gfile.DeleteRecursively(args.job_dir)
            print("Deleted job_dir {} to avoid re-use".format(args.job_dir))
        else:
            print("No job_dir available to delete")
    else:
        print("Reusing job_dir {} if it exists".format(args.job_dir))

    run_config = tf.estimator.RunConfig(
        tf_random_seed=19830610,
        log_step_count_steps=1000,
        save_checkpoints_secs=120,  # change to adjust checkpoint frequency
        keep_checkpoint_max=3,
        model_dir=model_dir
    )
    print("Model Directory:", run_config.model_dir)

    # Run the train-and-evaluate experiment
    time_start = datetime.utcnow()
    print("")
    print("Experiment started at {}".format(time_start.strftime("%H:%M:%S")))
    print(".......................................")

    run_experiment(run_config)

    time_end = datetime.utcnow()
    print(".......................................")
    print("Experiment finished at {}".format(time_end.strftime("%H:%M:%S")))
    print("")
    time_elapsed = time_end - time_start
    print("Experiment elapsed time: {} seconds".format(
        time_elapsed.total_seconds()))
    print("")
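# run_experiment is defined elsewhere in this trainer and is not shown here.
# A minimal sketch of how it could wire up tf.estimator.train_and_evaluate,
# assuming create_estimator, make_input_fn and make_serving_fn are project
# helpers (hypothetical names) and that HYPER_PARAMS carries train_files,
# eval_files, train_steps, eval_steps and eval_delay_secs:

def run_experiment(run_config):
    """Trains and evaluates the estimator under the given RunConfig."""
    hparams = parameters.HYPER_PARAMS

    train_spec = tf.estimator.TrainSpec(
        input_fn=make_input_fn(hparams.train_files),   # hypothetical helper
        max_steps=hparams.train_steps
    )
    exporter = tf.estimator.LatestExporter(
        'estimator',
        make_serving_fn(),                             # hypothetical helper
        exports_to_keep=1
    )
    eval_spec = tf.estimator.EvalSpec(
        input_fn=make_input_fn(hparams.eval_files),    # hypothetical helper
        steps=hparams.eval_steps,
        exporters=[exporter],
        throttle_secs=hparams.eval_delay_secs
    )

    estimator = create_estimator(run_config, hparams)  # hypothetical helper
    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)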
import argparse
import os

import tensorflow as tf
from tensorflow.contrib.learn.python.learn import learn_runner
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.contrib.training.python.training import hparam

import experiment   # local module: builds the Experiment
import parameters   # local module: argument definitions and HYPER_PARAMS
import serving      # local module: serving input functions


def main():
    # Parse command-line arguments and expose them as hyper-parameters
    args_parser = argparse.ArgumentParser()
    args = parameters.initialise_arguments(args_parser)
    parameters.HYPER_PARAMS = hparam.HParams(**args.__dict__)

    # Set python level verbosity
    tf.logging.set_verbosity(args.verbosity)

    # Set C++ Graph Execution level verbosity (integer division: INFO=20 -> '2')
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(
        tf.logging.__dict__[args.verbosity] // 10)

    # Directory to store output model and checkpoints
    output_dir = args.job_dir

    # If reuse_job_dir is False, remove the job_dir if it exists
    if not args.reuse_job_dir:
        if tf.gfile.Exists(args.job_dir):
            tf.gfile.DeleteRecursively(args.job_dir)
            tf.logging.info("Deleted job_dir {} to avoid re-use".format(args.job_dir))
        else:
            tf.logging.info("No job_dir available to delete")
    else:
        tf.logging.info("Reusing job_dir {} if it exists".format(args.job_dir))

    # Run the training experiment
    learn_runner.run(
        experiment.generate_experiment_fn(
            min_eval_frequency=args.min_eval_frequency,
            eval_delay_secs=args.eval_delay_secs,
            train_steps=args.train_steps,
            eval_steps=args.eval_steps,
            export_strategies=[
                saved_model_export_utils.make_export_strategy(
                    serving.SERVING_FUNCTIONS[args.export_format],
                    exports_to_keep=1,
                    default_output_alternative_key=None,
                )
            ]
        ),
        run_config=tf.contrib.learn.RunConfig(
            model_dir=output_dir,
            log_device_placement=True
        ),
        schedule="train_and_evaluate",
        hparams=parameters.HYPER_PARAMS
    )
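# A typical entry point for any of the main() variants above, so the trainer
# can be launched directly (e.g. `python task.py ...`) or submitted as a
# training job; the file name task.py is an assumption:

if __name__ == '__main__':
    main()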