Example #1
    def load_estimator(self):
        """

        Returns
        -------
        estimator
            A tf.estimator.DNNClassifier

        """

        # Feature columns describe how to use the input.
        my_feature_columns = []
        for key in self.train_x.keys():
            my_feature_columns.append(
                tf.feature_column.numeric_column(key=key))

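        # RunConfig fields are read-only; replace() returns a new config, here pointing model_dir at the saved model.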
        run_config = RunConfig()
        run_config = run_config.replace(model_dir=self.model_path)

        return tf.estimator.DNNClassifier(
            feature_columns=my_feature_columns,
            # Two hidden layers of 10 nodes each.
            hidden_units=[10, 10],
            # The model must choose between 3 classes.
            n_classes=3,
            # Use the RunConfig to restore the model from model_dir.
            config=run_config,
            model_dir=self.model_path)
Example #2
def main():
    tf.logging.set_verbosity(tf.logging.DEBUG)

    parsed_args = get_parser().parse_args()

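    # Allow ops without a GPU kernel to fall back to CPU and let GPU memory grow on demand.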
    session_config = tf.ConfigProto(allow_soft_placement=True)
    session_config.gpu_options.allow_growth = True
    run_config = RunConfig(session_config=session_config)
    run_config = run_config.replace(model_dir=get_model_dir(parsed_args))

    params = HParams(learning_rate=parsed_args.lr,
                     train_steps=parsed_args.train_steps,
                     steps_per_eval=parsed_args.steps_per_eval,
                     batch_size=parsed_args.batch_size,
                     vgg_model_path=parsed_args.vgg_model_path,
                     selector=parsed_args.selector,
                     dropout=parsed_args.dropout,
                     ctx2out=parsed_args.ctx2out,
                     prev2out=parsed_args.prev2out,
                     dataset=parsed_args.dataset,
                     eval_steps=parsed_args.eval_steps,
                     hard_attention=parsed_args.hard_attention,
                     use_sampler=parsed_args.use_sampler,
                     bin_size=14)

    learn_runner.run(experiment_fn=experiment_fn_inner,
                     run_config=run_config,
                     schedule="continuous_train_and_eval",
                     hparams=params)
Example #3
def main(_):
    mnist = input_data.read_data_sets("/tmp/data")
    X_train = mnist.train.images
    X_test = mnist.test.images
    Y_train = mnist.train.labels.astype("int")
    Y_test = mnist.test.labels.astype("int")

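    # Fix the graph-level random seed and write a checkpoint every 10 seconds.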
    config = RunConfig(tf_random_seed=42, save_checkpoints_secs=10)
    feature_cols = tf.contrib.learn.infer_real_valued_columns_from_input(
        X_train)
    validation_monitor = monitors.ValidationMonitor(x=X_test,
                                                    y=Y_test,
                                                    every_n_steps=100)
    dnn_clf = DNNClassifier(
        hidden_units=[300, 100],
        n_classes=10,
        feature_columns=feature_cols,
        config=config,
        model_dir="/home/mtb/Projects/machine_learning/tensorflow/mnist")

    dnn_clf.fit(X_train,
                Y_train,
                batch_size=50,
                steps=4000,
                monitors=[validation_monitor])
    accuracy_score = dnn_clf.evaluate(x=X_test, y=Y_test)["accuracy"]

    print('accuracy_score: {0}'.format(accuracy_score))
Example #4
def get_unthreaded_estimator(model_path, feature_columns):
    return tf.estimator.DNNClassifier(
        feature_columns=feature_columns,
        hidden_units=[10, 10],
        n_classes=3,
        config=RunConfig().replace(model_dir=model_path),
        model_dir=model_path)
Example #5
def generate_results(model_dir, results_path):
    os.makedirs(os.path.dirname(results_path), exist_ok=True)
    vocab = np.load('output/processed-annotations/vocab.npy')
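    # RunConfig(model_dir=...) points the Estimator at the trained checkpoints used by predict().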
    run_config = RunConfig(model_dir=model_dir)
    hparams = get_hparams(model_dir=model_dir, create=False)
    print(hparams)
    estimator = Estimator(model_fn=model_fn, config=run_config, params=hparams)
    val_path = tf.flags.FLAGS.batch_path
    splits = tf.flags.FLAGS.batch_splits
    batch_size = tf.flags.FLAGS.batch_size
    hook = FeedFnHook(path_fmt=val_path,
                      splits=splits,
                      batch_size=batch_size,
                      predict=True,
                      single_pass=True)

    results = []
    it = tqdm(desc='Generating results')
    for prediction in estimator.predict(input_fn=predict_input_fn,
                                        hooks=[hook]):
        caption = calc_caption(prediction=prediction, vocab=vocab)
        results.append({
            'image_id': np.asscalar(prediction['image_ids']),
            'caption': caption
        })
        it.update(1)
    with open(results_path, 'w') as f:
        json.dump(results, f)
Example #6
def start_prediction(output_directory,
                     data_directory,
                     dataset_name,
                     model_dir,
                     network_name,
                     batch_size,
                     batch_threads,
                     num_classes=None):
    dataset_factory = DatasetFactory(dataset_name=dataset_name,
                                     data_directory=data_directory,
                                     augment=False)

    if num_classes is None:
        num_classes = dataset_factory.get_dataset('train').num_classes()

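    # Retain at most 10 checkpoints; no step-based checkpoint schedule is requested.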
    run_config = RunConfig(keep_checkpoint_max=10, save_checkpoints_steps=None)
    # Instantiate Estimator
    estimator = tf.estimator.Estimator(model_fn=get_model_function(
        model_dir, network_name, num_classes),
                                       model_dir=model_dir,
                                       config=run_config,
                                       params={})
    image_size = nets_factory.get_input_size(network_name)

    run_prediction_and_evaluation(output_directory, batch_size, batch_threads,
                                  dataset_factory, estimator, image_size)
Example #7
def main(_):
    mnist = load_dataset('mnist')
    train_data = mnist.train.images
    train_labels = mnist.train.labels
    test_data = mnist.test.images
    test_labels = mnist.test.labels
    session_config = tf.ConfigProto()
    session_config.gpu_options.allow_growth = True
    config = RunConfig(session_config=session_config)
    mnist_classifier = tf.estimator.Estimator(model_fn=cnn_model_fn,
                                              model_dir='./model/',
                                              config=config)
    #tf.estimator.EstimatorSpec()
    tensors_to_log = {"probabilities": "softmax_tensor"}
    logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log,
                                              every_n_iter=50)
    train_input_fn = tf.estimator.inputs.numpy_input_fn(x={'x': train_data},
                                                        y=train_labels,
                                                        batch_size=100,
                                                        num_epochs=None,
                                                        shuffle=True)
    mnist_classifier.train(input_fn=train_input_fn,
                           steps=20000,
                           hooks=[logging_hook])
    eval_input_fn = tf.estimator.inputs.numpy_input_fn(x={'x': test_data},
                                                       y=test_labels,
                                                       num_epochs=1,
                                                       shuffle=False)
    eval_result = mnist_classifier.evaluate(input_fn=eval_input_fn)
    print(eval_result)
Example #8
def build_estimator(model_dir):
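    # Checkpoint every 180 seconds; TF_CONFIG (printed below) carries the cluster spec for distributed training.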
    config = RunConfig(save_checkpoints_secs=180)
    print('----------------------TF_CONFIG--------------------------')
    print(os.environ['TF_CONFIG'])
    print('---------------------------------------------------------')
    return tf.estimator.Estimator(model_fn=_cnn_model_fn,
                                  model_dir=model_dir,
                                  config=config)
Example #9
def start_training(data_directory, dataset_name, output_directory,
                   network_name, batch_size, learning_rate, batch_threads,
                   num_epochs, initial_checkpoint, checkpoint_exclude_scopes,
                   ignore_missing_variables, trainable_scopes,
                   not_trainable_scopes, fixed_learning_rate,
                   learning_rate_decay_rate, do_evaluation,
                   learning_rate_decay_steps):
    dataset_factory = DatasetFactory(dataset_name=dataset_name,
                                     data_directory=data_directory)
    model_params = {
        'learning_rate': learning_rate,
        'fixed_learning_rate': fixed_learning_rate,
        'learning_rate_decay_rate': learning_rate_decay_rate,
        'learning_rate_decay_steps':
            (dataset_factory.get_dataset('train').get_number_of_samples()
             if learning_rate_decay_steps is None
             else learning_rate_decay_steps) // batch_size
    }

    run_config = RunConfig(keep_checkpoint_max=10, save_checkpoints_steps=None)
    # Instantiate Estimator
    estimator = tf.estimator.Estimator(model_fn=get_model_function(
        output_directory, network_name,
        dataset_factory.get_dataset('train').num_classes(), initial_checkpoint,
        checkpoint_exclude_scopes, ignore_missing_variables, trainable_scopes,
        not_trainable_scopes),
                                       params=model_params,
                                       model_dir=output_directory,
                                       config=run_config)
    image_size = nets_factory.get_input_size(network_name)

    dataset = dataset_factory.get_dataset('train')
    evaluation_summary_writer = get_evaluation_summary_writer(
        do_evaluation, output_directory)

    for epoch in range(num_epochs):
        run_training(dataset=dataset,
                     batch_size=batch_size,
                     batch_threads=batch_threads,
                     epoch=epoch,
                     estimator=estimator,
                     num_epochs=num_epochs,
                     image_size=image_size)

        if do_evaluation:
            run_evaluation_conserving_best(
                estimator=estimator,
                batch_size=2 * batch_size,
                batch_threads=batch_threads,
                dataset_factory=dataset_factory,
                image_size=image_size,
                evaluation_summary_writer=evaluation_summary_writer)

    print('Finished training')
Example #10
def main(_argv):
    model_dir = tf.flags.FLAGS.model_dir
    run_config = RunConfig(model_dir=model_dir)
    hparams = HParams(generator_steps=1,
                      discriminator_steps=1,
                      latent_units=100,
                      dis_lr=1e-4,
                      gen_lr=1e-3)
    tf.contrib.learn.learn_runner.run(experiment_fn=experiment_fn,
                                      run_config=run_config,
                                      schedule=tf.flags.FLAGS.schedule,
                                      hparams=hparams)
Example #11
def start_training(data_directory, dataset_name, mean, output_directory,
                   network_name, batch_size, learning_rate, learning_rate_gen,
                   beta1_gen, separable_conv, batch_threads, num_epochs,
                   initial_checkpoint, checkpoint_exclude_scopes,
                   ignore_missing_variables, trainable_scopes,
                   not_trainable_scopes, fixed_learning_rate,
                   learning_rate_decay_rate, do_evaluation,
                   learning_rate_decay_steps, img_size):
    dataset_factory = DatasetFactory(dataset_name=dataset_name,
                                     data_directory=data_directory,
                                     mean=mean)
    model_params = {
        'learning_rate': learning_rate,
        'learning_rate_gen': learning_rate_gen,
        'beta1_gen': beta1_gen,
        'fixed_learning_rate': fixed_learning_rate,
        'learning_rate_decay_rate': learning_rate_decay_rate,
        'separable_conv': separable_conv,
        'learning_rate_decay_steps':
            (dataset_factory.get_dataset('train').get_number_of_samples()
             if learning_rate_decay_steps is None
             else learning_rate_decay_steps) // batch_size
    }

    run_config = RunConfig(keep_checkpoint_max=10, save_checkpoints_steps=None)
    # Instantiate Estimator
    estimator = tf.estimator.Estimator(model_fn=get_model_function(
        output_directory, network_name,
        dataset_factory.get_dataset('train').num_classes(), initial_checkpoint,
        checkpoint_exclude_scopes, ignore_missing_variables, trainable_scopes,
        not_trainable_scopes),
                                       params=model_params,
                                       model_dir=output_directory,
                                       config=run_config)

    image_size = img_size

    for epoch in range(num_epochs):
        run_training(dataset_factory,
                     batch_size=batch_size,
                     batch_threads=batch_threads,
                     epoch=epoch,
                     estimator=estimator,
                     num_epochs=num_epochs,
                     image_size=image_size)
    print('Finished training')
Example #12
def main(_argv):
    if tf.flags.FLAGS.debug:
        enable_debugging_monkey_patch()
    model_dir = tf.flags.FLAGS.model_dir
    os.makedirs(model_dir, exist_ok=True)
    print("model_dir={}".format(model_dir))
    run_config = RunConfig(model_dir=model_dir)
    hparams = get_hparams(model_dir, create=True)
    estimator = tf.contrib.learn.learn_runner.run(
        experiment_fn=experiment_fn,
        run_config=run_config,
        schedule=tf.flags.FLAGS.schedule,
        hparams=hparams)
Example #13
def main(_argv, config: TextConfig):
    model_dir = tf.flags.FLAGS.model_dir
    os.makedirs(model_dir, exist_ok=True)
    print("model_dir={}".format(model_dir))
    run_config = RunConfig(
        model_dir=model_dir,
        save_checkpoints_steps=tf.flags.FLAGS.save_checkpoints_steps)
    # save_checkpoints_secs=tf.flags.FLAGS.save_checkpoints_secs)
    hparams = get_hparams(model_dir, create=True)
    estimator = run(experiment_fn=make_experiment_fn(config),
                    run_config=run_config,
                    schedule=tf.flags.FLAGS.schedule,
                    hparams=hparams)
Example #14
    def _build_run_config(self):
        valid_runconfig_keys = [
            'save_summary_steps', 'save_checkpoints_secs',
            'save_checkpoints_steps', 'keep_checkpoint_max',
            'keep_checkpoint_every_n_hours', 'log_step_count_steps'
        ]

        runconfig_params = {k: v for k, v in self.customer_params.items() if k in valid_runconfig_keys}

        logger.info("creating RunConfig:")
        logger.info(runconfig_params)

        run_config = RunConfig(
            model_dir=self.model_path,
            **runconfig_params
        )
        return run_config
Example #15
def main():
    hparams = HParams(**HPARAMS)
    run_config = RunConfig(model_dir='./save')

    if len(argv) < 2 or argv[1] == 'train':
        learn_runner.run(
            experiment_fn=experiment_fn,
            run_config=run_config,
            schedule="train_and_evaluate",
            hparams=hparams,
        )
    elif argv[1] == 'predict':
        pass
    else:
        print('Unknown Operation.')
Example #16
def main(_argv):
    # Pass command-line arguments to RunConfig
    run_config = RunConfig(
        model_dir=tf.flags.FLAGS.model_dir,
        save_checkpoints_steps=tf.flags.FLAGS.save_checkpoints_steps)
    # Default hyperparameters
    hparams = HParams(l2=1e-3, lr=1e-3, hidden_layers=3, hidden_units=200)
    # Parse the hparams command-line argument
    hparams.parse(tf.flags.FLAGS.hparams)
    # Run the experiment
    run(
        experiment_fn=experiment_fn,
        run_config=run_config,
        schedule=tf.flags.FLAGS.schedule,
        hparams=hparams)
Example #17
def start_training(data_directory, dataset_name, output_directory, network_name,
                   batch_size, learning_rate, batch_threads, num_epochs,
                   initial_checkpoint, checkpoint_exclude_scopes,
                   ignore_missing_variables, trainable_scopes,
                   fixed_learning_rate, learning_rate_decay_rate, num_classes):
    dataset_factory = DatasetFactory(dataset_name=dataset_name, data_directory=data_directory)
    model_params = {
        'learning_rate': learning_rate,
        'fixed_learning_rate': fixed_learning_rate,
        'learning_rate_decay_rate': learning_rate_decay_rate,
        'learning_rate_decay_steps':
            dataset_factory.get_dataset('train').get_number_of_samples() // batch_size
    }

    run_config = RunConfig(keep_checkpoint_max=10, save_checkpoints_steps=None)
    # Instantiate Estimator
    estimator = tf.estimator.Estimator(
        model_fn=get_model_function(
            output_directory, network_name,
            dataset_factory.get_dataset('train').num_classes()
            if num_classes is None else num_classes,
            initial_checkpoint, checkpoint_exclude_scopes,
            ignore_missing_variables, trainable_scopes),
        params=model_params,
        model_dir=output_directory,
        config=run_config)
    image_size = nets_factory.get_input_size(network_name)

    for epoch in range(num_epochs):
        run_training(dataset_factory, batch_size, batch_threads, epoch, estimator, num_epochs, image_size)
        #run_validation(dataset_factory, batch_size, batch_threads, estimator, image_size)

    run_evaluation(batch_size, batch_threads, dataset_factory, estimator, image_size)
Example #18
def main():
  parsed_args = get_parser().parse_args()
  with open(os.path.join("data/challenger.ai", 'word_to_idx.pkl'), 'rb') as f:
    word_to_idx = pickle.load(f)
  hparams = HParams(vocab_size=len(word_to_idx),
                    batch_size=parsed_args.batch_size,
                    selector=parsed_args.selector,
                    dropout=parsed_args.dropout,
                    ctx2out=parsed_args.ctx2out,
                    prev2out=parsed_args.prev2out,
                    hard_attention=parsed_args.hard_attention,
                    bin_size=14)
  run_config = RunConfig(model_dir=parsed_args.model_dir)
  estimator = Estimator(
    model_fn=model_fn_inner,
    params=hparams,
    config=run_config)

  dataset = ChallengerAI("data/challenger.ai")

  input_fn = dataset.get_tfrecords_test_input_fn(bin_size=hparams.bin_size)
  val_init_hook = IteratorInitializerHook("infer")

  idx_to_word = {v: k for k, v in word_to_idx.items()}
  del word_to_idx

  results = estimator.predict(input_fn, hooks=[val_init_hook])
  all_predictions = []
  image_ids = []
  num_generated = 0
  for batch_result in results:
    image_id, pred = batch_result["image_id"], batch_result["predictions"]
    result = ''.join([idx_to_word[idx] for idx in pred if idx != 0 and idx != 2])
    all_predictions.append(result)
    image_ids.append(image_id.decode("utf-8").split(".")[0])
    num_generated = num_generated + 1
    if num_generated % 1000 == 0:
      print("Generated %d" % num_generated)

  total_results = [{"image_id": img_id, "caption": pred}
                   for img_id, pred
                   in zip(image_ids, all_predictions)]
  with open("result.json", "w", encoding="utf-8") as f:
    json.dump(total_results, f, ensure_ascii=False)
Example #19
def start_prediction(data_directory, dataset_name, mean, model_dir,
                     network_name, batch_size, batch_threads, num_classes,
                     result_dir, img_size, model, mode):
    dataset_factory = DatasetFactory(dataset_name=dataset_name,
                                     data_directory=data_directory,
                                     mean=mean,
                                     augment=False,
                                     num_classes=num_classes)

    run_config = RunConfig(keep_checkpoint_max=10, save_checkpoints_steps=None)
    # Instantiate Estimator
    estimator = tf.estimator.Estimator(model_fn=get_model_function(
        model_dir, network_name,
        dataset_factory.get_dataset('train').num_classes()),
                                       model_dir=model_dir,
                                       config=run_config,
                                       params={})

    image_size = img_size
    run_prediction_and_evaluation(batch_size, batch_threads, dataset_factory,
                                  estimator, image_size, result_dir, mode)
Example #20
def generate_captions(model_dir, output_dir):
    os.makedirs(output_dir, exist_ok=True)
    vocab = np.load('output/processed-annotations/vocab.npy')
    run_config = RunConfig(model_dir=model_dir)
    hparams = get_hparams(model_dir, create=False)
    print(hparams)
    estimator = Estimator(
        model_fn=model_fn,
        config=run_config,
        params=hparams)
    val_path = tf.flags.FLAGS.batch_path
    use_slot_vocab = hparams.use_slot_vocab
    hook = FeedFnHook(path_fmt=val_path, splits=1, batch_size=tf.flags.FLAGS.batch_size, predict=True)
    with open(os.path.join(output_dir, 'captions.csv'), 'w', newline='') as f:
        w = csv.writer(f)
        w.writerow(['Index', 'Caption'])
        for i, prediction in enumerate(estimator.predict(input_fn=predict_input_fn, hooks=[hook])):
            caption = write_prediction(os.path.join(output_dir, '{:08d}'.format(i)),
                                       prediction=prediction, vocab=vocab, use_slot_vocab=use_slot_vocab)
            w.writerow([i, caption])
            if i > 100:
                break
Example #21
    def _build_estimator(self):
        valid_runconfig_keys = [
            'save_summary_steps', 'save_checkpoints_secs',
            'save_checkpoints_steps', 'keep_checkpoint_max',
            'keep_checkpoint_every_n_hours', 'log_step_count_steps'
        ]

        runconfig_params = {
            k: v
            for k, v in self.customer_params.items()
            if k in valid_runconfig_keys
        }

        logging.info("creating RunConfig:")
        logging.info(runconfig_params)

        run_config = RunConfig(model_dir=self.model_path, **runconfig_params)

        if hasattr(self.customer_script, 'estimator_fn'):
            logging.info("invoking estimator_fn")
            return self.customer_script.estimator_fn(run_config,
                                                     self.customer_params)
        elif hasattr(self.customer_script, 'keras_model_fn'):
            logging.info("involing keras_model_fn")
            model = self.customer_script.keras_model_fn(self.customer_params)
            return tf.keras.estimator.model_to_estimator(keras_model=model,
                                                         config=run_config)
        else:
            logging.info("creating the estimator")

            # Forward hyperparameters as `params`, the argument name tf.estimator.Estimator expects
            def _model_fn(features, labels, mode, params):
                return self.customer_script.model_fn(features, labels, mode,
                                                     params)

            return tf.estimator.Estimator(model_fn=_model_fn,
                                          params=self.customer_params,
                                          config=run_config)
Example #22
def main():
    hparams = HParams(**HPARAMS)
    run_config = RunConfig(model_dir='./save')

    if len(argv) < 2 or argv[1] == 'train':
        learn_runner.run(
            experiment_fn=experiment_fn,
            run_config=run_config,
            schedule="train_and_evaluate",
            hparams=hparams,
        )
    elif argv[1] == 'predict':
        input_fn_factory = ModelInputs(hparams.vocab_paths, 1)
        predict_input_fn, predict_input_hook = input_fn_factory.get_inputs(
            hparams.predict_dataset_path,
            mode=estimator.ModeKeys.PREDICT,
            num_infer=1)
        classifier = get_estimator(run_config, hparams)
        predictions = classifier.predict(input_fn=predict_input_fn,
                                         hooks=[predict_input_hook])
        print_predictions(predictions, hparams)
    else:
        print('Unknown Operation.')
Example #23
def main():
    parsed_args = get_parser().parse_args()
    with open(os.path.join("data", 'word_to_idx.pkl'), 'rb') as f:
        word_to_idx = pickle.load(f)
    hparams = HParams(vocab_size=len(word_to_idx),
                      batch_size=parsed_args.batch_size,
                      selector=parsed_args.selector,
                      dropout=parsed_args.dropout,
                      ctx2out=parsed_args.ctx2out,
                      prev2out=parsed_args.prev2out)
    run_config = RunConfig(model_dir=parsed_args.model_dir)
    estimator = Estimator(model_fn=model_fn, params=hparams, config=run_config)

    image_ids, input_fn = get_input_fn()
    val_init_hook = IteratorInitializerHook("infer")

    idx_to_word = {v: k for k, v in word_to_idx.items()}
    del word_to_idx

    pred_results = estimator.predict(input_fn, hooks=[val_init_hook])
    all_predictions = []
    num_generated = 0
    for pred in pred_results:
        result = ' '.join(
            [idx_to_word[idx] for idx in pred if idx != 0 and idx != 2])
        all_predictions.append(result)
        num_generated = num_generated + 1
        if num_generated % 1000 == 0:
            print("Generated %d" % num_generated)

    total_results = [{
        "image_id": img_id,
        "caption": pred
    } for img_id, pred in zip(image_ids, all_predictions)]
    with open("result.json", "w") as f:
        json.dump(total_results, f)
Example #24
            clip_max=1.0,
            time_shift_samples=0,
        )
        result[FINGERPRINT_KEY] = getMfcc(getTransformedAudioLocal(**audio_options))
        yield result

test_input_fn = generator_input_fn(
    x=test_data_generator,
    batch_size=TEST_BATCH_SIZE,
    shuffle=False,
    num_epochs=1,
    queue_capacity=10 * TEST_BATCH_SIZE,
    num_threads=1,
)

model = create_estimator(
    config=RunConfig(model_dir=model_dir),
    hparams=HParams(**params),
)
it = model.predict(input_fn=test_input_fn)

submission = dict()
for t in tqdm(it):
    fname, label = t['fname'].decode(), id2name[t['label']]
    submission[fname] = label

with open(os.path.join(model_dir, 'submission.csv'), 'w') as fout:
    fout.write('fname,label\n')
    for fname, label in submission.items():
        fout.write('{},{}\n'.format(fname, label))
Example #25
from tensorflow.contrib.layers import one_hot_column, real_valued_column, sparse_column_with_keys
from tensorflow.contrib.layers.python.layers.feature_column import _OneHotColumn, _RealValuedColumn, _SparseColumnKeys
from tensorflow.contrib.learn.python.learn.utils.input_fn_utils import InputFnOps

import numpy
import os
import pandas
import shutil
import tempfile
import tensorflow as tf

tf.logging.set_verbosity(tf.logging.INFO)

#tf.reset_default_graph()

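# A single core and a fixed random seed are used so runs are reproducible.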
estimator_conf = RunConfig(num_cores = 1, tf_random_seed = 42)

def load_csv(name):
	return pandas.read_csv("csv/" + name)

def store_csv(df, name):
	df.to_csv("csv/" + name, index = False)

def store_savedmodel(estimator, serving_input_fn, name):
	savemodel_dir = estimator.export_savedmodel(tempfile.mkdtemp(), serving_input_fn = serving_input_fn, as_text = True)
	savemodel_dir = savemodel_dir.decode("UTF-8")

	if(os.path.isdir("savedmodel/" + name)):
		shutil.rmtree("savedmodel/" + name)
	shutil.move(savemodel_dir, "savedmodel/" + name)
Example #26
    @classmethod
    def setUpClass(cls):
        """Set up the test environment."""
        tf.logging.set_verbosity(tf.logging.INFO)
        cls.estimator_conf = RunConfig(num_cores=1, tf_random_seed=42)
        cls.data = TestData()
Example #27
    args = parser.parse_args()

    # Input pipe settings
    input_param = {
        'data_dir': args.data_dir,
        'batch_size': args.batch_size,
        'buffer_size': args.buffer_size,
        'epochs': args.train_epochs,
        'num_parallel_calls': args.num_parallel_calls,
        'img_sizes': input_pipe.get_tf_record_image_size(args.data_dir),
        'padding': args.unet_padding
    }

    # Create run configuration default
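    # Each replace() call returns a new RunConfig, so the result must be reassigned.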
    run_config = RunConfig()
    run_config = run_config.replace(model_dir=os.path.join(args.output_dir, args.model_type))
    run_config = run_config.replace(save_summary_steps=args.save_summary_steps)
    run_config = run_config.replace(save_checkpoints_steps=args.save_checkpoints_steps)

    # Define model and input parameters
    hparams = HParams(
        learning_rate=args.learning_rate,
        l2_gain=args.l2_gain,
        model_type=args.model_type,
        rmsprop_momentum=args.rmsprop_momentum,
        opt_epsilon=args.opt_epsilon,
        rmsprop_decay=args.rmsprop_decay,
        padding=args.unet_padding,
        optimizer=args.optimizer,
        model_dir=run_config.model_dir