Example no. 1
def main(unused_argv):
    """Main function that trains and evaluates the translation model"""
    hparams = create_hparams(FLAGS)
    os.environ['CUDA_VISIBLE_DEVICES'] = str(hparams.device)
    print('Building models')
    train_model, eval_model, encode_model = build_models(hparams)
    train_loop(train_model, eval_model, encode_model, hparams)
Example no. 2
def main(unused_argv):
    """Main function that trains and evaluats the translation model"""
    hparams = create_hparams(FLAGS)
    os.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2,3'
    train_model = build_models(hparams)
    test_model = build_models(hparams, 'EVAL')
    encode_model = build_models(hparams, 'ENCODE')
    train_loop(train_model=train_model,
               eval_model=test_model,
               encoder_model=encode_model,
               hparams=hparams)
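
Both entry points rely on a create_hparams helper that is not shown. A minimal sketch of what it might look like, assuming TF 1.x and tf.contrib.training.HParams (the field names and defaults below are illustrative assumptions, not the project's actual values):

import tensorflow as tf

def create_hparams(flags):
    # Hypothetical sketch: bundle run settings into an HParams object.
    # Field names and defaults are assumptions, not the project's real ones.
    return tf.contrib.training.HParams(
        save_dir=flags.save_dir,
        batch_size=128,
        device=0,
        gpu_mem_frac=0.9,
        cpu_threads=4)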
Example no. 3
    def __init__(self,
                 model_dir=_default_model_dir,
                 use_gpu=True,
                 batch_size=256,
                 gpu_mem_frac=0.1,
                 beam_width=10,
                 num_top=1,
                 maximum_iterations=1000,
                 cpu_threads=5,
                 emb_activation=None):
        """Constructor for the inference model.

        Args:
            model_dir: Path to the model directory.
            use_gpu: Flag for GPU usage.
            batch_size: Number of samples to process per step.
            gpu_mem_frac: If GPU is used, what memory fraction should be used?
            beam_width: Width of the window used for the beam search decoder.
            num_top: Number of the most probable sequences returned by the beam search decoder.
            maximum_iterations: Maximum number of decoding iterations.
            cpu_threads: Number of CPU threads to use.
            emb_activation: Activation function used in the bottleneck layer.
        Returns:
            None
        """
        self.num_top = num_top
        self.use_gpu = use_gpu
        parser = argparse.ArgumentParser()
        add_arguments(parser)
        flags = parser.parse_args([])
        flags.hparams_from_file = True
        flags.save_dir = model_dir
        self.hparams = create_hparams(flags)
        self.hparams.set_hparam("save_dir", model_dir)
        self.hparams.set_hparam("batch_size", batch_size)
        self.hparams.set_hparam("gpu_mem_frac", gpu_mem_frac)
        self.hparams.add_hparam("beam_width", beam_width)
        self.hparams.set_hparam("cpu_threads", cpu_threads)
        self.encode_model, self.decode_model = build_models(
            self.hparams, modes=["ENCODE", "DECODE"])
        self.maximum_iterations = maximum_iterations
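
For reference, a hedged usage sketch of this constructor. The enclosing class name is not shown in the snippet, so InferenceModel below is a placeholder, and the path is illustrative:

# Hypothetical usage; 'InferenceModel' is a placeholder class name.
inference_model = InferenceModel(
    model_dir='/path/to/saved_model',
    use_gpu=False,
    batch_size=64,
    beam_width=5)

A smaller beam_width trades search breadth for decoding speed; with use_gpu=False the gpu_mem_frac setting has no effect.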
Example no. 4
def main(unused_argv):

  # Set MODEL_DIR to the folder of a previous run to resume training from its set of hyperparameters
  # MODEL_DIR = '/Users/eduardolitonjua/Desktop/Retrieval-System/runs/1472130056' 
  hparams = hyperparameters.create_hparams()

  model_fn = model.create_model_fn(
    hparams,
    model_impl=dual_encoder_model)

  estimator = tf.contrib.learn.Estimator(
    model_fn=model_fn,
    model_dir=MODEL_DIR,
    config=tf.contrib.learn.RunConfig())

  input_fn_train = inputs.create_input_fn(
    mode=tf.contrib.learn.ModeKeys.TRAIN,
    input_files=[TRAIN_FILE],
    batch_size=hparams.batch_size,
    num_epochs=FLAGS.num_epochs)

  input_fn_eval = inputs.create_input_fn(
    mode=tf.contrib.learn.ModeKeys.EVAL,
    input_files=[VALIDATION_FILE],
    batch_size=hparams.eval_batch_size,
    num_epochs=1)

  eval_metrics = metrics.create_evaluation_metrics()

  class EvaluationMonitor(tf.contrib.learn.monitors.EveryN):
    def every_n_step_end(self, step, outputs):
      self._estimator.evaluate(
        input_fn=input_fn_eval,
        metrics=eval_metrics,
        steps=None)

  eval_monitor = EvaluationMonitor(every_n_steps=FLAGS.eval_every, first_n_steps=-1)
  estimator.fit(input_fn=input_fn_train, steps=None, monitors=[eval_monitor])
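
create_evaluation_metrics is referenced above but not shown. A plausible sketch, assuming streaming recall@k metrics built with tf.contrib.learn.MetricSpec (the metric choice and the k values are assumptions):

import functools
import tensorflow as tf

def create_evaluation_metrics():
    # Hypothetical sketch: recall@k over the dual encoder's
    # candidate-response scores.
    eval_metrics = {}
    for k in [1, 2, 5, 10]:
        eval_metrics['recall_at_%d' % k] = tf.contrib.learn.MetricSpec(
            metric_fn=functools.partial(
                tf.contrib.metrics.streaming_sparse_recall_at_k, k=k))
    return eval_metrics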
Example no. 5

import tensorflow as tf
import os
import numpy as np
import time
import scipy
import nibabel as nib
import hyperparameters
from model_3D import deep_model_128in as model

from data_reformating_3D import reformat_eval_data_3D as reformat_data_3D

from loss import loss


# Get hyperparameters
hparams = hyperparameters.create_hparams()

# Set data input path
test_data_dir = hparams.input_dir + '/test'

# Load preprocessed input data
images_dict, labels_dict = load_data_eval(test_data_dir)
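
load_data_eval is neither defined nor imported in this snippet. A minimal sketch of what it might do, assuming NIfTI volumes stored in 'images' and 'labels' subfolders with matching file names (these layout details are assumptions):

def load_data_eval(data_dir):
    # Hypothetical sketch: map case IDs to nibabel images so that the
    # loops below can call .get_data() on each value.
    images_dict, labels_dict = {}, {}
    for fname in sorted(os.listdir(os.path.join(data_dir, 'images'))):
        case_id = fname.split('.')[0]
        images_dict[case_id] = nib.load(os.path.join(data_dir, 'images', fname))
        labels_dict[case_id] = nib.load(os.path.join(data_dir, 'labels', fname))
    return images_dict, labels_dict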


# Extract images from dict
images = []
for k, v in images_dict.items():
    images.append(v.get_data())

# Extract labels from dict
labels = []