def train(hparams, output_dir):
  prefix = output_dir
  data_dir = os.path.expanduser(prefix + "/data")
  tmp_dir = os.path.expanduser(prefix + "/tmp")
  output_dir = os.path.expanduser(prefix + "/output")
  tf.gfile.MakeDirs(data_dir)
  tf.gfile.MakeDirs(tmp_dir)
  tf.gfile.MakeDirs(output_dir)
  last_model = ""
  start_time = time.time()
  line = ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>    "
  for iloop in range(hparams.epochs):
      time_delta = time.time() - start_time
      print(line+"Step {}.1. - generate data from policy. "
            "Time: {}".format(iloop, str(datetime.timedelta(seconds=time_delta))))
      FLAGS.problems = "gym_discrete_problem"
      FLAGS.agent_policy_path = last_model
      gym_problem = problems.problem(FLAGS.problems)
      gym_problem.num_steps = hparams.true_env_generator_num_steps
      iter_data_dir = os.path.join(data_dir, str(iloop))
      tf.gfile.MakeDirs(iter_data_dir)
      gym_problem.generate_data(iter_data_dir, tmp_dir)

      time_delta = time.time() - start_time
      print(line+"Step {}.2. - generate env model. "
            "Time: {}".format(iloop, str(datetime.timedelta(seconds=time_delta))))
      # 2. generate env model
      FLAGS.data_dir = iter_data_dir
      FLAGS.output_dir = output_dir
      FLAGS.model = hparams.generative_model
      FLAGS.hparams_set = hparams.generative_model_params
      FLAGS.train_steps = hparams.model_train_steps
      FLAGS.eval_steps = 1
      t2t_trainer.main([])

      time_delta = time.time() - start_time
      print(line+"Step {}.3. - evalue env model. "
            "Time: {}".format(iloop, str(datetime.timedelta(seconds=time_delta))))
      gym_simulated_problem = problems.problem("gym_simulated_discrete_problem")
      gym_simulated_problem.num_steps = hparams.simulated_env_generator_num_steps
      gym_simulated_problem.generate_data(iter_data_dir, tmp_dir)

      time_delta = time.time() - start_time
      print(line+"Step {}.4. - train PPO in model env."
            " Time: {}".format(iloop, str(datetime.timedelta(seconds=time_delta))))
      ppo_epochs_num = hparams.ppo_epochs_num
      ppo_hparams = trainer_lib.create_hparams(
          "atari_base",
          "epochs_num={},simulated_environment=True,eval_every_epochs=0,"
          "save_models_every_epochs={}".format(ppo_epochs_num + 1, ppo_epochs_num),
          data_dir=output_dir)
      ppo_hparams.epoch_length = hparams.ppo_epoch_length
      ppo_dir = tempfile.mkdtemp(dir=data_dir, prefix="ppo_")
      in_graph_wrappers = [
          (TimeLimitWrapper, {"timelimit": 150}),
          (PongT2TGeneratorHackWrapper, {"add_value": -2})
      ] + gym_problem.in_graph_wrappers
      ppo_hparams.add_hparam("in_graph_wrappers", in_graph_wrappers)
      rl_trainer_lib.train(ppo_hparams, "PongNoFrameskip-v4", ppo_dir)

      last_model = ppo_dir + "/model{}.ckpt".format(ppo_epochs_num)
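A minimal driver sketch for the loop above. The hparam values below are illustrative assumptions; only the field names are taken from the function body, and the model/hparams-set names are placeholders for whatever generative model you actually use.

import tensorflow as tf

loop_hparams = tf.contrib.training.HParams(
    epochs=10,                              # outer iterations of the loop
    true_env_generator_num_steps=5000,      # env steps collected per iteration
    generative_model="basic_conv_gen",      # placeholder registry name
    generative_model_params="basic_conv",   # placeholder hparams-set name
    model_train_steps=1000,
    simulated_env_generator_num_steps=300,
    ppo_epochs_num=200,
    ppo_epoch_length=300)

train(loop_hparams, "~/t2t_rl_loop")        # output_dir is a placeholder path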
Example #2
    def test_glow_inference(self):
        hparams = glow.glow_hparams()
        hparams.depth = 15
        hparams.n_levels = 2
        hparams.data_dir = ''
        curr_dir = tempfile.mkdtemp()

        # Training pipeline
        with tf.Graph().as_default():
            cifar_problem = problems.problem(
                'image_cifar10_plain_random_shift')
            hparams.problem = cifar_problem
            model = glow.Glow(hparams, tf_estimator.ModeKeys.TRAIN)
            train_dataset = cifar_problem.dataset(MODES.TRAIN)
            one_shot = train_dataset.make_one_shot_iterator()
            x_batch, y_batch = self.batch(one_shot)
            features = {'inputs': x_batch, 'targets': y_batch}
            model_path = os.path.join(curr_dir, 'model')
            model(features)

            with tf.Session() as session:
                saver = tf.train.Saver()
                session.run(tf.global_variables_initializer())

                init_op = tf.get_collection('glow_init_op')
                session.run(init_op)
                z = session.run([model.z])
                mean_z = np.mean(z)
                is_undefined = np.isnan(mean_z) or np.isinf(mean_z)
                self.assertTrue(not is_undefined)
                saver.save(session, model_path)

        # Inference pipeline
        with tf.Graph().as_default():
            cifar_problem = problems.problem(
                'image_cifar10_plain_random_shift')
            hparams.problem = cifar_problem
            model = glow.Glow(hparams, tf_estimator.ModeKeys.PREDICT)
            test_dataset = cifar_problem.dataset(MODES.EVAL)
            one_shot = test_dataset.make_one_shot_iterator()
            x_batch, y_batch = self.batch(one_shot)
            features = {'inputs': x_batch, 'targets': y_batch}
            model_path = os.path.join(curr_dir, 'model')

            predictions = model.infer(features)
            with tf.Session() as session:
                saver = tf.train.Saver()
                saver.restore(session, model_path)
                predictions_np = session.run(predictions)
                self.assertTrue(np.all(predictions_np <= 255))
                self.assertTrue(np.all(predictions_np >= 0))
Example #3
  def test_glow_inference(self):
    hparams = glow.glow_hparams()
    hparams.depth = 15
    hparams.n_levels = 2
    hparams.data_dir = ''
    curr_dir = tempfile.mkdtemp()

    # Training pipeline
    with tf.Graph().as_default():
      cifar_problem = problems.problem('image_cifar10_plain_random_shift')
      hparams.problem = cifar_problem
      model = glow.Glow(hparams, tf.estimator.ModeKeys.TRAIN)
      train_dataset = cifar_problem.dataset(MODES.TRAIN)
      one_shot = train_dataset.make_one_shot_iterator()
      x_batch, y_batch = self.batch(one_shot)
      features = {'inputs': x_batch, 'targets': y_batch}
      model_path = os.path.join(curr_dir, 'model')
      model(features)

      with tf.Session() as session:
        saver = tf.train.Saver()
        session.run(tf.global_variables_initializer())

        init_op = tf.get_collection('glow_init_op')
        session.run(init_op)
        z = session.run([model.z])
        mean_z = np.mean(z)
        is_undefined = np.isnan(mean_z) or np.isinf(mean_z)
        self.assertTrue(not is_undefined)
        saver.save(session, model_path)

    # Inference pipeline
    with tf.Graph().as_default():
      cifar_problem = problems.problem('image_cifar10_plain_random_shift')
      hparams.problem = cifar_problem
      model = glow.Glow(hparams, tf.estimator.ModeKeys.PREDICT)
      test_dataset = cifar_problem.dataset(MODES.EVAL)
      one_shot = test_dataset.make_one_shot_iterator()
      x_batch, y_batch = self.batch(one_shot)
      features = {'inputs': x_batch, 'targets': y_batch}
      model_path = os.path.join(curr_dir, 'model')

      predictions = model.infer(features)
      with tf.Session() as session:
        saver = tf.train.Saver()
        saver.restore(session, model_path)
        predictions_np = session.run(predictions)
        self.assertTrue(np.all(predictions_np <= 255))
        self.assertTrue(np.all(predictions_np >= 0))
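The glow tests above rely on a module-level MODES alias and a batch() helper defined elsewhere in the test file. A plausible sketch of those helpers, stated as an assumption rather than the original definitions:

# Assumed helpers for the tests above (not the originals).
MODES = tf.estimator.ModeKeys

def batch(self, one_shot_iterator):
  example = one_shot_iterator.get_next()          # dict with 'inputs'/'targets'
  x = tf.expand_dims(example['inputs'], axis=0)   # add a batch dimension
  y = tf.expand_dims(example['targets'], axis=0)
  return x, y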
Example #4
def _train_and_eval_dataset_v1(problem_name, data_dir):
    """Return train and evaluation datasets, feature info and supervised keys."""
    from tensor2tensor import problems  # pylint: disable=g-import-not-at-top
    problem = problems.problem(problem_name)
    train_dataset = problem.dataset(tf.estimator.ModeKeys.TRAIN, data_dir)
    train_dataset = train_dataset.map(_select_features)
    eval_dataset = problem.dataset(tf.estimator.ModeKeys.EVAL, data_dir)
    eval_dataset = eval_dataset.map(_select_features)
    hparams = problem.get_hparams()
    # We take a few training examples to guess the shapes.
    input_shapes, target_shapes = [], []
    example_tensor = train_dataset.make_one_shot_iterator().get_next()
    sess = tf.Session()
    example1 = sess.run(example_tensor)
    example2 = sess.run(example_tensor)
    example3 = sess.run(example_tensor)
    # We use "inputs" as input except for purely auto-regressive tasks like
    # language models where "targets" are used as input_key.
    input_key = "inputs" if "inputs" in example1 else "targets"
    supervised_keys = ([input_key], ["targets"])
    for example in [example1, example2, example3]:
        input_shapes.append(list(example[input_key].shape))
        target_shapes.append(list(example["targets"].shape))
    input_vocab_size = hparams.vocab_size[input_key]
    target_vocab_size = hparams.vocab_size["targets"]
    input_info = _make_info(input_shapes, input_vocab_size)
    target_info = _make_info(target_shapes, target_vocab_size)
    info = {input_key: input_info, "targets": target_info}
    return train_dataset, eval_dataset, info, supervised_keys
Example #5
    def testBuildDataset(self):
        # See all the available problems
        self.assertTrue(len(problems.available()) > 10)

        # Retrieve a problem by name
        problem = problems.problem("translate_ende_wmt8k")

        # Access train and dev datasets through Problem
        train_dataset = problem.dataset(MODES.TRAIN)
        dev_dataset = problem.dataset(MODES.EVAL)

        # Access vocab size and other info (e.g. the data encoders used to
        # encode/decode data for the feature, used below) through feature_info.
        feature_info = problem.feature_info
        self.assertTrue(feature_info["inputs"].vocab_size > 0)
        self.assertTrue(feature_info["targets"].vocab_size > 0)

        train_example = train_dataset.make_one_shot_iterator().get_next()
        dev_example = dev_dataset.make_one_shot_iterator().get_next()

        with tf.Session() as sess:
            train_ex_val, _ = sess.run([train_example, dev_example])
            _ = feature_info["inputs"].encoder.decode(train_ex_val["inputs"])
            _ = feature_info["targets"].encoder.decode(train_ex_val["targets"])
Example #6
def _train_and_eval_dataset_v1(problem_name, data_dir):
    """Return train and evaluation datasets, feature info and supervised keys."""
    problem = problems.problem(problem_name)
    train_dataset = problem.dataset(tf.estimator.ModeKeys.TRAIN, data_dir)
    train_dataset = train_dataset.map(_select_features)
    eval_dataset = problem.dataset(tf.estimator.ModeKeys.EVAL, data_dir)
    eval_dataset = eval_dataset.map(_select_features)
    supervised_keys = (["inputs"], ["targets"])
    hparams = problem.get_hparams()
    # We take a few training examples to guess the shapes.
    input_shapes, target_shapes = [], []
    example_tensor = train_dataset.make_one_shot_iterator().get_next()
    sess = tf.Session()
    example1 = sess.run(example_tensor)
    example2 = sess.run(example_tensor)
    example3 = sess.run(example_tensor)
    for example in [example1, example2, example3]:
        input_shapes.append(list(example["inputs"].shape))
        target_shapes.append(list(example["targets"].shape))
    input_vocab_size = hparams.vocab_size["inputs"]
    target_vocab_size = hparams.vocab_size["targets"]
    input_info = _make_info(input_shapes, input_vocab_size)
    target_info = _make_info(target_shapes, target_vocab_size)
    info = {"inputs": input_info, "targets": target_info}
    return train_dataset, eval_dataset, info, supervised_keys
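A minimal consumption sketch for the function above; the problem name and data_dir are placeholder assumptions.

train_ds, eval_ds, info, keys = _train_and_eval_dataset_v1(
    "translate_ende_wmt8k", os.path.expanduser("~/t2t/data"))
print(keys)   # (["inputs"], ["targets"])
print(info)   # per-feature shape/vocab summaries built by _make_info
first_example = train_ds.make_one_shot_iterator().get_next()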
Example #7
    def test_glow(self):
        with tf.Graph().as_default():
            hparams = glow.glow_hparams()
            hparams.depth = 15
            hparams.n_levels = 2
            hparams.init_batch_size = 256
            hparams.batch_size = 1
            cifar_problem = problems.problem(
                'image_cifar10_plain_random_shift')
            hparams.problem = cifar_problem
            model = glow.Glow(hparams, tf.estimator.ModeKeys.TRAIN)
            train_dataset = cifar_problem.dataset(MODES.TRAIN)
            one_shot = train_dataset.make_one_shot_iterator()
            x_batch, y_batch = self.batch(one_shot)
            features = {'inputs': x_batch, 'targets': y_batch}
            _, obj_dict = model.body(features)
            objective = obj_dict['training']
            with tf.Session() as sess:
                sess.run(tf.global_variables_initializer())

                # Run initialization.
                init_op = tf.get_collection('glow_init_op')
                sess.run(init_op)

                # Run forward pass.
                obj_np = sess.run(objective)
                mean_obj = np.mean(obj_np)

                # Check that one forward pass does not produce NaNs, i.e.
                # initialization etc. works as expected.
                self.assertTrue(mean_obj > 0 and mean_obj < 10.0)
Example #8
  def test_glow(self):
    with tf.Graph().as_default():
      hparams = glow.glow_hparams()
      hparams.depth = 15
      hparams.n_levels = 2
      hparams.init_batch_size = 256
      hparams.batch_size = 1
      hparams.data_dir = ''
      cifar_problem = problems.problem('image_cifar10_plain_random_shift')
      hparams.problem = cifar_problem
      model = glow.Glow(hparams, tf.estimator.ModeKeys.TRAIN)
      train_dataset = cifar_problem.dataset(MODES.TRAIN)
      one_shot = train_dataset.make_one_shot_iterator()
      x_batch, y_batch = self.batch(one_shot)
      features = {'inputs': x_batch, 'targets': y_batch}
      _, obj_dict = model.body(features)
      objective = obj_dict['training']
      with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        # Run initialization.
        init_op = tf.get_collection('glow_init_op')
        sess.run(init_op)

        # Run forward pass.
        obj_np = sess.run(objective)
        mean_obj = np.mean(obj_np)

        # Check that one forward pass does not produce NaNs, i.e.
        # initialization etc. works as expected.
        self.assertTrue(mean_obj > 0 and mean_obj < 10.0)
Example #9
 def __init__(self, config):
     self.translate_problem = problems.problem(config.PROBLEM)
     self.encoder = self.translate_problem.feature_encoders(
         config.VOCAB_DIR)
     self.hparams = trainer_lib.create_hparams(config.HPARAMS,
                                               data_dir=config.VOCAB_DIR,
                                               problem_name=config.PROBLEM)
     self.checkpoint_path = config.CHECKPOINT_PATH
     self.translate_model = registry.model(config.MODEL)(self.hparams,
                                                         Modes.PREDICT)
Example #10
def example_apply_model(ckpt_path,
                        hparams_set="img2img_transformer2d_tiny",
                        problem_name="img2img_allen_brain_dim8to32",
                        model_name="img2img_transformer",
                        data_dir="/mnt/nfs-east1-d/data",
                        input_dim=8,
                        output_dim=32):

    # HACK: Avoid re-instantiating the model which causes problems...
    # TODO: Better way to handle this, e.g. delete from globals.
    if 'model' not in globals():

        hp = trainer_lib.create_hparams(hparams_set,
                                        data_dir=data_dir,
                                        problem_name=problem_name)

        model = registry.model(model_name)(hp, Modes.TRAIN)

    problem_object = problems.problem(problem_name)

    dataset = problem_object.dataset(Modes.TRAIN, data_dir)

    with tfe.restore_variables_on_create(ckpt_path):
        for count, example in enumerate(tfe.Iterator(dataset)):
            if count > 1234:
                break

        # Example input
        fig = plt.figure(figsize=(8, 8))
        example["inputs"] = tf.reshape(example["inputs"],
                                       [1, input_dim, input_dim, 3])
        fig.add_subplot(1, 3, 1)
        plt.imshow(example["inputs"].numpy()[0])

        # Example target
        fig.add_subplot(1, 3, 2)
        example["targets"] = tf.reshape(example["targets"],
                                        [1, output_dim, output_dim, 3])
        plt.imshow(example["targets"].numpy()[0])

        # Dummy target (expected by model)
        example["targets"] = tf.reshape(
            tf.zeros((1, output_dim, output_dim, 3), dtype=np.uint8),
            [1, output_dim, output_dim, 3])

        # Produce and display prediction
        predictions, _ = model(example)
        fig.add_subplot(1, 3, 3)
        inferred = demo.infer(predictions)
        plt.imshow(inferred)
        plt.show()

    return example, predictions, inferred
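A hypothetical invocation; the checkpoint path is a placeholder, not a path from the original.

example, predictions, inferred = example_apply_model(
    ckpt_path="/mnt/nfs-east1-d/train/model.ckpt-250000")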
Example #11
  def __init__(
      self, hparams_set, model_name, data_dir, problem_name, beam_size=1):
    inputs, targets, samples, att_mats = build_model(
        hparams_set, model_name, data_dir, problem_name, beam_size=beam_size)

    # Fetch the problem
    ende_problem = problems.problem(problem_name)
    encoders = ende_problem.feature_encoders(data_dir)

    self.inputs = inputs
    self.targets = targets
    self.att_mats = att_mats
    self.samples = samples
    self.encoders = encoders
Example #13
    def input_data():
        """Input function to be returned."""
        prob = problems.problem(problem_name)
        if data == 'image_cifar100':
            dataset = prob.dataset(mode, preprocess=augmented)
            if not augmented: dataset = dataset.map(map_func=standardization)
        else:
            dataset = prob.dataset(mode)

        dataset = dataset.batch(batch_size)
        dataset = dataset.repeat(repeat_num)
        dataset = dataset.make_one_shot_iterator().get_next()
        if data_format == 'CHW':
            dataset['inputs'] = tf.transpose(dataset['inputs'], (0, 3, 1, 2))
        return dataset['inputs'], tf.squeeze(
            tf.one_hot(dataset['targets'], class_num))
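input_data is a closure: problem_name, data, mode, augmented, batch_size, repeat_num, data_format, class_num, and standardization all come from an enclosing scope that this snippet omits. A purely hypothetical wrapper showing where those names could live:

def make_input_fn(problem_name, data, mode, batch_size, repeat_num,
                  class_num, data_format='HWC', augmented=False):
    # Hypothetical factory (an assumption; the original wrapper is not shown).

    def standardization(example):
        # Applied only when augmentation is disabled.
        example['inputs'] = tf.image.per_image_standardization(example['inputs'])
        return example

    def input_data():
        """Input function to be returned (body as in the example above)."""
        prob = problems.problem(problem_name)
        dataset = prob.dataset(mode)
        dataset = dataset.batch(batch_size).repeat(repeat_num)
        dataset = dataset.make_one_shot_iterator().get_next()
        return dataset['inputs'], tf.squeeze(
            tf.one_hot(dataset['targets'], class_num))

    return input_data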
Example #14
def _train_and_eval_dataset_v1(problem_name, data_dir):
    """Return train and evaluation datasets, feature info and supervised keys."""
    problem = problems.problem(problem_name)
    train_dataset = problem.dataset(tf.estimator.ModeKeys.TRAIN, data_dir)
    eval_dataset = problem.dataset(tf.estimator.ModeKeys.EVAL, data_dir)
    supervised_keys = ("inputs", "targets")
    hparams = problem.get_hparams()
    # We take a few training examples to guess the shapes.
    input_shapes, target_shapes = [], []
    for example in train_dataset.take(3):
        input_shapes.append(example["inputs"].shape.as_list())
        target_shapes.append(example["targets"].shape.as_list())
    input_info = _make_info(input_shapes,
                            hparams.modality["inputs"].top_dimensionality)
    target_info = _make_info(target_shapes,
                             hparams.modality["targets"].top_dimensionality)
    info = {"inputs": input_info, "targets": target_info}
    return train_dataset, eval_dataset, info, supervised_keys
Example #15
  def __init__(
      self, hparams_set, model_name, data_dir, problem_name, return_beams,
      beam_size, custom_problem_type, force_decode_len):
    inputs, targets, input_extra_length_ph, samples, att_mats = build_model(
        hparams_set, model_name, data_dir, problem_name, return_beams,
        beam_size, custom_problem_type, force_decode_len)

    # Fetch the problem
    ende_problem = problems.problem(problem_name)
    encoders = ende_problem.feature_encoders(data_dir)

    self.return_beams = return_beams
    self.beam_size = beam_size

    self.inputs = inputs
    self.targets = targets
    self.att_mats = att_mats
    self.samples = samples
    self.encoders = encoders
    self.input_extra_length_ph = input_extra_length_ph
Example #16
def _train_and_eval_dataset_v1(problem_name, data_dir):
    """Return train and evaluation datasets, feature info and supervised keys."""
    problem = problems.problem(problem_name)
    train_dataset = problem.dataset(tf.estimator.ModeKeys.TRAIN, data_dir)
    train_dataset = train_dataset.map(_select_features)
    eval_dataset = problem.dataset(tf.estimator.ModeKeys.EVAL, data_dir)
    eval_dataset = eval_dataset.map(_select_features)
    supervised_keys = (["inputs"], ["targets"])
    hparams = problem.get_hparams()
    # We take a few training examples to guess the shapes.
    input_shapes, target_shapes = [], []
    for example in train_dataset.take(3):
        input_shapes.append(example["inputs"].shape.as_list())
        target_shapes.append(example["targets"].shape.as_list())
    input_vocab_size = hparams.vocab_size["inputs"]
    target_vocab_size = hparams.vocab_size["targets"]
    input_info = _make_info(input_shapes, input_vocab_size)
    target_info = _make_info(target_shapes, target_vocab_size)
    info = {"inputs": input_info, "targets": target_info}
    return train_dataset, eval_dataset, info, supervised_keys
Example #17
    def __init__(self, root_dir, model_name="transformer",
                 hparams_set="transformer_base_single_gpu"):
        data_dir = os.path.join(root_dir, 'data')
        train_dir = os.path.join(root_dir, 'train')

        problem_name_file = open(os.path.join(root_dir, 'problem_name'))
        problem_name = problem_name_file.readline()[:-1]
        problem_name_file.close()
        problem = problems.problem(problem_name)

        self.encoders = problem.feature_encoders(data_dir)

        hparams = trainer_lib.create_hparams(hparams_set, data_dir=data_dir,
                                             problem_name=problem_name)

        self.translate_model = registry.model(model_name)(hparams, Modes.EVAL)

        checkpoint_file = open(os.path.join(train_dir, 'checkpoint'))
        ckpt_name = checkpoint_file.readline()[24:-2]
        checkpoint_file.close()
        self.ckpt_path = os.path.join(train_dir, ckpt_name)
Example #18
    def test_glow(self):
        with tf.Graph().as_default():
            hparams = glow.glow_hparams()
            model = glow.Glow(hparams, tf.estimator.ModeKeys.TRAIN)
            cifar_problem = problems.problem(
                'image_cifar10_plain_random_shift')
            train_dataset = cifar_problem.dataset(MODES.TRAIN)
            one_shot = train_dataset.make_one_shot_iterator()
            x_batch, y_batch = self.batch(one_shot)
            features = {'inputs': x_batch, 'targets': y_batch}
            _, obj_dict = model.body(features)
            objective = obj_dict['training']
            with tf.Session() as sess:
                sess.run(tf.global_variables_initializer())
                obj_np = sess.run(objective)
                mean_obj = np.mean(obj_np)

                # Check that one forward pass does not produce NaNs, i.e.
                # initialization etc. works as expected.
                is_undefined = np.isnan(mean_obj) or np.isinf(mean_obj)
                self.assertTrue(not is_undefined)
Example #19
def main(_):

    # Fetch the problem
    wmt_problem = problems.problem(FLAGS.problem)

    # Declare the path we need
    data_dir = FLAGS.data_dir

    checkpoint_dir = FLAGS.model_dir
    ckpt_name = FLAGS.problem
    # ckpt_dir = tf.train.latest_checkpoint(os.path.join(checkpoint_dir, ckpt_name))
    ckpt_dir = tf.train.latest_checkpoint(checkpoint_dir)

    # Create hparams and the model
    model_name = FLAGS.model
    hparams_set = FLAGS.hparams_set
    hparams = trainer_lib.create_hparams(hparams_set,
                                         data_dir=data_dir,
                                         problem_name=FLAGS.problem)

    # Get the encoders from the problem
    encoders = wmt_problem.feature_encoders(data_dir)

    translate_model = registry.model(model_name)(hparams, Modes.EVAL)

    sys.stdout.write('> ')
    sys.stdout.flush()
    sentence_en = sys.stdin.readline().strip()
    while sentence_en:
        if sentence_en == 'q':
            print("Close this process")
            break
        outputs = translate(encoders, translate_model, ckpt_dir, sentence_en)
        print(outputs)
        print('> ', end='')
        sys.stdout.flush()
        sentence_en = sys.stdin.readline()
Example #20
    def __init__(self,
                 hparams_set,
                 use_bottom_up_features,
                 model_name,
                 data_dir,
                 problem_name,
                 beam_size=1):

        if use_bottom_up_features:
            inputs, bottom_up_features, object_boxes, targets, samples, att_mats = build_model_object(
                hparams_set,
                model_name,
                data_dir,
                problem_name,
                beam_size=beam_size)
        else:
            inputs, targets, samples, att_mats = build_model(
                hparams_set,
                model_name,
                data_dir,
                problem_name,
                beam_size=beam_size)

        # Fetch the problem
        ende_problem = problems.problem(problem_name)
        encoders = ende_problem.feature_encoders(data_dir)

        self.inputs = inputs
        self.targets = targets
        self.att_mats = att_mats
        self.samples = samples
        self.encoders = encoders
        self.bottom_up_features = None
        self.object_boxes = None
        if use_bottom_up_features:
            self.bottom_up_features = bottom_up_features
            self.object_boxes = object_boxes
Example #21
  def testBuildDataset(self):
    # See all the available problems
    self.assertTrue(len(problems.available()) > 10)

    # Retrieve a problem by name
    problem = problems.problem("translate_ende_wmt8k")

    # Access train and dev datasets through Problem
    train_dataset = problem.dataset(MODES.TRAIN)
    dev_dataset = problem.dataset(MODES.EVAL)

    # Access vocab size and other info (e.g. the data encoders used to
    # encode/decode data for the feature, used below) through feature_info.
    feature_info = problem.feature_info
    self.assertTrue(feature_info["inputs"].vocab_size > 0)
    self.assertTrue(feature_info["targets"].vocab_size > 0)

    train_example = train_dataset.make_one_shot_iterator().get_next()
    dev_example = dev_dataset.make_one_shot_iterator().get_next()

    with tf.Session() as sess:
      train_ex_val, _ = sess.run([train_example, dev_example])
      _ = feature_info["inputs"].encoder.decode(train_ex_val["inputs"])
      _ = feature_info["targets"].encoder.decode(train_ex_val["targets"])
Example #22
                'split': problem.DatasetSplit.TRAIN,
                'shards': 200
            },
            {
                'split': problem.DatasetSplit.EVAL,
                'shards': 1
            },
        ]


DATA_DIR = os.path.expanduser('t2t-tatabahasa/data')
TMP_DIR = os.path.expanduser('t2t-tatabahasa/tmp')
TRAIN_DIR = os.path.expanduser('t2t-tatabahasa/train-small')

PROBLEM = 'grammar'
t2t_problem = problems.problem(PROBLEM)

train_steps = 200000
eval_steps = 20
batch_size = 1024 * 6
save_checkpoints_steps = 10000
ALPHA = 0.0005
schedule = 'continuous_train_and_eval'
MODEL = 'transformer_tag'
HPARAMS = 'transformer_base'

from tensor2tensor.utils.trainer_lib import create_run_config, create_experiment
from tensor2tensor.utils.trainer_lib import create_hparams
from tensor2tensor.utils import registry
from tensor2tensor import models
from tensor2tensor import problems
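These constants and imports presumably feed a standard trainer_lib run. A hedged wiring sketch follows; the exact keyword arguments of create_run_config and create_experiment vary slightly across tensor2tensor releases, so treat the signatures below as assumptions.

hparams = create_hparams(HPARAMS)
hparams.batch_size = batch_size
hparams.learning_rate = ALPHA

run_config = create_run_config(
    model_name=MODEL,
    model_dir=TRAIN_DIR,
    save_checkpoints_steps=save_checkpoints_steps)

experiment = create_experiment(
    run_config=run_config,
    hparams=hparams,
    model_name=MODEL,
    problem_name=PROBLEM,
    data_dir=DATA_DIR,
    train_steps=train_steps,
    eval_steps=eval_steps)

# schedule == 'continuous_train_and_eval' maps to the method of the same name.
experiment.continuous_train_and_eval()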
Example #23
tf.gfile.MakeDirs(checkpoint_dir)
gs_data_dir = "./tensor2tensor-data"
gs_ckpt_dir = "./tensor2tensor-checkpoints/"
tf.gfile.MakeDirs(gs_data_dir)
tf.gfile.MakeDirs(gs_ckpt_dir)
"""# Download MNIST and inspect it"""

# A Problem is a dataset together with some fixed pre-processing.
# It could be a translation dataset with a specific tokenization,
# or an image dataset with a specific resolution.
#
# There are many problems available in Tensor2Tensor
problems.available()

# Fetch the MNIST problem
mnist_problem = problems.problem("image_mnist")
# The generate_data method of a problem will download data and process it into
# a standard format ready for training and evaluation.
mnist_problem.generate_data(data_dir, tmp_dir)

# Now let's see the training MNIST data as Tensors.
mnist_example = tfe.Iterator(mnist_problem.dataset(Modes.TRAIN,
                                                   data_dir)).next()
image = mnist_example["inputs"]
label = mnist_example["targets"]

plt.imshow(image.numpy()[:, :, 0].astype(np.float32),
           cmap=plt.get_cmap('gray'))
print("Label: %d" % label.numpy())
"""# Translate from English to German with a pre-trained model"""
Example #24
from tensor2tensor.utils import registry
from tensor2tensor.utils import metrics
import os
import numpy as np
from zh_vi import translate_zhvi

DATA_DIR = os.path.expanduser("C:\\Users\\NguyenPM\\PycharmProjects\\transformer\\testdata\\data")  # This folder contains the data
TRAIN_DIR = os.path.expanduser("C:\\Users\\NguyenPM\\PycharmProjects\\transformer\\testdata\\train")  # This folder contains the model
tf.io.gfile.makedirs(DATA_DIR)
tf.io.gfile.makedirs(TRAIN_DIR)

PROBLEM = "translate_zhvi"
MODEL = "transformer" # Our model
HPARAMS = "transformer_big"

zhvi_problem = problems.problem(PROBLEM)

# Copy the vocab file locally so we can encode inputs and decode model outputs
vocab_name = "vocab.translate_zhvi.16384.subwords"
vocab_file = os.path.join(DATA_DIR, vocab_name)

# Get the encoders from the problem
encoders = zhvi_problem.feature_encoders(DATA_DIR)

ckpt_path = tf.train.latest_checkpoint(os.path.join(TRAIN_DIR))
print(ckpt_path)

def translate(inputs):
  encoded_inputs = encode(inputs)
  with tfe.restore_variables_on_create(ckpt_path):
    model_output = translate_model.infer(encoded_inputs)["outputs"]
Example #25
Modes = tf.estimator.ModeKeys

# Setup some directories
data_dir = os.path.expanduser("~/t2t/data")
tmp_dir = os.path.expanduser("~/t2t/tmp")
train_dir = os.path.expanduser("~/t2t/train")
checkpoint_dir = os.path.expanduser("~/t2t/checkpoints")
tf.gfile.MakeDirs(data_dir)
tf.gfile.MakeDirs(tmp_dir)
tf.gfile.MakeDirs(train_dir)
tf.gfile.MakeDirs(checkpoint_dir)
gs_data_dir = "gs://tensor2tensor-data"
gs_ckpt_dir = "gs://tensor2tensor-checkpoints/"

# Fetch the MNIST problem
ptb_problem = problems.problem("languagemodel_ptb10k")


# Setup helper functions for encoding and decoding
def encode(input_str, output_str=None):
    """Input str to features dict, ready for inference"""
    inputs = encoders["targets"].encode(input_str) + [1]  # add EOS id
    batch_inputs = tf.reshape(inputs, [1, -1, 1])  # Make it 3D.
    return {"targets": batch_inputs}


def decode(integers):
    """List of ints to str"""
    integers = list(np.squeeze(integers))
    if 1 in integers:
        integers = integers[:integers.index(1)]  # 1 is EOS id
    return encoders["targets"].decode(np.squeeze(integers))
Example #26
train_dir = os.path.expanduser("./t2t/train")
checkpoint_dir = os.path.expanduser("./t2t/checkpoints")
tf.gfile.MakeDirs(data_dir)
tf.gfile.MakeDirs(tmp_dir)
tf.gfile.MakeDirs(train_dir)
tf.gfile.MakeDirs(checkpoint_dir)
gs_data_dir = "gs://tensor2tensor-data"
gs_ckpt_dir = "gs://tensor2tensor-checkpoints"

# A Problem is a dataset together with some fixed pre-processing
# It could be a translation dataset with a specific tokenization
# or an image dataset with a specific resolution
#
# There are many problems available in Tensor2Tensor
# print(problems.available())

# Fetch the MNIST problem
mnist_problem = problems.problem("image_mnist")
# The generate_data method of a problem will download data and process it into
# a standard format ready for training and evaluation.
mnist_problem.generate_data(data_dir, tmp_dir)


# Now let's see the training MNIST data as Tensors.
mnist_example = tfe.Iterator(mnist_problem.dataset(Modes.TRAIN, data_dir)).next()
image = mnist_example["inputs"]
label = mnist_example["targets"]

plt.imshow(image.numpy()[:, :, 0].astype(np.float32), cmap=plt.get_cmap('gray'))
print("Label: %d" % label.numpy())
plt.show()
Example #27
# Other setup
Modes = tf.estimator.ModeKeys

# Setup some directories
data_dir = os.path.expanduser("data")
tmp_dir = os.path.expanduser("tmp")
train_dir = os.path.expanduser("train")
checkpoint_dir = os.path.expanduser("output")
# tf.gfile.MakeDirs(data_dir)
# tf.gfile.MakeDirs(tmp_dir)
#tf.gfile.MakeDirs(train_dir)
# tf.gfile.MakeDirs(checkpoint_dir)
gs_data_dir = "data"
gs_ckpt_dir = "checkpoints"

ende_problem = problems.problem("sentiment_imdb")
# Fetch the problem

# Copy the vocab file locally so we can encode inputs and decode model outputs
# All vocabs are stored on GCS
vocab_name = "vocab.sentiment_imdb.8192.subwords"
vocab_file = os.path.join(gs_data_dir, vocab_name)

# Get the encoders from the problem
encoders = ende_problem.feature_encoders(data_dir)
print(encoders)
# Setup helper functions for encoding and decoding
def encode(input_str, output_str=None):
    """Input str to features dict, ready for inference"""
    inputs = encoders["inputs"].encode(input_str) + [1]  # add EOS id
    batch_inputs = tf.reshape(inputs, [1, -1, 1])  # Make it 3D.
    return {"inputs": batch_inputs}
Example #28
    def run(self):
        self.params.logger.info("begin DataReader")
        if self.device is None:
            device_id = ""
        else:
            device_id = self.device[len(self.device) - 1]
        os.environ["CUDA_VISIBLE_DEVICES"] = device_id

        # Enable TF Eager execution
        # config = tf.ConfigProto(
        #         log_device_placement=self.params.log_device_placement,
        #         allow_soft_placement=True,
        #         device_count = {'GPU': 0}
        #         )
        #config.gpu_options.allow_growth = True
        #config.gpu_options.per_process_gpu_memory_fraction=0.5

        tfe = tf.contrib.eager
        tfe.enable_eager_execution()

        # Setup the training data

        # Fetch the MNIST problem
        problem = problems.problem(self.params.problem)
        # The generate_data method of a problem will download data and process it into
        # a standard format ready for training and evaluation.
        if self.params.generate_data:
            problem.generate_data(self.params.data_dir, self.params.tmp_dir)

        Modes = tf.estimator.ModeKeys

        if self.params.mode == "train":
            mode = Modes.TRAIN
            max_epochs = self.params.max_epochs
            start_epoch = get_ckpt_iters(
                self.params
            ) * self.params.batch_size // self.params.train_dataset_size
            num_repeats = max_epochs - start_epoch

        elif self.params.mode == "predict":
            mode = Modes.EVAL
            max_epochs = self.params.max_epochs
            start_epoch = get_ckpt_iters(
                self.params
            ) * self.params.batch_size // self.params.train_dataset_size
            num_repeats = 1
            self.params.logger.info("epoch #%d" % self.params.max_epochs)

        model_data = []
        if num_repeats > 0:
            dataset = problem.dataset(mode, self.params.data_dir)

            dataset = dataset.shuffle(
                buffer_size=256,
                reshuffle_each_iteration=self.params.reshuffle_each_epoch)
            dataset = dataset.repeat(num_repeats).batch(self.params.batch_size)

            pre_r = -1
            for count, example in enumerate(tfe.Iterator(dataset)):
                if self.params.mode == "train":
                    r = start_epoch + count * self.params.batch_size // self.params.train_dataset_size
                elif self.params.mode == "predict":
                    r = start_epoch

                if r > pre_r:
                    self.params.logger.info("epoch #%d" % (r + 1))
                    pre_r = r

                inputs, targets = example["inputs"], example["targets"]
                model_data.append([inputs.numpy(), targets.numpy()])

        self.params.logger.info("end DataReader")
        return model_data
Example #29
Modes = tf.estimator.ModeKeys

# Setup some directories
data_dir = "./t2t/data/"
tmp_dir = "./t2t/tmp/"
train_dir = "./t2t/train/"
checkpoint_dir = "./t2t/checkpoints/"
tf.gfile.MakeDirs(data_dir)
tf.gfile.MakeDirs(tmp_dir)
tf.gfile.MakeDirs(train_dir)
tf.gfile.MakeDirs(checkpoint_dir)

#%%
# Translate English -> German
problem_name = "translate_ende_wmt32k"
ende_problem = problems.problem(problem_name)

# Get the encoders from the problem
encoders = ende_problem.feature_encoders(data_dir)


# Setup helper functions for encoding and decoding
def encode(input_str, output_str=None):
    """Input str to features dict, ready for inference"""
    inputs = encoders["inputs"].encode(input_str) + [1]  # add EOS id
    batch_inputs = tf.reshape(inputs, [1, -1, 1])  # Make it 3D.
    return {"inputs": batch_inputs}


def decode(integers):
    """List of ints to str"""
Example #30
def get_data(dataset_name,
             mode="train",
             batch_size=256,
             num_epochs=20,
             prep_fn=None,
             preprocess_batch=None,
             metadata=None):
    """
    Construct a tf.data.Dataset for the specified dataset.

    Args:
        dataset_name: string representing the dataset to load
        mode: string ("train" or "test") representing mode in which to run
        batch_size: integer representing size of batch
        prep_fn: optional preprocessing function that takes a tf.data.Dataset
            and returns a preprocessed Dataset.
    Returns:
        A (tf.data.Dataset, metadata) tuple to be consumed by training or eval loops
    """
    dataset = None
    if metadata is None:
        metadata = {}
    if dataset_name == "cifar10":
        (x_train, y_train), (x_test, y_test) = cifar10.load_data()
        x_train = x_train.astype(np.float32)
        x_test = x_test.astype(np.float32)
        y_train = np.squeeze(y_train, axis=1).astype(np.int32)
        y_test = np.squeeze(y_test, axis=1).astype(np.int32)
    elif dataset_name == "cifar100":
        (x_train, y_train), (x_test, y_test) = cifar100.load_data()
        x_train = x_train.astype(np.float32)
        x_test = x_test.astype(np.float32)
        y_train = np.squeeze(y_train, axis=1).astype(np.int32)
        y_test = np.squeeze(y_test, axis=1).astype(np.int32)
    elif dataset_name in ["envi_iwslt32k", "enfr_wmt_small8k"]:
        data_dir = os.path.join("data", dataset_name)
        tmp_dir = os.path.join("data", dataset_name + "_tmp")
        t2t_name = "translate_%s" % dataset_name
        problem = problems.problem(t2t_name)
        problem.generate_data(data_dir, tmp_dir)
        dataset = problem.dataset(mode, data_dir)
        metadata["problem"] = problem
        metadata["max_length"] = 30
    elif dataset_name == "anki_spaeng":
        path_to_zip = tf.keras.utils.get_file(
            'spa-eng.zip',
            origin='http://download.tensorflow.org/data/spa-eng.zip',
            extract=True)
        path_to_file = os.path.dirname(path_to_zip) + "/spa-eng/spa.txt"
        raise RuntimeError("Not implemented")
    else:
        raise ValueError("Unknown dataset: %s" % dataset_name)

    if prep_fn:
        if dataset:
            dataset, metadata = prep_fn(dataset, metadata)
        else:
            x_train, y_train, x_test, y_test, metadata = prep_fn(
                x_train, y_train, x_test, y_test, metadata)

    if dataset is None:
        if mode == "train":
            x, y = x_train, y_train
        elif mode == "test":
            x, y = x_test, y_test
        else:
            ValueError("Invalid mode: %s" % mode)
        dataset = tensorflow.data.Dataset.from_tensor_slices({
            "inputs": x,
            "targets": y
        })
    dataset = dataset.repeat(num_epochs).shuffle(buffer_size=500)
    drop_remainder = mode == "train"
    dataset = dataset.batch(batch_size, drop_remainder=drop_remainder)
    if preprocess_batch:
        dataset = preprocess_batch(dataset, metadata)
    return dataset, metadata
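A minimal usage sketch for get_data; the values are illustrative, not from the original.

train_ds, meta = get_data("cifar10", mode="train", batch_size=128, num_epochs=1)
test_ds, _ = get_data("cifar10", mode="test", batch_size=128, num_epochs=1)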
Example #31
from tensor2tensor.utils import registry
from tensor2tensor.utils import metrics

# Enable TF Eager execution
from tensorflow.contrib.eager.python import tfe
tfe.enable_eager_execution()

# Other setup
Modes = tf.estimator.ModeKeys

ckpt_path = sys.argv[1]
fin_name = sys.argv[2]
fout_name = sys.argv[3]

# Fetch the problem
ende_problem = problems.problem("translate_ende_wmt32k")

# Get the encoders from the problem
encoders = ende_problem.feature_encoders(ckpt_path)

# Setup helper functions for encoding and decoding
def encode(input_str, output_str=None):
    """Input str to features dict, ready for inference"""
    inputs = encoders["inputs"].encode(input_str) + [1]  # add EOS id
    batch_inputs = tf.reshape(inputs, [1, -1, 1])  # Make it 3D.
    return {"inputs": batch_inputs}

def decode(integers):
    """List of ints to str"""
    integers = list(np.squeeze(integers))
    if 1 in integers:
        integers = integers[:integers.index(1)]  # 1 is EOS id
    return encoders["inputs"].decode(np.squeeze(integers))
Example #32
data_dir = os.path.expanduser(
    "/home/huhan/20g_disk_d/python/projects/t2t/data")
tmp_dir = os.path.expanduser("/home/huhan/20g_disk_d/python/projects/t2t/tmp")
train_dir = os.path.expanduser(
    "/home/huhan/20g_disk_d/python/projects/t2t/train")
checkpoint_dir = os.path.expanduser(
    "/home/huhan/20g_disk_d/python/projects/t2t/checkpoints")
tf.gfile.MakeDirs(data_dir)
tf.gfile.MakeDirs(tmp_dir)
tf.gfile.MakeDirs(train_dir)
tf.gfile.MakeDirs(checkpoint_dir)

gs_ckpt_dir = "gs://tensor2tensor-checkpoints/"

problem_name = "librispeech_clean"
asr_problem = problems.problem(problem_name)
encoders = asr_problem.feature_encoders(None)

model_name = "transformer"
hparams_set = "transformer_librispeech_tpu"

hparams = trainer_lib.create_hparams(hparams_set,
                                     data_dir=data_dir,
                                     problem_name=problem_name)
asr_model = registry.model(model_name)(hparams, Modes.PREDICT)


def encode(x):
    waveforms = encoders["waveforms"].encode(x)
    encoded_dict = asr_problem.preprocess_example(
        {
Example #33
    return inputs


def decode(integers):
    """List of ints to str"""
    integers = list(np.squeeze(integers))
    if 1 in integers:
        integers = integers[:integers.index(1)]  # 1 is EOS id
    return encoders["inputs"].decode(np.squeeze(integers))


data_dir = '/media/satan/3e75fcb2-d0e7-4b40-9145-34692ff66ff1/sunmingsheng/DataSet/t2t/data'
tmp_dir = '/media/satan/3e75fcb2-d0e7-4b40-9145-34692ff66ff1/sunmingsheng/DataSet/t2t/tmp'

dataLen = {'train': 287113, 'test': 11490, 'dev': 13368}
cnn_dailymail = problems.problem('summarize_cnn_dailymail32k')
PADDING = 0
SOURCE_PADDING_LENGTH = 1000
TARGET_PADDING_LENGTH = 100

encoders = cnn_dailymail.feature_encoders(data_dir)

kind = ['test', 'train', 'dev']
for k in kind:
    source_name = 'cnndm.{}.source'.format(k)
    target_name = 'cnndm.{}.target'.format(k)
    with open(os.path.join(tmp_dir, target_name), encoding='utf-8') as f0:
        targets = f0.readlines()
        targets = [t.split('\n')[0] for t in targets]  # strip the trailing newline
        for i in tqdm(range(dataLen[k]), desc=target_name):
            targets[i] = encode(targets[i], max_length=TARGET_PADDING_LENGTH)
Example #34
tfe = tf.contrib.eager
tfe.enable_eager_execution()

# Other setup
Modes = tf.estimator.ModeKeys

# Setup some directories
data_dir = '/home/team55/notespace/data/t2t_big/data'
# Create hparams and the model
model_name = "transformer"
hparams_set = "transformer_big"
checkpoint_dir = '/home/team55/notespace/data/t2t_big/model'
ckpt_path = tf.train.latest_checkpoint(checkpoint_dir)

problem_name = 'jddc_big'
jddc_problem = problems.problem(problem_name)
# Get the encoders from the problem
encoders = jddc_problem.feature_encoders(data_dir)

hparams = trainer_lib.create_hparams(hparams_set,
                                     data_dir=data_dir,
                                     problem_name=problem_name)
jddc_model = registry.model(model_name)(hparams, Modes.EVAL)


# Setup helper functions for encoding and decoding
def encode(input_str):
    """Input str to features dict, ready for inference"""
    inputs = encoders["inputs"].encode(input_str) + [1]  # add EOS id
    batch_inputs = tf.reshape(inputs, [1, -1, 1])  # Make it 3D.
    return {"inputs": batch_inputs}