Example #1
def main(unused_argv):
  train_iterator = jsonl.JSONLinesIterator(FLAGS.train_path)

  with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)):
    # Build the graph.
    global_step = tf.Variable(0, name='global_step', trainable=False)
    m = arith_model.ArithmeticRecursiveGenerativeModel(FLAGS.embedding_length)

    variables = tf.trainable_variables()

    learning_rate = tf.train.exponential_decay(FLAGS.learning_rate,
                                               global_step,
                                               FLAGS.decay_steps,
                                               FLAGS.learning_rate_decay_factor,
                                               staircase=True)
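    # With staircase=True the learning rate decays in discrete steps every
    # FLAGS.decay_steps steps instead of decaying smoothly.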

    if FLAGS.optimizer == 'sgd':
      optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    elif FLAGS.optimizer == 'adam':
      optimizer = tf.train.AdamOptimizer(learning_rate)
    elif FLAGS.optimizer == 'rmsprop':
      optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate,
                                            decay=0.9,
                                            momentum=0.9,
                                            epsilon=1e-5)
    else:
      raise RuntimeError('Unknown optimizer %s' % FLAGS.optimizer)

    train_op = optimizer.minimize(m.loss,
                                  global_step=global_step,
                                  var_list=variables)

    supervisor = tf.train.Supervisor(is_chief=(FLAGS.task == 0),
                                     logdir=FLAGS.logdir,
                                     global_step=global_step,
                                     save_model_secs=60,
                                     summary_op=None)
    sess = supervisor.prepare_or_wait_for_session()
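    # On the chief task, prepare_or_wait_for_session() initializes or recovers
    # the model; non-chief workers block here until the chief has made the
    # session ready.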

    # Run the trainer.
    for unused_i in xrange(FLAGS.max_steps):
      batch = [next(train_iterator) for _ in xrange(FLAGS.batch_size)]

      _, step, batch_loss, kl_loss = sess.run(
          [train_op, global_step, m.loss, m.kl_div_mean],
          feed_dict=m.build_feed_dict(batch))
      if step % 100 == 1:
        print('step=%d:  batch loss=%f, kl loss=%f' % (step, batch_loss,
                                                       kl_loss))

        exprs = m.sample_exprs(sess, 100)
        correct = sum(arith_utils.eval_expr(expr) == 0 for expr in exprs)
        print('Correct:', correct / 100.)
        for expr in exprs[:10]:
          print(arith_utils.stringify_expr(expr), arith_utils.eval_expr(expr))
Example #2
def run_experiment(general_cfg, seed):
    tf.set_random_seed(seed)
    np.random.seed(seed)
    data_dir_path = '/cs/grad/pazbu/paz/dev/projects/dnanet-v2/data'
    ds = EnhancersData(data_dir_path)
    log_files(data_dir_path)

    x = tf.placeholder(tf.float32, shape=[None, 4, 1000])
    # Labels must be float to match the logits dtype in the cross-entropy op.
    y_ = tf.placeholder(tf.float32, shape=[None, 2])
    keep_prob = tf.placeholder(tf.float32)

    y_conv = CNN(x, dropout_keep_prob=keep_prob)
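    # The network output is used below as unnormalized logits over the two
    # classes.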

    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
    train_step = tf.train.AdamOptimizer(1e-2).minimize(cross_entropy)

    # sigmoid is monotonic, so taking argmax of y_pred_sig is equivalent to
    # taking argmax of the raw logits.
    y_pred_sig = tf.sigmoid(y_conv)
    correct_prediction = tf.equal(tf.argmax(y_, 1), tf.argmax(y_pred_sig, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    num_runs = 2
    num_epochs = general_cfg["num_epochs"]
    mini_batch_size = general_cfg["batch_size"]
    iters_per_epoch = int(ds.train.num_examples / mini_batch_size)

    sv = tf.train.Supervisor(logdir="/cs/grad/pazbu/paz/dev/projects/dnanet-v2/chk")
    with sv.managed_session() as sess:
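        # Note: managed_session() does not install sess as the default
        # session, so ops below are run explicitly via sess.run().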
        for step in range(100000):
            if sv.should_stop():
                break
            batch = ds.train.next_batch(mini_batch_size)
            if step % 100 == 0:
                train_accuracy = sess.run(
                    accuracy,
                    feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})
                valid_accuracy = sess.run(
                    accuracy,
                    feed_dict={x: ds.validation.seqs,
                               y_: ds.validation.labels,
                               keep_prob: 1.0})
                print('run: %d, epoch: %d, iteration: %d, train accuracy: %g, validation accuracy: %g' %
                      (0, step // iters_per_epoch, step, train_accuracy, valid_accuracy))
            sess.run(train_step,
                     feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
Example #3
    def run(self):
        """Run training."""
        is_chief = FLAGS.task_id == 0 or not FLAGS.supervisor
        sv = None

        def init_fn(sess, saver):
            ckpt = None
            if FLAGS.save_dir and sv is None:
                load_dir = FLAGS.save_dir
                ckpt = tf.train.get_checkpoint_state(load_dir)
            if ckpt and ckpt.model_checkpoint_path:
                logging.info('restoring from %s', ckpt.model_checkpoint_path)
                saver.restore(sess, ckpt.model_checkpoint_path)
            elif FLAGS.load_path:
                logging.info('restoring from %s', FLAGS.load_path)
                saver.restore(sess, FLAGS.load_path)

        if FLAGS.supervisor:
            with tf.device(
                    tf.train.replica_device_setter(FLAGS.ps_tasks,
                                                   merge_devices=True)):
                self.global_step = tf.contrib.framework.get_or_create_global_step()
                tf.set_random_seed(FLAGS.tf_seed)
                self.controller = self.get_controller(self.env)
                self.model = self.controller.model
                self.controller.setup()
                with tf.variable_scope(tf.get_variable_scope(), reuse=True):
                    self.eval_controller = self.get_controller(self.eval_env)
                    self.eval_controller.setup(train=False)

                saver = tf.train.Saver(max_to_keep=10)
                step = self.model.global_step
                sv = tf.train.Supervisor(
                    logdir=FLAGS.save_dir,
                    is_chief=is_chief,
                    saver=saver,
                    save_model_secs=600,
                    summary_op=None,  # we define it ourselves
                    save_summaries_secs=60,
                    global_step=step,
                    init_fn=lambda sess: init_fn(sess, saver))
                sess = sv.prepare_or_wait_for_session(FLAGS.master)
        else:
            tf.set_random_seed(FLAGS.tf_seed)
            self.global_step = tf.contrib.framework.get_or_create_global_step()
            self.controller = self.get_controller(self.env)
            self.model = self.controller.model
            self.controller.setup()
            with tf.variable_scope(tf.get_variable_scope(), reuse=True):
                self.eval_controller = self.get_controller(self.eval_env)
                self.eval_controller.setup(train=False)

            saver = tf.train.Saver(max_to_keep=10)
            sess = tf.Session()
            sess.run(tf.global_variables_initializer())
            init_fn(sess, saver)

        self.sv = sv
        self.sess = sess

        logging.info('hparams:\n%s', self.hparams_string())

        model_step = sess.run(self.model.global_step)
        if model_step >= self.num_steps:
            logging.info('training has reached final step')
            return

        losses = []
        rewards = []
        all_ep_rewards = []
        for step in range(1 + self.num_steps):

            if sv is not None and sv.should_stop():
                logging.info('stopping supervisor')
                break

            self.do_before_step(step)

            (loss, summary, total_rewards,
             episode_rewards) = self.controller.train(sess)
            _, greedy_episode_rewards = self.eval_controller.eval(sess)
            self.controller.greedy_episode_rewards = greedy_episode_rewards
            losses.append(loss)
            rewards.append(total_rewards)
            all_ep_rewards.extend(episode_rewards)

            if (random.random() < 0.1 and summary and episode_rewards
                    and is_chief and sv and sv.summary_writer):
                sv.summary_computed(sess, summary)
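                # summary_computed() hands an already-evaluated summary proto
                # to the supervisor's summary writer.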

            model_step = sess.run(self.model.global_step)
            if is_chief and step % self.validation_frequency == 0:
                logging.info(
                    'at training step %d, model step %d: '
                    'avg loss %f, avg reward %f, '
                    'episode rewards: %f, greedy rewards: %f', step,
                    model_step, np.mean(losses), np.mean(rewards),
                    np.mean(all_ep_rewards), np.mean(greedy_episode_rewards))

                losses = []
                rewards = []
                all_ep_rewards = []

            if model_step >= self.num_steps:
                logging.info('training has reached final step')
                break

        if is_chief and sv is not None:
            logging.info('saving final model to %s', sv.save_path)
            sv.saver.save(sess, sv.save_path, global_step=sv.global_step)
Example #4
def generate_samples(hparams, data, id_to_word, log_dir, output_file):
    """"Generate samples.

    Args:
      hparams:  Hyperparameters for the MaskGAN.
      data: Data to evaluate.
      id_to_word: Dictionary of indices to words.
      log_dir: Log directory.
      output_file:  Output file for the samples.
  """
    # Boolean indicating operational mode.
    is_training = False

    # Set a random seed to keep fixed mask.
    np.random.seed(0)

    with tf.Graph().as_default():
        # Construct the model.
        model = train_mask_gan.create_MaskGAN(hparams, is_training)

        ## Retrieve the initial savers.
        init_savers = model_utils.retrieve_init_savers(hparams)

        ## Initial saver function to supervisor.
        init_fn = partial(model_utils.init_fn, init_savers)

        is_chief = FLAGS.task == 0

        # Create the supervisor.  It will take care of initialization, summaries,
        # checkpoints, and recovery.
        sv = tf.train.Supervisor(logdir=log_dir,
                                 is_chief=is_chief,
                                 saver=model.saver,
                                 global_step=model.global_step,
                                 recovery_wait_secs=30,
                                 summary_op=None,
                                 init_fn=init_fn)

        # Get an initialized, and possibly recovered session.  Launch the
        # services: Checkpointing, Summaries, step counting.
        #
        # When multiple replicas of this program are running the services are
        # only launched by the 'chief' replica.
        with sv.managed_session(FLAGS.master,
                                start_standard_services=False) as sess:
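            # With start_standard_services=False the supervisor starts no
            # checkpointing or summary threads; this session only restores the
            # model and generates samples.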

            # Generator statefulness over the epoch.
            [gen_initial_state_eval, fake_gen_initial_state_eval] = sess.run(
                [model.eval_initial_state, model.fake_gen_initial_state])

            for n in xrange(FLAGS.number_epochs):
                print('Epoch number: %d' % n)
                iterator = get_iterator(data)
                for x, y, _ in iterator:
                    if FLAGS.eval_language_model:
                        is_present_rate = 0.
                    else:
                        is_present_rate = FLAGS.is_present_rate
                    tf.logging.info('Evaluating on is_present_rate=%.3f.' %
                                    is_present_rate)

                    model_utils.assign_percent_real(sess,
                                                    model.percent_real_update,
                                                    model.new_rate,
                                                    is_present_rate)

                    # Randomly mask out tokens.
                    p = model_utils.generate_mask()

                    eval_feed = {
                        model.inputs: x,
                        model.targets: y,
                        model.present: p
                    }

                    if FLAGS.data_set == 'ptb':
                        # Statefulness for *evaluation* Generator.
                        for i, (c, h) in enumerate(model.eval_initial_state):
                            eval_feed[c] = gen_initial_state_eval[i].c
                            eval_feed[h] = gen_initial_state_eval[i].h

                        # Statefulness for the Generator.
                        for i, (c, h) in enumerate(
                                model.fake_gen_initial_state):
                            eval_feed[c] = fake_gen_initial_state_eval[i].c
                            eval_feed[h] = fake_gen_initial_state_eval[i].h

                    [gen_initial_state_eval, fake_gen_initial_state_eval,
                     _] = sess.run(
                         [model.eval_final_state, model.fake_gen_final_state,
                          model.global_step],
                         feed_dict=eval_feed)

                    generate_logs(sess, model, output_file, id_to_word,
                                  eval_feed)
            print('Closing output_file.')
            output_file.close()
            return
Example #5
def train(hparams):
    """Run training loop."""
    data_iterator, clause_metadata = load_data(random_start=True)

    with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)):
        # The following three lines prevent hangs during distributed training.
        vs = tf.get_variable_scope()
        if vs.caching_device is None:
            vs.set_caching_device(lambda op: op.device)

        # Build the graph.
        global_step = slim.variables.get_or_create_global_step()
        if FLAGS.model_type == 'tree':
            m = cnf_model.CNFTreeModel(data_iterator, hparams, clause_metadata)
        else:
            m = cnf_model.CNFSequenceModel(data_iterator, hparams,
                                           clause_metadata)

        variables = tf.trainable_variables()

        learning_rate = tf.train.exponential_decay(
            hparams.learning_rate,
            global_step,
            hparams.decay_steps,
            hparams.learning_rate_decay_factor,
            staircase=True)

        if hparams.optimizer == 'sgd':
            optimizer = tf.train.GradientDescentOptimizer(learning_rate)
        elif hparams.optimizer == 'adam':
            optimizer = tf.train.AdamOptimizer(learning_rate)
        elif hparams.optimizer == 'rmsprop':
            optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate,
                                                  decay=0.9,
                                                  momentum=0.9,
                                                  epsilon=1e-5)
        else:
            raise RuntimeError('Unknown optimizer %s' % hparams.optimizer)

        if FLAGS.master not in ('', 'local') and FLAGS.sync_replicas:
            replica_id = tf.constant(FLAGS.task, tf.int32, shape=())
            optimizer = tf.train.SyncReplicasOptimizer(
                opt=optimizer,
                replicas_to_aggregate=FLAGS.replicas_to_aggregate,
                replica_id=replica_id,
                total_num_replicas=FLAGS.worker_replicas)
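            # SyncReplicasOptimizer makes the update synchronous: gradients
            # from replicas_to_aggregate workers are averaged before a single
            # step is applied, instead of each worker updating asynchronously.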

        tf.contrib.deprecated.scalar_summary('lr', learning_rate)
        tf.contrib.deprecated.scalar_summary('loss', m.loss)
        for metric_name, metric_value in m.metrics.items():
            tf.contrib.deprecated.scalar_summary('metric/' + metric_name,
                                                 metric_value)

        grads_and_vars = optimizer.compute_gradients(m.loss, variables)
        if hparams.grad_max_norm > 0:
            g, v = zip(*grads_and_vars)
            g, global_norm = tf.clip_by_global_norm(g, hparams.grad_max_norm)
            tf.contrib.deprecated.scalar_summary('global_norm', global_norm)
            grads_and_vars = zip(g, v)
        train_op = optimizer.apply_gradients(grads_and_vars, global_step)
        summary_op = tf.summary.merge_all()

        if FLAGS.master not in ('', 'local') and FLAGS.sync_replicas:
            init_token_op = optimizer.get_init_tokens_op()
            chief_queue_runner = optimizer.get_chief_queue_runner()

        saver = tf.train.Saver(keep_checkpoint_every_n_hours=1.0)

        supervisor = tf.train.Supervisor(
            is_chief=(FLAGS.task == 0),
            logdir=FLAGS.tf_log_dir,
            global_step=global_step,
            saver=saver,
            # We are going to compute summaries ourselves.
            summary_op=None,
            save_model_secs=FLAGS.save_model_secs,
            # But we set this so that this computes global_step/sec.
            save_summaries_secs=FLAGS.save_summaries_secs)
        sess = supervisor.prepare_or_wait_for_session(FLAGS.master)

        # TODO(ricshin):
        # Rewrite this to use supervisor.managed_session().
        # Look at how slim/learning.py handles SyncReplicas, in particular
        # init_token_op.  Use normal text summaries once they exist.
        # Use supervisor.should_stop().
        if FLAGS.task == 0:
            if FLAGS.master not in ('', 'local') and FLAGS.sync_replicas:
                supervisor.start_queue_runners(sess, [chief_queue_runner])
                sess.run(init_token_op)

            sampling_temps = [
                float(x) for x in FLAGS.sampling_temps.split(',')
            ]

            def summarize():
                try:
                    summary_strs, global_step_val = sess.run(
                        [summary_op, global_step])
                    summaries = tf.Summary.FromString(summary_strs)

                    for i, temp in itertools.product(
                            xrange(FLAGS.num_summary_samples), sampling_temps):
                        cnf = textwrap.wrap(
                            cnf_utils.unparse_cnf(m.sample(sess)))
                        summaries.value.add(
                            tag='formula_temp%g_%d' % (temp, i),
                            tensor=make_tensor_proto('\n'.join(cnf)))

                    supervisor.summary_writer.add_summary(
                        summaries.SerializeToString(), global_step_val)
                    status_str = ', '.join('%s=%f' %
                                           (value.tag, value.simple_value)
                                           for value in summaries.value
                                           if value.HasField('simple_value'))
                    tf.logging.info('step=%d: %s', global_step_val, status_str)
                except:
                    # The supervisor eats the backtrace, so print it here.
                    traceback.print_exc()
                    raise

            supervisor.loop(FLAGS.save_summaries_secs, summarize)
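            # supervisor.loop() calls summarize() every
            # FLAGS.save_summaries_secs seconds on a background thread until
            # the supervisor is stopped.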

        # Run the trainer.
        for unused_i in xrange(hparams.max_steps):
            sess.run(train_op)
Example #6
def evaluate_model(hparams, data, train_dir, log, id_to_word,
                   data_ngram_counts, stop_words_id):
  """Evaluate MaskGAN model.
  Args:
    hparams:  Hyperparameters for the MaskGAN.
    data: Data to evaluate.
    train_dir: Path to a directory containing checkpoints.
    id_to_word: Dictionary of indices to words.
    data_ngram_counts: Dictionary of hashed(n-gram tuples) to counts in the
      data_set.
  """
  tf.logging.info('Evaluate model.')

  # Boolean indicating operational mode.
  is_training = False

  if FLAGS.mode == MODE_VALIDATION:
    logdir = FLAGS.base_directory + '/validation'
  elif FLAGS.mode == MODE_TRAIN_EVAL:
    logdir = FLAGS.base_directory + '/train_eval'
  elif FLAGS.mode == MODE_TEST:
    logdir = FLAGS.base_directory + '/test'
  else:
    raise NotImplementedError

  # Wait for a checkpoint to exist.
  print(train_dir)
  print(tf.train.latest_checkpoint(train_dir))
  while not tf.train.latest_checkpoint(train_dir):
    tf.logging.error('Waiting for checkpoint...')
    print('Waiting for checkpoint...')
    time.sleep(10)

  with tf.Graph().as_default():
    # Use a separate container for each trial
    container_name = ''
    with tf.container(container_name):

      # Construct the model.
      if FLAGS.num_rollouts == 1:
        model = create_MaskGAN(hparams, is_training)
      elif FLAGS.num_rollouts > 1:
        model = rollout.create_rollout_MaskGAN(hparams, is_training)
      else:
        raise ValueError('FLAGS.num_rollouts must be a positive integer.')

      # Create the supervisor.  It will take care of initialization, summaries,
      # checkpoints, and recovery.  We only pass the trainable variables
      # to load since things like baselines keep batch_size which may not
      # match between training and evaluation.
      evaluation_variables = tf.trainable_variables()
      evaluation_variables.append(model.global_step)
      eval_saver = tf.train.Saver(var_list=evaluation_variables)
      sv = tf.train.Supervisor(logdir=logdir)
      sess = sv.prepare_or_wait_for_session(FLAGS.eval_master,
                                            start_standard_services=False)

      tf.logging.info('Before sv.loop.')
      sv.loop(FLAGS.eval_interval_secs, evaluate_once,
              (data, sv, model, sess, train_dir, log, id_to_word,
               data_ngram_counts, eval_saver, stop_words_id))
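      # sv.loop() re-runs evaluate_once every eval_interval_secs on a
      # background thread; wait_for_stop() blocks the main thread until the
      # supervisor is stopped.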

      sv.wait_for_stop()
      tf.logging.info('sv.stop().')
      sv.stop()