Example #1
  def test_preprocess_and_read(self):
    max_length = 30

    # Write 2 tfrecord shards
    csv_path = os.path.join(_TEST_DATA_DIR, 'trembl.csv')
    data.csv_to_tfrecord(csv_path=csv_path, outdir=self._tmpdir, idx=0, total=2)
    data.csv_to_tfrecord(csv_path=csv_path, outdir=self._tmpdir, idx=1, total=2)

    # Construct dataset
    train_files, test_files = data.get_train_test_files(self._tmpdir)
    train_ds, test_ds = data.load_dataset(
        train_files=train_files,
        test_files=test_files,
        batch_size=1,
        shuffle_buffer=1,
        max_train_length=max_length)

    # Load CSV manually
    seqs = []
    with tf.gfile.GFile(csv_path) as f:
      for line in f:
        print(line)
        seq = line.strip().split(',')[-1]
        enc = data.protein_domain.encode([seq], pad=False)[0][:max_length]
        seqs.append(enc)

    # Confirm we got the same sequences.
    for ds_x, target in itertools.zip_longest(iter(train_ds), seqs):
      ds_x = ds_x._numpy()[0]
      self.assertAllEqual(target, ds_x[:len(target)])

    for ds_x, target in itertools.zip_longest(iter(test_ds), seqs):
      ds_x = ds_x._numpy()[0]
      self.assertAllEqual(target, ds_x[:len(target)])
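
For reference, a minimal harness that would make the test method above runnable on its own. This is a sketch under assumptions: the method is taken to live in a tf.test.TestCase subclass (implied by assertAllEqual and self._tmpdir), the class name DataTest and the setUp body are illustrative, and _TEST_DATA_DIR is pointed at a hypothetical testdata directory containing trembl.csv.

import itertools
import os

import tensorflow.compat.v1 as tf

# The `data` module exercised above must be importable here; its package path
# is not shown in the example, so the import is left elided.

_TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'testdata')  # assumed


class DataTest(tf.test.TestCase):  # assumed class name

  def setUp(self):
    super().setUp()
    # get_temp_dir() is provided by tf.test.TestCase.
    self._tmpdir = self.get_temp_dir()

  # test_preprocess_and_read from Example #1 goes here.


if __name__ == '__main__':
  tf.enable_v2_behavior()  # eager iteration over tf.data datasets, as in Example #2
  tf.test.main()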
Example #2
def run_experiment(
        model_dir,
        data_dir=None,
        xid=None,
        batch_size_per_device=128,
        eval_frequency=500,
        checkpoint_frequency=10000,
        save_checkpoints=True,
        restore_checkpoint=True,
        num_eval_steps=None,
        epochs=None,
        max_train_steps=1000000,  # 1 million
        max_train_length=512,
        train_summary_frequency=100,
        max_eval_length=None,
        model_cls=models.FlaxLM):
    """Run experiment.

  Args:
    model_dir: Directory to save checkpoints and metrics to.
    data_dir: Directory to load data.
    xid: Optional experiment id.
    batch_size_per_device: Batch size per device.
    eval_frequency: Steps per eval.
    checkpoint_frequency: How often to checkpoint. If None, only checkpoint once
      at end of run.
    save_checkpoints: If True, checkpoints model according to
      checkpoint_frequency
    restore_checkpoint: If True, will restore checkpoint from directory. Useful
      for robustness to preemption.
    num_eval_steps: Number of eval steps to take on eval dataset.
    epochs: Number of train epochs.
    max_train_steps: Stop training after N steps.
    max_train_length: Crop training sequences to this length.
    train_summary_frequency: Frequency to write train metrics.
    max_eval_length: Maximum eval length. Defaults to max_train_length.
    model_cls: Model class to use.

  Returns:
    FlaxLM resulting from running training.
  """
    if xid is not None:
        model_dir = os.path.join(model_dir,
                                 '%s_l%s' % (str(xid), max_train_length))
    tf.enable_v2_behavior()
    if jax.host_id() == 0:
        summary_writer = tf_summary.create_file_writer(os.path.join(
            model_dir, 'metrics'),
                                                       max_queue=1,
                                                       flush_millis=1000)
        train_summary_writer = logging_lib.ScalarSummary(step=None,
                                                         scope='train/',
                                                         enable_tf=True,
                                                         verbose=0)
        eval_summary_writer = logging_lib.ScalarSummary(step=None,
                                                        scope='eval/',
                                                        enable_tf=True,
                                                        verbose=0)

    batch_size = batch_size_per_device * jax.local_device_count()
    max_eval_length = max_eval_length or max_train_length
    train_files, test_files = data.get_train_valid_files(directory=data_dir)
    train_ds, eval_ds = data.load_dataset(train_files=train_files,
                                          test_files=test_files,
                                          batch_size=batch_size,
                                          max_train_length=max_train_length,
                                          max_eval_length=max_eval_length,
                                          shuffle_buffer=16384)

    with contextlib.ExitStack() as stack:  # pylint: disable=using-constant-test
        if jax.host_id() == 0:
            # Only need metric writer context manager on host 0.
            stack.enter_context(summary_writer.as_default())
        model = model_cls(domain=data.protein_domain, batch_size=batch_size)

        if restore_checkpoint:
            try:
                model.load_checkpoint(model_dir)
            except ValueError:
                # No checkpoint to load -> raises ValueError.
                pass
        start_step = model.train_step

        train_ds = train_ds.repeat(epochs)
        train_iter = iter(train_ds)
        train_metrics = []
        tick = time.time()

        if jax.host_id() == 0:
            _write_gin_configs(os.path.join(model_dir, 'config.gin'))

        num_evals = 0
        for step, batch in zip(range(start_step, max_train_steps), train_iter):
            batch = jax.tree_map(lambda x: x._numpy(), batch)  # pylint: disable=protected-access
            metrics = model.fit_batch(batch)
            train_metrics.append(metrics)

            if jax.host_id() == 0 and (
                (save_checkpoints and checkpoint_frequency
                 and step % checkpoint_frequency == 0 and step > 0)
                    or step == max_train_steps - 1):
                model.save_checkpoint(model_dir)

            if (step + 1) % train_summary_frequency == 0:
                summary = evaluation.combine_metrics(train_metrics)
                logging.info('train in step: %d, loss: %.4f', step,
                             summary['loss'])
                if jax.host_id() == 0:
                    tock = time.time()
                    # The tick/tock interval spans train_summary_frequency
                    # steps, not eval_frequency steps.
                    steps_per_sec = train_summary_frequency / (tock - tick)
                    tick = tock
                    train_summary_writer('steps per second', steps_per_sec,
                                         step)
                    for key, val in summary.items():
                        if jnp.isnan(val):
                            raise ValueError(f'NaN in {key} at step {step}.')
                        train_summary_writer(key, val, step)

                # Reset metric accumulation for the next summary interval.
                train_metrics = []

            if eval_frequency and (step + 1) % eval_frequency == 0:
                eval_summary = evaluation.evaluate(
                    model=model,
                    eval_ds=eval_ds,
                    num_eval_steps=num_eval_steps)

                logging.info('eval in step: %d, loss: %.4f', step,
                             eval_summary['loss'])
                if jax.host_id() == 0:
                    for key, val in eval_summary.items():
                        eval_summary_writer(key, val, step)
                    tf_summary.flush()
                    summary_writer.flush()

                    if num_evals == 0:
                        # Write out config on first eval.
                        _write_gin_configs(
                            os.path.join(model_dir, 'config_after_eval.gin'))
                    num_evals += 1

    if jax.host_id() == 0:
        tf_summary.flush()
        summary_writer.close()
        _write_gin_configs(os.path.join(model_dir, 'config_end.gin'))
    return model
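
As a usage sketch of the API documented in the docstring above, a call might look like the following. The directory paths and hyperparameter values are assumptions, and the gin wiring implied by _write_gin_configs (the real entry point is presumably driven by gin configs and command-line flags) is omitted.

if __name__ == '__main__':
  # Illustrative values; none of these paths or step counts come from the
  # original project.
  trained = run_experiment(
      model_dir='/tmp/protein_lm_run',       # checkpoints, metrics, and *.gin configs land here
      data_dir='/tmp/protein_lm_tfrecords',  # TFRecord shards consumed by data.get_train_valid_files
      batch_size_per_device=64,
      eval_frequency=500,
      checkpoint_frequency=5000,
      num_eval_steps=100,
      max_train_steps=100_000,
      max_train_length=512)
  # `trained` is the model_cls instance (FlaxLM by default); its latest
  # checkpoint has already been written to model_dir.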