Example #1
import tensorflow as tf


def get_batches(dataset: tf.data.Dataset,
                batch_size: int = 64) -> tf.data.Dataset:
    """Returns a Dataset that consists of padded batches when iterated over."""
    return dataset.padded_batch(
        batch_size,
        padded_shapes={
            'idx': [],         # scalar example id: nothing to pad
            'sentence': [-1],  # pad token ids to the longest sentence in the batch
            'label': [1],      # fixed-size label vector
            'length': []       # scalar original length: nothing to pad
        },
        drop_remainder=False).prefetch(tf.data.experimental.AUTOTUNE)
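
A minimal usage sketch (not part of the original snippet): the toy generator and its values below are hypothetical, assuming dataset elements that carry exactly the four features named in padded_shapes.

def _toy_examples():
    yield {'idx': 0, 'sentence': [3, 1, 4], 'label': [0], 'length': 3}
    yield {'idx': 1, 'sentence': [1, 5], 'label': [1], 'length': 2}

toy = tf.data.Dataset.from_generator(
    _toy_examples,
    output_signature={
        'idx': tf.TensorSpec([], tf.int32),
        'sentence': tf.TensorSpec([None], tf.int32),  # variable-length tokens
        'label': tf.TensorSpec([1], tf.int32),
        'length': tf.TensorSpec([], tf.int32),
    })

for batch in get_batches(toy, batch_size=2):
    print(batch['sentence'].numpy())  # [[3 1 4] [1 5 0]]: shorter row zero-padded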
Example #2
from typing import Dict, List, Optional, Union

import tensorflow as tf

AUTOTUNE = tf.data.experimental.AUTOTUNE


def pack_dataset(dataset: tf.data.Dataset,
                 key2length: Union[int, Dict[str, int]],
                 keys: Optional[List[str]] = None) -> tf.data.Dataset:
    """Creates a 'packed' version of a dataset on-the-fly.

  Adapted from the mesh-tf implementation.

  This is meant to replace the irritation of having to create a separate
  "packed" version of a dataset to train efficiently on TPU.
  Each example in the output dataset represents several examples in the
  input dataset.
  For each key in the input dataset, two additional keys are created:
  <key>_segmentation: an int32 tensor identifying the parts
     representing the original example.
  <key>_position: an int32 tensor identifying the position within the original
     example.
  Example:
  Two input examples get combined to form an output example.
  The input examples are:
  {"inputs": [8, 7, 1, 0], "targets":[4, 1, 0]}
  {"inputs": [2, 3, 4, 1], "targets":[5, 6, 1]}
  The output example is:
  {
                 "inputs": [8, 7, 1, 2, 3, 4, 1, 0, 0, 0]
    "inputs_segmentation": [1, 1, 1, 2, 2, 2, 2, 0, 0, 0]
        "inputs_position": [0, 1, 2, 0, 1, 2, 3, 0, 0, 0]
                "targets": [4, 1, 5, 6, 1, 0, 0, 0, 0, 0]
   "targets_segmentation": [1, 1, 2, 2, 2, 0, 0, 0, 0, 0]
       "targets_position": [0, 1, 0, 1, 2, 0, 0, 0, 0, 0]
  }
  0 represents padding in both the inputs and the outputs.
  Sequences in the incoming examples are truncated to length "length", and the
  sequences in the output examples all have fixed (padded) length "length".

  Args:
    dataset: a tf.data.Dataset
    key2length: an integer, or a dict from feature-key to integer
    keys: a list of strings (e.g. ["inputs", "targets"])

  Returns:
    a tf.data.Dataset
  """
    shapes = tf.nest.map_structure(lambda spec: spec.shape,
                                   dataset.element_spec)
    if keys is None:
        keys = list(shapes.keys())
    for k in keys:
        if k not in shapes:
            raise ValueError(
                'Key %s not found in dataset.  Available keys are %s' %
                (k, shapes.keys()))
        if not shapes[k].is_compatible_with(tf.TensorShape([None])):
            raise ValueError('Tensors to be packed must be one-dimensional.')
    # make sure that the length dictionary contains all keys as well as the
    # keys suffixed by "_segmentation" and "_position"
    if isinstance(key2length, int):
        key2length = {k: key2length for k in keys}
    for k in keys:
        for suffix in ['_segmentation', '_position']:
            key2length[k + suffix] = key2length[k]

    # trim to length
    dataset = dataset.map(lambda x: {k: x[k][:key2length[k]]
                                     for k in keys},
                          num_parallel_calls=AUTOTUNE)
    # Setting batch_size to the largest value in key2length guarantees that the
    # sequences concatenated within one batch (each of length >= 1) are enough
    # to fill at least one packed example.
    batch_size = max(key2length.values())
    dataset = dataset.padded_batch(batch_size,
                                   padded_shapes={k: [-1]
                                                  for k in keys})
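    # _pack_with_tf_ops is a companion helper from the same source file (not
    # shown in this snippet); it performs the actual packing.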
    dataset = _pack_with_tf_ops(dataset, keys, key2length)

    # Set the Tensor shapes correctly since they get lost in the process.
    def my_fn(x):
        return {k: tf.reshape(v, [key2length[k]]) for k, v in x.items()}

    return dataset.map(my_fn, num_parallel_calls=AUTOTUNE)
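
A hypothetical usage sketch mirroring the docstring example; it assumes the companion helper _pack_with_tf_ops (called above but not shown) is in scope. Passing an int for key2length truncates and pads every key to that length; a per-key dict such as {'inputs': 10, 'targets': 10} works as well. The printed values are what the docstring example predicts.

def _toy_pairs():
    yield {'inputs': [8, 7, 1], 'targets': [4, 1]}
    yield {'inputs': [2, 3, 4, 1], 'targets': [5, 6, 1]}

toy = tf.data.Dataset.from_generator(
    _toy_pairs,
    output_signature={
        'inputs': tf.TensorSpec([None], tf.int32),
        'targets': tf.TensorSpec([None], tf.int32),
    })

packed = pack_dataset(toy, key2length=10)
for ex in packed.take(1):
    print(ex['inputs'].numpy())               # [8 7 1 2 3 4 1 0 0 0]
    print(ex['inputs_segmentation'].numpy())  # [1 1 1 2 2 2 2 0 0 0]
    print(ex['inputs_position'].numpy())      # [0 1 2 0 1 2 3 0 0 0]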