        def _get_task_eval_dataset(task, sequence_length, split):
            """Returns the eval tf.data.Dataset for a single task."""
            # TODO(sharannarang): Replace with more general function.
            eval_datasets = mesh_transformer.mesh_eval_dataset_fn(
                sequence_length=sequence_length,
                dataset_split=split,
                mixture_or_task_name=task.name,
            )

            # mesh_eval_dataset_fn returns a list of eval datasets; with a
            # single task requested there is one entry, whose dataset_fn()
            # builds the actual tf.data.Dataset.
            return eval_datasets[0].dataset_fn()
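
  # `estimator_input_fn` below is a closure from the same (assumed) enclosing
  # eval method: `tasks`, `sequence_length`, `split`, and `self` are bound
  # there rather than in this excerpt. The final short batch is padded back to
  # a full batch because TPU runs generally require fixed batch shapes.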
  def estimator_input_fn(params):
    """Eval input function for estimator."""
    del params
    # Concatenate all dataset inputs to only have to do one decode loop.
    combined_ds = None
    for task in tasks:
      ds = mesh_transformer.mesh_eval_dataset_fn(
          mixture_or_task_name=task.name,
          sequence_length=sequence_length,
          dataset_split=split)[0].dataset_fn()
      # Strip out any feature keys the model does not use.
      ds = ds.map(
          utils.filter_features,
          num_parallel_calls=tf.data.experimental.AUTOTUNE)
      combined_ds = ds if combined_ds is None else combined_ds.concatenate(ds)
    combined_ds = combined_ds.batch(self.batch_size, drop_remainder=False)  # pytype:disable=attribute-error
    # Pad the final, possibly short, batch back up to batch_size.
    combined_ds = transformer_dataset.trim_and_pad_dataset(
        combined_ds, length=self.batch_size)
    combined_ds = combined_ds.prefetch(tf.data.experimental.AUTOTUNE)
    return combined_ds
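
  # A minimal usage sketch (assumed context, not part of this excerpt): both
  # closures would be driven from the enclosing eval method, with an Estimator
  # constructed elsewhere. `estimator` and `checkpoint_path` are hypothetical
  # names used only for illustration:
  #
  #   for task in tasks:
  #     task_ds = _get_task_eval_dataset(task, sequence_length, split)
  #     ...  # e.g. cache targets for metrics, per task
  #   decodes = estimator.predict(
  #       input_fn=estimator_input_fn, checkpoint_path=checkpoint_path)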