def _experimental_distribute_dataset(self, dataset, options):
  return input_util.get_distributed_dataset(
      dataset,
      self._input_workers_with_options(options),
      self._container_strategy(),
      num_replicas_in_sync=self._num_replicas_in_sync,
      options=options)
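# Hedged usage sketch (not part of this module): the public
# tf.distribute.Strategy.experimental_distribute_dataset API dispatches to a
# _experimental_distribute_dataset override like the one above, which wraps the
# input in a DistributedDataset that splits each global batch across replicas.
# The batch sizes, dataset, and step function below are illustrative
# assumptions, not code from this file.
import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()
global_batch_size = 8  # divided into num_replicas_in_sync per-replica batches
dataset = tf.data.Dataset.range(64).batch(global_batch_size)
dist_dataset = strategy.experimental_distribute_dataset(dataset)

@tf.function
def step(per_replica_batch):
  return per_replica_batch  # stand-in for a real training step

for batch in dist_dataset:
  strategy.run(step, args=(batch,))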
def _experimental_distribute_dataset(self, dataset, options):
  if (options and options.experimental_replication_mode ==
      distribute_lib.InputReplicationMode.PER_REPLICA):
    raise NotImplementedError("InputReplicationMode.PER_REPLICA "
                              "is only supported in "
                              "`distribute_datasets_from_function`.")
  return input_util.get_distributed_dataset(
      dataset,
      self._input_workers_with_options(options),
      self._container_strategy(),
      num_replicas_in_sync=self._num_replicas_in_sync,
      options=options)
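# Hedged sketch of the constraint enforced above: with
# InputReplicationMode.PER_REPLICA, callers must go through
# distribute_datasets_from_function so each replica builds its own dataset.
# The strategy choice and dataset_fn below are illustrative assumptions.
import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()
options = tf.distribute.InputOptions(
    experimental_replication_mode=tf.distribute.InputReplicationMode.PER_REPLICA)

def dataset_fn(input_context):
  # One independent dataset per replica; nothing is split by the library.
  return tf.data.Dataset.range(8).batch(2)

# Passing the same options to experimental_distribute_dataset would raise the
# NotImplementedError above; this is the supported path.
per_replica_ds = strategy.distribute_datasets_from_function(dataset_fn, options)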
def _experimental_distribute_dataset(self, dataset, options):
  input_workers_devices = self._input_workers_with_options()
  # If this DistributedDataset is created outside ClusterCoordinator, i.e.,
  # outside a tf.function, we defer building its underlying datasets until it
  # is passed to ClusterCoordinator.create_per_worker_dataset.
  return input_util.get_distributed_dataset(
      dataset,
      input_workers_devices,
      self._container_strategy(),
      num_replicas_in_sync=self._num_replicas_in_sync,
      options=options,
      build=ops.inside_function())  # will be built by ClusterCoordinator
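# Hedged sketch of the build paths above, following the documented
# ClusterCoordinator pattern. The cluster_resolver is a placeholder that must
# describe a real parameter-server cluster; the dataset is illustrative.
import tensorflow as tf

strategy = tf.distribute.experimental.ParameterServerStrategy(
    cluster_resolver)  # placeholder resolver, defined elsewhere
coordinator = tf.distribute.experimental.coordinator.ClusterCoordinator(strategy)

@tf.function
def per_worker_dataset_fn():
  # Inside a tf.function, ops.inside_function() is True, so the
  # DistributedDataset above is built immediately.
  return strategy.experimental_distribute_dataset(
      tf.data.Dataset.range(100).batch(10))

# create_per_worker_dataset runs the fn on each worker, building the datasets.
per_worker_ds = coordinator.create_per_worker_dataset(per_worker_dataset_fn)
per_worker_iter = iter(per_worker_ds)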
def _experimental_distribute_dataset(self, dataset, options):
  # Note that the split_batch_by argument is not passed because it is always 1
  # in this strategy, and passing it adds unnecessary overhead to the dataset.
  if (options and options.experimental_replication_mode ==
      distribute_lib.InputReplicationMode.PER_REPLICA):
    raise NotImplementedError(
        "InputReplicationMode.PER_REPLICA "
        "is only supported in "
        "`experimental_distribute_datasets_from_function`.")
  return input_util.get_distributed_dataset(
      dataset,
      self._input_workers_with_options(options),
      self._container_strategy(),
      options=options)
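# Hedged sketch of why split_batch_by is omitted above: in a single-replica
# strategy such as tf.distribute.OneDeviceStrategy, num_replicas_in_sync is 1,
# so the global batch already equals the per-replica batch and no rebatching
# is needed. The device string and dataset are illustrative assumptions.
import tensorflow as tf

strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0")
assert strategy.num_replicas_in_sync == 1

dataset = tf.data.Dataset.range(8).batch(4)
dist_dataset = strategy.experimental_distribute_dataset(dataset)
for batch in dist_dataset:
  # Each element is a full batch of 4; nothing is split across replicas.
  pass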