def devices(self):
  distribute_lib.require_replica_context(self)
  ds = self._strategy
  replica_id = tensor_util.constant_value(self._replica_id_in_sync_group)
  if replica_id is None:  # Non-constant `Tensor` inside `tpu.replicate`.
    # TODO(cjfj): Return other devices when model parallelism is supported.
    return (tpu.core(0),)
  else:
    return (ds.extended.worker_devices[replica_id],)
def devices(self):
  distribute_lib.require_replica_context(self)
  replica_id = tensor_util.constant_value(self._replica_id_in_sync_group)
  return [self._strategy.extended.worker_devices_by_replica[replica_id]]
def devices(self):
  distribute_lib.require_replica_context(self)
  ds = self._distribution_strategy
  replica_id = tensor_util.constant_value(self._replica_id_in_sync_group)
  return (ds.extended.worker_devices[replica_id],)
def devices(self):
  distribute_lib.require_replica_context(self)
  return [
      self._strategy.extended.worker_devices_by_replica[
          self._replica_id_in_sync_group]
  ]
def devices(self):
  distribute_lib.require_replica_context(self)
  replica_id = tensor_util.constant_value(self._replica_id_in_sync_group)
  return [self._distribution_strategy.extended.worker_devices[replica_id]]
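All five variants resolve the current replica's id to the device(s) that replica runs on, and all require being called from a replica context. A minimal usage sketch follows, assuming TensorFlow's public `tf.distribute` API, where this is exposed as the `ReplicaContext.devices` property; `MirroredStrategy` and `step_fn` are illustrative choices, not part of the snippets above.

import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()  # e.g. all local GPUs, or CPU

def step_fn():
  # Inside `Strategy.run` we are in a replica context, so the
  # `require_replica_context` check in the variants above passes.
  ctx = tf.distribute.get_replica_context()
  # `devices` maps this replica's id to its worker device string(s).
  tf.print("replica devices:", ctx.devices)

strategy.run(step_fn)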