Example #1
0
def GetMeshSplitSharding(device_mesh, tensor_split_dims_mapping):
    """Wrapper of xla_sharding.mesh_split_sharding()."""
    # Prepend any dimension prefixes pushed onto the context stack.
    full_mapping = _MESH_SPLIT_DIM_PREFIXES.stack + tensor_split_dims_mapping
    manual_dims = _MANUAL_MESH_DIMS.stack
    if not manual_dims:
        # Omit the kwarg entirely so legacy TF versions (which do not
        # accept manual_mesh_dims) keep working.
        return xla_sharding.mesh_split_sharding(device_mesh, full_mapping)
    return xla_sharding.mesh_split_sharding(
        device_mesh, full_mapping, manual_mesh_dims=manual_dims)
Example #2
0
 def ToXlaOpSharding(self) -> xla_data_pb2.OpSharding:
     """Converts this sharding to an xla_data_pb2.OpSharding proto."""
     if not self.is_replicated:
         # Mesh-split case: derive the op sharding from the device mesh
         # and the stored split-dims mapping.
         return xla_sharding.mesh_split_sharding(
             self.device_mesh, self.split_dims_mapping).proto
     return xla_sharding.Sharding.replicate().proto
Example #3
0
 def ToXlaOpSharding(self) -> xla_data_pb2.OpSharding:
     """Converts this sharding to an xla_data_pb2.OpSharding proto."""
     if self.is_replicated:
         return xla_sharding.Sharding.replicate().proto
     # Prepend any mesh-split dimension prefixes from the context stack
     # before building the mesh-split sharding.
     full_mapping = _MESH_SPLIT_DIM_PREFIXES.stack + self.split_dims_mapping
     return xla_sharding.mesh_split_sharding(
         self.device_mesh, full_mapping).proto