def copy_tensor(dst_tensor, src_tensor):
    """Copy the content from src_tensor to dst_tensor.

    Args:
        dst_tensor: the tensor to copy to (destination).
        src_tensor: the tensor to copy from (source).

    Returns:
        None

    Raises:
        ValueError: if either tensor is not a supported CPU tensor type.
    """
    copied = True
    if isinstance(dst_tensor, numpy.ndarray) \
            and isinstance(src_tensor, numpy.ndarray):
        numpy.copyto(dst_tensor, src_tensor)
    elif torch_available():
        if isinstance(dst_tensor, torch.Tensor) and isinstance(
                src_tensor, torch.Tensor):
            dst_tensor.copy_(src_tensor)
        elif isinstance(dst_tensor, torch.Tensor) and isinstance(
                src_tensor, numpy.ndarray):
            # torch.from_numpy preserves the source dtype; the previous
            # torch.Tensor(...) constructor cast everything to the default
            # float32 dtype, corrupting non-float32 copies.
            t = torch.from_numpy(src_tensor)
            dst_tensor.copy_(t)
        elif isinstance(dst_tensor, numpy.ndarray) and isinstance(
                src_tensor, torch.Tensor):
            t = src_tensor.numpy()
            numpy.copyto(dst_tensor, t)
        else:
            copied = False
    else:
        copied = False
    if not copied:
        raise ValueError(
            "Unsupported tensor type. Got: {} and {}. Supported "
            "CPU tensor types are: torch.Tensor, numpy.ndarray.".format(
                type(dst_tensor), type(src_tensor)))
def copy_tensor(dst_tensor, src_tensor):
    """Copy the content from src_tensor to dst_tensor.

    Args:
        dst_tensor: the tensor to copy to (destination).
        src_tensor: the tensor to copy from (source).

    Returns:
        None

    Raises:
        ValueError: if either tensor is not a supported GPU tensor type.
    """
    copied = True
    if isinstance(dst_tensor, cupy.ndarray) and isinstance(
            src_tensor, cupy.ndarray):
        cupy.copyto(dst_tensor, src_tensor)
    elif torch_available():
        if isinstance(dst_tensor, torch.Tensor) and isinstance(
                src_tensor, torch.Tensor):
            dst_tensor.copy_(src_tensor)
        elif isinstance(dst_tensor, torch.Tensor) and isinstance(
                src_tensor, cupy.ndarray):
            # Zero-copy hand-off from cupy to torch via the DLPack protocol.
            t = torch.utils.dlpack.from_dlpack(src_tensor.toDlpack())
            dst_tensor.copy_(t)
        elif isinstance(dst_tensor, cupy.ndarray) and isinstance(
                src_tensor, torch.Tensor):
            # Zero-copy hand-off from torch to cupy via the DLPack protocol.
            t = cupy.fromDlpack(torch.utils.dlpack.to_dlpack(src_tensor))
            cupy.copyto(dst_tensor, t)
        else:
            copied = False
    else:
        copied = False
    if not copied:
        raise ValueError(
            "Unsupported tensor type. Got: {} and {}. Supported "
            "GPU tensor types are: torch.Tensor, cupy.ndarray.".format(
                type(dst_tensor), type(src_tensor)))
def get_tensor_n_elements(tensor):
    """Return the total element count of a numpy or torch tensor."""
    if isinstance(tensor, numpy.ndarray):
        return tensor.size
    if torch_available() and isinstance(tensor, torch.Tensor):
        return torch.numel(tensor)
    raise ValueError("Unsupported tensor type. Got: {}.".format(type(tensor)))
def get_nccl_tensor_dtype(tensor):
    """Map a cupy/torch tensor's dtype to its NCCL dtype constant."""
    if isinstance(tensor, cupy.ndarray):
        return NUMPY_NCCL_DTYPE_MAP[tensor.dtype.type]
    if torch_available() and isinstance(tensor, torch.Tensor):
        return TORCH_NCCL_DTYPE_MAP[tensor.dtype]
    raise ValueError("Unsupported tensor type. "
                     "Got: {}.".format(type(tensor)))
def get_tensor_shape(tensor):
    """Return the shape of a numpy or torch tensor as a list of ints."""
    if isinstance(tensor, numpy.ndarray):
        return list(tensor.shape)
    if torch_available() and isinstance(tensor, torch.Tensor):
        return list(tensor.size())
    raise ValueError("Unsupported tensor type. Got: {}. Supported "
                     "CPU tensor types are: torch.Tensor, "
                     "numpy.ndarray.".format(type(tensor)))
def get_numpy_tensor_dtype(tensor):
    """Return the corresponding numpy dtype given a tensor.

    Note: the original docstring said "Cupy dtype", but this helper only
    handles numpy arrays and torch tensors and returns numpy scalar types.

    Args:
        tensor: a numpy.ndarray or torch.Tensor.

    Returns:
        The numpy scalar type (e.g. numpy.float32) of the tensor's dtype.

    Raises:
        ValueError: if the tensor is not a supported CPU tensor type.
    """
    if isinstance(tensor, numpy.ndarray):
        return tensor.dtype.type
    if torch_available():
        if isinstance(tensor, torch.Tensor):
            # Translate the torch dtype into its numpy counterpart.
            return TORCH_NUMPY_DTYPE_MAP[tensor.dtype]
    raise ValueError("Unsupported tensor type. Got: {}. Supported "
                     "CPU tensor types are: torch.Tensor, "
                     "numpy.ndarray.".format(type(tensor)))
def get_tensor_device(tensor):
    """Return "cpu" or "cuda" depending on where the tensor lives."""
    if isinstance(tensor, numpy.ndarray):
        # numpy arrays always live in host memory.
        return "cpu"
    if torch_available() and isinstance(tensor, torch.Tensor):
        return "cuda" if tensor.is_cuda else "cpu"
    raise RuntimeError("Unrecognized tensor type: "
                       "'{}'.".format(type(tensor)))
def get_tensor_strides(tensor):
    """Return the tensor's strides, in units of elements, as a list."""
    if isinstance(tensor, cupy.ndarray):
        # cupy reports strides in bytes; normalize to element counts.
        itemsize = tensor.dtype.itemsize
        return [int(byte_stride / itemsize) for byte_stride in tensor.strides]
    if torch_available() and isinstance(tensor, torch.Tensor):
        # torch already reports strides in element counts.
        return list(tensor.stride())
    raise ValueError("Unsupported tensor type. Got: {}. Supported "
                     "GPU tensor types are: torch.Tensor, "
                     "cupy.ndarray.".format(type(tensor)))
def get_gloo_tensor_dtype(tensor):
    """Map a CPU tensor's dtype to its GLOO dtype constant."""
    if isinstance(tensor, numpy.ndarray):
        return NUMPY_GLOO_DTYPE_MAP[tensor.dtype.type]
    if torch_available() and isinstance(tensor, torch.Tensor):
        # GLOO is a CPU backend; reject CUDA tensors explicitly.
        if tensor.is_cuda:
            raise ValueError("Expect torch CPU tensor. Got {}.".format(
                tensor.device))
        return TORCH_GLOO_DTYPE_MAP[tensor.dtype]
    raise ValueError("Unsupported tensor type. Got: {}.".format(type(tensor)))
def get_tensor_ptr(tensor):
    """Return the pointer to the underlying memory storage of a tensor."""
    if isinstance(tensor, numpy.ndarray):
        # Integer address of the array's first element.
        return tensor.ctypes.data
    if torch_available() and isinstance(tensor, torch.Tensor):
        if tensor.is_cuda:
            raise RuntimeError("Torch tensor must be on CPU "
                               "when using GLOO collectives.")
        return tensor.data_ptr()
    raise ValueError("Unsupported tensor type. Got: {}. Supported "
                     "CPU tensor types are: torch.Tensor, "
                     "numpy.ndarray.".format(type(tensor)))
def _check_single_tensor_input(tensor): """Check if the tensor is with a supported type.""" if isinstance(tensor, np.ndarray): return if types.cupy_available(): if isinstance(tensor, types.cp.ndarray): return if types.torch_available(): if isinstance(tensor, types.th.Tensor): return raise RuntimeError("Unrecognized tensor type '{}'. Supported types are: " "np.ndarray, torch.Tensor, cupy.ndarray.".format( type(tensor)))
def get_tensor_ptr(tensor):
    """Return the pointer to the underlying memory storage of a tensor.

    Args:
        tensor: a cupy.ndarray, numpy.ndarray, or CUDA torch.Tensor.

    Returns:
        int: the device/host address of the first element.

    Raises:
        RuntimeError: if a torch tensor is not on GPU.
        ValueError: if the tensor type is unsupported.
    """
    if isinstance(tensor, cupy.ndarray):
        return tensor.data.ptr
    if isinstance(tensor, numpy.ndarray):
        # Bug fix: ndarray.data is a memoryview object, not an address.
        # ctypes.data yields the integer pointer, matching the behavior of
        # the CPU (GLOO) variant of this helper elsewhere in the file.
        return tensor.ctypes.data
    if torch_available():
        if isinstance(tensor, torch.Tensor):
            if not tensor.is_cuda:
                raise RuntimeError("torch tensor must be on gpu.")
            return tensor.data_ptr()
    raise ValueError("Unsupported tensor type. "
                     "Got: {}.".format(type(tensor)))
def get_tensor_device(tensor):
    """Return the GPU index of a tensor.

    Args:
        tensor: a cupy.ndarray or torch.Tensor on GPU.

    Returns:
        int: the GPU device index the tensor resides on.

    Raises:
        RuntimeError: if the tensor is not on a valid GPU.
        ValueError: if the tensor type is unsupported.
    """
    if isinstance(tensor, cupy.ndarray):
        try:
            device = tensor.device.id
        # Renamed from `exec`, which shadowed the `exec` builtin.
        except AttributeError as err:
            raise RuntimeError("The tensor is not on a valid GPU.") from err
    elif torch_available() and isinstance(tensor, torch.Tensor):
        device = tensor.device.index
        # A CPU torch tensor has device.index == None.
        if not isinstance(device, int):
            raise RuntimeError("The tensor is not on a valid GPU.")
    else:
        raise ValueError("Unsupported tensor type. Got: {}.".format(
            type(tensor)))
    return device
numpy.uint8: pygloo.glooDataType_t.glooUint8, numpy.uint32: pygloo.glooDataType_t.glooUint32, numpy.uint64: pygloo.glooDataType_t.glooUint64, numpy.int8: pygloo.glooDataType_t.glooInt8, numpy.int32: pygloo.glooDataType_t.glooInt32, numpy.int64: pygloo.glooDataType_t.glooInt64, # FLOAT types numpy.half: pygloo.glooDataType_t.glooFloat16, numpy.float: pygloo.glooDataType_t.glooFloat64, numpy.float16: pygloo.glooDataType_t.glooFloat16, numpy.float32: pygloo.glooDataType_t.glooFloat32, numpy.float64: pygloo.glooDataType_t.glooFloat64, numpy.double: pygloo.glooDataType_t.glooFloat64, } if torch_available(): import torch TORCH_GLOO_DTYPE_MAP = { torch.int: pygloo.glooDataType_t.glooInt32, torch.uint8: pygloo.glooDataType_t.glooUint8, torch.int8: pygloo.glooDataType_t.glooInt8, torch.int32: pygloo.glooDataType_t.glooInt32, torch.int64: pygloo.glooDataType_t.glooInt64, torch.long: pygloo.glooDataType_t.glooInt64, # FLOAT types torch.half: pygloo.glooDataType_t.glooFloat16, torch.float: pygloo.glooDataType_t.glooFloat32, torch.float16: pygloo.glooDataType_t.glooFloat16, torch.float32: pygloo.glooDataType_t.glooFloat32, torch.float64: pygloo.glooDataType_t.glooFloat64, torch.double: pygloo.glooDataType_t.glooFloat64,