import numpy as np
import rmm  # legacy rmm Python API that provides device_array_from_ptr
from cudf.core.buffer import Buffer  # module-level imports assumed by these fragments


def _mask_from_cuda_array_interface_desc(obj):
    from cudf.utils.utils import calc_chunk_size, mask_dtype, mask_bitsize
    from cudf.utils.cudautils import compact_mask_bytes

    desc = obj.__cuda_array_interface__
    mask = desc.get("mask", None)

    if mask is not None:
        desc = mask.__cuda_array_interface__
        ptr = desc["data"][0]
        nelem = desc["shape"][0]
        typestr = desc["typestr"]
        typecode = typestr[1]
        if typecode == "t":
            # Already a packed bitmask ("t" = bit field): size it in whole
            # mask words rather than in bits.
            nelem = calc_chunk_size(nelem, mask_bitsize)
            mask = Buffer(
                data=ptr, size=nelem * mask_dtype.itemsize, owner=obj
            )
        elif typecode == "b":
            # One boolean byte per element: compact it into a bitmask first.
            dtype = np.dtype(typestr)
            mask = compact_mask_bytes(
                rmm.device_array_from_ptr(
                    ptr, nelem=nelem, dtype=dtype, finalizer=None
                )
            )
            mask = Buffer(mask)
        else:
            raise NotImplementedError(
                f"Cannot infer mask from typestr {typestr}"
            )
    return mask
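# --- Usage sketch (not from the original source) ----------------------------
# _mask_from_cuda_array_interface_desc only needs an object whose
# __cuda_array_interface__ carries the optional "mask" entry pointing at a
# boolean device array.  MaskedDeviceArray is a hypothetical wrapper written
# purely to illustrate that contract; it assumes numba.cuda is available.
import numba.cuda


class MaskedDeviceArray:  # hypothetical helper, not part of cudf
    def __init__(self, values, valid):
        self._values = numba.cuda.to_device(values)
        self._valid = numba.cuda.to_device(valid)  # dtype=bool -> typestr "|b1"

    @property
    def __cuda_array_interface__(self):
        desc = dict(self._values.__cuda_array_interface__)
        desc["mask"] = self._valid  # any object exposing the interface works
        return desc


values = np.arange(8, dtype="int32")
valid = np.array([1, 1, 0, 1, 1, 1, 0, 1], dtype="bool")
mask_buf = _mask_from_cuda_array_interface_desc(MaskedDeviceArray(values, valid))
# mask_buf is a Buffer holding the compacted validity bitmask.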
def mask_array_view(self):
    """
    View the mask as a device array
    """
    result = rmm.device_array_from_ptr(
        ptr=self.mask.ptr,
        nelem=calc_chunk_size(len(self), mask_bitsize),
        dtype=np.int8,
    )
    # Keep the owning column alive for as long as the view is referenced.
    result.gpu_data._obj = self
    return result
def data_array_view(self):
    """
    View the data as a device array or nvstrings object
    """
    if self.dtype == "object":
        return self.nvstrings

    if is_categorical_dtype(self.dtype):
        return self.codes.data_array_view
    else:
        dtype = self.dtype

    result = rmm.device_array_from_ptr(
        ptr=self.data.ptr, nelem=len(self), dtype=dtype
    )
    # Keep the owning column alive for as long as the view is referenced.
    result.gpu_data._obj = self
    return result
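# --- Usage sketch (not from the original source) ----------------------------
# Assumes a legacy cudf build whose column objects carry the two view methods
# above (written here as plain methods, exactly as they appear in this
# listing) and that Series._column exposes such a column.  Both views are
# zero-copy, so writing through them mutates the column's device memory.
import cudf

s = cudf.Series([1, 2, None, 4])
col = s._column
host_values = col.data_array_view().copy_to_host()  # raw values (the null slot is undefined)
packed_bits = col.mask_array_view().copy_to_host()  # packed validity bitmask, as int8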
import numba.cuda  # assumed module-level import for the two fragments below


def gpu_view_as(buf, dtype, shape=None, strides=None):
    # shape/strides are accepted but not used by this variant.
    ptr = numba.cuda.cudadrv.driver.device_pointer(buf.to_numba())
    return rmm.device_array_from_ptr(
        ptr, buf.size // dtype.itemsize, dtype=dtype
    )
def gpu_view_as(nbytes, buf, dtype, shape=None, strides=None):
    ptr = numba.cuda.cudadrv.driver.device_pointer(buf.to_numba())
    arr = rmm.device_array_from_ptr(ptr, nbytes // dtype.itemsize, dtype=dtype)
    # Keep the underlying buffer alive for as long as the view is referenced.
    arr.gpu_data._obj = buf
    return arr
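# --- Usage sketch (not from the original source) ----------------------------
# gpu_view_as only requires an explicit byte count plus an object with a
# .to_numba() method returning a numba device array.  DeviceBytes is a
# hypothetical stand-in for whatever buffer type the surrounding code passes.
class DeviceBytes:  # hypothetical wrapper, not part of cudf/rmm
    def __init__(self, host_array):
        self._darr = numba.cuda.to_device(host_array)
        self.size = host_array.nbytes

    def to_numba(self):
        return self._darr


raw = DeviceBytes(np.arange(4, dtype="int32").view("int8"))  # 16 device bytes
ints = gpu_view_as(raw.size, raw, np.dtype("int32"))  # zero-copy int32 view
# ints.copy_to_host() -> array([0, 1, 2, 3], dtype=int32)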
def to_host_array(self):
    # Copy the raw device bytes behind this buffer back into host memory.
    return rmm.device_array_from_ptr(
        self.ptr, nelem=self.size, dtype="int8"
    ).copy_to_host()
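# --- Usage sketch (not from the original source) ----------------------------
# Assumes to_host_array is attached to the same Buffer type used above (it
# only relies on .ptr and .size) and that Buffer accepts any object exposing
# __cuda_array_interface__, as _mask_from_cuda_array_interface_desc does with
# the compacted mask.
buf = Buffer(numba.cuda.to_device(np.arange(4, dtype="int32")))
host_bytes = buf.to_host_array()        # int8 copy of the raw device bytes
host_values = host_bytes.view("int32")  # reinterpret on the host -> [0, 1, 2, 3]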