Example #1
    def sum(self, axis=None):
        """Sum the matrix over the given axis.  If the axis is None, sum
        over both rows and columns, returning a scalar.
        """
        # The spmatrix base class already does axis=0 and axis=1 efficiently
        # so we only do the case axis=None here
        if axis is None:
            return self.data.sum()
        elif (not hasattr(self, 'blocksize') and
              axis in self._swap(((1, -1), (0, 2)))[0]):
            # faster than multiplication for large minor axis in CSC/CSR
            # Mimic numpy's casting.
            if np.issubdtype(self.dtype, np.float_):
                res_dtype = np.float_
            elif (self.dtype.kind == 'u' and
                  np.can_cast(self.dtype, np.uint)):
                res_dtype = np.uint
            elif np.can_cast(self.dtype, np.int_):
                res_dtype = np.int_
            else:
                res_dtype = self.dtype
            ret = np.zeros(len(self.indptr) - 1, dtype=res_dtype)

            major_index, value = self._minor_reduce(np.add)
            ret[major_index] = value
            ret = np.asmatrix(ret)
            if axis % 2 == 1:
                ret = ret.T
            return ret
        else:
            return spmatrix.sum(self, axis)
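
A quick aside, not part of the original source: the dtype ladder above hinges on these np.can_cast checks, with results shown for a typical 64-bit platform.

import numpy as np

print(np.can_cast(np.uint8, np.uint))   # True  -> unsigned sums widen to np.uint
print(np.can_cast(np.int32, np.int_))   # True  -> signed sums widen to np.int_
print(np.can_cast(np.uint64, np.int_))  # False -> uint64 keeps its own dtype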
Example #2
    def sum(self, axis=None):
        """Sum the matrix over the given axis.  If the axis is None, sum
        over both rows and columns, returning a scalar.
        """
        # We use multiplication by an array of ones to achieve this.
        # For some sparse matrix formats more efficient methods are
        # possible -- these should override this function.
        m, n = self.shape

        # Mimic numpy's casting.
        if np.issubdtype(self.dtype, np.float_):
            res_dtype = np.float_
        elif (self.dtype.kind == 'u' and
              np.can_cast(self.dtype, np.uint)):
            res_dtype = np.uint
        elif np.can_cast(self.dtype, np.int_):
            res_dtype = np.int_
        else:
            res_dtype = self.dtype

        if axis is None:
            # sum over rows and columns
            return (self * np.asmatrix(np.ones((n, 1), dtype=res_dtype))).sum()

        if axis < 0:
            axis += 2
        if axis == 0:
            # sum over columns
            return np.asmatrix(np.ones((1, m), dtype=res_dtype)) * self
        elif axis == 1:
            # sum over rows
            return self * np.asmatrix(np.ones((n, 1), dtype=res_dtype))
        else:
            raise ValueError("axis out of bounds")
Example #3
def validate_datatype(validator, datatype, instance, schema):
    if isinstance(instance, list):
        array = inline_data_asarray(instance)
        in_datatype, _ = numpy_dtype_to_asdf_datatype(array.dtype)
    elif isinstance(instance, dict):
        if 'datatype' in instance:
            in_datatype = instance['datatype']
        elif 'data' in instance:
            array = inline_data_asarray(instance['data'])
            in_datatype, _ = numpy_dtype_to_asdf_datatype(array.dtype)
        else:
            raise ValidationError("Not an array")
    elif isinstance(instance, (np.ndarray, NDArrayType)):
        in_datatype, _ = numpy_dtype_to_asdf_datatype(instance.dtype)
    else:
        raise ValidationError("Not an array")

    if datatype == in_datatype:
        return

    if schema.get('exact_datatype', False):
        yield ValidationError(
            "Expected datatype '{0}', got '{1}'".format(
                datatype, in_datatype))

    np_datatype = asdf_datatype_to_numpy_dtype(datatype)
    np_in_datatype = asdf_datatype_to_numpy_dtype(in_datatype)

    if not np_datatype.fields:
        if np_in_datatype.fields:
            yield ValidationError(
                "Expected scalar datatype '{0}', got '{1}'".format(
                    datatype, in_datatype))

        if not np.can_cast(np_in_datatype, np_datatype, 'safe'):
            yield ValidationError(
                "Can not safely cast from '{0}' to '{1}' ".format(
                    in_datatype, datatype))

    else:
        if not np_in_datatype.fields:
            yield ValidationError(
                "Expected structured datatype '{0}', got '{1}'".format(
                    datatype, in_datatype))

        if len(np_in_datatype.fields) != len(np_datatype.fields):
            yield ValidationError(
                "Mismatch in number of columns: "
                "Expected {0}, got {1}".format(
                    len(datatype), len(in_datatype)))

        for i in range(len(np_datatype.fields)):
            in_type = np_in_datatype[i]
            out_type = np_datatype[i]
            if not np.can_cast(in_type, out_type, 'safe'):
                yield ValidationError(
                    "Can not safely cast to expected datatype: "
                    "Expected {0}, got {1}".format(
                        numpy_dtype_to_asdf_datatype(out_type)[0],
                        numpy_dtype_to_asdf_datatype(in_type)[0]))
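
A hedged sketch of the per-field safe-cast loop above, with made-up dtypes: each source field must be safely castable to the corresponding target field.

import numpy as np

src = np.dtype([('a', np.int16), ('b', np.float32)])
dst = np.dtype([('a', np.int32), ('b', np.float64)])
print(all(np.can_cast(src[i], dst[i], 'safe')
          for i in range(len(dst.names))))  # True: int16->int32, float32->float64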
Example #4
def transform_scalars(dataset, constant=0):
    """Add a constant to the data set"""

    from tomviz import utils
    import numpy as np

    scalars = utils.get_scalars(dataset)
    if scalars is None:
        raise RuntimeError("No scalars found!")

    # Try to be a little smart so that we don't always just produce a
    # double-precision output
    newMin = np.min(scalars) + constant
    newMax = np.max(scalars) + constant
    if (float(constant).is_integer() and float(newMin).is_integer()
            and float(newMax).is_integer()):
        # Let ints be ints!
        constant = int(constant)
        newMin = int(newMin)
        newMax = int(newMax)
    for dtype in [np.uint8, np.int8, np.uint16, np.int16, np.uint32, np.int32,
                  np.uint64, np.int64, np.float32, np.float64]:
        if np.can_cast(newMin, dtype) and np.can_cast(newMax, dtype):
            constant = np.array([constant], dtype=dtype)
            break

    # numpy should cast to an appropriate output type to avoid overflow
    result = scalars + constant

    utils.set_scalars(dataset, result)
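
Note that np.can_cast(newMin, dtype) above relies on value-based casting for scalars: under NumPy older than 2.0 the answer depends on the actual value, not just its type (NumPy 2.0 removed this behavior for Python scalars). A small illustration under those pre-2.0 semantics:

import numpy as np

print(np.can_cast(255, np.uint8, casting='safe'))  # True: 255 fits in uint8
print(np.can_cast(256, np.uint8, casting='safe'))  # False: 256 does not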
Example #5
def get_sum_dtype(dtype):
    """Mimic numpy's casting for np.sum"""
    if dtype.kind == 'u' and np.can_cast(dtype, np.uint):
        return np.uint
    if np.can_cast(dtype, np.int_):
        return np.int_
    return dtype
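
Hypothetical usage of get_sum_dtype, with outputs as they would appear on a typical 64-bit build:

import numpy as np

print(get_sum_dtype(np.dtype('uint8')))    # <class 'numpy.uint64'> (np.uint)
print(get_sum_dtype(np.dtype('int16')))    # <class 'numpy.int64'> (np.int_)
print(get_sum_dtype(np.dtype('float32')))  # float32, returned unchanged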
Example #6
 def test_can_cast_record(self):
     import numpy as np
     rec1 = np.dtype([('x', int), ('y', float)])
     rec2 = np.dtype([('x', float), ('y', float)])
     rec3 = np.dtype([('y', np.float64), ('x', float)])
     assert not np.can_cast(rec1, rec2, 'equiv')
     assert np.can_cast(rec2, rec3, 'equiv')
     assert np.can_cast(rec1, rec2)
Example #7
    def __new__(cls, value, unit=None, dtype=None, copy=True, order=None,
                subok=False, ndmin=0):

        if unit is not None:
            # convert unit first, to avoid multiple string->unit conversions
            unit = Unit(unit)

        # optimize speed for Quantity with no dtype given, copy=False
        if isinstance(value, Quantity):
            if unit is not None and unit is not value.unit:
                value = value.to(unit)
                # the above already makes a copy (with float dtype)
                copy = False

            if not subok and type(value) is not cls:
                value = value.view(cls)

            if dtype is None:
                if not copy:
                    return value

                if not np.can_cast(np.float32, value.dtype):
                    dtype = float

            return np.array(value, dtype=dtype, copy=copy, order=order,
                            subok=True, ndmin=ndmin)

        # Maybe list/tuple of Quantity? short-circuit array for speed
        if(not isinstance(value, np.ndarray) and isiterable(value) and
           all(isinstance(v, Quantity) for v in value)):
            if unit is None:
                unit = value[0].unit
            value = [q.to(unit).value for q in value]
            copy = False  # copy already made

        else:
            if unit is None:
                unit = dimensionless_unscaled

        value = np.array(value, dtype=dtype, copy=copy, order=order,
                         subok=False, ndmin=ndmin)

        # check that array contains numbers or long int objects
        if (value.dtype.kind in 'OSU' and
            not (value.dtype.kind == 'O' and
                 isinstance(value.item(() if value.ndim == 0 else 0),
                            numbers.Number))):
            raise TypeError("The value must be a valid Python or "
                            "Numpy numeric type.")

        # by default, cast any integer, boolean, etc., to float
        if dtype is None and not np.can_cast(np.float32, value.dtype):
            value = value.astype(float)

        value = value.view(cls)
        value._unit = unit

        return value
Example #8
    def resolve(self, identifier, additional_namespace=None, strip_units=False):
        '''
        The additional_namespace (e.g. the local/global namespace) will only
        be used if the namespace does not contain any user-defined namespace.
        '''        
        # We save tuples of (namespace description, referred object) to
        # give meaningful warnings in case of duplicate definitions
        matches = []
        
        if self.is_explicit or additional_namespace is None: 
            namespaces = self.namespaces
        else:            
            namespaces = OrderedDict(self.namespaces)
            # Add the additional namespace in the end
            description, namespace = additional_namespace
            namespaces[description] = namespace
        
        for description, namespace in namespaces.items():
            if identifier in namespace:
                matches.append((description, namespace[identifier]))            

        if len(matches) == 0:
            # No match at all
            raise KeyError(('The identifier "%s" could not be resolved.') % 
                           (identifier))
        elif len(matches) > 1:
            # Possibly, all matches refer to the same object
            first_obj = matches[0][1]
            if not all([(m[1] is first_obj) or _same_function(m[1], first_obj)
                        for m in matches]):
                _conflict_warning(('The name "%s" refers to different objects '
                                   'in different namespaces used for resolving. '
                                   'Will use the object from the %s namespace '
                                   'with the value %r') %
                                  (identifier, matches[0][0],
                                   first_obj), matches[1:])
                    
        # use the first match (according to resolution order)
        resolved = matches[0][1]

        # Remove units
        if strip_units and isinstance(resolved, Quantity):
            if resolved.ndim == 0:
                resolved = float(resolved)
            else:
                resolved = np.asarray(resolved)

        # Use standard Python types if possible
        if not isinstance(resolved, np.ndarray) and hasattr(resolved, 'dtype'):
            numpy_type = resolved.dtype
            if np.can_cast(numpy_type, np.int_):
                resolved = int(resolved)
            elif np.can_cast(numpy_type, np.float_):
                resolved = float(resolved)
            elif np.can_cast(numpy_type, np.complex_):
                resolved = complex(resolved)

        return resolved
Example #9
 def can_cast1(args, ty_ins):
     for i in six.moves.range(nin):
         if args[i].const is None:
             if not numpy.can_cast(args[i].ty, ty_ins[i]):
                 return False
         else:
             if not numpy.can_cast(args[i].const, ty_ins[i]):
                 return False
     return True
Example #10
def gentle_asarray(a, dtype):
    """
    Performs an asarray that doesn't cause a copy if the byteorder is
    different.  It also ignores column name differences -- the
    resulting array will have the column names from the given dtype.
    """
    out_dtype = np.dtype(dtype)
    if isinstance(a, np.ndarray):
        in_dtype = a.dtype
        # Non-table array
        if in_dtype.fields is None and out_dtype.fields is None:
            if np.can_cast(in_dtype, out_dtype, 'equiv'):
                return a
            else:
                return np.asanyarray(a, dtype=out_dtype)
        elif in_dtype.fields is not None and out_dtype.fields is not None:
            if in_dtype == out_dtype:
                return a
            in_names = {n.lower() for n in in_dtype.names}
            out_names = {n.lower() for n in out_dtype.names}
            if in_names == out_names:
                # Change the dtype name to match the fits record names
                # as the mismatch causes case insensitive access to fail
                out_dtype.names = in_dtype.names
            else:
                raise ValueError(
                    "Column names don't match schema. "
                    "Schema has {0}. Data has {1}".format(
                        str(out_names.difference(in_names)),
                        str(in_names.difference(out_names))))

            new_dtype = []
            for i in range(len(out_dtype.fields)):
                in_type = in_dtype[i]
                out_type = out_dtype[i]
                if in_type.subdtype is None:
                    type_str = in_type.str
                else:
                    type_str = in_type.subdtype[0].str
                if np.can_cast(in_type, out_type, 'equiv'):
                    new_dtype.append(
                        (out_dtype.names[i],
                         type_str,
                         in_type.shape))
                else:
                    return np.asanyarray(a, dtype=out_dtype)
            return a.view(dtype=np.dtype(new_dtype))
        else:
            return np.asanyarray(a, dtype=out_dtype)
    else:
        try:
            a = np.asarray(a, dtype=out_dtype)
        except Exception:
            raise ValueError("Can't convert {0!s} to ndarray".format(type(a)))
        return a
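
The 'equiv' rule used above is what lets gentle_asarray skip the copy when only the byte order differs; a two-line check, not from the original source:

import numpy as np

print(np.can_cast('<f8', '>f8', 'equiv'))  # True: same type, different byte order
print(np.can_cast('<f4', '<f8', 'equiv'))  # False: different precision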
Example #11
 def analyze(aDataObject, bDataObject,
             epsilonValue=0.0, epsilonPercent=None):
     """
     analyze the differences between the two data sets
     updates the two data objects with additional masks
     and returns data object containing diff data and masks
     """
     shape = aDataObject.data.shape
     assert(bDataObject.data.shape == shape)
     assert(np.can_cast(aDataObject.data.dtype, bDataObject.data.dtype) or
            np.can_cast(bDataObject.data.dtype, aDataObject.data.dtype))
     
     # do some basic analysis on the individual data sets
     aDataObject.self_analysis()
     bDataObject.self_analysis()
     
     # where is the shared valid data?
     valid_in_both  = aDataObject.masks.valid_mask  & bDataObject.masks.valid_mask
     ignore_in_both = aDataObject.masks.ignore_mask | bDataObject.masks.ignore_mask
     
     # get our shared data type and fill value
     sharedType, fill_data_value = DiffInfoObject._get_shared_type_and_fill_value(aDataObject.data,
                                                                                  bDataObject.data,
                                                                                  aDataObject.select_fill_value(),
                                                                                  bDataObject.select_fill_value())
     
     # we can't continue if we don't have a fill value
     assert(fill_data_value is not None)
     
     # construct our diff'ed data set
     raw_diff = np.zeros(shape, dtype=sharedType)
     raw_diff[~valid_in_both] = fill_data_value # throw away invalid data
     # compute difference, using shared type in computation
     raw_diff[valid_in_both] = bDataObject.data[valid_in_both].astype(sharedType) -  \
                               aDataObject.data[valid_in_both].astype(sharedType)
     
     # the valid data which is too different between the two sets according to the given epsilon
     outside_epsilon_mask = np.zeros(shape, dtype=bool)
     if (epsilonValue   is not None) :
         outside_epsilon_mask |= (abs(raw_diff) > epsilonValue) & valid_in_both
     if (epsilonPercent is not None) :
         outside_epsilon_mask |= (abs(raw_diff) > abs(aDataObject.data * (float(epsilonPercent) / 100.0))) & valid_in_both
     
     # mismatch points = mismatched nans, mismatched missing-values, differences that are too large 
     mismatch_pt_mask = ( (aDataObject.masks.non_finite_mask ^ bDataObject.masks.non_finite_mask) |
                          (aDataObject.masks.missing_mask    ^ bDataObject.masks.missing_mask)    |
                          outside_epsilon_mask )
     
     # make our diff data object
     diff_data_object = DataObject(raw_diff, fillValue=fill_data_value)
     diff_data_object.masks = DiffMaskSetObject(ignore_in_both, valid_in_both,
                                                mismatch_pt_mask, outside_epsilon_mask)
     
     return diff_data_object
Example #12
File: util.py Project: sosey/jwst
def gentle_asarray(a, dtype):
    """
    Performs an asarray that doesn't cause a copy if the byteorder is
    different.  It also ignores column name differences -- the
    resulting array will have the column names from the given dtype.
    """
    out_dtype = np.dtype(dtype)
    if isinstance(a, np.ndarray):
        in_dtype = a.dtype
        # Non-table array
        if in_dtype.fields is None and out_dtype.fields is None:
            if np.can_cast(in_dtype, out_dtype, 'equiv'):
                return a
            else:
                return np.asanyarray(a, dtype=out_dtype)
        elif in_dtype.fields is not None and out_dtype.fields is not None:
            if in_dtype == out_dtype:
                return a
            if len(in_dtype) != len(out_dtype):
                raise ValueError(
                    "Wrong number of columns.  Expected {0}, got {1}".format(
                        len(out_dtype), len(in_dtype)))
            new_dtype = []
            # Change the dtype name to match the fits record names
            # as the mismatch causes case insensitive access to fail
            if hasattr(in_dtype, 'names') and hasattr(out_dtype, 'names'):
                out_dtype.names = in_dtype.names
            for i in range(len(out_dtype.fields)):
                in_type = in_dtype[i]
                out_type = out_dtype[i]
                if in_type.subdtype is None:
                    type_str = in_type.str
                else:
                    type_str = in_type.subdtype[0].str
                if np.can_cast(in_type, out_type, 'equiv'):
                    new_dtype.append(
                        (out_dtype.names[i],
                         type_str,
                         in_type.shape))
                else:
                    return np.asanyarray(a, dtype=out_dtype)
            return a.view(dtype=np.dtype(new_dtype))
        else:
            return np.asanyarray(a, dtype=out_dtype)
    else:
        try:
            a = np.asarray(a, dtype=out_dtype)
        except Exception:
            raise ValueError("Can't convert {0!s} to ndarray".format(type(a)))
        return a
Example #13
def read_raster(raster, masked=True, driver=None):
    src = rasterio.open(raster, driver=driver)
    if src.count > 1:
        src.close()
        raise NotImplementedError('Cannot load a multiband layer')
    if src.crs.is_valid:
        proj = parse_projection(src.crs)
    else:
        proj = None
    # rasterio >= 1.0 exposes the geotransform as src.transform (an Affine);
    # fall back to src.affine for compatibility with rasterio 0.36
    if isinstance(src.transform, Affine):
        transform = src.transform
    else:
        transform = src.affine
    if masked:
        _raster = src.read(1, masked=masked)
        rgrid = RectifiedGrid(_raster,
                              proj,
                              transform,
                              mask=_raster.mask)
    else:
        rgrid = RectifiedGrid(src.read(1),
                              proj,
                              transform,
                              mask=np.ma.nomask)
    src.close()
    # check and fix fill_value dtype
    if not np.can_cast(rgrid.fill_value, rgrid.dtype, casting='safe'):
        fill_value = guess_fill_value(rgrid)
        rgrid.set_fill_value(fill_value)
        logger.warning("read_raster: the fill_value has been changed to {}".format(fill_value))

    return rgrid
Example #14
def load_data():
    # Read file content
    training_file_content = pd.read_csv(TRAINING_FILE_PATH)
    testing_file_content = pd.read_csv(TESTING_FILE_PATH)
    combined_file_content = pd.concat([training_file_content, testing_file_content])

    # Manipulate file content
    X = combined_file_content.drop([ID_COLUMN_NAME, LABEL_COLUMN_NAME], axis=1).as_matrix()
    categorical_features_mask_list = []
    for column_vector in X.T:
        valid_elements_mask = np.logical_not(pd.isnull(column_vector))
        if np.can_cast(type(column_vector[valid_elements_mask][0]), float):
            categorical_features_mask_list.append(False)
            min_value = np.min(column_vector[valid_elements_mask])
            column_vector[np.logical_not(valid_elements_mask)] = min_value - 1
        else:
            categorical_features_mask_list.append(True)
            column_vector[np.logical_not(valid_elements_mask)] = "Missing"
            column_vector[:] = perform_categorization(column_vector)
    encoder = OneHotEncoder(categorical_features=categorical_features_mask_list)
    X = encoder.fit_transform(X).toarray()

    # Separate the data set
    Y = combined_file_content[LABEL_COLUMN_NAME].as_matrix()
    ID = combined_file_content[ID_COLUMN_NAME].as_matrix()
    test_data_mask = pd.isnull(Y)
    X_train = X[np.logical_not(test_data_mask)]
    Y_train = Y[np.logical_not(test_data_mask)]
    X_test = X[test_data_mask]
    ID_test = ID[test_data_mask]

    return X_train, Y_train, X_test, ID_test
Example #15
    def __init__(self, element_strategy, shape, dtype, fill, unique):
        self.shape = tuple(shape)
        self.fill = fill
        check_argument(shape,
                       u'Array shape must have at least one dimension, '
                       u'provided shape was {}', shape)
        check_argument(all(isinstance(s, int) for s in shape),
                       u'Array shape must be integer in each dimension, '
                       u'provided shape was {}', shape)
        self.array_size = int(np.prod(shape))
        self.dtype = dtype
        self.element_strategy = element_strategy
        self.unique = unique

        # Used by self.insert_element to check that the value can be stored
        # in the array without e.g. overflowing.  See issue #1385.
        if dtype.kind in (u'i', u'u'):
            self.check_cast = lambda x: np.can_cast(x, self.dtype, 'safe')
        elif dtype.kind == u'f' and dtype.itemsize == 2:
            max_f2 = (2. - 2 ** -10) * 2 ** 15
            self.check_cast = lambda x: \
                (not np.isfinite(x)) or (-max_f2 <= x <= max_f2)
        elif dtype.kind == u'f' and dtype.itemsize == 4:
            max_f4 = (2. - 2 ** -23) * 2 ** 127
            self.check_cast = lambda x: \
                (not np.isfinite(x)) or (-max_f4 <= x <= max_f4)
        else:
            self.check_cast = lambda x: True
Example #16
    def set_data(self, A):
        """
        Set the image array

        ACCEPTS: numpy/PIL Image A
        """
        # check if data is PIL Image without importing Image
        if hasattr(A, 'getpixel'):
            self._A = pil_to_array(A)
        else:
            self._A = cbook.safe_masked_invalid(A)

        if (self._A.dtype != np.uint8 and
                not np.can_cast(self._A.dtype, float)):
            raise TypeError("Image data can not convert to float")

        if (self._A.ndim not in (2, 3) or
                (self._A.ndim == 3 and self._A.shape[-1] not in (3, 4))):
            raise TypeError("Invalid dimensions for image data")

        self._imcache = None
        self._rgbacache = None
        self._oldxslice = None
        self._oldyslice = None
        self.stale = True
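
The acceptance test above (np.float was an alias for the builtin float, i.e. float64) boils down to: uint8 passes outright, anything safely castable to float64 passes, and everything else is rejected. A small sketch, not from matplotlib itself:

import numpy as np

for dt in (np.uint8, np.int16, np.float32, np.complex64):
    ok = dt is np.uint8 or np.can_cast(np.dtype(dt), np.float64)
    print(np.dtype(dt), 'accepted' if ok else 'rejected')  # complex64 is rejected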
Example #17
def copyto(dst, src, casting='same_kind', where=None):
    """Copies values from one array to another with broadcasting.

    This function can be called for arrays on different devices. In this case,
    casting, ``where``, and broadcasting are not supported, and an exception is
    raised if these are used.

    Args:
        dst (cupy.ndarray): Target array.
        src (cupy.ndarray): Source array.
        casting (str): Casting rule. See :func:`numpy.can_cast` for detail.
        where (cupy.ndarray of bool): If specified, this array acts as a mask,
            and an element is copied only if the corresponding element of
            ``where`` is True.

    .. seealso:: :func:`numpy.copyto`

    """
    if not numpy.can_cast(src.dtype, dst.dtype, casting):
        raise TypeError('Cannot cast %s to %s in %s casting mode' %
                        (src.dtype, dst.dtype, casting))
    if dst.size == 0:
        return

    if where is None:
        if _can_memcpy(dst, src):
            dst.data.copy_from(src.data, src.nbytes)
        else:
            elementwise.copy(src, dst)
    else:
        elementwise.copy_where(src, where, dst)
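
The casting argument forms a hierarchy ('no' < 'equiv' < 'safe' < 'same_kind' < 'unsafe'); a quick illustration, independent of cupy:

import numpy as np

print(np.can_cast(np.float64, np.float32, 'safe'))       # False: loses precision
print(np.can_cast(np.float64, np.float32, 'same_kind'))  # True: still a float
print(np.can_cast(np.float64, np.int64, 'same_kind'))    # False: changes kind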
Example #18
	def Op(self,opstr,indx,J,dtype,*args):

		row = _np.array(self._basis,dtype=self._dtype)
		col = _np.array(self._basis,dtype=self._dtype)
		ME = _np.ones((self._Ns,),dtype=dtype)
		if len(opstr) != len(indx):
			raise ValueError('length of opstr does not match length of indx')
		if not _np.can_cast(J,_np.dtype(dtype)):
			raise TypeError("can't cast J to proper dtype")


		for o in opstr[::-1]:
			if o == "I":
				continue
			elif o == "n":
				ME *= dtype(_np.abs(col))
			elif o == "+":
				col += 1
				ME *= _np.sqrt(dtype(_np.abs(col)))
			elif o == "-":
				ME *= _np.sqrt(dtype(_np.abs(col)))
				col -= 1
			else:
				raise Exception("operator symbol {0} not recognized".format(o))

		mask = ( col < 0)
		mask += (col > (self._Ns))
		ME[mask] *= 0.0
		
		if J != 1.0: 
			ME *= J

		return ME,row,col		
Example #19
    def _pad_and_fix_dtypes(self, cols, column_dtypes):
        # Pad out Nones with empty arrays of appropriate dtypes
        rtn = {}
        index = cols[INDEX]
        full_length = len(index)
        for k, v in iteritems(cols):
            if k != INDEX and k != 'SYMBOL':
                col_len = len(v)
                if col_len < full_length:
                    v = ([None, ] * (full_length - col_len)) + v
                    assert len(v) == full_length
                for i, arr in enumerate(v):
                    if arr is None:
                        #  Replace Nones with appropriate-length empty arrays
                        v[i] = self._empty(len(index[i]), column_dtypes.get(k))
                    else:
                        # Promote to the appropriate dtype only if we can safely
                        # cast all the values. This avoids the case with strings,
                        # where None would be cast to the string 'None'. Casting
                        # the object column to string is not worthwhile anyway,
                        # as pandas changes the dtype back to object.
                        if (i == 0 or v[i].dtype != v[i - 1].dtype) and np.can_cast(v[i].dtype, column_dtypes[k],
                                                                                    casting='safe'):
                            v[i] = v[i].astype(column_dtypes[k], casting='safe')

            rtn[k] = v
        return rtn
Example #20
    def _require_dataset_nosync(self, name, shape, dtype=None, exact=False,
                                **kwargs):

        path = self._item_path(name)

        if contains_array(self._store, path):

            # array already exists at path, validate that it is the right shape and type

            synchronizer = kwargs.get('synchronizer', self._synchronizer)
            cache_metadata = kwargs.get('cache_metadata', True)
            cache_attrs = kwargs.get('cache_attrs', self.attrs.cache)
            a = Array(self._store, path=path, read_only=self._read_only,
                      chunk_store=self._chunk_store, synchronizer=synchronizer,
                      cache_metadata=cache_metadata, cache_attrs=cache_attrs)
            shape = normalize_shape(shape)
            if shape != a.shape:
                raise TypeError('shapes do not match existing array; expected {}, got {}'
                                .format(a.shape, shape))
            dtype = np.dtype(dtype)
            if exact:
                if dtype != a.dtype:
                    raise TypeError('dtypes do not match exactly; expected {}, got {}'
                                    .format(a.dtype, dtype))
            else:
                if not np.can_cast(dtype, a.dtype):
                    raise TypeError('dtypes ({}, {}) cannot be safely cast'
                                    .format(dtype, a.dtype))
            return a

        else:
            return self._create_dataset_nosync(name, shape=shape, dtype=dtype,
                                               **kwargs)
Example #21
    def test_simply_typed_space_validate(space, batch_dtype, is_numeric):
        """
        Creates a batch of batch_dtype, and sees if space validates it.
        """
        assert isinstance(space, SimplyTypedSpace), \
            "%s is not a SimplyTypedSpace" % type(space)

        batch_sizes = (1, 3)

        if not is_numeric and isinstance(space, VectorSpace) and space.sparse:
            batch_sizes = (None, )

        for batch_size in batch_sizes:
            if is_numeric:
                batch = space.get_origin_batch(dtype=batch_dtype,
                                               batch_size=batch_size)
            else:
                batch = space.make_theano_batch(dtype=batch_dtype,
                                                batch_size=batch_size,
                                                name="test batch to validate")

            # Expect an error if space.dtype is not None and batch can't cast
            # to it.
            if space.dtype is not None and \
               not np.can_cast(batch.dtype, space.dtype):
                np.testing.assert_raises(TypeError,
                                         space._validate,
                                         (is_numeric, batch))
            else:
                # Otherwise, don't expect an error.
                space._validate(is_numeric, batch)
Example #22
def test_imshow_float128():
    fig, ax = plt.subplots()
    ax.imshow(np.zeros((3, 3), dtype=np.longdouble))
    with (ExitStack() if np.can_cast(np.longdouble, np.float64, "equiv")
          else pytest.warns(UserWarning)):
        # Ensure that drawing doesn't cause crash.
        fig.canvas.draw()
Example #23
def imageArrayToStruct(imgArray, sparkMode=None):
    """
    Create a row representation of an image from an image array and (optional) imageType.

    Example usage:

        to_image_udf = udf(arrayToImageRow, imageSchema)
        df.withColumn("output_img", to_image_udf(df["np_arr_col"]))

    :param imgArray: ndarray, image data.
    :param sparkMode: spark mode, type information for the image; inferred from
        the array if the mode is not provided. See SparkMode for valid modes.
    :return: Row, image as a DataFrame Row.
    """
    # Sometimes tensors have a leading "batch-size" dimension. Assume to be 1 if it exists.
    if len(imgArray.shape) == 4:
        if imgArray.shape[0] != 1:
            raise ValueError("The first dimension of a 4-d image array is expected to be 1.")
        imgArray = imgArray.reshape(imgArray.shape[1:])

    if sparkMode is None:
        sparkMode = _arrayToSparkMode(imgArray)
    imageType = sparkModeLookup[sparkMode]

    height, width, nChannels = imgArray.shape
    if imageType.nChannels != nChannels:
        msg = "Image of type {} should have {} channels, but array has {} channels."
        raise ValueError(msg.format(sparkMode, imageType.nChannels, nChannels))

    # Convert the array to match the image type.
    if not np.can_cast(imgArray, imageType.dtype, 'same_kind'):
        msg = "Array of type {} cannot safely be cast to image type {}."
        raise ValueError(msg.format(imgArray.dtype, imageType.dtype))
    imgArray = np.array(imgArray, dtype=imageType.dtype, copy=False)

    data = bytearray(imgArray.tobytes())
    return Row(mode=sparkMode, height=height, width=width, nChannels=nChannels, data=data)
Example #24
def map_result_construction(signal,
                            inplace,
                            result,
                            ragged,
                            sig_shape=None,
                            lazy=False):
    from hyperspy.signals import (BaseSignal, LazySignal)
    res = None
    if inplace:
        sig = signal
    else:
        res = sig = signal._deepcopy_with_new_data()
    if ragged:
        sig.data = result
        sig.axes_manager.remove(sig.axes_manager.signal_axes)
        sig.__class__ = LazySignal if lazy else BaseSignal
        sig.__init__(**sig._to_dictionary(add_models=True))

    else:
        if not sig._lazy and sig.data.shape == result.shape and np.can_cast(
                result.dtype, sig.data.dtype):
            sig.data[:] = result
        else:
            sig.data = result

        # remove if too many axes
        sig.axes_manager.remove(sig.axes_manager.signal_axes[len(sig_shape):])
        # add additional required axes
        for ind in range(
                len(sig_shape) - sig.axes_manager.signal_dimension, 0, -1):
            sig.axes_manager._append_axis(sig_shape[-ind], navigate=False)
    sig.get_dimensions_from_data()
    return res
Example #25
def upcast(*args):
    """Returns the nearest supported sparse dtype for the
    combination of one or more types.

    upcast(t0, t1, ..., tn) -> T  where T is a supported dtype

    Examples
    --------

    >>> upcast('int32')
    <type 'numpy.int32'>
    >>> upcast('bool')
    <type 'numpy.int8'>
    >>> upcast('int32','float32')
    <type 'numpy.float64'>
    >>> upcast('bool',complex,float)
    <type 'numpy.complex128'>

    """
    sample = np.array([0],dtype=args[0])
    for t in args[1:]:
        sample = sample + np.array([0],dtype=t)

    upcast = sample.dtype

    for t in supported_dtypes:
        if np.can_cast(sample.dtype,t):
            return t

    raise TypeError('no supported conversion for types: %s' % args)
Example #26
File: sputils.py Project: 87/scipy
def upcast(*args):
    """Returns the nearest supported sparse dtype for the
    combination of one or more types.

    upcast(t0, t1, ..., tn) -> T  where T is a supported dtype

    Examples
    --------

    >>> upcast('int32')
    <type 'numpy.int32'>
    >>> upcast('bool')
    <type 'numpy.int8'>
    >>> upcast('int32','float32')
    <type 'numpy.float64'>
    >>> upcast('bool',complex,float)
    <type 'numpy.complex128'>

    """

    t = _upcast_memo.get(hash(args))
    if t is not None:
        return t

    upcast = np.find_common_type(args, [])

    for t in supported_dtypes:
        if np.can_cast(upcast, t):
            _upcast_memo[hash(args)] = t
            return t

    raise TypeError('no supported conversion for types: %s' % args)
Example #27
    def _require_dataset_nosync(self, name, shape, dtype=None, exact=False,
                                **kwargs):

        path = self._item_path(name)

        if contains_array(self._store, path):
            synchronizer = kwargs.get('synchronizer', self._synchronizer)
            cache_metadata = kwargs.get('cache_metadata', True)
            a = Array(self._store, path=path, read_only=self._read_only,
                      chunk_store=self._chunk_store,
                      synchronizer=synchronizer, cache_metadata=cache_metadata)
            shape = normalize_shape(shape)
            if shape != a.shape:
                raise TypeError('shapes do not match')
            dtype = np.dtype(dtype)
            if exact:
                if dtype != a.dtype:
                    raise TypeError('dtypes do not match exactly')
            else:
                if not np.can_cast(dtype, a.dtype):
                    raise TypeError('dtypes cannot be safely cast')
            return a

        else:
            return self._create_dataset_nosync(name, shape=shape, dtype=dtype,
                                               **kwargs)
Example #28
    def require_dataset(self, name, shape, dtype, exact=False, **kwds):
        """ Open a dataset, creating it if it doesn't exist.

        If keyword "exact" is False (default), an existing dataset must have
        the same shape and a conversion-compatible dtype to be returned.  If
        True, the shape and dtype must match exactly.

        Other dataset keywords (see create_dataset) may be provided, but are
        only used if a new dataset is to be created.

        Raises TypeError if an incompatible object already exists, or if the
        shape or dtype don't match according to the above rules.
        """

        if name not in self:
            return self.create_dataset(name, *(shape, dtype), **kwds)

        dset = self[name]
        if not isinstance(dset, dataset.Dataset):
            raise TypeError("Incompatible object (%s) already exists" % dset.__class__.__name__)

        if not shape == dset.shape:
            raise TypeError("Shapes do not match (existing %s vs new %s)" % (dset.shape, shape))

        if exact:
            if not dtype == dset.dtype:
                raise TypeError("Datatypes do not exactly match (existing %s vs new %s)" % (dset.dtype, dtype))
        elif not numpy.can_cast(dtype, dset.dtype):
            raise TypeError("Datatypes cannot be safely cast (existing %s vs new %s)" % (dset.dtype, dtype))

        return dset
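
The "conversion-compatible" test above is a plain safe cast from the requested dtype to the existing one; for example (made-up dtypes, not from h5py):

import numpy as np

existing = np.dtype('float64')
print(np.can_cast(np.dtype('int32'), existing))      # True: request is accepted
print(np.can_cast(np.dtype('complex64'), existing))  # False: TypeError above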
Example #29
def _get_c_data_type(value):
    """Return the C data type code corresponding to the specified variable data
    value.

    """
    if isinstance(value, (numpy.ndarray, numpy.generic)):
        # For NumPy ndarrays and scalars, determine the smallest HARP C data type that can safely contain elements of
        # the ndarray or scalar dtype.
        if numpy.issubdtype(value.dtype, numpy.object_):
            # NumPy object arrays are only used to contain variable length strings or byte strings.
            if _all(lambda element: isinstance(element, str), value.flat):
                return _lib.harp_type_string
            elif _all(lambda element: isinstance(element, bytes), value.flat):
                return _lib.harp_type_string
            else:
                raise UnsupportedTypeError("elements of a NumPy object array must be all str or all bytes")
        elif numpy.issubdtype(value.dtype, numpy.str_) or numpy.issubdtype(value.dtype, numpy.bytes_):
            return _lib.harp_type_string
        elif numpy.can_cast(value.dtype, numpy.int8):
            return _lib.harp_type_int8
        elif numpy.can_cast(value.dtype, numpy.int16):
            return _lib.harp_type_int16
        elif numpy.can_cast(value.dtype, numpy.int32):
            return _lib.harp_type_int32
        elif numpy.can_cast(value.dtype, numpy.float32):
            return _lib.harp_type_float
        elif numpy.can_cast(value.dtype, numpy.float64):
            return _lib.harp_type_double
        else:
            raise UnsupportedTypeError("unsupported NumPy dtype '%s'" % value.dtype)
    elif numpy.isscalar(value):
        if isinstance(value, (str, bytes)):
            return _lib.harp_type_string
        elif numpy.can_cast(value, numpy.int8):
            return _lib.harp_type_int8
        elif numpy.can_cast(value, numpy.int16):
            return _lib.harp_type_int16
        elif numpy.can_cast(value, numpy.int32):
            return _lib.harp_type_int32
        elif numpy.can_cast(value, numpy.float32):
            return _lib.harp_type_float
        elif numpy.can_cast(value, numpy.float64):
            return _lib.harp_type_double
        else:
            raise UnsupportedTypeError("unsupported type %r" % value.__class__.__name__)
    else:
        raise UnsupportedTypeError("unsupported type %r" % value.__class__.__name__)
Example #30
def _guess_routine_from_in_types(ops, in_types):
    for op in ops:
        for dst, src in six_zip(op[0], in_types):
            if not numpy.can_cast(src, dst):
                break
        else:
            return op
    return None
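
A self-contained sketch of the dispatch above, with a toy op table (the real op entries come from cupy's ufunc machinery): the first op whose input types accept all arguments wins.

import numpy as np

ops = [((np.int32, np.int32), 'int_op'),
       ((np.float64, np.float64), 'float_op')]

def guess(in_types):
    for in_dtypes, name in ops:
        if all(np.can_cast(src, dst) for src, dst in zip(in_types, in_dtypes)):
            return name
    return None

print(guess((np.int16, np.int16)))      # int_op
print(guess((np.float32, np.int16)))    # float_op
print(guess((np.complex64, np.int16)))  # None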
Example #31
def test_state(env, num_cycles):
    env.reset()
    state_0 = env.state()
    for agent in env.agent_iter(env.num_agents * num_cycles):
        observation, reward, done, info = env.last(observe=False)
        if done:
            action = None
        else:
            action = env.action_spaces[agent].sample()

        env.step(action)
        new_state = env.state()
        assert env.state_space.contains(new_state), "Environment's state is outside of its state space"
        if isinstance(new_state, np.ndarray):
            if np.isinf(new_state).any():
                warnings.warn("State contains infinity (np.inf) or negative infinity (-np.inf)")
            if np.isnan(new_state).any():
                warnings.warn("State contains NaNs")
            if len(new_state.shape) > 3:
                warnings.warn("State has more than 3 dimensions")
            if new_state.shape == (0,):
                assert False, "State can not be an empty array"
            if new_state.shape == (1,):
                warnings.warn("State is a single number")
            if not isinstance(new_state, state_0.__class__):
                warnings.warn("State between Observations are different classes")
            if (new_state.shape != state_0.shape) and (len(new_state.shape) == len(state_0.shape)):
                warnings.warn("States are different shapes")
            if len(new_state.shape) != len(state_0.shape):
                warnings.warn("States have different number of dimensions")
            if not np.can_cast(new_state.dtype, np.dtype("float64")):
                warnings.warn("State numpy array is not a numeric dtype")
            if np.array_equal(new_state, np.zeros(new_state.shape)):
                warnings.warn("State numpy array is all zeros.")
            if not np.all(new_state >= 0) and ((len(new_state.shape) == 2) or (len(new_state.shape) == 3 and new_state.shape[2] == 1) or (len(new_state.shape) == 3 and new_state.shape[2] == 3)):
                warnings.warn("The state contains negative numbers and is in the shape of a graphical observation. This might be a bad thing.")
        else:
            warnings.warn("State is not NumPy array")
Example #32
    def __init__(self, element_strategy, shape, dtype, fill, unique):
        self.shape = tuple(shape)
        self.fill = fill
        assert shape, "Zero-dimensional array shape is special-cased in arrays()"
        check_argument(
            all(isinstance(s, integer_types) for s in shape),
            "Array shape must be integer in each dimension, provided shape was {}",
            shape,
        )
        self.array_size = int(np.prod(shape))
        self.dtype = dtype
        self.element_strategy = element_strategy
        self.unique = unique

        # Used by self.insert_element to check that the value can be stored
        # in the array without e.g. overflowing.  See issues #1385 and #1591.
        if dtype.kind in (u"i", u"u"):
            self.check_cast = lambda x: np.can_cast(x, self.dtype, "safe")
        elif dtype.kind == u"f" and dtype.itemsize == 2:
            max_f2 = (2.0 - 2 ** -10) * 2 ** 15
            self.check_cast = lambda x: (not np.isfinite(x)) or (-max_f2 <= x <= max_f2)
        elif dtype.kind == u"f" and dtype.itemsize == 4:
            max_f4 = (2.0 - 2 ** -23) * 2 ** 127
            self.check_cast = lambda x: (not np.isfinite(x)) or (-max_f4 <= x <= max_f4)
        elif dtype.kind == u"c" and dtype.itemsize == 8:
            max_f4 = (2.0 - 2 ** -23) * 2 ** 127
            self.check_cast = lambda x: (not np.isfinite(x)) or (
                -max_f4 <= x.real <= max_f4 and -max_f4 <= x.imag <= max_f4
            )
        elif dtype.kind == u"U":
            length = dtype.itemsize // 4
            self.check_cast = lambda x: len(x) <= length and u"\0" not in x[length:]
        elif dtype.kind in (u"S", u"a"):
            self.check_cast = (
                lambda x: len(x) <= dtype.itemsize and b"\0" not in x[dtype.itemsize :]
            )
        else:
            self.check_cast = lambda x: True
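
The magic constants above are the largest finite float16 and float32 values; a quick verification, not part of hypothesis itself:

import numpy as np

print((2.0 - 2 ** -10) * 2 ** 15 == float(np.finfo(np.float16).max))   # True
print((2.0 - 2 ** -23) * 2 ** 127 == float(np.finfo(np.float32).max))  # True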
Example #33
        def h(*tensors, **kw):
            usr_dtype = np.dtype(kw.pop('dtype')) if 'dtype' in kw else None
            args = [
                make_arg(t)
                if hasattr(t, 'ndim') and hasattr(t, 'dtype') else t
                for t in tensors
            ]
            if reverse:
                args = args[::-1]
            np_kw = dict((k, make_arg(v)
                          if hasattr(v, 'ndim') and hasattr(v, 'dtype') else v)
                         for k, v in six.iteritems(kw) if k != 'out')
            try:
                with np.errstate(all='ignore'):
                    dtype = np_func(*args, **np_kw).dtype
            except:  # noqa: E722
                dtype = None

            if usr_dtype and dtype:
                if check and not np.can_cast(dtype, usr_dtype):
                    raise TypeError(
                        'No loop matching the specified signature '
                        'and casting was found for ufunc %s' % np_func)
                kw['dtype'] = usr_dtype
            else:
                kw['dtype'] = dtype

            ret = func(*tensors, **kw)
            if ret is NotImplemented:
                reverse_func = getattr(inspect.getmodule(func), 'r{0}'.format(func.__name__), None) \
                    if not reverse else None
                if reverse_func is not None:
                    ret = reverse_func(*tensors[::-1], **kw)
                if ret is NotImplemented:
                    raise TypeError(
                        "unsupported operand type(s) for {0}: '{1}' and '{2}".
                        format(func.__name__, *[type(t) for t in tensors]))
            return ret
Example #34
    def require_dataset(self, name, shape, dtype, exact=False, **kwds):
        """ Open a dataset, creating it if it doesn't exist.

        If keyword "exact" is False (default), an existing dataset must have
        the same shape and a conversion-compatible dtype to be returned.  If
        True, the shape and dtype must match exactly.

        Other dataset keywords (see create_dataset) may be provided, but are
        only used if a new dataset is to be created.

        Raises TypeError if an incompatible object already exists, or if the
        shape or dtype don't match according to the above rules.
        """

        with phil:
            if name not in self:
                return self.create_dataset(name, *(shape, dtype), **kwds)

            dset = self[name]
            if not isinstance(dset, Dataset):
                raise TypeError("Incompatible object (%s) already exists" %
                                dset.__class__.__name__)

            if not shape == dset.shape:
                raise TypeError("Shapes do not match (existing %s vs new %s)" %
                                (dset.shape, shape))

            if exact:
                if not dtype == dset.dtype:
                    raise TypeError(
                        "Datatypes do not exactly match (existing %s vs new %s)"
                        % (dset.dtype, dtype))
            elif not numpy.can_cast(dtype, dset.dtype):
                raise TypeError(
                    "Datatypes cannot be safely cast (existing %s vs new %s)" %
                    (dset.dtype, dtype))

            return dset
Example #35
    def set_data(self, A):
        """
        Set the image array

        ACCEPTS: numpy/PIL Image A
        """
        self._full_res = A
        self._A = A

        if self._A.dtype != np.uint8 and not np.can_cast(self._A.dtype,
                                                         float):
            raise TypeError("Image data can not convert to float")

        if (self._A.ndim not in (2, 3) or
            (self._A.ndim == 3 and self._A.shape[-1] not in (3, 4))
                ):
            raise TypeError("Invalid dimensions for image data")

        self._imcache = None
        self._rgbacache = None
        self._oldxslice = None
        self._oldyslice = None
        self._sx, self._sy = None, None
Example #36
 def test_reader(self):
     """Check codec readers."""
     for obj, m in self.gen_messages():
         mlen = len(m.packets)
         msg = Message()
         i = 0
         for packet in msg.reader():
             em = str(i) + ': ' + repr(obj)
             self.assertLess(i, mlen, 'reader stopped late on ' + em)
             self.assertEqual(packet.dtype.itemsize,
                              m.packets[i].dtype.itemsize,
                              'reader wrong size on ' + em)
             # np.copyto(packet, m.packets[i], casting='safe')
             # following two lines work back to numpy 1.5:
             self.assertTrue(
                 np.can_cast(m.packets[i].dtype,
                             packet.dtype,
                             casting='safe'), 'reader wrong type on ' + em)
             packet[...] = m.packets[i]
             i += 1
         self.assertEqual(
             i, mlen,
             'reader stopped early on ' + str(i) + ': ' + repr(obj))
Example #37
 def __iadd__(self, other):
     if is_scalar(other):
         if not _isclose(other, 0.0):
             self._constant += other
         return self
     if not isinstance(other, FermionOperator2nd):
         raise NotImplementedError
     if not self.hilbert == other.hilbert:
         raise ValueError(
             f"Can only add identical hilbert spaces (got A+B, A={self.hilbert}, "
             "B={other.hilbert})")
     if not np.can_cast(_dtype(other), self.dtype, casting="same_kind"):
         raise ValueError(
             f"Cannot add inplace operator with dtype {type(other)} "
             f"to operator with dtype {self.dtype}")
     for t, w in other._operators.items():
         if t in self._operators.keys():
             self._operators[t] += w
         else:
             self._operators[t] = w
     self._constant += other._constant
     self._reset_caches()
     return self
Example #38
    def set_data(self, A):
        """
        Set the image array

        ACCEPTS: numpy/PIL Image A
        """
        # check if data is PIL Image without importing Image
        if hasattr(A,'getpixel'):
            self._A = pil_to_array(A)
        else:
            self._A = cbook.safe_masked_invalid(A)

        if self._A.dtype != np.uint8 and not np.can_cast(self._A.dtype, float):
            raise TypeError("Image data can not convert to float")

        if (self._A.ndim not in (2, 3) or
            (self._A.ndim == 3 and self._A.shape[-1] not in (3, 4))):
            raise TypeError("Invalid dimensions for image data")

        self._imcache = None
        self._rgbacache = None
        self._oldxslice = None
        self._oldyslice = None
Example #39
def _arg_wlen_as_expected(value):
    """Ensure argument `wlen` is of type `np.intp` and larger than 1.

    Used in `peak_prominences` and `peak_widths`.

    Returns
    -------
    value : np.intp
        The original `value` rounded up to an integer or -1 if `value` was
        None.
    """
    if value is None:
        # _peak_prominences expects an intp; -1 signals that no value was
        # supplied by the user
        value = -1
    elif 1 < value:
        # Round up to a positive integer
        if not np.can_cast(value, np.intp, "safe"):
            value = math.ceil(value)
        value = np.intp(value)
    else:
        raise ValueError("`wlen` must be larger than 1, was {}".format(value))
    return value
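
Hypothetical calls, assuming _arg_wlen_as_expected from above is in scope (the Python-scalar np.can_cast check relies on pre-NumPy-2.0 value-based semantics):

print(_arg_wlen_as_expected(None))  # -1: no value supplied
print(_arg_wlen_as_expected(3))     # 3, as np.intp
print(_arg_wlen_as_expected(3.5))   # 4: non-integral values are rounded up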
Example #40
    def set_dtype(self, value, check: bool = True):
        """Change data type of the bin contents.

        Allowed conversions:
        - from integral to float types
        - between the same category of type (float/integer)
        - from float types to integer if weights are trivial

        Parameters
        ----------
        value: np.dtype or something convertible to it.
        check: bool
            If True (default), all values are checked against the limits
        """
        # TODO? Deal with unsigned types
        value, type_info = self._eval_dtype(value)
        if value == self._dtype:
            return

        if self.dtype is None or np.can_cast(self.dtype, value):
            pass  # Ok
        elif check:
            if np.issubdtype(value, np.integer):
                if self.dtype.kind == "f":
                    for array in (self._frequencies, self._errors2):
                        if np.any(array % 1.0):
                            raise RuntimeError("Data contain " "non-integer values.")
            for array in (self._frequencies, self._errors2):
                if np.any((array > type_info.max) | (array < type_info.min)):
                    raise RuntimeError(
                        "Data contain values " "outside the specified range."
                    )

        self._dtype = value
        self._frequencies = self._frequencies.astype(value)
        self._errors2 = self._errors2.astype(value)
        self._missed = self._missed.astype(value)
Example #41
def upcast(*args):
    """Returns the nearest supported sparse dtype for the
    combination of one or more types.

    upcast(t0, t1, ..., tn) -> T  where T is a supported dtype

    Examples
    --------

    >>> upcast('int32')
    <type 'numpy.int32'>
    >>> upcast('bool')
    <type 'numpy.bool_'>
    >>> upcast('int32','float32')
    <type 'numpy.float64'>
    >>> upcast('bool',complex,float)
    <type 'numpy.complex128'>

    """

    t = _upcast_memo.get(hash(args))
    if t is not None:
        return t

    if np.all([np.issubdtype(arg, np.bool_) for arg in args]):
        # numpy 1.5.x compat - it gives int8 for
        # np.find_common_type([np.bool_, np.bool_])
        upcast = np.bool_
    else:
        upcast = np.find_common_type(args, [])

    for t in supported_dtypes:
        if np.can_cast(upcast, t):
            _upcast_memo[hash(args)] = t
            return t

    raise TypeError('no supported conversion for types: %r' % (args, ))
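
A sketch of the selection loop above, with np.result_type standing in for the deprecated np.find_common_type: walk the supported dtypes and return the first one the common type can safely cast to.

import numpy as np

supported = [np.int8, np.int32, np.float64, np.complex128]
common = np.result_type(np.bool_, np.int16)  # int16
print(next(t for t in supported if np.can_cast(common, t)))  # numpy.int32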
Example #42
    def astype(
        self,
        dtype: DTypeLike,
        order: str = "K",
        casting: str = "unsafe",
        subok: bool = True,
        copy: bool = True,
    ) -> "LinearOperator":
        """Cast a linear operator to a different ``dtype``.

        Parameters
        ----------
        dtype:
            Data type to which the linear operator is cast.
        order:
            Memory layout order of the result.
        casting:
            Controls what kind of data casting may occur.
        subok:
            If True, then sub-classes will be passed-through (default).
            False is currently not supported for linear operators.
        copy:
            Whether to return a new linear operator, even if ``dtype`` is the same.
        """
        dtype = np.dtype(dtype)

        if not np.can_cast(self.dtype, dtype, casting=casting):
            raise TypeError(
                f"Cannot cast linear operator from {self.dtype} to {dtype} "
                f"according to the rule {casting}")

        if not subok:
            raise NotImplementedError(
                "Setting `subok` to `False` is not supported for linear operators"
            )

        return self._astype(dtype, order, casting, copy)
Example #43
def map_result_construction(signal,
                            inplace,
                            result,
                            ragged,
                            sig_shape=None,
                            lazy=False):
    from hyperspy.signals import BaseSignal
    from hyperspy._lazy_signals import LazySignal
    res = None
    if inplace:
        sig = signal
    else:
        res = sig = signal._deepcopy_with_new_data()

    if ragged:
        sig.data = result
        sig.axes_manager.remove(sig.axes_manager.signal_axes)
        sig.__class__ = LazySignal if lazy else BaseSignal
        sig.__init__(**sig._to_dictionary(add_models=True))
    else:
        if not sig._lazy and sig.data.shape == result.shape and np.can_cast(
                result.dtype, sig.data.dtype):
            sig.data[:] = result
        else:
            sig.data = result

        # remove if too many axes
        sig.axes_manager.remove(sig.axes_manager.signal_axes[len(sig_shape):])
        # add additional required axes
        for ind in range(
                len(sig_shape) - sig.axes_manager.signal_dimension, 0, -1):
            sig.axes_manager._append_axis(sig_shape[-ind], navigate=False)
    if not ragged:
        sig.get_dimensions_from_data()
    if not sig.axes_manager._axes:
        add_scalar_axis(sig, lazy=lazy)
    return res
Example #44
0
	def Op(self,opstr,indx,J,dtype,*args):

		row = _np.array(self._basis,dtype=self._dtype)
		col = _np.array(self._basis,dtype=self._dtype)
		ME = _np.ones((self._Ns,),dtype=dtype)


		if len(opstr) != len(indx):
			raise ValueError('length of opstr does not match length of indx')
		if not _np.can_cast(J,_np.dtype(dtype)):
			raise TypeError("can't cast J to proper dtype")

		for o in opstr[::-1]:
			if o == "I":
				continue
			elif o == "n":
				ME *= dtype(_np.abs(row))
			elif o == "+":
				row += 1
				ME *= _np.sqrt(dtype(_np.abs(row)))
			elif o == "-":
				ME *= _np.sqrt(dtype(_np.abs(row)))
				row -= 1
			else:
				raise Exception("operator symbol {0} not recognized".format(o))

		mask = (row < 0) | (row >= self._Ns)
		row[mask] = col[mask]
		ME[mask] = 0.0

		if J != 1.0:
			ME *= J

		return ME, row, col
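The `J` check above relies on value-based can_cast, which NumPy 2.0 no longer supports for Python scalars; a portable variant (check_coupling is a hypothetical sketch) tests the minimal scalar dtype instead:

import numpy as np

def check_coupling(J, dtype):
    # NumPy 2.0 removed value-based can_cast for Python scalars, so test the
    # minimal dtype that can hold J rather than J itself.
    if not np.can_cast(np.min_scalar_type(J), np.dtype(dtype)):
        raise TypeError("can't cast J to proper dtype")

check_coupling(0.5, np.float64)       # passes
# check_coupling(1 + 2j, np.float64)  # would raise TypeError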
Example #45
0
    def __call__(self, *inputs):
        from ..core import Tensor

        src, dst, where = self._extract_inputs(inputs)

        if not isinstance(dst, Tensor):
            raise TypeError('dst has to be a Tensor')

        self._dtype = dst.dtype
        self._gpu = dst.op.gpu
        self._sparse = dst.issparse()

        if not np.can_cast(src.dtype, dst.dtype, casting=self.casting):
            raise TypeError(
                f'Cannot cast array from {src.dtype!r} to {dst.dtype!r} '
                f'according to the rule {self.casting!s}')

        try:
            broadcast_to(src, dst.shape)
        except ValueError:
            raise ValueError(
                'could not broadcast input array '
                f'from shape {src.shape!r} into shape {dst.shape!r}')
        if where is not None:
            try:
                broadcast_to(where, dst.shape)
            except ValueError:
                raise ValueError(
                    'could not broadcast where mask '
                    f'from shape {where.shape!r} into shape {dst.shape!r}')

        inps = [src, dst]
        if where is not None:
            inps.append(where)
        ret = self.new_tensor(inps, dst.shape, order=dst.order)
        dst.data = ret.data
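The same two-step validation can be reproduced with plain NumPy; validate_copy below is a hypothetical sketch, not the Tensor implementation:

import numpy as np

def validate_copy(src, dst, casting='same_kind'):
    # Same order as above: check the casting rule first, then the shape.
    if not np.can_cast(src.dtype, dst.dtype, casting=casting):
        raise TypeError(f'Cannot cast array from {src.dtype!r} to {dst.dtype!r} '
                        f'according to the rule {casting!s}')
    np.broadcast_to(src, dst.shape)  # raises ValueError if shapes are incompatible

validate_copy(np.zeros(3, np.float32), np.empty((2, 3), np.float64))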
Example #46
0
 def res(*args, **kwargs):
     mem = get_mem(args)
     var_list = [_normalize_arg(_, mem) for _ in args]
     if 'out' in kwargs:
         var_list.append(_normalize_arg(kwargs.pop('out'), mem))
     if kwargs:
         raise TypeError('Wrong arguments %s' % kwargs)
     assert nin <= len(var_list) and len(var_list) <= nin + nout
     in_vars = var_list[:nin]
     out_vars = var_list[nin:]
     can_cast = can_cast1 if _should_use_min_scalar(in_vars) else can_cast2
     for ty_ins, ty_outs, op in ufunc._ops:
         ty_ins = [numpy.dtype(_) for _ in ty_ins]
         ty_outs = [numpy.dtype(_) for _ in ty_outs]
         if can_cast(in_vars, ty_ins):
             param_names = (['in%d' % i for i in six.moves.range(nin)] +
                            ['out%d' % i for i in six.moves.range(nout)])
             ret = []
             for i in six.moves.range(nout):
                 if i >= len(out_vars):
                     v = mem.get_fresh(ty_outs[i])
                     out_vars.append(v)
                     ret.append(_FusionRef(v, mem))
                 elif numpy.can_cast(ty_outs[i], out_vars[i].ty,
                                     "same_kind"):
                     v = out_vars[i]
                     ret.append(_FusionRef(v, mem))
                 else:
                     raise TypeError("Cannot cast from %s to %s" %
                                     (ty_outs[i], out_vars[i].ty) +
                                     " with casting rule 'same_kind'")
             mem.set_op(ufunc.name, op, param_names, nin, nout, in_vars,
                        out_vars, ty_ins + ty_outs)
             return ret[0] if len(ret) == 1 else tuple(ret)
     raise TypeError('Invalid type cast in \'{}\': {} -> {}'.format(
         ufunc.name, [_.ty for _ in in_vars], [_.ty for _ in out_vars]))
Example #47
0
    def transform(self, data):
        """
        Subtract dark spectrum (overwrite original data).

        Parameters
        ----------
        data : ndarray
            Data from which dark is subtracted.

        Returns
        -------
        bool
            Returns the success state (True=success)

        """

        if not _np.can_cast(self.dark.dtype, data.dtype):
            err_str1 = 'Cannot transform input data type {}'.format(data.dtype)
            err_str2 = ' with dark type {}'.format(self.dark.dtype)
            raise TypeError(err_str1 + err_str2)

        success = self._calc(data, ret_obj=data)
        return success
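A standalone sketch of the same guard (subtract_dark_inplace is hypothetical): without the can_cast check, an in-place subtract would raise, or lose precision, whenever the dark frame has a dtype that cannot be safely cast to the data's dtype:

import numpy as np

def subtract_dark_inplace(data, dark):
    # Guard first: the in-place ufunc below writes into data's dtype.
    if not np.can_cast(dark.dtype, data.dtype):
        raise TypeError('Cannot transform input data type {} '
                        'with dark type {}'.format(data.dtype, dark.dtype))
    data -= dark
    return True

img = np.ones((4, 4), dtype=np.float64)
subtract_dark_inplace(img, np.full(4, 0.1, dtype=np.float32))  # broadcasts per row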
Example #48
0
def _binary_check_case4(dt, in_args, name, casting, valid, check_cast,
                        check_out, out):
    if dt.kind == 'u':
        if in_args[0].dtype.kind == 'b' and in_args[1].dtype.kind not in ('b', 'u'):
            _raise_no_loop_matching(name)
        elif in_args[0].dtype.kind not in ('b', 'u') and in_args[1].dtype.kind == 'b':
            _raise_no_loop_matching(name)
        elif in_args[0].dtype.kind not in ('b', 'u') and \
                in_args[1].dtype.kind not in ('b', 'u'):
            _raise_no_loop_matching(name)
    if check_cast and not _casting_check_without_msg(dt, in_args, casting):
        _raise_no_loop_matching(name)
    if check_out:
        if check_cast and not numpy.can_cast(dt, out.dtype, casting=casting):
            _raise_no_loop_matching(name)
        elif dt not in (numpy.int8,) and out.dtype.kind == 'u':
            _casting_check_out(dt, out.dtype, name, casting)
        if out.dtype.kind == 'b' or in_args[0].dtype.kind in ('f', 'c') or \
                in_args[1].dtype.kind in ('f', 'c'):
            _casting_check_out(dt, out.dtype, name, casting)
        if in_args[1].dtype.char == 'L' and out.dtype.kind == 'i' and \
           in_args[0].size != 1 and in_args[1].size != 1:
            raise TypeError("ufunc '{}' output (typecode '{}') could not be "
                            "coerced to provided output parameter (typecode '{}') "
                            "according to the casting rule ''{}''".format(
                                name, in_args[1].dtype.char, out.dtype.char, casting))

    if valid is False or dt.kind == 'b':
        _raise_no_loop_matching(name)
Example #49
0
    def reindexer(X, fill_value=fill_value):
        if not np.can_cast(fill_value, X.dtype):
            out_dtype = np.promote_types(np.array(fill_value).dtype, X.dtype)
        else:
            out_dtype = X.dtype

        idxmtx = sparse.coo_matrix(
            (np.ones(len(new_pts), dtype=int), (cur_pts, new_pts)),
            shape=(old_size, new_size),
            dtype=out_dtype,
        )
        out = X @ idxmtx

        if fill_value != 0:
            to_fill = new_var.get_indexer(new_var.difference(cur_var))
            if len(to_fill) > 0:
                # More efficient to set columns on csc
                if sparse.issparse(out):
                    out = sparse.csc_matrix(out)
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore", sparse.SparseEfficiencyWarning)
                    out[:, to_fill] = fill_value

        return out
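The fill-value promotion logic in isolation: since NumPy 2.0 removed value-based can_cast for Python scalars, this sketch (output_dtype is a hypothetical helper) substitutes np.min_scalar_type for the bare fill value:

import numpy as np

def output_dtype(fill_value, dtype):
    # If the fill value does not fit the matrix dtype, promote the result
    # dtype (e.g. fill_value=np.nan forces an integer matrix to float64).
    fv_dtype = np.min_scalar_type(fill_value)
    if np.can_cast(fv_dtype, dtype):
        return dtype
    return np.promote_types(fv_dtype, dtype)

print(output_dtype(0, np.dtype(np.int32)))       # int32
print(output_dtype(np.nan, np.dtype(np.int32)))  # float64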
Example #50
0
def check(obj, dtype):
    """Pack the given nested structured object according to dtype.

    Details
    -------
    `dtype` is needed to make sure that flattened values in mappings
    are strictly aligned by their keys with the dtype's fields.
    """
    # simplify the object and validate
    kind = dtype.metadata['type']
    if not isinstance(obj, kind):
        raise TypeError(f'`obj` must be `{kind}`. Got `{type(obj)}`.')

    if isinstance(obj, abc.Sequence) and not isinstance(obj, (str, bytes)):
        # though namedtuples have mappable fields, they are nonetheless
        #  immutable tuples with a fixed order of elements
        assert len(obj) == len(dtype.fields)

        # sequence-likes are aligned with dtype's field order
        dtypes = [dt for dt, *_ in dtype.fields.values()]
        return tuple(map(check, obj, dtypes))

    elif isinstance(obj, abc.Mapping):
        assert obj.keys() == dtype.fields.keys()

        # make sure to order the items according to dtype
        return tuple([
            check(obj[k], dt) for k, (dt, *_) in dtype.fields.items()
        ])

    # dtype is unstructured
    if not numpy.can_cast(obj, dtype, casting='safe'):
        raise TypeError(f'`{obj}` cannot be safely cast to `{dtype}`.')

    # return the basic element as is (don't care about actual dtype)
    return obj
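A note on the leaf check: numpy.can_cast(obj, dtype, casting='safe') inspects the value itself, which NumPy 2.0 disallows for Python scalars. A portable equivalent (can_hold is a hypothetical sketch) goes through the minimal scalar dtype:

import numpy

def can_hold(obj, dtype):
    # Portable leaf check: test the minimal dtype of the value, not the value.
    return numpy.can_cast(numpy.min_scalar_type(obj), dtype, casting='safe')

print(can_hold(255, numpy.dtype(numpy.uint8)))  # True
print(can_hold(256, numpy.dtype(numpy.uint8)))  # False
print(can_hold(-1, numpy.dtype(numpy.uint8)))   # False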
Example #51
0
    def position_modules(self, data, out=None, threadpool=None):
        """Implementation for position_modules_fast
        """
        assert data.shape[-3:] == self.geom.expected_data_shape
        if out is None:
            out = self.make_output_array(data.shape[:-3], data.dtype)
        else:
            assert out.shape == data.shape[:-3] + self.size_yx
            if not np.can_cast(data.dtype, out.dtype, casting='safe'):
                raise TypeError("{} cannot be safely cast to {}".format(
                    data.dtype, out.dtype))

        copy_pairs = []
        for i, module in enumerate(self.modules):
            mod_data = data[..., i, :, :]
            tiles_data = self.geom.split_tiles(mod_data)
            for tile, tile_data in zip(module, tiles_data):
                y, x = tile.corner_idx
                h, w = tile.pixel_dims

                copy_pairs.append((out[..., y:y + h,
                                       x:x + w], tile.transform(tile_data)))

        if threadpool is not None:

            def copy_data(pair):
                dst, src = pair
                dst[:] = src

            # concurrent.futures map() is async, so call list() to wait for it
            list(threadpool.map(copy_data, copy_pairs))
        else:
            for dst, src in copy_pairs:
                dst[:] = src

        return out, self.centre
Example #52
0
def histogram(a,
              bins=10,
              range=None,
              normed=False,
              weights=None,
              density=None):
    r"""
    Compute the histogram of a set of data.

    Parameters
    ----------
    a : array_like
        Input data. The histogram is computed over the flattened array.
    bins : int or sequence of scalars or str, optional
        If `bins` is an int, it defines the number of equal-width
        bins in the given range (10, by default). If `bins` is a
        sequence, it defines the bin edges, including the rightmost
        edge, allowing for non-uniform bin widths.

        .. versionadded:: 1.11.0

        If `bins` is a string, it defines the method used to calculate the
        optimal bin width, as defined by `histogram_bin_edges`.

    range : (float, float), optional
        The lower and upper range of the bins.  If not provided, range
        is simply ``(a.min(), a.max())``.  Values outside the range are
        ignored. The first element of the range must be less than or
        equal to the second. `range` affects the automatic bin
        computation as well. While bin width is computed to be optimal
        based on the actual data within `range`, the bin count will fill
        the entire range including portions containing no data.
    normed : bool, optional

        .. deprecated:: 1.6.0

        This keyword is deprecated in NumPy 1.6.0 due to confusing/buggy
        behavior. It will be removed in NumPy 2.0.0. Use the ``density``
        keyword instead. If ``False``, the result will contain the
        number of samples in each bin. If ``True``, the result is the
        value of the probability *density* function at the bin,
        normalized such that the *integral* over the range is 1. Note
        that this latter behavior is known to be buggy with unequal bin
        widths; use ``density`` instead.
    weights : array_like, optional
        An array of weights, of the same shape as `a`.  Each value in
        `a` only contributes its associated weight towards the bin count
        (instead of 1). If `density` is True, the weights are
        normalized, so that the integral of the density over the range
        remains 1.
    density : bool, optional
        If ``False``, the result will contain the number of samples in
        each bin. If ``True``, the result is the value of the
        probability *density* function at the bin, normalized such that
        the *integral* over the range is 1. Note that the sum of the
        histogram values will not be equal to 1 unless bins of unity
        width are chosen; it is not a probability *mass* function.

        Overrides the ``normed`` keyword if given.

    Returns
    -------
    hist : array
        The values of the histogram. See `density` and `weights` for a
        description of the possible semantics.
    bin_edges : array of dtype float
        Return the bin edges ``(length(hist)+1)``.


    See Also
    --------
    histogramdd, bincount, searchsorted, digitize, histogram_bin_edges

    Notes
    -----
    All but the last (righthand-most) bin is half-open.  In other words,
    if `bins` is::

      [1, 2, 3, 4]

    then the first bin is ``[1, 2)`` (including 1, but excluding 2) and
    the second ``[2, 3)``.  The last bin, however, is ``[3, 4]``, which
    *includes* 4.


    Examples
    --------
    >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
    (array([0, 2, 1]), array([0, 1, 2, 3]))
    >>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
    (array([ 0.25,  0.25,  0.25,  0.25]), array([0, 1, 2, 3, 4]))
    >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
    (array([1, 4, 1]), array([0, 1, 2, 3]))

    >>> a = np.arange(5)
    >>> hist, bin_edges = np.histogram(a, density=True)
    >>> hist
    array([ 0.5,  0. ,  0.5,  0. ,  0. ,  0.5,  0. ,  0.5,  0. ,  0.5])
    >>> hist.sum()
    2.4999999999999996
    >>> np.sum(hist * np.diff(bin_edges))
    1.0

    .. versionadded:: 1.11.0

    Automated Bin Selection Methods example, using 2 peak random data
    with 2000 points:

    >>> import matplotlib.pyplot as plt
    >>> rng = np.random.RandomState(10)  # deterministic random data
    >>> a = np.hstack((rng.normal(size=1000),
    ...                rng.normal(loc=5, scale=2, size=1000)))
    >>> plt.hist(a, bins='auto')  # arguments are passed to np.histogram
    >>> plt.title("Histogram with 'auto' bins")
    >>> plt.show()

    """
    a, weights = _ravel_and_check_weights(a, weights)

    bin_edges, uniform_bins = _get_bin_edges(a, bins, range, weights)

    # Histogram is an integer or a float array depending on the weights.
    if weights is None:
        ntype = np.dtype(np.intp)
    else:
        ntype = weights.dtype

    # We set a block size, as this allows us to iterate over chunks when
    # computing histograms, to minimize memory usage.
    BLOCK = 65536

    # The fast path uses bincount, but that only works for certain types
    # of weight
    simple_weights = (weights is None or np.can_cast(weights.dtype, np.double)
                      or np.can_cast(weights.dtype, complex))

    if uniform_bins is not None and simple_weights:
        # Fast algorithm for equal bins
        # We now convert values of a to bin indices, under the assumption of
        # equal bin widths (which is valid here).
        first_edge, last_edge, n_equal_bins = uniform_bins

        # Initialize empty histogram
        n = np.zeros(n_equal_bins, ntype)

        # Pre-compute histogram scaling factor
        norm = n_equal_bins / (last_edge - first_edge)

        # We iterate over blocks here for two reasons: the first is that for
        # large arrays, it is actually faster (for example for a 10^8 array it
        # is 2x as fast) and it results in a memory footprint 3x lower in the
        # limit of large arrays.
        for i in _range(0, len(a), BLOCK):
            tmp_a = a[i:i + BLOCK]
            if weights is None:
                tmp_w = None
            else:
                tmp_w = weights[i:i + BLOCK]

            # Only include values in the right range
            keep = (tmp_a >= first_edge)
            keep &= (tmp_a <= last_edge)
            if not np.logical_and.reduce(keep):
                tmp_a = tmp_a[keep]
                if tmp_w is not None:
                    tmp_w = tmp_w[keep]

            # This cast ensures no type promotions occur below, which gh-10322
            # make unpredictable. Getting it wrong leads to precision errors
            # like gh-8123.
            tmp_a = tmp_a.astype(bin_edges.dtype, copy=False)

            # Compute the bin indices, and for values that lie exactly on
            # last_edge we need to subtract one
            f_indices = (tmp_a - first_edge) * norm
            indices = f_indices.astype(np.intp)
            indices[indices == n_equal_bins] -= 1

            # The index computation is not guaranteed to give exactly
            # consistent results within ~1 ULP of the bin edges.
            decrement = tmp_a < bin_edges[indices]
            indices[decrement] -= 1
            # The last bin includes the right edge. The other bins do not.
            increment = ((tmp_a >= bin_edges[indices + 1])
                         & (indices != n_equal_bins - 1))
            indices[increment] += 1

            # We now compute the histogram using bincount
            if ntype.kind == 'c':
                n.real += np.bincount(indices,
                                      weights=tmp_w.real,
                                      minlength=n_equal_bins)
                n.imag += np.bincount(indices,
                                      weights=tmp_w.imag,
                                      minlength=n_equal_bins)
            else:
                n += np.bincount(indices,
                                 weights=tmp_w,
                                 minlength=n_equal_bins).astype(ntype)
    else:
        # Compute via cumulative histogram
        cum_n = np.zeros(bin_edges.shape, ntype)
        if weights is None:
            for i in _range(0, len(a), BLOCK):
                sa = np.sort(a[i:i + BLOCK])
                cum_n += _search_sorted_inclusive(sa, bin_edges)
        else:
            zero = np.zeros(1, dtype=ntype)
            for i in _range(0, len(a), BLOCK):
                tmp_a = a[i:i + BLOCK]
                tmp_w = weights[i:i + BLOCK]
                sorting_index = np.argsort(tmp_a)
                sa = tmp_a[sorting_index]
                sw = tmp_w[sorting_index]
                cw = np.concatenate((zero, sw.cumsum()))
                bin_index = _search_sorted_inclusive(sa, bin_edges)
                cum_n += cw[bin_index]

        n = np.diff(cum_n)

    # density overrides the normed keyword
    if density is not None:
        normed = False

    if density:
        db = np.array(np.diff(bin_edges), float)
        return n / db / n.sum(), bin_edges
    elif normed:
        # deprecated, buggy behavior. Remove for NumPy 2.0.0
        db = np.array(np.diff(bin_edges), float)
        return n / (n * db).sum(), bin_edges
    else:
        return n, bin_edges
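The core of the uniform-bin fast path can be isolated as follows; fast_bin_indices is a hypothetical sketch that skips the ULP-level edge corrections applied above:

import numpy as np

def fast_bin_indices(a, first_edge, last_edge, n_bins):
    # Map values to bin indices by scaling; values exactly on the last edge
    # are folded into the final (closed) bin.
    a = np.asarray(a, dtype=np.float64)
    norm = n_bins / (last_edge - first_edge)
    indices = ((a - first_edge) * norm).astype(np.intp)
    indices[indices == n_bins] -= 1
    return indices

idx = fast_bin_indices([0.0, 0.4, 2.0], 0.0, 2.0, n_bins=4)
print(np.bincount(idx, minlength=4))  # [2 0 0 1]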
Example #53
0
 def fillna(self, value):
     if not np.can_cast(value, self.dtype):
         raise TypeError("fill value must match dtype of series")
     return self.map_partitions(M.fillna, value, meta=self)
Example #54
0
def spsolve_triangular(A, b, lower=True, overwrite_A=False, overwrite_b=False):
    """
    Solve the equation `A x = b` for `x`, assuming A is a triangular matrix.

    Parameters
    ----------
    A : (M, M) sparse matrix
        A sparse square triangular matrix. Should be in CSR format.
    b : (M,) or (M, N) array_like
        Right-hand side matrix in `A x = b`
    lower : bool, optional
        Whether `A` is a lower or upper triangular matrix.
        Default is lower triangular matrix.
    overwrite_A : bool, optional
        Allow changing `A`. The indices of `A` are going to be sorted and zero
        entries are going to be removed.
        Enabling gives a performance gain. Default is False.
    overwrite_b : bool, optional
        Allow overwriting data in `b`.
        Enabling gives a performance gain. Default is False.
        If `overwrite_b` is True, it should be ensured that
        `b` has an appropriate dtype to be able to store the result.

    Returns
    -------
    x : (M,) or (M, N) ndarray
        Solution to the system `A x = b`.  Shape of return matches shape of `b`.

    Raises
    ------
    LinAlgError
        If `A` is singular or not triangular.
    ValueError
        If shape of `A` or shape of `b` do not match the requirements.

    Notes
    -----
    .. versionadded:: 0.19.0

    Examples
    --------
    >>> from scipy.sparse import csr_matrix
    >>> from scipy.sparse.linalg import spsolve_triangular
    >>> A = csr_matrix([[3, 0, 0], [1, -1, 0], [2, 0, 1]], dtype=float)
    >>> B = np.array([[2, 0], [-1, 0], [2, 0]], dtype=float)
    >>> x = spsolve_triangular(A, B)
    >>> np.allclose(A.dot(x), B)
    True
    """

    # Check the input for correct type and format.
    if not isspmatrix_csr(A):
        warn('CSR matrix format is required. Converting to CSR matrix.',
             SparseEfficiencyWarning)
        A = csr_matrix(A)
    elif not overwrite_A:
        A = A.copy()

    if A.shape[0] != A.shape[1]:
        raise ValueError(
            'A must be a square matrix but its shape is {}.'.format(A.shape))

    A.eliminate_zeros()
    A.sort_indices()

    b = np.asanyarray(b)

    if b.ndim not in [1, 2]:
        raise ValueError('b must have 1 or 2 dims but its shape is {}.'.format(
            b.shape))
    if A.shape[0] != b.shape[0]:
        raise ValueError(
            'The size of the dimensions of A must be equal to '
            'the size of the first dimension of b but the shape of A is '
            '{} and the shape of b is {}.'.format(A.shape, b.shape))

    # Init x as (a copy of) b.
    x_dtype = np.result_type(A.data, b, np.float64)
    if overwrite_b:
        if np.can_cast(b.dtype, x_dtype, casting='same_kind'):
            x = b
        else:
            raise ValueError('Cannot overwrite b (dtype {}) with result '
                             'of type {}.'.format(b.dtype, x_dtype))
    else:
        x = b.astype(x_dtype, copy=True)

    # Choose forward or backward order.
    if lower:
        row_indices = range(len(b))
    else:
        row_indices = range(len(b) - 1, -1, -1)

    # Fill x iteratively.
    for i in row_indices:

        # Get indices for i-th row.
        indptr_start = A.indptr[i]
        indptr_stop = A.indptr[i + 1]
        if lower:
            A_diagonal_index_row_i = indptr_stop - 1
            A_off_diagonal_indices_row_i = slice(indptr_start, indptr_stop - 1)
        else:
            A_diagonal_index_row_i = indptr_start
            A_off_diagonal_indices_row_i = slice(indptr_start + 1, indptr_stop)

        # Check regularity and triangularity of A.
        if indptr_stop <= indptr_start or A.indices[A_diagonal_index_row_i] < i:
            raise LinAlgError('A is singular: diagonal {} is zero.'.format(i))
        if A.indices[A_diagonal_index_row_i] > i:
            raise LinAlgError('A is not triangular: A[{}, {}] is nonzero.'
                              ''.format(i, A.indices[A_diagonal_index_row_i]))

        # Incorporate off-diagonal entries.
        A_column_indices_in_row_i = A.indices[A_off_diagonal_indices_row_i]
        A_values_in_row_i = A.data[A_off_diagonal_indices_row_i]
        x[i] -= np.dot(x[A_column_indices_in_row_i].T, A_values_in_row_i)

        # Compute i-th entry of x.
        x[i] /= A.data[A_diagonal_index_row_i]

    return x
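The dtype negotiation for overwrite_b, extracted into a standalone sketch (result_buffer is hypothetical):

import numpy as np

def result_buffer(A_data, b, overwrite_b=False):
    # Same negotiation as above: the solve may produce floats, so the result
    # dtype is at least float64 even for integer A and b.
    x_dtype = np.result_type(A_data, b, np.float64)
    if overwrite_b:
        if np.can_cast(b.dtype, x_dtype, casting='same_kind'):
            return b
        raise ValueError(f'Cannot overwrite b (dtype {b.dtype}) with result '
                         f'of type {x_dtype}.')
    return b.astype(x_dtype, copy=True)

b = np.array([2.0, -1.0, 2.0])
assert result_buffer(np.array([3.0]), b, overwrite_b=True) is b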
Example #55
0
def check_cast(fromvar, tovar):
    if not np.can_cast(fromvar.dtype, tovar.dtype, casting='safe'):
        # FIXME exception or warning?
        raise TypeError("Unsafe automatic casting from %s to %s" %
                        (fromvar.dtype, tovar.dtype))
Example #56
0
def copyto(dst, src, casting='same_kind', where=None):
    """Copies values from one array to another with broadcasting.

    This function can be called for arrays on different devices. In this case,
    casting, ``where``, and broadcasting is not supported, and an exception is
    raised if these are used.

    Args:
        dst (cupy.ndarray): Target array.
        src (cupy.ndarray): Source array.
        casting (str): Casting rule. See :func:`numpy.can_cast` for detail.
        where (cupy.ndarray of bool): If specified, this array acts as a mask,
            and an element is copied only if the corresponding element of
            ``where`` is True.

    .. seealso:: :func:`numpy.copyto`

    """
    src_is_numpy_scalar = False

    src_type = type(src)
    src_is_python_scalar = src_type in (
        int, bool, float, complex,
        fusion._FusionVarScalar, _fusion_interface._ScalarProxy)
    if src_is_python_scalar:
        src_dtype = numpy.dtype(type(src))
        can_cast = numpy.can_cast(src, dst.dtype, casting)
    elif isinstance(src, numpy.ndarray) or numpy.isscalar(src):
        if src.size != 1:
            raise ValueError(
                'non-scalar numpy.ndarray cannot be used for copyto')
        src_dtype = src.dtype
        can_cast = numpy.can_cast(src, dst.dtype, casting)
        src = src.item()
        src_is_numpy_scalar = True
    else:
        src_dtype = src.dtype
        can_cast = numpy.can_cast(src_dtype, dst.dtype, casting)

    if not can_cast:
        raise TypeError('Cannot cast %s to %s in %s casting mode' %
                        (src_dtype, dst.dtype, casting))

    if fusion._is_fusing():
        # TODO(kataoka): NumPy allows stripping leading unit dimensions.
        # But fusion array proxy does not currently support
        # `shape` and `squeeze`.

        if where is None:
            _core.elementwise_copy(src, dst)
        else:
            fusion._call_ufunc(search._where_ufunc, where, src, dst, dst)
        return

    if not src_is_python_scalar and not src_is_numpy_scalar:
        # Check broadcast condition
        # - for fast-paths and
        # - for a better error message (than ufunc's).
        # NumPy allows stripping leading unit dimensions.
        if not all([
            s in (d, 1)
            for s, d in itertools.zip_longest(
                reversed(src.shape), reversed(dst.shape), fillvalue=1)
        ]):
            raise ValueError(
                "could not broadcast input array "
                f"from shape {src.shape} into shape {dst.shape}")
        squeeze_ndim = src.ndim - dst.ndim
        if squeeze_ndim > 0:
            # always succeeds because the broadcast condition is checked.
            src = src.squeeze(tuple(range(squeeze_ndim)))

    if where is not None:
        _core.elementwise_copy(src, dst, _where=where)
        return

    if dst.size == 0:
        return

    if src_is_python_scalar or src_is_numpy_scalar:
        _core.elementwise_copy(src, dst)
        return

    if _can_memcpy(dst, src):
        dst.data.copy_from_async(src.data, src.nbytes)
        return

    device = dst.device
    prev_device = runtime.getDevice()
    try:
        runtime.setDevice(device.id)
        if src.device != device:
            src = src.copy()
        _core.elementwise_copy(src, dst)
    finally:
        runtime.setDevice(prev_device)
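The scalar/array dispatch around can_cast, sketched with plain NumPy (can_copy is hypothetical); Python scalars went through value-based can_cast above, and with NumPy >= 2 the minimal scalar dtype stands in for the value:

import numpy as np

def can_copy(src, dst_dtype, casting='same_kind'):
    # Scalars: inspect the smallest dtype that holds the value.
    if isinstance(src, (bool, int, float, complex)):
        src_dtype = np.min_scalar_type(src)
    else:
        src_dtype = src.dtype
    return np.can_cast(src_dtype, dst_dtype, casting=casting)

print(can_copy(3, np.dtype(np.int8)))           # True
print(can_copy(np.ones(3), np.dtype(np.int8)))  # False: float64 -> int8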
Example #57
0
def stefcal(rawvis,
            num_ants,
            corrprod_lookup,
            weights=None,
            ref_ant=0,
            init_gain=None,
            *args,
            **kwargs):
    """Solve for antenna gains using StEFCal.

    Refer to :func:`katsdpcal.calprocs.stefcal` for details. This version
    expects a dask array for `rawvis`, and optionally for `weights` and
    `init_gain` as well.
    """
    if weights is None:
        weights = da.ones(1, dtype=rawvis.real.dtype, chunks=1)
    else:
        weights = da.asarray(weights)
    if weights.ndim == 0:
        weights = weights[np.newaxis]

    if init_gain is None:
        init_gain = da.ones(num_ants, dtype=rawvis.dtype, chunks=num_ants)
    else:
        init_gain = da.asarray(init_gain)

    # label the dimensions; the reverse is to match numpy broadcasting rules
    # where the number of dimensions don't match. The final dimension in each
    # case is given a unique label because they do not necessarily match along
    # that dimension.
    rawvis_dims = list(reversed(range(rawvis.ndim)))
    rawvis_dims[-1] = -1
    weights_dims = list(reversed(range(weights.ndim)))
    weights_dims[-1] = -2
    init_gain_dims = list(reversed(range(init_gain.ndim)))
    init_gain_dims[-1] = -3
    out_dims = list(
        reversed(range(max(rawvis.ndim, weights.ndim, init_gain.ndim))))
    out_dims[-1] = -4

    # Determine the output dtype, since the gufunc has two signatures
    if (np.can_cast(rawvis.dtype, np.complex64)
            and np.can_cast(weights.dtype, np.float32)
            and np.can_cast(init_gain.dtype, np.complex64)):
        dtype = np.complex64
    else:
        dtype = np.complex128

    def stefcal_wrapper(rawvis, weights, init_gain):
        return calprocs.stefcal(rawvis, num_ants, corrprod_lookup, weights,
                                ref_ant, init_gain, *args, **kwargs)

    return da.blockwise(stefcal_wrapper,
                        out_dims,
                        rawvis,
                        rawvis_dims,
                        weights,
                        weights_dims,
                        init_gain,
                        init_gain_dims,
                        concatenate=True,
                        new_axes={-4: num_ants},
                        dtype=dtype)
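The single- versus double-precision selection reduces to a chain of can_cast checks; gufunc_dtype below is a hypothetical sketch of just that chain:

import numpy as np

def gufunc_dtype(vis_dtype, weight_dtype, gain_dtype):
    # Single precision only if every input fits single precision losslessly.
    if (np.can_cast(vis_dtype, np.complex64)
            and np.can_cast(weight_dtype, np.float32)
            and np.can_cast(gain_dtype, np.complex64)):
        return np.complex64
    return np.complex128

print(gufunc_dtype(np.complex64, np.float32, np.complex64))  # complex64
print(gufunc_dtype(np.complex64, np.float64, np.complex64))  # complex128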
Example #58
0
    def __new__(cls,
                value,
                unit=None,
                dtype=None,
                copy=True,
                order=None,
                subok=False,
                ndmin=0):

        if unit is not None:
            # convert unit first, to avoid multiple string->unit conversions
            unit = Unit(unit)

        # optimize speed for Quantity with no dtype given, copy=False
        if isinstance(value, Quantity):
            if unit is not None and unit is not value.unit:
                value = value.to(unit)
                # the above already makes a copy (with float dtype)
                copy = False

            if not subok and type(value) is not cls:
                value = value.view(cls)

            if dtype is None:
                if not copy:
                    return value

                if not np.can_cast(np.float32, value.dtype):
                    dtype = float

            return np.array(value,
                            dtype=dtype,
                            copy=copy,
                            order=order,
                            subok=True,
                            ndmin=ndmin)

        rescale_value = None

        # Maybe list/tuple of Quantity? short-circuit array for speed
        if (not isinstance(value, np.ndarray) and isiterable(value)
                and all(isinstance(v, Quantity) for v in value)):
            if unit is None:
                unit = value[0].unit
            value = [q.to(unit).value for q in value]
            copy = False  # copy already made

        else:
            # if the value has a `unit` attribute, treat it like a quantity by
            # rescaling the value appropriately
            if hasattr(value, 'unit'):
                try:
                    value_unit = Unit(value.unit)
                except TypeError:
                    if unit is None:
                        unit = dimensionless_unscaled
                else:
                    if unit is None:
                        unit = value_unit
                    else:
                        rescale_value = value_unit.to(unit)

            # if it has no unit, default to dimensionless_unscaled
            elif unit is None:
                unit = dimensionless_unscaled

        value = np.array(value,
                         dtype=dtype,
                         copy=copy,
                         order=order,
                         subok=False,
                         ndmin=ndmin)

        # check that array contains numbers or long int objects
        if (value.dtype.kind in 'OSU'
                and not (value.dtype.kind == 'O' and isinstance(
                    value.item(
                        () if value.ndim == 0 else 0), numbers.Number))):
            raise TypeError("The value must be a valid Python or "
                            "Numpy numeric type.")

        # by default, cast any integer, boolean, etc., to float
        if dtype is None and not np.can_cast(np.float32, value.dtype):
            value = value.astype(float)

        if rescale_value is not None:
            value *= rescale_value

        value = value.view(cls)
        value._unit = unit

        return value
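The backwards-looking float test deserves a note: np.can_cast(np.float32, value.dtype) is False exactly for integer, boolean and narrower dtypes, which is what triggers the float promotion. A minimal sketch (default_to_float is hypothetical):

import numpy as np

def default_to_float(value):
    # If float32 cannot be safely cast to value's dtype, the dtype must be
    # integer/boolean-like (or a narrower float), so promote to float.
    if not np.can_cast(np.float32, value.dtype):
        return value.astype(float)
    return value

print(default_to_float(np.array([1, 2])).dtype)  # float64
print(default_to_float(np.array([1.5])).dtype)   # float64 (unchanged)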
Example #59
0
def _transpile_stmt(stmt, is_toplevel, env):
    """Transpile the statement.

    Returns (list of [CodeBlock or str]): The generated CUDA code.
    """

    if isinstance(stmt, ast.ClassDef):
        raise NotImplementedError('class is not supported currently.')
    if isinstance(stmt, (ast.FunctionDef, ast.AsyncFunctionDef)):
        raise NotImplementedError(
            'Nested functions are not supported currently.')
    if isinstance(stmt, ast.Return):
        value = _transpile_expr(stmt.value, env)
        value = _to_cuda_object(value, env)
        t = value.ctype
        if env.ret_type is None:
            env.ret_type = t
        elif env.ret_type != t:
            raise ValueError(
                f'Failed to infer the return type: {env.ret_type} or {t}')
        return [f'return {value.code};']
    if isinstance(stmt, ast.Delete):
        raise NotImplementedError('`del` is not supported currently.')

    if isinstance(stmt, ast.Assign):
        if len(stmt.targets) != 1:
            raise NotImplementedError('Not implemented.')
        target = stmt.targets[0]
        if not isinstance(target, ast.Name):
            raise NotImplementedError('Tuple is not supported.')
        name = target.id
        value = _transpile_expr(stmt.value, env)

        if is_constants([value]):
            if not isinstance(value.obj, _typeclasses):
                if is_toplevel:
                    if env[name] is not None and not is_constants([env[name]]):
                        raise TypeError(f'Type mismatch of variable: `{name}`')
                    env.consts[name] = value
                    return []
                else:
                    raise TypeError(
                        'Cannot assign constant value not at top-level.')
            value = _to_cuda_object(value, env)

        if env[name] is None:
            env[name] = CudaObject(target.id, value.ctype)
        elif is_constants([env[name]]):
            raise TypeError(f'Type mismatch of variable: `{name}`')
        elif env[name].ctype.dtype != value.ctype.dtype:
            raise TypeError(
                f'Data type mismatch of variable: `{name}`: '
                f'{env[name].ctype.dtype} != {value.ctype.dtype}')
        return [f'{target.id} = {value.code};']

    if isinstance(stmt, ast.AugAssign):
        value = _transpile_expr(stmt.value, env)
        target = _transpile_expr(stmt.target, env)
        assert isinstance(target, CudaObject)
        value = _to_cuda_object(value, env)
        result = _eval_operand(stmt.op, (target, value), env)
        if not numpy.can_cast(
                result.ctype.dtype, target.ctype.dtype, 'same_kind'):
            raise TypeError('dtype mismatch')
        return [f'{target.code} = {result.code};']

    if isinstance(stmt, ast.For):
        if len(stmt.orelse) > 0:
            raise NotImplementedError('for-else is not supported.')
        name = stmt.target.id
        iters = _transpile_expr(stmt.iter, env)

        if env[name] is None:
            env[name] = CudaObject(stmt.target.id, iters.ctype)
        elif env[name].ctype.dtype != iters.ctype.dtype:
            raise TypeError(
                f'Data type mismatch of variable: `{name}`: '
                f'{env[name].ctype.dtype} != {iters.ctype.dtype}')

        body = _transpile_stmts(stmt.body, False, env)

        if not isinstance(iters, Range):
            raise NotImplementedError(
                'for-loop is supported only for range iterator.')

        init_code = (f'{iters.ctype} '
                     f'__it = {iters.start.code}, '
                     f'__stop = {iters.stop.code}, '
                     f'__step = {iters.step.code}')
        cond = '__step >= 0 ? __it < __stop : __it > __stop'
        if iters.step_is_positive is True:
            cond = '__it < __stop'
        elif iters.step_is_positive is False:
            cond = '__it > __stop'

        head = f'for ({init_code}; {cond}; __it += __step)'
        return [CodeBlock(head, [f'{name} = __it;'] + body)]

    if isinstance(stmt, ast.AsyncFor):
        raise ValueError('`async for` is not allowed.')
    if isinstance(stmt, ast.While):
        if len(stmt.orelse) > 0:
            raise NotImplementedError('while-else is not supported.')
        condition = _transpile_expr(stmt.test, env)
        condition = _astype_scalar(condition, _types.bool_, 'unsafe', env)
        condition = _to_cuda_object(condition, env)
        body = _transpile_stmts(stmt.body, False, env)
        head = f'while ({condition.code})'
        return [CodeBlock(head, body)]
    if isinstance(stmt, ast.If):
        condition = _transpile_expr(stmt.test, env)
        if is_constants([condition]):
            stmts = stmt.body if condition.obj else stmt.orelse
            return _transpile_stmts(stmts, is_toplevel, env)
        head = f'if ({condition.code})'
        then_body = _transpile_stmts(stmt.body, False, env)
        else_body = _transpile_stmts(stmt.orelse, False, env)
        return [CodeBlock(head, then_body), CodeBlock('else', else_body)]
    if isinstance(stmt, (ast.With, ast.AsyncWith)):
        raise ValueError('Switching contexts are not allowed.')
    if isinstance(stmt, (ast.Raise, ast.Try)):
        raise ValueError('throw/catch are not allowed.')
    if isinstance(stmt, ast.Assert):
        value = _transpile_expr(stmt.test, env)
        if is_constants([value]):
            assert value.obj
            return [';']
        else:
            return [f'assert({value.code});']
    if isinstance(stmt, (ast.Import, ast.ImportFrom)):
        raise ValueError('Cannot import modules from the target functions.')
    if isinstance(stmt, (ast.Global, ast.Nonlocal)):
        raise ValueError('Cannot use global/nonlocal in the target functions.')
    if isinstance(stmt, ast.Expr):
        value = _transpile_expr(stmt.value, env)
        return [';'] if is_constants([value]) else [value.code + ';']
    if isinstance(stmt, ast.Pass):
        return [';']
    if isinstance(stmt, ast.Break):
        raise NotImplementedError('Not implemented.')
    if isinstance(stmt, ast.Continue):
        raise NotImplementedError('Not implemented.')
    assert False
Example #60
0
def _can_cast(arg, dtype):
    """
    This is needed for compatibility with Numpy < 1.6, in which ``can_cast``
    can only take a dtype or type as its first argument.
    """
    return np.can_cast(getattr(arg, 'dtype', type(arg)), dtype)
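Assuming np is already imported in the shim's module, usage looks like this; the fallback to type(arg) lets bare Python scalars through:

print(_can_cast(np.zeros(3, dtype=np.int16), np.float32))  # True: int16 -> float32
print(_can_cast(1.5, np.int32))  # False: type(1.5) is float, i.e. float64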