Example #1
        if minus_ones == 0:
            pass
        elif minus_ones == 1:
            known_dimensions_size = -np.product(shape,axis=0) * dt.itemsize
            unknown_dimension_size, illegal = divmod(self.remaining_bytes(),
                                                     known_dimensions_size)
            if illegal:
                raise ValueError("unknown dimension doesn't match filesize")
            shape[shape.index(-1)] = unknown_dimension_size
        else:
            raise ValueError(
                "illegal -1 count; can only specify one unknown dimension")
        sz = dt.itemsize * np.product(shape)
        dt_endian = self._endian_from_dtype(dt)
        buf = self.file.read(sz)
        arr = np.ndarray(shape=shape,
                         dtype=dt,
                         buffer=buf,
                         order=order)
        if (not endian == 'dtype') and (dt_endian != endian):
            return arr.byteswap()
        return arr.copy()

npfile = np.deprecate_with_doc("""
You can achieve the same effect as npfile by using ndarray.tofile
and numpy.fromfile.

Even better, you can use memory-mapped arrays and data-types to map out a
file format for direct manipulation in NumPy.
""")(npfile)
Example #2
from info import __doc__

from numpy import deprecate_with_doc

# These are all deprecated (until the end deprecated tag)
from npfile import npfile
from data_store import save, load, create_module, create_shelf
from array_import import read_array, write_array
from pickler import objload, objsave

from numpyio import packbits, unpackbits, bswap, fread, fwrite, \
     convert_objectarray

fread = deprecate_with_doc("""
scipy.io.fread can be replaced with the raw reading capabilities of NumPy,
including fromfile, as well as memory-mapping capabilities.
""")(fread)

fwrite = deprecate_with_doc("""
scipy.io.fwrite can be replaced with the raw writing capabilities of
NumPy.  Also, remember that files can be directly memory-mapped into NumPy
arrays, which is often a better way of reading especially large files.

Look at the tofile methods as well as save and savez for writing arrays into
easily transported files of data.
""")(fwrite)

bswap = deprecate_with_doc("""
scipy.io.bswap is easily replaced with the byteswap method on an array.
out = scipy.io.bswap(arr) --> out = arr.byteswap(True)
""")(bswap)
Example #3
        return ret
    else:
        raise ValueError('Unknown mat file type, version %s' % ret)


class ByteOrder(object):
    ''' Namespace for byte ordering '''
    little_endian = boc.sys_is_le
    native_code = boc.native_code
    swapped_code = boc.swapped_code
    to_numpy_code = boc.to_numpy_code


ByteOrder = np.deprecate_with_doc("""
We no longer use the ByteOrder class, and deprecate it; we will remove
it in future versions of scipy.  Please use the
scipy.io.matlab.byteordercodes module instead.
""")(ByteOrder)


class MatStreamAgent(object):
    ''' Base object for readers / getters from mat file streams

    Attaches to initialized stream

    Base class for "getters" - which do store state of what they are
    reading on initialization, and therefore need to be initialized
    before each read, and "readers" which do not store state, and only
    need to be initialized once on object creation

    Implements common array reading functions
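
The deprecated ByteOrder namespace simply re-exports byte-order codes; a rough sketch of how the same values can be derived with the standard library and NumPy (the exact helper names in scipy.io.matlab.byteordercodes may differ):

import sys
import numpy as np

sys_is_le = sys.byteorder == 'little'        # was ByteOrder.little_endian
native_code = '<' if sys_is_le else '>'      # was ByteOrder.native_code
swapped_code = '>' if sys_is_le else '<'     # was ByteOrder.swapped_code

# The same single-character codes are what NumPy dtypes understand:
dt = np.dtype('i4').newbyteorder(swapped_code)   # non-native 32-bit int
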
Example #4
from io import StringIO
from typing import Any, Dict

import numpy as np

AR: np.ndarray[Any, np.dtype[np.float64]]
AR_DICT: Dict[str, np.ndarray[Any, np.dtype[np.float64]]]
FILE: StringIO

def func(a: int) -> bool: ...

reveal_type(np.deprecate(func))  # E: def (a: builtins.int) -> builtins.bool
reveal_type(np.deprecate())  # E: _Deprecate

reveal_type(np.deprecate_with_doc("test"))  # E: _Deprecate
reveal_type(np.deprecate_with_doc(None))  # E: _Deprecate

reveal_type(np.byte_bounds(AR))  # E: Tuple[builtins.int, builtins.int]
reveal_type(np.byte_bounds(np.float64()))  # E: Tuple[builtins.int, builtins.int]

reveal_type(np.who(None))  # E: None
reveal_type(np.who(AR_DICT))  # E: None

reveal_type(np.info(1, output=FILE))  # E: None

reveal_type(np.source(np.interp, output=FILE))  # E: None

reveal_type(np.lookfor("binary representation", output=FILE))  # E: None

reveal_type(np.safe_eval("1 + 1"))  # E: Any
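
At runtime the revealed types above correspond to ordinary return values; a short illustrative sketch of a few of them (the array a is made up):

import numpy as np

a = np.arange(4, dtype=np.float64)

low, high = np.byte_bounds(a)        # start/end addresses of the data buffer
assert high - low == a.size * a.itemsize

np.who({"a": a})                     # prints a summary table, returns None
print(np.safe_eval("1 + 1"))         # 2; literal-only evaluation
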
Example #5
    >>> from sympy.abc import x, y, z
    >>> from nipy.fixes.sympy.utilities.lambdify import lambdify, implemented_function
    >>> from sympy import Function
    >>> f = implemented_function(Function('f'), lambda x : x+1)
    >>> lam_f = lambdify(x, f(x))
    >>> lam_f(4)
    5
    """
    #NIPY-EDIT: compatibility between sympy 0.6.x and 0.7.0. UndefinedFunction
    # is, er, not defined in sympy 0.6.x
    try:
        # Delayed import to avoid circular imports
        from sympy.core.function import UndefinedFunction as funcmaker
    except ImportError:
        from sympy import Function, FunctionClass
        funcmaker = lambda name: FunctionClass(Function, name)
    # if name, create anonymous function to hold implementation
    if isinstance(symfunc, basestring):
        symfunc = funcmaker(symfunc)
    #NIPY-EDIT: ends
    # We need to attach as a method because symfunc will be a class
    symfunc._imp_ = staticmethod(implementation)
    return symfunc


# This is a nipy compatibility wrapper
import numpy as np

aliased_function = np.deprecate_with_doc(
    'Please use sympy.utilities.implemented_function')(implemented_function)
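
With current sympy the shim above is unnecessary: the deprecation message points to sympy's own implemented_function, which attaches the implementation in one step. A minimal sketch of that replacement:

from sympy.abc import x
from sympy import lambdify
from sympy.utilities.lambdify import implemented_function

# implemented_function creates the undefined function and attaches the
# numeric implementation (_imp_) in one step.
f = implemented_function('f', lambda v: v + 1)
lam_f = lambdify(x, f(x))
print(lam_f(4))   # 5
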
Example #6
    return shape


class ByteOrder(object):
    """ Namespace for byte ordering """

    little_endian = boc.sys_is_le
    native_code = boc.native_code
    swapped_code = boc.swapped_code
    to_numpy_code = boc.to_numpy_code


ByteOrder = np.deprecate_with_doc(
    """
We no longer use the ByteOrder class, and deprecate it; we will remove
it in future versions of scipy.  Please use the
scipy.io.matlab.byteordercodes module instead.
"""
)(ByteOrder)


class MatStreamAgent(object):
    """ Base object for readers / getters from mat file streams

    Attaches to initialized stream

    Base class for "getters" - which do store state of what they are
    reading on initialization, and therefore need to be initialized
    before each read, and "readers" which do not store state, and only
    need to be initialized once on object creation
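
Worth noting: np.deprecate_with_doc returns a function wrapper, so after an assignment like the one above ByteOrder behaves like a factory function rather than a class; calling it still builds an instance, but first emits a DeprecationWarning. A small sketch of that behaviour (the Legacy class is made up):

import warnings
import numpy as np

class Legacy(object):
    """Stand-in for a deprecated namespace class."""
    value = 42

Legacy = np.deprecate_with_doc("Use the new module instead.")(Legacy)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    obj = Legacy()                       # still constructs an instance ...
    assert obj.value == 42
    # ... but the call went through the wrapper, which warned first.
    assert issubclass(caught[-1].category, DeprecationWarning)
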
Example #7
import numpy as np

np.deprecate(1)  # E: No overload variant

np.deprecate_with_doc(1)  # E: incompatible type

np.byte_bounds(1)  # E: incompatible type

np.who(1)  # E: incompatible type

np.lookfor(None)  # E: incompatible type

np.safe_eval(None)  # E: incompatible type
Example #8
    dir, filename = os.path.split(module.__file__)
    filebase = filename.split('.')[0]
    fn = os.path.join(dir, filebase)
    f = dumb_shelve.open(fn, "r")
    #exec( 'import ' + module.__name__)
    for i in f.keys():
        exec('import ' + module.__name__ + ';' + module.__name__ + '.' + i +
             '=' + 'f["' + i + '"]')


#       print i, 'loaded...'
#   print 'done'

load = deprecate_with_doc("""
This is an internal function used with scipy.io.save_as_module

If you are saving arrays into a module, you should think about using
HDF5 or .npz files instead.
""")(_load)


def _create_module(file_name):
    """ Create the module file.
    """
    if not os.path.exists(file_name + '.py'):  # don't clobber existing files
        module_name = os.path.split(file_name)[-1]
        f = open(file_name + '.py', 'w')
        f.write('import scipy.io.data_store as data_store\n')
        f.write('import %s\n' % module_name)
        f.write('data_store._load(%s)' % module_name)
        f.close()
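
The deprecation message recommends .npz files over save_as_module; a minimal sketch of the same round trip with numpy.savez and numpy.load (file and array names are illustrative):

import numpy as np

weights = np.ones((2, 3))
offsets = np.arange(3)

# Save several named arrays into one portable .npz archive ...
np.savez("session.npz", weights=weights, offsets=offsets)

# ... and get them back by name, instead of injecting them into a module.
with np.load("session.npz") as data:
    restored = data["weights"]
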
Example #9
    if type(img) == Nifti1Image and ext in (".img", ".hdr"):
        klass = Nifti1Pair
    elif type(img) == Nifti2Image and ext in (".img", ".hdr"):
        klass = Nifti2Pair
    elif type(img) == Nifti1Pair and ext == ".nii":
        klass = Nifti1Image
    elif type(img) == Nifti2Pair and ext == ".nii":
        klass = Nifti2Image
    else:
        img_type = ext_map[ext]
        klass = class_map[img_type]["class"]
    converted = klass.from_image(img)
    converted.to_filename(filename)


np.deprecate_with_doc("Please use ``img.dataobj.get_unscaled()`` " "instead")


def read_img_data(img, prefer="scaled"):
    """ Read data from image associated with files

    We've deprecated this function and will remove it soon. If you want
    unscaled data, please use ``img.dataobj.get_unscaled()`` instead.  If you
    want scaled data, use ``img.get_data()`` (which will cache the loaded
    array) or ``np.array(img.dataobj)`` (which won't cache the array). If you
    want to load the data as for a modified header, save the image with the
    modified header, and reload.

    Parameters
    ----------
    img : ``SpatialImage``
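
For reference, the access patterns this docstring recommends instead of read_img_data look roughly like this (a sketch assuming nibabel is installed and example.nii exists):

import numpy as np
import nibabel as nib

img = nib.load("example.nii")

scaled = np.array(img.dataobj)             # scaled data, not cached by img
unscaled = img.dataobj.get_unscaled()      # raw on-disk values, no scaling
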
Example #10
        the same name as the module.
    """
    dir,filename = os.path.split(module.__file__)
    filebase = filename.split('.')[0]
    fn = os.path.join(dir, filebase)
    f = dumb_shelve.open(fn, "r")
    #exec( 'import ' + module.__name__)
    for i in f.keys():
        exec( 'import ' + module.__name__+ ';' +
              module.__name__+'.'+i + '=' + 'f["' + i + '"]')
#       print i, 'loaded...'
#   print 'done'

load = deprecate_with_doc("""
This is an internal function used with scipy.io.save_as_module

If you are saving arrays into a module, you should think about using
HDF5 or .npz files instead.
""")(_load)


def _create_module(file_name):
    """ Create the module file.
    """
    if not os.path.exists(file_name+'.py'): # don't clobber existing files
        module_name = os.path.split(file_name)[-1]
        f = open(file_name+'.py','w')
        f.write('import scipy.io.data_store as data_store\n')
        f.write('import %s\n' % module_name)
        f.write('data_store._load(%s)' % module_name)
        f.close()
Example #11
    if type(img) == Nifti1Image and ext in ('.img', '.hdr'):
        klass = Nifti1Pair
    elif type(img) == Nifti2Image and ext in ('.img', '.hdr'):
        klass = Nifti2Pair
    elif type(img) == Nifti1Pair and ext == '.nii':
        klass = Nifti1Image
    elif type(img) == Nifti2Pair and ext == '.nii':
        klass = Nifti2Image
    else:
        img_type = ext_map[ext]
        klass = class_map[img_type]['class']
    converted = klass.from_image(img)
    converted.to_filename(filename)


@np.deprecate_with_doc('Please use ``img.dataobj.get_unscaled()`` instead')
def read_img_data(img, prefer='scaled'):
    """ Read data from image associated with files

    We've deprecated this function and will remove it soon. If you want
    unscaled data, please use ``img.dataobj.get_unscaled()`` instead.  If you
    want scaled data, use ``img.get_data()`` (which will cache the loaded
    array) or ``np.array(img.dataobj)`` (which won't cache the array). If you
    want to load the data as for a modified header, save the image with the
    modified header, and reload.

    Parameters
    ----------
    img : ``SpatialImage``
Example #12
    >>> from sympy.abc import x, y, z
    >>> from nipy.fixes.sympy.utilities.lambdify import lambdify, implemented_function
    >>> from sympy import Function
    >>> f = implemented_function(Function('f'), lambda x : x+1)
    >>> lam_f = lambdify(x, f(x))
    >>> lam_f(4)
    5
    """
    # NIPY-EDIT: compatibility between sympy 0.6.x and 0.7.0. UndefinedFunction
    # is, er, not defined in sympy 0.6.x
    try:
        # Delayed import to avoid circular imports
        from sympy.core.function import UndefinedFunction as funcmaker
    except ImportError:
        from sympy import Function, FunctionClass

        funcmaker = lambda name: FunctionClass(Function, name)
    # if name, create anonymous function to hold implementation
    if isinstance(symfunc, basestring):
        symfunc = funcmaker(symfunc)
    # NIPY-EDIT: ends
    # We need to attach as a method because symfunc will be a class
    symfunc._imp_ = staticmethod(implementation)
    return symfunc


# This is a nipy compatibility wrapper
import numpy as np

aliased_function = np.deprecate_with_doc("Please use sympy.utilities.implemented_function")(implemented_function)
Example #13
from __future__ import annotations

from io import StringIO
from typing import Any

import numpy as np

FILE = StringIO()
AR: np.ndarray[Any, np.dtype[np.float64]] = np.arange(10).astype(np.float64)


def func(a: int) -> bool:
    ...


np.deprecate(func)
np.deprecate()

np.deprecate_with_doc("test")
np.deprecate_with_doc(None)

np.byte_bounds(AR)
np.byte_bounds(np.float64())

np.info(1, output=FILE)

np.source(np.interp, output=FILE)

np.lookfor("binary representation", output=FILE)
Example #14
    if type(img) == Nifti1Image and ext in ('.img', '.hdr'):
        klass = Nifti1Pair
    elif type(img) == Nifti2Image and ext in ('.img', '.hdr'):
        klass = Nifti2Pair
    elif type(img) == Nifti1Pair and ext == '.nii':
        klass = Nifti1Image
    elif type(img) == Nifti2Pair and ext == '.nii':
        klass = Nifti2Image
    else:
        img_type = ext_map[ext]
        klass = class_map[img_type]['class']
    converted = klass.from_image(img)
    converted.to_filename(filename)


@np.deprecate_with_doc('Please use ``img.dataobj.get_unscaled()`` '
                       'instead')
def read_img_data(img, prefer='scaled'):
    """ Read data from image associated with files

    We've deprecated this function and will remove it soon. If you want
    unscaled data, please use ``img.dataobj.get_unscaled()`` instead.  If you
    want scaled data, use ``img.get_data()`` (which will cache the loaded
    array) or ``np.array(img.dataobj)`` (which won't cache the array). If you
    want to load the data as for a modified header, save the image with the
    modified header, and reload.

    Parameters
    ----------
    img : ``SpatialImage``
       Image with valid image file in ``img.file_map``.  Unlike the
       ``img.get_data()`` method, this function returns the data read
Example #15
        if minus_ones == 0:
            pass
        elif minus_ones == 1:
            known_dimensions_size = -np.product(shape,axis=0) * dt.itemsize
            unknown_dimension_size, illegal = divmod(self.remaining_bytes(),
                                                     known_dimensions_size)
            if illegal:
                raise ValueError("unknown dimension doesn't match filesize")
            shape[shape.index(-1)] = unknown_dimension_size
        else:
            raise ValueError(
                "illegal -1 count; can only specify one unknown dimension")
        sz = dt.itemsize * np.product(shape)
        dt_endian = self._endian_from_dtype(dt)
        buf = self.file.read(sz)
        arr = np.ndarray(shape=shape,
                         dtype=dt,
                         buffer=buf,
                         order=order)
        if (not endian == 'dtype') and (dt_endian != endian):
            return arr.byteswap()
        return arr.copy()

npfile = np.deprecate_with_doc("""
You can achieve the same effect as npfile by using numpy.save and
numpy.load.

You can use memory-mapped arrays and data-types to map out a
file format for direct manipulation in NumPy.
""")(npfile)
Example #16
            shape = [shape]
        minus_ones = shape.count(-1)
        if minus_ones == 0:
            pass
        elif minus_ones == 1:
            known_dimensions_size = -np.product(shape, axis=0) * dt.itemsize
            unknown_dimension_size, illegal = divmod(self.remaining_bytes(),
                                                     known_dimensions_size)
            if illegal:
                raise ValueError("unknown dimension doesn't match filesize")
            shape[shape.index(-1)] = unknown_dimension_size
        else:
            raise ValueError(
                "illegal -1 count; can only specify one unknown dimension")
        sz = dt.itemsize * np.product(shape)
        dt_endian = self._endian_from_dtype(dt)
        buf = self.file.read(sz)
        arr = np.ndarray(shape=shape, dtype=dt, buffer=buf, order=order)
        if (not endian == 'dtype') and (dt_endian != endian):
            return arr.byteswap()
        return arr.copy()


npfile = np.deprecate_with_doc("""
You can achieve the same effect as npfile by using numpy.save and
numpy.load.

You can use memory-mapped arrays and data-types to map out a
file format for direct manipulation in NumPy.
""")(npfile)