Code example #1
def issubdtype(arg1, arg2):
    """
    Returns True if the first argument is a typecode lower/equal in the type hierarchy.

    Parameters
    ----------
    arg1, arg2 : dtype_like
        dtype or string representing a typecode.

    Returns
    -------
    out : bool

    See Also
    --------
    issubsctype, issubclass_
    numpy.core.numerictypes : Overview of numpy type hierarchy.

    Examples
    --------
    >>> np.issubdtype('S1', str)
    True
    >>> np.issubdtype(np.float64, np.float32)
    False

    """
    if issubclass_(arg2, generic):
        return issubclass(dtype(arg1).type, arg2)
    mro = dtype(arg2).type.mro()
    if len(mro) > 1:
        val = mro[1]
    else:
        val = mro[0]
    return issubclass(dtype(arg1).type, val)
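A minimal usage sketch of the public np.issubdtype entry point, which wraps this logic (the outputs assume the standard NumPy type hierarchy):

import numpy as np

# Every float64 is a floating-point type, but not an integer type.
print(np.issubdtype(np.float64, np.floating))  # True
print(np.issubdtype(np.float64, np.integer))   # False

# Type-code strings work too: 'i4' is a 32-bit signed integer.
print(np.issubdtype('i4', np.signedinteger))   # True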
Code example #2
File: _internal.py  Project: reinhack/numpy
def _add_trailing_padding(value, padding):
    """Inject the specified number of padding bytes at the end of a dtype"""
    from numpy.core.multiarray import dtype

    if value.fields is None:
        vfields = {'f0': (value, 0)}
    else:
        vfields = dict(value.fields)

    if value.names and value.names[-1] == '' and \
           value[''].char == 'V':
        # A trailing padding field is already present
        vfields[''] = ('V%d' % (vfields[''][0].itemsize + padding),
                       vfields[''][1])
        value = dtype(vfields)
    else:
        # Get a free name for the padding field
        j = 0
        while True:
            name = 'pad%d' % j
            if name not in vfields:
                vfields[name] = ('V%d' % padding, value.itemsize)
                break
            j += 1

        value = dtype(vfields)
        if '' not in vfields:
            # Strip out the name of the padding field
            names = list(value.names)
            names[-1] = ''
            value.names = tuple(names)
    return value
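What trailing padding does to a dtype can be illustrated with public dtype features alone; this is a small sketch, separate from the private helper above:

import numpy as np

# A packed struct: int32 + int8 = 5 bytes.
packed = np.dtype({'names': ['a', 'b'],
                   'formats': ['i4', 'i1'],
                   'offsets': [0, 4]})
print(packed.itemsize)  # 5

# The same fields with 3 trailing padding bytes, declared via 'itemsize'.
padded = np.dtype({'names': ['a', 'b'],
                   'formats': ['i4', 'i1'],
                   'offsets': [0, 4],
                   'itemsize': 8})
print(padded.itemsize)  # 8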
Code example #3
def _makenames_list(adict):
    from multiarray import dtype
    allfields = []
    fnames = adict.keys()
    for fname in fnames:
        obj = adict[fname]
        n = len(obj)
        if not isinstance(obj, tuple) or n not in [2, 3]:
            raise ValueError, "entry not a 2- or 3- tuple"
        if (n > 2) and (obj[2] == fname):
            continue
        num = int(obj[1])
        if (num < 0):
            raise ValueError, "invalid offset."
        format = dtype(obj[0])
        if (format.itemsize == 0):
            raise ValueError, "all itemsizes must be fixed."
        if (n > 2):
            title = obj[2]
        else:
            title = None
        allfields.append((fname, format, num, title))
    # sort by offsets
    allfields.sort(lambda x, y: cmp(x[2], y[2]))
    names = [x[0] for x in allfields]
    formats = [x[1] for x in allfields]
    offsets = [x[2] for x in allfields]
    titles = [x[3] for x in allfields]

    return names, formats, offsets, titles
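For reference, a sketch of the input shape this helper parses: a dict mapping each field name to (format, offset) or (format, offset, title), which the public np.dtype constructor also accepts:

import numpy as np

fields = {'x': (np.float64, 0),
          'y': (np.int32, 8, 'y coordinate')}
dt = np.dtype(fields)
print(dt.names)        # ('x', 'y') -- fields come back sorted by offset
print(dt.fields['y'])  # (dtype('int32'), 8, 'y coordinate')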
Code example #4
def _usefields(adict, align):
    from multiarray import dtype
    try:
        names = adict[-1]
    except KeyError:
        names = None
    if names is None:
        names, formats, offsets, titles = _makenames_list(adict)
    else:
        formats = []
        offsets = []
        titles = []
        for name in names:
            res = adict[name]
            formats.append(res[0])
            offsets.append(res[1])
            if (len(res) > 2):
                titles.append(res[2])
            else:
                titles.append(None)

    return dtype(
        {
            "names": names,
            "formats": formats,
            "offsets": offsets,
            "titles": titles
        }, align)
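The parallel-lists dict built here is itself a documented np.dtype input; a small sketch of the equivalent public call:

import numpy as np

dt = np.dtype({'names': ['re', 'im'],
               'formats': [np.float32, np.float32],
               'offsets': [0, 4],
               'titles': ['real part', 'imaginary part']})
print(dt.itemsize)      # 8
print(dt.fields['re'])  # (dtype('float32'), 0, 'real part')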
Code example #5
def _makenames_list(adict, align):
    allfields = []
    fnames = list(adict.keys())
    for fname in fnames:
        obj = adict[fname]
        n = len(obj)
        if not isinstance(obj, tuple) or n not in [2, 3]:
            raise ValueError("entry not a 2- or 3- tuple")
        if (n > 2) and (obj[2] == fname):
            continue
        num = int(obj[1])
        if (num < 0):
            raise ValueError("invalid offset.")
        format = dtype(obj[0], align=align)
        if (n > 2):
            title = obj[2]
        else:
            title = None
        allfields.append((fname, format, num, title))
    # sort by offsets
    allfields.sort(key=lambda x: x[2])
    names = [x[0] for x in allfields]
    formats = [x[1] for x in allfields]
    offsets = [x[2] for x in allfields]
    titles = [x[3] for x in allfields]

    return names, formats, offsets, titles
Code example #6
File: _internal.py  Project: 8848/Pymol-script-repo
def _usefields(adict, align):
    from multiarray import dtype
    try:
        names = adict[-1]
    except KeyError:
        names = None
    if names is None:
        names, formats, offsets, titles = _makenames_list(adict)
    else:
        formats = []
        offsets = []
        titles = []
        for name in names:
            res = adict[name]
            formats.append(res[0])
            offsets.append(res[1])
            if (len(res) > 2):
                titles.append(res[2])
            else:
                titles.append(None)

    return dtype({"names" : names,
                  "formats" : formats,
                  "offsets" : offsets,
                  "titles" : titles}, align)
Code example #7
File: _internal.py  Project: 8848/Pymol-script-repo
def _makenames_list(adict):
    from multiarray import dtype
    allfields = []
    fnames = adict.keys()
    for fname in fnames:
        obj = adict[fname]
        n = len(obj)
        if not isinstance(obj, tuple) or n not in [2,3]:
            raise ValueError, "entry not a 2- or 3- tuple"
        if (n > 2) and (obj[2] == fname):
            continue
        num = int(obj[1])
        if (num < 0):
            raise ValueError, "invalid offset."
        format = dtype(obj[0])
        if (format.itemsize == 0):
            raise ValueError, "all itemsizes must be fixed."
        if (n > 2):
            title = obj[2]
        else:
            title = None
        allfields.append((fname, format, num, title))
    # sort by offsets
    allfields.sort(lambda x,y: cmp(x[2],y[2]))
    names = [x[0] for x in allfields]
    formats = [x[1] for x in allfields]
    offsets = [x[2] for x in allfields]
    titles = [x[3] for x in allfields]

    return names, formats, offsets, titles
Code example #8
def obj2sctype(rep, default=None):
    """
    Return the scalar dtype or NumPy equivalent of Python type of an object.

    Parameters
    ----------
    rep : any
        The object of which the type is returned.
    default : any, optional
        If given, this is returned for objects whose types can not be
        determined. If not given, None is returned for those objects.

    Returns
    -------
    dtype : dtype or Python type
        The data type of `rep`.

    See Also
    --------
    sctype2char, issctype, issubsctype, issubdtype, maximum_sctype

    Examples
    --------
    >>> np.obj2sctype(np.int32)
    <type 'numpy.int32'>
    >>> np.obj2sctype(np.array([1., 2.]))
    <type 'numpy.float64'>
    >>> np.obj2sctype(np.array([1.j]))
    <type 'numpy.complex128'>

    >>> np.obj2sctype(dict)
    <type 'numpy.object_'>
    >>> np.obj2sctype('string')
    <type 'numpy.string_'>

    >>> np.obj2sctype(1, default=list)
    <type 'list'>

    """
    try:
        if issubclass(rep, generic):
            return rep
    except TypeError:
        pass
    if isinstance(rep, dtype):
        return rep.type
    if isinstance(rep, type):
        return _python_type(rep)
    if isinstance(rep, ndarray):
        return rep.dtype.type
    try:
        res = dtype(rep)
    except:
        return default
    return res.type
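A usage sketch, assuming a NumPy version that still exposes np.obj2sctype (it was removed in NumPy 2.0):

import numpy as np

print(np.obj2sctype(np.array([1.0, 2.0])))  # numpy.float64
print(np.obj2sctype('float64'))             # numpy.float64
print(np.obj2sctype(1, default=list))       # list -- the fallback path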
Code example #9
def _set_array_types():
    ibytes = [1, 2, 4, 8, 16, 32, 64]
    fbytes = [2, 4, 8, 10, 12, 16, 32, 64]
    for bytes in ibytes:
        bits = 8 * bytes
        _add_array_type('int', bits)
        _add_array_type('uint', bits)
    for bytes in fbytes:
        bits = 8 * bytes
        _add_array_type('float', bits)
        _add_array_type('complex', 2 * bits)
    _gi = dtype('p')
    if _gi.type not in sctypes['int']:
        indx = 0
        sz = _gi.itemsize
        _lst = sctypes['int']
        while (indx < len(_lst) and sz >= _lst[indx](0).itemsize):
            indx += 1
        sctypes['int'].insert(indx, _gi.type)
        sctypes['uint'].insert(indx, dtype('P').type)
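The table this function fills can be inspected on older NumPy builds that still expose np.sctypes (removed in NumPy 2.0); the exact contents are platform-dependent:

import numpy as np

print(np.sctypes['int'])  # e.g. [int8, int16, int32, int64] on 64-bit builds
# 'p' is the pointer-sized index type the code inserts into the table:
print(np.dtype('p') == np.dtype(np.intp))   # True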
Code example #10
def _can_coerce_all(dtypelist, start=0):
    N = len(dtypelist)
    if N == 0:
        return None
    if N == 1:
        return dtypelist[0]
    thisind = start
    while thisind < __len_test_types:
        newdtype = dtype(__test_types[thisind])
        numcoerce = len([x for x in dtypelist if newdtype >= x])
        if numcoerce == N:
            return newdtype
        thisind += 1
    return None
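The `newdtype >= x` test above relies on dtype ordering, which expresses safe castability; a sketch of that operator, plus np.result_type, the public way to coerce a whole list:

import numpy as np

print(np.dtype('f8') >= np.dtype('i4'))  # True: int32 casts safely to float64
print(np.dtype('i4') >= np.dtype('f8'))  # False

print(np.result_type(np.int64, np.float32))  # float64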
Code example #11
def _add_trailing_padding(value, padding):
    """Inject the specified number of padding bytes at the end of a dtype"""
    if value.fields is None:
        field_spec = dict(names=['f0'],
                          formats=[value],
                          offsets=[0],
                          itemsize=value.itemsize)
    else:
        fields = value.fields
        names = value.names
        field_spec = dict(names=names,
                          formats=[fields[name][0] for name in names],
                          offsets=[fields[name][1] for name in names],
                          itemsize=value.itemsize)

    field_spec['itemsize'] += padding
    return dtype(field_spec)
Code example #12
File: _internal.py  Project: 8848/Pymol-script-repo
def _getintp_ctype():
    from multiarray import dtype
    val = _getintp_ctype.cache
    if val is not None:
        return val
    char = dtype('p').char
    import ctypes
    if (char == 'i'):
        val = ctypes.c_int
    elif char == 'l':
        val = ctypes.c_long
    elif char == 'q':
        val = ctypes.c_longlong
    else:
        val = ctypes.c_long
    _getintp_ctype.cache = val
    return val
Code example #13
def __init__(self, data):
    if data.dtype.kind == 'm':
        nat_value = array(['NaT'], dtype=data.dtype)[0]
        int_dtype = dtype(data.dtype.byteorder + 'i8')
        int_view = data.view(int_dtype)
        v = int_view[not_equal(int_view, nat_value.view(int_dtype))]
        if len(v) > 0:
            # Max str length of non-NaT elements
            max_str_len = max(len(str(maximum.reduce(v))),
                              len(str(minimum.reduce(v))))
        else:
            max_str_len = 0
        if len(v) < len(data):
            # data contains a NaT
            max_str_len = max(max_str_len, 5)
        self.format = '%' + str(max_str_len) + 'd'
        self._nat = "'NaT'".rjust(max_str_len)
Code example #14
def _getintp_ctype():
    val = _getintp_ctype.cache
    if val is not None:
        return val
    if ctypes is None:
        import numpy as np
        val = dummy_ctype(np.intp)
    else:
        char = dtype('p').char
        if (char == 'i'):
            val = ctypes.c_int
        elif char == 'l':
            val = ctypes.c_long
        elif char == 'q':
            val = ctypes.c_longlong
        else:
            val = ctypes.c_long
    _getintp_ctype.cache = val
    return val
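What the cached lookup resolves, sketched with public APIs (the character and sizes are platform-dependent):

import ctypes
import numpy as np

char = np.dtype('p').char
print(char)  # e.g. 'l' on 64-bit Linux, 'q' on 64-bit Windows
# np.intp is pointer-sized on common platforms:
print(np.dtype('p').itemsize == ctypes.sizeof(ctypes.c_void_p))  # True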
Code example #15
File: _internal.py  Project: dwcrandell/GeneDesigner
def _getintp_ctype():
    from multiarray import dtype

    val = _getintp_ctype.cache
    if val is not None:
        return val
    char = dtype("p").char
    import ctypes

    if char == "i":
        val = ctypes.c_int
    elif char == "l":
        val = ctypes.c_long
    elif char == "q":
        val = ctypes.c_longlong
    else:
        val = ctypes.c_long
    _getintp_ctype.cache = val
    return val
Code example #16
def _dtype_from_pep3118(spec, byteorder='@', is_subdtype=False):
    from numpy.core.multiarray import dtype

    fields = {}
    offset = 0
    explicit_name = False
    this_explicit_name = False
    common_alignment = 1
    is_padding = False
    last_offset = 0

    dummy_name_index = [0]

    def next_dummy_name():
        dummy_name_index[0] += 1

    def get_dummy_name():
        while True:
            name = 'f%d' % dummy_name_index[0]
            if name not in fields:
                return name
            next_dummy_name()

    # Parse spec
    while spec:
        value = None

        # End of structure, bail out to upper level
        if spec[0] == '}':
            spec = spec[1:]
            break

        # Sub-arrays (1)
        shape = None
        if spec[0] == '(':
            j = spec.index(')')
            shape = tuple(map(int, spec[1:j].split(',')))
            spec = spec[j + 1:]

        # Byte order
        if spec[0] in ('@', '=', '<', '>', '^', '!'):
            byteorder = spec[0]
            if byteorder == '!':
                byteorder = '>'
            spec = spec[1:]

        # Byte order characters also control native vs. standard type sizes
        if byteorder in ('@', '^'):
            type_map = _pep3118_native_map
            type_map_chars = _pep3118_native_typechars
        else:
            type_map = _pep3118_standard_map
            type_map_chars = _pep3118_standard_typechars

        # Item sizes
        itemsize = 1
        if spec[0].isdigit():
            j = 1
            for j in xrange(1, len(spec)):
                if not spec[j].isdigit():
                    break
            itemsize = int(spec[:j])
            spec = spec[j:]

        # Data types
        is_padding = False

        if spec[:2] == 'T{':
            value, spec, align, next_byteorder = _dtype_from_pep3118(
                spec[2:], byteorder=byteorder, is_subdtype=True)
        elif spec[0] in type_map_chars:
            next_byteorder = byteorder
            if spec[0] == 'Z':
                j = 2
            else:
                j = 1
            typechar = spec[:j]
            spec = spec[j:]
            is_padding = (typechar == 'x')
            dtypechar = type_map[typechar]
            if dtypechar in 'USV':
                dtypechar += '%d' % itemsize
                itemsize = 1
            numpy_byteorder = {'@': '=', '^': '='}.get(byteorder, byteorder)
            value = dtype(numpy_byteorder + dtypechar)
            align = value.alignment
        else:
            raise ValueError("Unknown PEP 3118 data type specifier %r" % spec)

        #
        # Native alignment may require padding
        #
        # Here we assume that the presence of a '@' character implicitly implies
        # that the start of the array is *already* aligned.
        #
        extra_offset = 0
        if byteorder == '@':
            start_padding = (-offset) % align
            intra_padding = (-value.itemsize) % align

            offset += start_padding

            if intra_padding != 0:
                if itemsize > 1 or (shape is not None and _prod(shape) > 1):
                    # Inject internal padding to the end of the sub-item
                    value = _add_trailing_padding(value, intra_padding)
                else:
                    # We can postpone the injection of internal padding,
                    # as the item appears at most once
                    extra_offset += intra_padding

            # Update common alignment
            common_alignment = (align * common_alignment /
                                _gcd(align, common_alignment))

        # Convert itemsize to sub-array
        if itemsize != 1:
            value = dtype((value, (itemsize, )))

        # Sub-arrays (2)
        if shape is not None:
            value = dtype((value, shape))

        # Field name
        this_explicit_name = False
        if spec and spec.startswith(':'):
            i = spec[1:].index(':') + 1
            name = spec[1:i]
            spec = spec[i + 1:]
            explicit_name = True
            this_explicit_name = True
        else:
            name = get_dummy_name()

        if not is_padding or this_explicit_name:
            if name in fields:
                raise RuntimeError(
                    "Duplicate field name '%s' in PEP3118 format" % name)
            fields[name] = (value, offset)
            last_offset = offset
            if not this_explicit_name:
                next_dummy_name()

        byteorder = next_byteorder

        offset += value.itemsize
        offset += extra_offset

    # Check if this was a simple 1-item type
    if len(fields.keys()) == 1 and not explicit_name and fields['f0'][1] == 0 \
           and not is_subdtype:
        ret = fields['f0'][0]
    else:
        ret = dtype(fields)

    # Trailing padding must be explicitly added
    padding = offset - ret.itemsize
    if byteorder == '@':
        padding += (-offset) % common_alignment
    if is_padding and not this_explicit_name:
        ret = _add_trailing_padding(ret, padding)

    # Finished
    if is_subdtype:
        return ret, spec, common_alignment, byteorder
    else:
        return ret
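The usual entry point into this parser is an object exporting a PEP 3118 buffer; a sketch using a ctypes struct (the exact format string and any padding fields are platform-dependent):

import ctypes
import numpy as np

class Point(ctypes.Structure):
    _fields_ = [('x', ctypes.c_int32), ('y', ctypes.c_double)]

pts = (Point * 2)()
m = memoryview(pts)
print(m.format)        # a 'T{...}' struct spec, platform-dependent
arr = np.asarray(m)    # conversion goes through the PEP 3118 parser
print(arr.dtype)       # a structured dtype with fields 'x' and 'y'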
Code example #17
File: _internal.py  Project: reinhack/numpy
def _dtype_from_pep3118(spec, byteorder='@', is_subdtype=False):
    from numpy.core.multiarray import dtype

    fields = {}
    offset = 0
    explicit_name = False
    this_explicit_name = False
    common_alignment = 1
    is_padding = False
    last_offset = 0

    dummy_name_index = [0]
    def next_dummy_name():
        dummy_name_index[0] += 1
    def get_dummy_name():
        while True:
            name = 'f%d' % dummy_name_index[0]
            if name not in fields:
                return name
            next_dummy_name()

    # Parse spec
    while spec:
        value = None

        # End of structure, bail out to upper level
        if spec[0] == '}':
            spec = spec[1:]
            break

        # Sub-arrays (1)
        shape = None
        if spec[0] == '(':
            j = spec.index(')')
            shape = tuple(map(int, spec[1:j].split(',')))
            spec = spec[j+1:]

        # Byte order
        if spec[0] in ('@', '=', '<', '>', '^', '!'):
            byteorder = spec[0]
            if byteorder == '!':
                byteorder = '>'
            spec = spec[1:]

        # Byte order characters also control native vs. standard type sizes
        if byteorder in ('@', '^'):
            type_map = _pep3118_native_map
            type_map_chars = _pep3118_native_typechars
        else:
            type_map = _pep3118_standard_map
            type_map_chars = _pep3118_standard_typechars

        # Item sizes
        itemsize = 1
        if spec[0].isdigit():
            j = 1
            for j in xrange(1, len(spec)):
                if not spec[j].isdigit():
                    break
            itemsize = int(spec[:j])
            spec = spec[j:]

        # Data types
        is_padding = False

        if spec[:2] == 'T{':
            value, spec, align, next_byteorder = _dtype_from_pep3118(
                spec[2:], byteorder=byteorder, is_subdtype=True)
        elif spec[0] in type_map_chars:
            next_byteorder = byteorder
            if spec[0] == 'Z':
                j = 2
            else:
                j = 1
            typechar = spec[:j]
            spec = spec[j:]
            is_padding = (typechar == 'x')
            dtypechar = type_map[typechar]
            if dtypechar in 'USV':
                dtypechar += '%d' % itemsize
                itemsize = 1
            numpy_byteorder = {'@': '=', '^': '='}.get(byteorder, byteorder)
            value = dtype(numpy_byteorder + dtypechar)
            align = value.alignment
        else:
            raise ValueError("Unknown PEP 3118 data type specifier %r" % spec)

        #
        # Native alignment may require padding
        #
        # Here we assume that the presence of a '@' character implicitly implies
        # that the start of the array is *already* aligned.
        #
        extra_offset = 0
        if byteorder == '@':
            start_padding = (-offset) % align
            intra_padding = (-value.itemsize) % align

            offset += start_padding

            if intra_padding != 0:
                if itemsize > 1 or (shape is not None and _prod(shape) > 1):
                    # Inject internal padding to the end of the sub-item
                    value = _add_trailing_padding(value, intra_padding)
                else:
                    # We can postpone the injection of internal padding,
                    # as the item appears at most once
                    extra_offset += intra_padding

            # Update common alignment
            common_alignment = (align*common_alignment
                                / _gcd(align, common_alignment))

        # Convert itemsize to sub-array
        if itemsize != 1:
            value = dtype((value, (itemsize,)))

        # Sub-arrays (2)
        if shape is not None:
            value = dtype((value, shape))

        # Field name
        this_explicit_name = False
        if spec and spec.startswith(':'):
            i = spec[1:].index(':') + 1
            name = spec[1:i]
            spec = spec[i+1:]
            explicit_name = True
            this_explicit_name = True
        else:
            name = get_dummy_name()

        if not is_padding or this_explicit_name:
            if name in fields:
                raise RuntimeError("Duplicate field name '%s' in PEP3118 format"
                                   % name)
            fields[name] = (value, offset)
            last_offset = offset
            if not this_explicit_name:
                next_dummy_name()

        byteorder = next_byteorder

        offset += value.itemsize
        offset += extra_offset

    # Check if this was a simple 1-item type
    if len(fields.keys()) == 1 and not explicit_name and fields['f0'][1] == 0 \
           and not is_subdtype:
        ret = fields['f0'][0]
    else:
        ret = dtype(fields)

    # Trailing padding must be explicitly added
    padding = offset - ret.itemsize
    if byteorder == '@':
        padding += (-offset) % common_alignment
    if is_padding and not this_explicit_name:
        ret = _add_trailing_padding(ret, padding)

    # Finished
    if is_subdtype:
        return ret, spec, common_alignment, byteorder
    else:
        return ret
Code example #18
def loadtxt(fname, dtype=float, comments='#', delimiter=None, converters=None,
            skiprows=0, usecols=None, unpack=False):
    """
    Load ASCII data from fname into an array and return the array.

    The data must be regular: the same number of values in every row.

    fname can be a filename or a file handle.  Support for gzipped files is
    automatic, if the filename ends in .gz

    See scipy.loadmat to read and write matfiles.

    Example usage:

      X = loadtxt('test.dat')  # data in two columns
      t = X[:,0]
      y = X[:,1]

    Alternatively, you can do the same with "unpack"; see below

      X = loadtxt('test.dat')    # a matrix of data
      x = loadtxt('test.dat')    # a single column of data


    dtype - the data-type of the resulting array.  If this is a
    record data-type, the resulting array will be 1-d and each row will
    be interpreted as an element of the array. The number of columns
    used must match the number of fields in the data-type in this case.

    comments - the character used to indicate the start of a comment
    in the file

    delimiter is a string-like character used to separate values in the
    file. If delimiter is unspecified or none, any whitespace string is
    a separator.

    converters, if not None, is a dictionary mapping column number to
    a function that will convert that column to a float.  Eg, if
    column 0 is a date string: converters={0:datestr2num}

    skiprows is the number of rows from the top to skip

    usecols, if not None, is a sequence of integer column indexes to
    extract where 0 is the first column, eg usecols=(1,4,5) to extract
    just the 2nd, 5th and 6th columns

    unpack, if True, will transpose the matrix allowing you to unpack
    into named arguments on the left hand side

        t, y = loadtxt('test.dat', unpack=True)   # for two-column data
        x, y, z = loadtxt('somefile.dat', usecols=(3,5,7), unpack=True)

    """

    if _string_like(fname):
        if fname.endswith('.gz'):
            import gzip
            fh = gzip.open(fname)
        else:
            fh = file(fname)
    elif hasattr(fname, 'seek'):
        fh = fname
    else:
        raise ValueError('fname must be a string or file handle')
    X = []

    dtype = multiarray.dtype(dtype)
    defconv = _getconv(dtype)
    converterseq = None    
    if converters is None:
        converters = {}
        if dtype.names is not None:
            converterseq = [_getconv(dtype.fields[name][0]) \
                            for name in dtype.names]
            
    for i,line in enumerate(fh):
        if i<skiprows: continue
        line = line[:line.find(comments)].strip()
        if not len(line): continue
        vals = line.split(delimiter)
        if converterseq is None:
            converterseq = [converters.get(j,defconv) \
                            for j in xrange(len(vals))]
        if usecols is not None:
            row = [converterseq[j](vals[j]) for j in usecols]
        else:
            row = [converterseq[j](val) for j,val in enumerate(vals)]
        if dtype.names is not None:
            row = tuple(row)
        X.append(row)

    X = array(X, dtype)
    r,c = X.shape
    if r==1 or c==1:
        X.shape = max([r,c]),
    if unpack: return X.T
    else:  return X
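Typical calls, assuming a hypothetical whitespace-delimited file 'test.dat' with two numeric columns:

import numpy as np

t, y = np.loadtxt('test.dat', unpack=True)

# With a record dtype, each row becomes one structured element.
dt = np.dtype([('time', 'f8'), ('value', 'f8')])
rows = np.loadtxt('test.dat', dtype=dt)
print(rows['time'])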
Code example #19
def __dtype_from_pep3118(stream, is_subdtype):
    field_spec = dict(names=[], formats=[], offsets=[], itemsize=0)
    offset = 0
    common_alignment = 1
    is_padding = False

    # Parse spec
    while stream:
        value = None

        # End of structure, bail out to upper level
        if stream.consume('}'):
            break

        # Sub-arrays (1)
        shape = None
        if stream.consume('('):
            shape = stream.consume_until(')')
            shape = tuple(map(int, shape.split(',')))

        # Byte order
        if stream.next in ('@', '=', '<', '>', '^', '!'):
            byteorder = stream.advance(1)
            if byteorder == '!':
                byteorder = '>'
            stream.byteorder = byteorder

        # Byte order characters also control native vs. standard type sizes
        if stream.byteorder in ('@', '^'):
            type_map = _pep3118_native_map
            type_map_chars = _pep3118_native_typechars
        else:
            type_map = _pep3118_standard_map
            type_map_chars = _pep3118_standard_typechars

        # Item sizes
        itemsize_str = stream.consume_until(lambda c: not c.isdigit())
        if itemsize_str:
            itemsize = int(itemsize_str)
        else:
            itemsize = 1

        # Data types
        is_padding = False

        if stream.consume('T{'):
            value, align = __dtype_from_pep3118(stream, is_subdtype=True)
        elif stream.next in type_map_chars:
            if stream.next == 'Z':
                typechar = stream.advance(2)
            else:
                typechar = stream.advance(1)

            is_padding = (typechar == 'x')
            dtypechar = type_map[typechar]
            if dtypechar in 'USV':
                dtypechar += '%d' % itemsize
                itemsize = 1
            numpy_byteorder = {
                '@': '=',
                '^': '='
            }.get(stream.byteorder, stream.byteorder)
            value = dtype(numpy_byteorder + dtypechar)
            align = value.alignment
        else:
            raise ValueError("Unknown PEP 3118 data type specifier %r" %
                             stream.s)

        #
        # Native alignment may require padding
        #
        # Here we assume that the presence of a '@' character implicitly implies
        # that the start of the array is *already* aligned.
        #
        extra_offset = 0
        if stream.byteorder == '@':
            start_padding = (-offset) % align
            intra_padding = (-value.itemsize) % align

            offset += start_padding

            if intra_padding != 0:
                if itemsize > 1 or (shape is not None and _prod(shape) > 1):
                    # Inject internal padding to the end of the sub-item
                    value = _add_trailing_padding(value, intra_padding)
                else:
                    # We can postpone the injection of internal padding,
                    # as the item appears at most once
                    extra_offset += intra_padding

            # Update common alignment
            common_alignment = _lcm(align, common_alignment)

        # Convert itemsize to sub-array
        if itemsize != 1:
            value = dtype((value, (itemsize, )))

        # Sub-arrays (2)
        if shape is not None:
            value = dtype((value, shape))

        # Field name
        if stream.consume(':'):
            name = stream.consume_until(':')
        else:
            name = None

        if not (is_padding and name is None):
            if name is not None and name in field_spec['names']:
                raise RuntimeError(
                    "Duplicate field name '%s' in PEP3118 format" % name)
            field_spec['names'].append(name)
            field_spec['formats'].append(value)
            field_spec['offsets'].append(offset)

        offset += value.itemsize
        offset += extra_offset

        field_spec['itemsize'] = offset

    # extra final padding for aligned types
    if stream.byteorder == '@':
        field_spec['itemsize'] += (-offset) % common_alignment

    # Check if this was a simple 1-item type, and unwrap it
    if (field_spec['names'] == [None] and field_spec['offsets'][0] == 0
            and field_spec['itemsize'] == field_spec['formats'][0].itemsize
            and not is_subdtype):
        ret = field_spec['formats'][0]
    else:
        _fix_names(field_spec)
        ret = dtype(field_spec)

    # Finished
    return ret, common_alignment
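The '@' alignment handling above mirrors what align=True does in the public dtype constructor; a sketch of the difference it makes (offsets and sizes assume typical x86-64 alignment):

import numpy as np

aligned = np.dtype([('a', 'i1'), ('b', 'f8')], align=True)
packed = np.dtype([('a', 'i1'), ('b', 'f8')], align=False)
print(aligned.fields['b'][1], aligned.itemsize)  # 8 16: padding inserted
print(packed.fields['b'][1], packed.itemsize)    # 1 9: no padding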
Code example #20
File: numeric.py  Project: tanmoydeb07/radicalspam
def loadtxt(fname,
            dtype=float,
            comments='#',
            delimiter=None,
            converters=None,
            skiprows=0,
            usecols=None,
            unpack=False):
    """
    Load ASCII data from fname into an array and return the array.

    The data must be regular: the same number of values in every row.

    fname can be a filename or a file handle.  Support for gzipped files is
    automatic, if the filename ends in .gz

    See scipy.loadmat to read and write matfiles.

    Example usage:

      X = loadtxt('test.dat')  # data in two columns
      t = X[:,0]
      y = X[:,1]

    Alternatively, you can do the same with "unpack"; see below

      X = loadtxt('test.dat')    # a matrix of data
      x = loadtxt('test.dat')    # a single column of data


    dtype - the data-type of the resulting array.  If this is a
    record data-type, the resulting array will be 1-d and each row will
    be interpreted as an element of the array. The number of columns
    used must match the number of fields in the data-type in this case.

    comments - the character used to indicate the start of a comment
    in the file

    delimiter is a string-like character used to separate values in the
    file. If delimiter is unspecified or none, any whitespace string is
    a separator.

    converters, if not None, is a dictionary mapping column number to
    a function that will convert that column to a float.  Eg, if
    column 0 is a date string: converters={0:datestr2num}

    skiprows is the number of rows from the top to skip

    usecols, if not None, is a sequence of integer column indexes to
    extract where 0 is the first column, eg usecols=(1,4,5) to extract
    just the 2nd, 5th and 6th columns

    unpack, if True, will transpose the matrix allowing you to unpack
    into named arguments on the left hand side

        t, y = loadtxt('test.dat', unpack=True)   # for two-column data
        x, y, z = loadtxt('somefile.dat', usecols=(3,5,7), unpack=True)

    """

    if _string_like(fname):
        if fname.endswith('.gz'):
            import gzip
            fh = gzip.open(fname)
        else:
            fh = file(fname)
    elif hasattr(fname, 'seek'):
        fh = fname
    else:
        raise ValueError('fname must be a string or file handle')
    X = []

    dtype = multiarray.dtype(dtype)
    defconv = _getconv(dtype)
    converterseq = None
    if converters is None:
        converters = {}
        if dtype.names is not None:
            converterseq = [_getconv(dtype.fields[name][0]) \
                            for name in dtype.names]

    for i, line in enumerate(fh):
        if i < skiprows: continue
        line = line[:line.find(comments)].strip()
        if not len(line): continue
        vals = line.split(delimiter)
        if converterseq is None:
            converterseq = [converters.get(j,defconv) \
                            for j in xrange(len(vals))]
        if usecols is not None:
            row = [converterseq[j](vals[j]) for j in usecols]
        else:
            row = [converterseq[j](val) for j, val in enumerate(vals)]
        if dtype.names is not None:
            row = tuple(row)
        X.append(row)

    X = array(X, dtype)
    r, c = X.shape
    if r == 1 or c == 1:
        X.shape = max([r, c]),
    if unpack: return X.T
    else: return X
Code example #21
def find_common_type(array_types, scalar_types):
    """
    Determine common type following standard coercion rules.

    Parameters
    ----------
    array_types : sequence
        A list of dtypes or dtype convertible objects representing arrays.
    scalar_types : sequence
        A list of dtypes or dtype convertible objects representing scalars.

    Returns
    -------
    datatype : dtype
        The common data type, which is the maximum of `array_types` ignoring
        `scalar_types`, unless the maximum of `scalar_types` is of a
        different kind (`dtype.kind`). If the kind is not understood, then
        None is returned.

    See Also
    --------
    dtype, common_type, can_cast, mintypecode

    Examples
    --------
    >>> np.find_common_type([], [np.int64, np.float32, np.complex])
    dtype('complex128')
    >>> np.find_common_type([np.int64, np.float32], [])
    dtype('float64')

    The standard casting rules ensure that a scalar cannot up-cast an
    array unless the scalar is of a fundamentally different kind of data
    (i.e., under a different branch of the data type hierarchy) than
    the array:

    >>> np.find_common_type([np.float32], [np.int64, np.float64])
    dtype('float32')

    Complex is of a different type, so it up-casts the float in the
    `array_types` argument:

    >>> np.find_common_type([np.float32], [np.complex])
    dtype('complex128')

    Type specifier strings are convertible to dtypes and can therefore
    be used instead of dtypes:

    >>> np.find_common_type(['f4', 'f4', 'i4'], ['c8'])
    dtype('complex128')

    """
    array_types = [dtype(x) for x in array_types]
    scalar_types = [dtype(x) for x in scalar_types]

    maxa = _can_coerce_all(array_types)
    maxsc = _can_coerce_all(scalar_types)

    if maxa is None:
        return maxsc

    if maxsc is None:
        return maxa

    try:
        index_a = _kind_list.index(maxa.kind)
        index_sc = _kind_list.index(maxsc.kind)
    except ValueError:
        return None

    if index_sc > index_a:
        return _find_common_coerce(maxsc, maxa)
    else:
        return maxa
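A usage sketch, assuming a NumPy version that still provides np.find_common_type (deprecated in 1.25 and removed in 2.0; np.result_type is the modern replacement):

import numpy as np

# A scalar of the same kind cannot widen the array dtype...
print(np.find_common_type([np.float32], [np.float64]))     # float32
# ...but a scalar of a different kind can.
print(np.find_common_type([np.float32], [np.complex128]))  # complex128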