Example #1
def _mrreconstruct(subtype, baseclass, baseshape, basetype,):
    """
    Build a new MaskedArray from the information stored in a pickle.

    """
    _data = ndarray.__new__(baseclass, baseshape, basetype).view(subtype)
    _mask = ndarray.__new__(ndarray, baseshape, 'b1')
    return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,)
Example #2
def _mrreconstruct(subtype, baseclass, baseshape, basetype,):
    """
    Build a new MaskedArray from the information stored in a pickle.

    """
    _data = ndarray.__new__(baseclass, baseshape, basetype).view(subtype)
    _mask = ndarray.__new__(ndarray, baseshape, 'b1')
    return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,)
Example #3
def _mrreconstruct(subtype, baseclass, baseshape, basetype,):
    """Internal function that builds a new MaskedArray from the
    information stored in a pickle.

    """
    _data = ndarray.__new__(baseclass, baseshape, basetype).view(subtype)
#    _data._mask = ndarray.__new__(ndarray, baseshape, 'b1')
#    return _data
    _mask = ndarray.__new__(ndarray, baseshape, 'b1')
    return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,)
Example #4
def _mrreconstruct(subtype, baseclass, baseshape, basetype,):
    """Internal function that builds a new MaskedArray from the
    information stored in a pickle.

    """
    _data = ndarray.__new__(baseclass, baseshape, basetype).view(subtype)
#    _data._mask = ndarray.__new__(ndarray, baseshape, 'b1')
#    return _data
    _mask = ndarray.__new__(ndarray, baseshape, 'b1')
    return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,)
Example #5
def ones(r, c=None):
    """Matrix filled with ones.

    If r and c are numbers, it returns an (r, c) matrix,
    with r and c converted to integers when needed.

    If r is an (m, n) matrix, it returns an (m, n) matrix.
    """
    try:
        a = ndarray.__new__(matrix,(r,c))
    except TypeError:
        a = ndarray.__new__(matrix,r.shape)
    a.fill(1)
    return a
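A quick usage sketch for ones() above, assuming the surrounding module does `from numpy import ndarray, matrix` (the imports are not shown in the snippet):

from numpy import ndarray, matrix  # assumed module-level imports

m = ones(2, 3)        # 2x3 matrix of ones
m2 = ones(m)          # the TypeError branch: reuse the shape of an existing matrix
print(m.shape, m2.shape)   # (2, 3) (2, 3)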
Example #6
    def __new__(cls, camera, frame):
        """
        Convert a phloxar-dc1394 frame into a Frame instance.
        :param camera:
        :param frame:
        :return:
        """
        dtype = ARRAY(c_byte, frame.contents.image_bytes)
        buf = dtype.from_address(frame.contents.image)
        width, height = frame.contents.size
        pixels = width * height
        endian = frame.contents.little_endian and '<' or '>'
        type_str = '%su%i' % (endian, frame.contents.image_bytes / pixels)
        img = ndarray.__new__(cls, shape=(height, width), dtype=type_str, buffer=buf)

        img.frame_id = frame.contents.id
        img.frames_behind = frame.contents.frames_behind
        img.position = frame.contents.position
        img.packet_size = frame.contents.packet_size
        img.packets_per_frame = frame.contents.packet_per_frame
        img.timestamp = frame.contents.timestamp
        img.video_mode = video_modes[frame.contents.video_mode]
        img.data_depth = frame.contents.data_depth
        img.color_coding = color_codings[frame.contents.color_coding]
        img.color_filter = frame.contents.color_filter
        img.yuv_byte_order = frame.contents.yuv_byte_order
        img.stride = frame.contents.stride
        # save camera and frame for enqueue()
        img._frame = frame
        img._cam = camera

        return img
Example #7
    def __new__(cls, camera, frame):
        """
        Convert a phloxar-dc1394 frame into a Frame instance.
        :param camera:
        :param frame:
        :return:
        """
        dtype = ARRAY(c_byte, frame.contents.image_bytes)
        buf = dtype.from_address(frame.contents.image)
        width, height = frame.contents.size
        pixels = width * height
        endian = frame.contents.little_endian and '<' or '>'
        type_str = '%su%i' % (endian, frame.contents.image_bytes / pixels)
        img = ndarray.__new__(cls,
                              shape=(height, width),
                              dtype=type_str,
                              buffer=buf)

        img.frame_id = frame.contents.id
        img.frames_behind = frame.contents.frames_behind
        img.position = frame.contents.position
        img.packet_size = frame.contents.packet_size
        img.packets_per_frame = frame.contents.packet_per_frame
        img.timestamp = frame.contents.timestamp
        img.video_mode = video_modes[frame.contents.video_mode]
        img.data_depth = frame.contents.data_depth
        img.color_coding = color_codings[frame.contents.color_coding]
        img.color_filter = frame.contents.color_filter
        img.yuv_byte_order = frame.contents.yuv_byte_order
        img.stride = frame.contents.stride
        # save camera and frame for enqueue()
        img._frame = frame
        img._cam = camera

        return img
Example #8
    def __new__(cls, camera, frame):
        """
        Convert a dc1394 frame into a Frame instance.
        """
        dtyp = ARRAY(c_byte, frame.contents.image_bytes)
        buf = dtyp.from_address(frame.contents.image)
        width, height = frame.contents.size
        pixels = width * height
        endianess = frame.contents.little_endian and "<" or ">"
        typ_string = "%su%i" % (endianess, frame.contents.image_bytes / pixels)

        img = ndarray.__new__(cls,
                              shape=(height, width),
                              dtype=typ_string,
                              buffer=buf)

        img.frame_id = frame.contents.id
        img.frames_behind = frame.contents.frames_behind
        img.position = frame.contents.position
        img.packet_size = frame.contents.packet_size
        img.packets_per_frame = frame.contents.packets_per_frame
        img.timestamp = frame.contents.timestamp
        img.video_mode = video_mode_vals[frame.contents.video_mode]
        img.data_depth = frame.contents.data_depth
        img.color_coding = color_coding_vals[frame.contents.color_coding]
        img.color_filter = frame.contents.color_filter
        img.yuv_byte_order = frame.contents.yuv_byte_order
        img.stride = frame.contents.stride
        # save camera and frame for enqueue()
        img._frame = frame
        img._cam = camera
        return img
Example #9
    def __new__(cls, labels=[], data=None):
        """labels: list of str
        data: None by default;
        if provided, an `np.array`-like square matrix
        """
        # if data is provided, view it as this class
        if data is not None:
            rn, cn = data.shape

            # the row and column counts must be equal
            if rn != cn:
                raise ValueError("data is not a square matrix")
            elif rn != len(labels):
                raise ValueError("label count does not match matrix dimension")
            obj = np.asarray(data).view(cls)
        else:
            # otherwise allocate an uninitialized array sized by the labels
            obj = ndarray.__new__(cls, (len(labels), len(labels)))

        obj.labels = labels

        # label to index mapping
        obj.label2index_mapping = dict(
            (l, i) for i, l in enumerate(obj.labels))

        # index to label mapping
        obj.index2label_mapping = dict(
            (i, l) for i, l in enumerate(obj.labels))

        return obj
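A condensed, runnable sketch of the same idea. LabeledMatrix is a hypothetical name (the original class is not shown), and the validation checks are omitted for brevity; note that the labels and the mapping set here are lost on slices and views unless the class also defines __array_finalize__ (see the sketch after Example #30):

import numpy as np

class LabeledMatrix(np.ndarray):
    # hypothetical name; mirrors the __new__ above in condensed form
    def __new__(cls, labels=(), data=None):
        if data is not None:
            obj = np.asarray(data).view(cls)
        else:
            obj = np.ndarray.__new__(cls, (len(labels), len(labels)))
            obj.fill(0)  # the original leaves this buffer uninitialized
        obj.labels = list(labels)
        obj.label2index_mapping = {l: i for i, l in enumerate(obj.labels)}
        return obj

m = LabeledMatrix(labels=['a', 'b'], data=np.eye(2))
i = m.label2index_mapping['b']
print(m[i, i])   # 1.0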
Example #10
 def __new__(subtype, data, dtype=None, copy=True):
     arr = array(data, dtype=dtype, copy=copy)
     shape = arr.shape
     ret = ndarray.__new__(subtype, shape, arr.dtype,
                             buffer=arr,
                             order=True)
     return ret
Example #11
    def __new__(cls, labels=[], data=None):
        """labels: list of str
        data: None by default;
        if provided, an `np.array`-like square matrix
        """
        # if data is provided, view it as this class
        if data is not None:
            rn, cn = data.shape

            # the row and column counts must be equal
            if rn != cn:
                raise ValueError("data is not a square matrix")
            elif rn != len(labels):
                raise ValueError("label count does not match matrix dimension")
            obj = np.asarray(data).view(cls)
        else:
            # otherwise allocate an uninitialized array sized by the labels
            obj = ndarray.__new__(cls, (len(labels), len(labels)))

        obj.labels = labels

        # label to index mapping
        obj.label2index_mapping = dict((l, i) for i, l in enumerate(obj.labels))

        # index to label mapping
        obj.index2label_mapping = dict((i, l) for i, l in enumerate(obj.labels))

        return obj
Example #12
    def __new__(cls, camera, frame): 
        """
        Convert a dc1394 frame into a Frame instance.
        """
        dtyp = ARRAY(c_byte, frame.contents.image_bytes)
        buf = dtyp.from_address(frame.contents.image)
        width, height = frame.contents.size
        pixels = width*height
        endianess = frame.contents.little_endian and "<" or ">"
        typ_string = "%su%i" % (endianess,
                frame.contents.image_bytes/pixels)

        img = ndarray.__new__(cls, shape=(height, width),
                dtype=typ_string, buffer=buf)

        img.frame_id = frame.contents.id
        img.frames_behind = frame.contents.frames_behind
        img.position = frame.contents.position
        img.packet_size = frame.contents.packet_size
        img.packets_per_frame = frame.contents.packets_per_frame
        img.timestamp = frame.contents.timestamp
        img.video_mode = video_mode_vals[frame.contents.video_mode]
        img.data_depth = frame.contents.data_depth
        img.color_coding = color_coding_vals[frame.contents.color_coding]
        img.color_filter = frame.contents.color_filter
        img.yuv_byte_order = frame.contents.yuv_byte_order
        img.stride = frame.contents.stride
        # save camera and frame for enqueue()
        img._frame = frame
        img._cam = camera
        return img
Example #13
    def _read_var_array(self):
        assert self.fp.read(4) in [ZERO, NC_VARIABLE]

        begin = 0
        dtypes = {"names": [], "formats": []}
        rec_vars = []
        count = self._unpack_int()
        for var in range(count):
            name, dimensions, shape, attributes, typecode, size, dtype_, begin_, vsize = self._read_var()
            if shape and shape[0] is None:
                rec_vars.append(name)
                self.__dict__["_recsize"] += vsize
                if begin == 0:
                    begin = begin_
                dtypes["names"].append(name)
                dtypes["formats"].append(str(shape[1:]) + dtype_)

                # Handle padding with a virtual variable.
                if typecode in "bch":
                    actual_size = reduce(mul, (1,) + shape[1:]) * size
                    padding = -actual_size % 4
                    if padding:
                        dtypes["names"].append("_padding_%d" % var)
                        dtypes["formats"].append("(%d,)>b" % padding)

                # Data will be set later.
                data = None
            else:
                mm = mmap(self.fp.fileno(), begin_ + vsize, access=ACCESS_READ)
                data = ndarray.__new__(ndarray, shape, dtype=dtype_, buffer=mm, offset=begin_, order=0)

            # Add variable.
            self.variables[name] = netcdf_variable(data, typecode, shape, dimensions, attributes)

        if rec_vars:
            # Remove padding when only one record variable.
            if len(rec_vars) == 1:
                dtypes["names"] = dtypes["names"][:1]
                dtypes["formats"] = dtypes["formats"][:1]

            # Build rec array.
            mm = mmap(self.fp.fileno(), begin + self._recs * self._recsize, access=ACCESS_READ)
            rec_array = ndarray.__new__(ndarray, (self._recs,), dtype=dtypes, buffer=mm, offset=begin, order=0)

            for var in rec_vars:
                self.variables[var].__dict__["data"] = rec_array[var]
Example #14
 def __new__ (cls, cfg = None):
     if not isinstance (cfg, Config): cfg = Config ()
     dtype = [( 'x', cfg.rtype),
              ( 'z', cfg.rtype),
              ('vx', cfg.rtype),
              ('vy', cfg.rtype),
              ('vz', cfg.rtype)]
     obj = ndarray.__new__ (cls, shape = (cfg.npar,), dtype = dtype)
     return obj
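The structured dtype above gives each particle named fields. A self-contained sketch of the same pattern, with hypothetical stand-ins for cfg.npar and cfg.rtype (the Config class is not shown in the example):

import numpy as np
from numpy import ndarray

class Particles(ndarray):
    # hypothetical stand-in: npar and rtype play the role of cfg.npar / cfg.rtype
    def __new__(cls, npar=4, rtype=np.float64):
        dtype = [('x', rtype), ('z', rtype),
                 ('vx', rtype), ('vy', rtype), ('vz', rtype)]
        return ndarray.__new__(cls, shape=(npar,), dtype=dtype)

p = Particles(3)
p['x'] = 0.0                 # whole-field assignment
p['vz'] = [1.0, 2.0, 3.0]
print(p['vz'].mean())        # 2.0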
Example #15
    def __init__(self, fileno, nc_type, vsize, begin, shape, dimensions, attributes, isrec=False, recsize=0):
        self._nc_type = nc_type
        self._vsize = vsize
        self._begin = begin
        self.shape = shape
        self.dimensions = dimensions
        self.attributes = attributes  # for ``dap.plugins.netcdf``
        self.__dict__.update(attributes)
        self._is_record = isrec

        # Number of bytes and type.
        self._bytes = [1, 1, 2, 4, 4, 8][self._nc_type-1]
        type_ = ['i', 'S', 'i', 'i', 'f', 'f'][self._nc_type-1]
        dtype = '>%s%d' % (type_, self._bytes)
        bytes = self._begin + self._vsize

        if isrec:
            # Record variables are not stored contiguously on disk, so we
            # need to create a separate array for each record.
            #
            # TEO:  This will copy data from the newly-created array
            #  into the __array_data__ region, thus removing any benefit of using
            #  a memory-mapped file.  You might as well just read the data
            #  in directly.
            self.__array_data__ = zeros(shape, dtype)
            bytes += (shape[0] - 1) * recsize
            for n in range(shape[0]):
                offset = self._begin + (n * recsize)
                mm = mmap.mmap(fileno, bytes, access=mmap.ACCESS_READ)
                self.__array_data__[n] = ndarray.__new__(ndarray, shape[1:], dtype=dtype, buffer=mm, offset=offset, order=0)
        else:
            # Create buffer and data.
            mm = mmap.mmap(fileno, bytes, access=mmap.ACCESS_READ)
            self.__array_data__ = ndarray.__new__(ndarray, shape, dtype=dtype, buffer=mm, offset=self._begin, order=0)

        # N-D array interface
        self.__array_interface__ = {'shape'  : shape,
                                    'typestr': dtype,
                                    'data'   : self.__array_data__,
                                    'version': 3,
                                   }
Example #16
def RawNumpy(array):
    mmap, address = put_on_heap(array)
    mmap_nd = ndarray.__new__(
        ndarray,
        array.shape,
        dtype=array.dtype,
        buffer=mmap,
        offset=0,
        order='C'
    )
    mmap_nd[:] = array[:]
    assert mmap_nd.ctypes.data == address
    return mmap_nd
Example #17
 def __new__(cls, other):
     self = ndarray.__new__(cls, (3,))
     if isinstance(other, ICRS):
         x, y, z = other
         y, z = (
             y * cos(ecliptic_obliquity) + z * sin(ecliptic_obliquity),
             z * cos(ecliptic_obliquity) - y * sin(ecliptic_obliquity),
             )
         self[2] = r = sqrt(x*x + y*y + z*z)
         self[0] = arctan2(-y, -x) + pi
         self[1] = arcsin(z / r)
     else:
         raise ValueError('how do I use that?')
     return self
Example #18
def SynchronizedNumpy(array, lock=None):
    mmap, address = put_on_heap(array)
    mmap_nd = ndarray.__new__(
        ndarray,
        array.shape,
        dtype  = array.dtype,
        buffer = mmap,
        offset = 0,
        order  = 'C'
    )
    mmap_nd[:] = array[:]
    assert mmap_nd.ctypes.data == address
    # TODO: agnostic backend
    return SynchronizedArray(mmap_nd, lock=lock)
Example #19
    def __new__(cls, rlabels, clabels=None, data=None):
        """
        rlabels: list of hashable objects (e.g. str) for the rows.

        clabels: list of hashable objects (e.g. str) for the columns;
        if not given, it defaults to rlabels.

        data: None by default;
        if provided, an `np.array`-like object.

        labels_synonyms: other names for the label attributes.
        """

        # if clabels is not given, reuse rlabels
        if clabels is None:
            clabels = rlabels

        # if data is provided, view it as this class
        if data is not None:

            r_cnt, c_cnt = data.shape

            # the dimensions must match the label counts
            if r_cnt != len(rlabels) or c_cnt != len(clabels):
                raise ValueError(
                    "label size and matrix dimension do not match "
                    "(%dx%d required, %dx%d given)"
                    % (len(rlabels), len(clabels), r_cnt, c_cnt))
            obj = np.asarray(data).view(cls)
        else:
            rc, cc = len(rlabels), len(clabels)
            obj = ndarray.__new__(cls, (rc, cc), buffer=np.zeros((rc, cc)))

        obj.rlabels = rlabels
        obj.clabels = clabels

        # label to index mappings
        obj.rlabel2index_mapping = {l: i for i, l in enumerate(obj.rlabels)}
        obj.clabel2index_mapping = {l: i for i, l in enumerate(obj.clabels)}
        
        return obj
Example #20
    def _read_var_array(self):
        header = self.fp.read(4)
        assert header in [ZERO, NC_VARIABLE]

        begin = 0
        dtypes = {'names': [], 'formats': []}
        rec_vars = []
        count = self._unpack_int()
        for var in range(count):
            name, dimensions, shape, attributes, typecode, size, dtype_, begin_, vsize = self._read_var(
            )
            if shape and shape[0] is None:
                rec_vars.append(name)
                self.__dict__['_recsize'] += vsize
                if begin == 0: begin = begin_
                dtypes['names'].append(name)
                dtypes['formats'].append(str(shape[1:]) + dtype_)

                # Handle padding with a virtual variable.
                if typecode in 'bch':
                    actual_size = reduce(mul, (1, ) + shape[1:]) * size
                    padding = -actual_size % 4
                    if padding:
                        dtypes['names'].append('_padding_%d' % var)
                        dtypes['formats'].append('(%d,)>b' % padding)

                # Data will be set later.
                data = None
            else:
                if self.use_mmap:
                    mm = mmap(self.fp.fileno(),
                              begin_ + vsize,
                              access=ACCESS_READ)
                    data = ndarray.__new__(ndarray,
                                           shape,
                                           dtype=dtype_,
                                           buffer=mm,
                                           offset=begin_,
                                           order=0)
                else:
                    pos = self.fp.tell()
                    self.fp.seek(begin_)
                    #data = fromstring(self.fp.read(vsize), dtype=dtype_)
                    data = fromstring(self.fp.read(size * prod(shape)),
                                      dtype=dtype_)
                    data.shape = shape
                    self.fp.seek(pos)

            # Add variable.
            self.variables[name] = netcdf_variable(data, typecode, shape,
                                                   dimensions, attributes)

        if rec_vars:
            # Remove padding when only one record variable.
            if len(rec_vars) == 1:
                dtypes['names'] = dtypes['names'][:1]
                dtypes['formats'] = dtypes['formats'][:1]

            # Build rec array.
            if self.use_mmap:
                mm = mmap(self.fp.fileno(),
                          begin + self._recs * self._recsize,
                          access=ACCESS_READ)
                rec_array = ndarray.__new__(ndarray, (self._recs, ),
                                            dtype=dtypes,
                                            buffer=mm,
                                            offset=begin,
                                            order=0)
            else:
                pos = self.fp.tell()
                self.fp.seek(begin)
                rec_array = fromstring(self.fp.read(self._recs *
                                                    self._recsize),
                                       dtype=dtypes)
                rec_array.shape = (self._recs, )
                self.fp.seek(pos)

            for var in rec_vars:
                self.variables[var].__dict__['data'] = rec_array[var]
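These _read_var_array variants appear to come from SciPy's pure-Python netCDF reader. A small round-trip sketch against the public scipy.io.netcdf_file API (assuming SciPy is available); opening with mmap=True takes the ndarray.__new__(..., buffer=mm, offset=...) branch above, while mmap=False takes the fromstring branch:

import numpy as np
from scipy.io import netcdf_file

f = netcdf_file('demo.nc', 'w')            # write a tiny classic-format file
f.createDimension('t', 3)
v = f.createVariable('temp', 'f4', ('t',))
v[:] = np.arange(3, dtype='f4')
f.close()

g = netcdf_file('demo.nc', 'r', mmap=False)   # mmap=True exercises the mmap branch instead
print(g.variables['temp'][:])                 # [0. 1. 2.]
g.close()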
Example #21
def zeros(shape, dtype=None, order='C'):
    """return a matrix initialized to all zeros
    """
    a = ndarray.__new__(matrix, shape, dtype, order=order)
    a.fill(0)
    return a
Example #22
def empty(shape, dtype=None, order='C'):
    """return an empty matrix of the given shape
    """
    return ndarray.__new__(matrix, shape, dtype, order=order)
Example #23
	def __new__( self, x, y, z , h = 0.0 ):
		""" Initializes the vector. The three required values are the x,y, and 
			z values of the vector. The last optional argument is the homogenous 
			coordinate for quaternion algebra. 
		"""
		return ndarray.__new__( self, (4,), buffer=array([x,y,z,h]) )
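A self-contained sketch of the same constructor. Vector is a hypothetical class name (the original class is not shown), and `from numpy import ndarray, array` is assumed; dtype=float is made explicit so the buffer matches ndarray's default float64 dtype:

from numpy import ndarray, array

class Vector(ndarray):   # hypothetical name for the class the method belongs to
    def __new__(cls, x, y, z, h=0.0):
        return ndarray.__new__(cls, (4,), buffer=array([x, y, z, h], dtype=float))

v = Vector(1.0, 2.0, 3.0)
print(v)                                  # [1. 2. 3. 0.]
print(float((v[:3] ** 2).sum()) ** 0.5)   # length of the xyz part: ~3.742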
Example #24
            def __new__(subtype, filename, dtype='uint8', mode='r+', offset=0, shape=None, order='C'):
                # Import here to minimize 'import numpy' overhead
                try:
                    mode = mode_equivalents[mode]
                except KeyError:
                    if mode not in valid_filemodes:
                        raise ValueError("mode must be one of %s" %
                                         (valid_filemodes + list(mode_equivalents.keys())))

                if hasattr(filename, 'read'):
                    fid = filename
                    own_file = False
                else:
                    fid = open(filename, (mode == 'c' and 'r' or mode)+'b')
                    own_file = True

                if (mode == 'w+') and shape is None:
                    raise ValueError("shape must be given")

                fid.seek(0, 2)
                flen = fid.tell()
                descr = dtypedescr(dtype)
                _dbytes = descr.itemsize

                if shape is None:
                    bytes = flen - offset
                    if (bytes % _dbytes):
                        fid.close()
                        raise ValueError("Size of available data is not a "
                                "multiple of the data-type size.")
                    size = bytes // _dbytes
                    shape = (size,)
                else:
                    if not isinstance(shape, tuple):
                        shape = (shape,)
                    size = 1
                    for k in shape:
                        size *= k

                bytes = long(offset + size*_dbytes)

                if mode == 'w+' or (mode == 'r+' and flen < bytes):
                    fid.seek(bytes - 1, 0)
                    fid.write('\0')
                    fid.flush()

                if mode == 'c':
                    acc = mmap.ACCESS_COPY
                elif mode == 'r':
                    acc = mmap.ACCESS_READ
                else:
                    acc = mmap.ACCESS_WRITE

                start = offset - offset % mmap.ALLOCATIONGRANULARITY
                bytes -= start
                offset -= start
                mm = mmap.mmap(fid.fileno(), bytes, access=acc, offset=start)

                self = ndarray.__new__(subtype, shape, dtype=descr, buffer=mm,
                    offset=offset, order=order)
                self._mmap = mm
                self.offset = offset
                self.mode = mode

                if isinstance(filename, basestring):
                    self.filename = os.path.abspath(filename)
                # py3 returns int for TemporaryFile().name
                elif (hasattr(filename, "name") and
                      isinstance(filename.name, basestring)):
                    self.filename = os.path.abspath(filename.name)
                # same as memmap copies (e.g. memmap + 1)
                else:
                    self.filename = None

                if own_file:
                    fid.close()

                return self
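The __new__ above appears to be numpy.memmap's constructor (long and basestring come from numpy's Python 2 compatibility shims, and dtypedescr, mode_equivalents, and valid_filemodes are module-level helpers not shown here). A minimal usage sketch through the public np.memmap API:

import numpy as np
import tempfile, os

path = os.path.join(tempfile.mkdtemp(), 'buf.dat')
mm = np.memmap(path, dtype='float32', mode='w+', shape=(4, 4))
mm[:] = 1.0
mm.flush()                                   # push changes to the file
ro = np.memmap(path, dtype='float32', mode='r', shape=(4, 4))
print(ro.sum())                              # 16.0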
Example #25
    def _read_var_array(self):
        header = self.fp.read(4)
        assert header in [ZERO, NC_VARIABLE]

        begin = 0
        dtypes = {'names': [], 'formats': []}
        rec_vars = []
        count = self._unpack_int()
        for var in range(count):
            name, dimensions, shape, attributes, typecode, size, dtype_, begin_, vsize = self._read_var()
            if shape and shape[0] is None:
                rec_vars.append(name)
                self.__dict__['_recsize'] += vsize
                if begin == 0: begin = begin_
                dtypes['names'].append(name)
                dtypes['formats'].append(str(shape[1:]) + dtype_)

                # Handle padding with a virtual variable.
                if typecode in 'bch':
                    actual_size = reduce(mul, (1,) + shape[1:]) * size
                    padding = -actual_size % 4
                    if padding:
                        dtypes['names'].append('_padding_%d' % var)
                        dtypes['formats'].append('(%d,)>b' % padding)

                # Data will be set later.
                data = None
            else:
                if self.use_mmap:
                    mm = mmap(self.fp.fileno(), begin_+vsize, access=ACCESS_READ)
                    data = ndarray.__new__(ndarray, shape, dtype=dtype_,
                            buffer=mm, offset=begin_, order=0)
                else:
                    pos = self.fp.tell()
                    self.fp.seek(begin_)
                    #data = fromstring(self.fp.read(vsize), dtype=dtype_)
                    data = fromstring(self.fp.read(size*prod(shape)), dtype=dtype_)
                    data.shape = shape
                    self.fp.seek(pos)

            # Add variable.
            self.variables[name] = netcdf_variable(
                    data, typecode, shape, dimensions, attributes)

        if rec_vars:
            # Remove padding when only one record variable.
            if len(rec_vars) == 1:
                dtypes['names'] = dtypes['names'][:1]
                dtypes['formats'] = dtypes['formats'][:1]

            # Build rec array.
            if self.use_mmap:
                mm = mmap(self.fp.fileno(), begin+self._recs*self._recsize, access=ACCESS_READ)
                rec_array = ndarray.__new__(ndarray, (self._recs,), dtype=dtypes,
                        buffer=mm, offset=begin, order=0)
            else:
                pos = self.fp.tell()
                self.fp.seek(begin)
                rec_array = fromstring(self.fp.read(self._recs*self._recsize), dtype=dtypes)
                rec_array.shape = (self._recs,)
                self.fp.seek(pos)

            for var in rec_vars:
                self.variables[var].__dict__['data'] = rec_array[var]
Example #26
 def __new__ (cls, cfg = None):
     if not isinstance (cfg, Config): cfg = Config ()
     cls.nghost = cfg.nghost
     shape = (cfg.nz + 2*cfg.nghost, cfg.nx + 2*cfg.nghost)
     obj = ndarray.__new__ (cls, shape = shape, dtype = cfg.rtype)
     return obj
Example #27
 def __new__(cls,*args,**kwargs):
     return ndarray.__new__(cls,*args,**kwargs)
Example #28
 def __new__(self, *args, **kwargs):
     return ndarray.__new__(self, 3)
Example #29
    def _read_var_array(self):
        header = self.fp.read(4)
        if not header in [ZERO, NC_VARIABLE]:
            raise ValueError("Unexpected header.")

        if header == ZERO:
            more = self.fp.read(4)
            assert more == ZERO
            return            
        
        records = 0
        dtypes = {'names': [], 'formats': []}
        rec_vars = []
        count = self._unpack_int()
        for var in range(count):
            name, dimensions, shape, attributes, type, start, vsize = self._read_var()
            # http://www.unidata.ucar.edu/software/netcdf/docs/netcdf.html
            # Note that vsize is the product of the dimension lengths
            # (omitting the record dimension) and the number of bytes
            # per value (determined from the type), increased to the
            # next multiple of 4, for each variable. If a record
            # variable, this is the amount of space per record. The
            # netCDF "record size" is calculated as the sum of the
            # vsize's of all the record variables.
            #
            # The vsize field is actually redundant, because its value
            # may be computed from other information in the header. The
            # 32-bit vsize field is not large enough to contain the size
            # of variables that require more than 2^32 - 4 bytes, so
            # 2^32 - 1 is used in the vsize field for such variables.
            if shape and shape[0] is None:  # record variable
                rec_vars.append(name)
                # The netCDF "record size" is calculated as the sum of
                # the vsize's of all the record variables.
                self.__dict__['_recsize'] += vsize
                # Store the position where record arrays start.
                if records == 0:
                    records = start
                dtypes['names'].append(name)
                dtypes['formats'].append(str(shape[1:]) + '>' + type.char)

                # Handle padding with a virtual variable.
                if type.char in 'bch':
                    actual_size = reduce(mul, (1,) + shape[1:]) * type.itemsize
                    padding = -actual_size % 4
                    if padding:
                        dtypes['names'].append('_padding_%d' % var)
                        dtypes['formats'].append('(%d,)>b' % padding)

                # Data will be set later.
                data = None
            else:  # not a record variable
                # Calculate size to avoid problems with vsize (above)
                size = reduce(mul, shape, 1) * type.itemsize
                pos = self.fp.tell()
                if self.use_mmap:
                    if ALLOCATIONGRANULARITY:
                        pages = start // ALLOCATIONGRANULARITY
                        offset = pages * ALLOCATIONGRANULARITY
                        start = start % ALLOCATIONGRANULARITY
                        mm = mmap(self.fp.fileno(), start+size, access=ACCESS_READ, offset=offset)
                    else:
                        mm = mmap(self.fp.fileno(), start+size, access=ACCESS_READ)

                    data = ndarray.__new__(ndarray, shape, dtype=type,
                            buffer=mm, offset=start, order=0)
                else:
                    self.fp.seek(start)
                    data = fromstring(self.fp.read(size), type)
                    data.shape = shape
                self.fp.seek(pos)

            # Add variable.
            self.variables[name] = netcdf_variable(
                    data, type, shape, dimensions, attributes,
                    maskandscale=self.maskandscale)

        if rec_vars:
            dtypes['formats'] = [f.replace('()', '').replace(' ', '') for f in dtypes['formats']]
            # Remove padding when only one record variable.
            if len(rec_vars) == 1:
                dtypes['names'] = dtypes['names'][:1]
                dtypes['formats'] = dtypes['formats'][:1]

            # Build rec array.
            pos = self.fp.tell()
            if self.use_mmap:
                if ALLOCATIONGRANULARITY:
                    pages = records // ALLOCATIONGRANULARITY
                    offset = pages * ALLOCATIONGRANULARITY
                    records = records % ALLOCATIONGRANULARITY
                    mm = mmap(self.fp.fileno(), records+self._recs*self._recsize, access=ACCESS_READ, offset=offset)
                else:
                    mm = mmap(self.fp.fileno(), records+self._recs*self._recsize, access=ACCESS_READ)

                rec_array = ndarray.__new__(ndarray, (self._recs,), dtype=dtypes,
                        buffer=mm, offset=records, order=0)
            else:
                self.fp.seek(records)
                rec_array = fromstring(self.fp.read(self._recs*self._recsize), dtype=dtypes)
                rec_array.shape = (self._recs,)
            self.fp.seek(pos)

            for var in rec_vars:
                self.variables[var].__dict__['data'] = rec_array[var]
Example #30
 def __new__(subtype, shape, dtype):
     self = ndarray.__new__(subtype, shape, dtype)
     self.id = 'subtype'
     return self
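Attributes attached in __new__, like id above, do not survive slices or views created by numpy, because those bypass __new__. The standard fix (per numpy's subclassing documentation) is __array_finalize__; a minimal sketch with a hypothetical class name:

import numpy as np

class Tagged(np.ndarray):   # hypothetical name illustrating the pattern
    def __new__(cls, shape, dtype=float):
        self = np.ndarray.__new__(cls, shape, dtype)
        self.id = 'subtype'
        return self

    def __array_finalize__(self, obj):
        # called for views/slices and template constructions, which skip __new__
        if obj is not None:
            self.id = getattr(obj, 'id', None)

a = Tagged((2, 2))
print(a[0].id)   # 'subtype' -- still present on the sliced view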
Example #31
def empty(shape, dtype=None, order='C'):
    """return an empty matrix of the given shape
    """
    return ndarray.__new__(matrix, shape, dtype, order=order)
Example #32
def zeros(shape, dtype=None, order='C'):
    """return a matrix initialized to all zeros
    """
    a = ndarray.__new__(matrix, shape, dtype, order=order)
    a.fill(0)
    return a
Example #33
    def _read_var_array(self):
        header = self.fp.read(4)
        if not header in [ZERO, NC_VARIABLE]:
            raise ValueError("Unexpected header.")

        begin = 0
        dtypes = {'names': [], 'formats': []}
        rec_vars = []
        count = self._unpack_int()
        rec_vsizes = []
        for var in range(count):
            name, dimensions, shape, attributes, type, begin_, vsize = self._read_var()
            # http://www.unidata.ucar.edu/software/netcdf/docs/netcdf.html
            # Note that vsize is the product of the dimension lengths
            # (omitting the record dimension) and the number of bytes
            # per value (determined from the type), increased to the
            # next multiple of 4, for each variable. If a record
            # variable, this is the amount of space per record. The
            # netCDF "record size" is calculated as the sum of the
            # vsize's of all the record variables.
            #
            # The vsize field is actually redundant, because its value
            # may be computed from other information in the header. The
            # 32-bit vsize field is not large enough to contain the size
            # of variables that require more than 2^32 - 4 bytes, so
            # 2^32 - 1 is used in the vsize field for such variables.
            if shape and shape[0] is None:  # record variable
                rec_vars.append(name)
                # The netCDF "record size" is calculated as the sum of
                # the vsize's of all the record variables.
                self.__dict__['_recsize'] += vsize
                rec_vsizes.append(vsize)
                if begin == 0: begin = begin_
                dtypes['names'].append(name)
                dtypes['formats'].append(str(shape[1:]) + '>' + type.char)

                # Handle padding with a virtual variable.
                if type.char in 'bch':
                    actual_size = reduce(mul, (1,) + shape[1:]) * type.itemsize
                    padding = -actual_size % 4
                    if padding:
                        dtypes['names'].append('_padding_%d' % var)
                        dtypes['formats'].append('(%d,)>b' % padding)

                # Data will be set later.
                if self.delay:
                    self._begins[name] = begin_
                    data = unmapped_array((self._recs,)+shape[1:], type)
                else:
                    data = None
            else:  # not a record variable
                # Calculate size to avoid problems with vsize (above)
                a_size = reduce(mul, shape, 1) * type.itemsize
                pos = self.fp.tell()
                if self.use_mmap:
                    mm = mmap(self.fp.fileno(), begin_+a_size, access=ACCESS_READ)
                    data = ndarray.__new__(ndarray, shape, dtype=type,
                            buffer=mm, offset=begin_, order=0)
                elif self.delay:
                    self._begins[name] = begin_
                    data = unmapped_array(shape, type)
                else:
                    self.fp.seek(begin_)
                    data = fromstring(self.fp.read(a_size), type)
                    data.shape = shape
                self.fp.seek(pos)

            # Add variable.
            self.variables[name] = netcdf_variable(data, type, shape, dimensions, attributes, self.copy)

        if rec_vars and not self.delay:
            dtypes['formats'] = [f.replace('()', '').replace(' ', '') for f in dtypes['formats']]
            # Remove padding when only one record variable.
            if len(rec_vars) == 1:
                dtypes['names'] = dtypes['names'][:1]
                dtypes['formats'] = dtypes['formats'][:1]

            # Build rec array.
            pos = self.fp.tell()
            rec_arrays = []
            if self.use_mmap:
                mm = mmap(self.fp.fileno(), begin+self._recs*self._recsize, access=ACCESS_READ)
                if self._recsize >= 1<<31:
                    # need to work around limitation of numpy.dtype.itemsize to 32 bit
                    i = 0
                    while i < len(rec_vsizes):
                        ends = np.cumsum(rec_vsizes[i:])
                        n = np.searchsorted(ends, 1<<31)
                        dtype1 = dict(names=dtypes['names'][i:i+n], formats=dtypes['formats'][i:i+n])
                        rec_array = ndarray.__new__(ndarray, (self._recs,), dtype=dtype1,
                                buffer=mm, offset=begin, order=0)
                        rec_arrays.append(rec_array)
                        begin += ends[n-1]
                        i += n
                else:
                    rec_array = ndarray.__new__(ndarray, (self._recs,), dtype=dtypes,
                            buffer=mm, offset=begin, order=0)
                    rec_arrays = [ rec_array ]
            else:
                self.fp.seek(begin)
                rec_array = fromstring(self.fp.read(self._recs*self._recsize), dtype=dtypes)
                rec_array.shape = (self._recs,)
                rec_arrays = [ rec_array ]
            self.fp.seek(pos)

            for rec_array in rec_arrays:
                for var in rec_array.dtype.names:
                    self.variables[var].__dict__['data'] = rec_array[var]
Example #34
    def _read_var_array(self):
        header = self.fp.read(4)
        if not header in [ZERO, NC_VARIABLE]:
            raise ValueError("Unexpected header.")

        begin = 0
        dtypes = {'names': [], 'formats': []}
        rec_vars = []
        count = self._unpack_int()
        rec_vsizes = []
        for var in range(count):
            name, dimensions, shape, attributes, type, begin_, vsize = self._read_var(
            )
            # http://www.unidata.ucar.edu/software/netcdf/docs/netcdf.html
            # Note that vsize is the product of the dimension lengths
            # (omitting the record dimension) and the number of bytes
            # per value (determined from the type), increased to the
            # next multiple of 4, for each variable. If a record
            # variable, this is the amount of space per record. The
            # netCDF "record size" is calculated as the sum of the
            # vsize's of all the record variables.
            #
            # The vsize field is actually redundant, because its value
            # may be computed from other information in the header. The
            # 32-bit vsize field is not large enough to contain the size
            # of variables that require more than 2^32 - 4 bytes, so
            # 2^32 - 1 is used in the vsize field for such variables.
            if shape and shape[0] is None:  # record variable
                rec_vars.append(name)
                # The netCDF "record size" is calculated as the sum of
                # the vsize's of all the record variables.
                self.__dict__['_recsize'] += vsize
                rec_vsizes.append(vsize)
                if begin == 0: begin = begin_
                dtypes['names'].append(name)
                dtypes['formats'].append(str(shape[1:]) + '>' + type.char)

                # Handle padding with a virtual variable.
                if type.char in 'bch':
                    actual_size = reduce(mul,
                                         (1, ) + shape[1:]) * type.itemsize
                    padding = -actual_size % 4
                    if padding:
                        dtypes['names'].append('_padding_%d' % var)
                        dtypes['formats'].append('(%d,)>b' % padding)

                # Data will be set later.
                if self.delay:
                    self._begins[name] = begin_
                    data = unmapped_array((self._recs, ) + shape[1:], type)
                else:
                    data = None
            else:  # not a record variable
                # Calculate size to avoid problems with vsize (above)
                a_size = reduce(mul, shape, 1) * type.itemsize
                pos = self.fp.tell()
                if self.use_mmap:
                    mm = mmap(self.fp.fileno(),
                              begin_ + a_size,
                              access=ACCESS_READ)
                    data = ndarray.__new__(ndarray,
                                           shape,
                                           dtype=type,
                                           buffer=mm,
                                           offset=begin_,
                                           order=0)
                elif self.delay:
                    self._begins[name] = begin_
                    data = unmapped_array(shape, type)
                else:
                    self.fp.seek(begin_)
                    data = fromstring(self.fp.read(a_size), type)
                    data.shape = shape
                self.fp.seek(pos)

            # Add variable.
            self.variables[name] = netcdf_variable(data, type, shape,
                                                   dimensions, attributes,
                                                   self.copy)

        if rec_vars and not self.delay:
            dtypes['formats'] = [
                f.replace('()', '').replace(' ', '') for f in dtypes['formats']
            ]
            # Remove padding when only one record variable.
            if len(rec_vars) == 1:
                dtypes['names'] = dtypes['names'][:1]
                dtypes['formats'] = dtypes['formats'][:1]

            # Build rec array.
            pos = self.fp.tell()
            rec_arrays = []
            if self.use_mmap:
                mm = mmap(self.fp.fileno(),
                          begin + self._recs * self._recsize,
                          access=ACCESS_READ)
                if self._recsize >= 1 << 31:
                    # need to work around limitation of numpy.dtype.itemsize to 32 bit
                    i = 0
                    while i < len(rec_vsizes):
                        ends = np.cumsum(rec_vsizes[i:])
                        n = np.searchsorted(ends, 1 << 31)
                        dtype1 = dict(names=dtypes['names'][i:i + n],
                                      formats=dtypes['formats'][i:i + n])
                        rec_array = ndarray.__new__(ndarray, (self._recs, ),
                                                    dtype=dtype1,
                                                    buffer=mm,
                                                    offset=begin,
                                                    order=0)
                        rec_arrays.append(rec_array)
                        begin += ends[n - 1]
                        i += n
                else:
                    rec_array = ndarray.__new__(ndarray, (self._recs, ),
                                                dtype=dtypes,
                                                buffer=mm,
                                                offset=begin,
                                                order=0)
                    rec_arrays = [rec_array]
            else:
                self.fp.seek(begin)
                rec_array = fromstring(self.fp.read(self._recs *
                                                    self._recsize),
                                       dtype=dtypes)
                rec_array.shape = (self._recs, )
                rec_arrays = [rec_array]
            self.fp.seek(pos)

            for rec_array in rec_arrays:
                for var in rec_array.dtype.names:
                    self.variables[var].__dict__['data'] = rec_array[var]
Example #35
    def _read_var_array(self):
        header = self.fp.read(4)
        if not header in [ZERO, NC_VARIABLE]:
            raise ValueError("Unexpected header.")

        begin = 0
        dtypes = {'names': [], 'formats': []}
        rec_vars = []
        count = self._unpack_int()
        for var in range(count):
            (name, dimensions, shape, attributes,
             typecode, size, dtype_, begin_, vsize) = self._read_var()
            # http://www.unidata.ucar.edu/software/netcdf/docs/netcdf.html
            # Note that vsize is the product of the dimension lengths
            # (omitting the record dimension) and the number of bytes
            # per value (determined from the type), increased to the
            # next multiple of 4, for each variable. If a record
            # variable, this is the amount of space per record. The
            # netCDF "record size" is calculated as the sum of the
            # vsize's of all the record variables.
            #
            # The vsize field is actually redundant, because its value
            # may be computed from other information in the header. The
            # 32-bit vsize field is not large enough to contain the size
            # of variables that require more than 2^32 - 4 bytes, so
            # 2^32 - 1 is used in the vsize field for such variables.
            if shape and shape[0] is None: # record variable
                rec_vars.append(name)
                # The netCDF "record size" is calculated as the sum of
                # the vsize's of all the record variables.
                self.__dict__['_recsize'] += vsize
                if begin == 0: begin = begin_
                dtypes['names'].append(name)
                dtypes['formats'].append(str(shape[1:]) + dtype_)

                # Handle padding with a virtual variable.
                if typecode in 'bch':
                    actual_size = reduce(mul, (1,) + shape[1:]) * size
                    padding = -actual_size % 4
                    if padding:
                        dtypes['names'].append('_padding_%d' % var)
                        dtypes['formats'].append('(%d,)>b' % padding)

                # Data will be set later.
                data = None
            else: # not a record variable
                # Calculate size to avoid problems with vsize (above)
                a_size = reduce(mul, shape, 1) * size
                if self.use_mmap:
                    mm = mmap(self.fp.fileno(), begin_+a_size, access=ACCESS_READ)
                    data = ndarray.__new__(ndarray, shape, dtype=dtype_,
                            buffer=mm, offset=begin_, order=0)
                else:
                    pos = self.fp.tell()
                    self.fp.seek(begin_)
                    data = fromstring(self.fp.read(a_size), dtype=dtype_)
                    data.shape = shape
                    self.fp.seek(pos)

            # Add variable.
            self.variables[name] = netcdf_variable(
                    data, typecode, size, shape, dimensions, attributes)
            self.variables[name]._voffset = begin_
            self.variables[name]._dtype = dtype_

        if rec_vars:
            # Remove padding when only one record variable.
            if len(rec_vars) == 1:
                dtypes['names'] = dtypes['names'][:1]
                dtypes['formats'] = dtypes['formats'][:1]

            # Build rec array.
            if self.use_mmap:

                mm = mmap(self.fp.fileno(), begin+self._recs*self._recsize, access=ACCESS_READ)
                rec_array = ndarray.__new__(ndarray, (self._recs,), dtype=dtypes,
                        buffer=mm, offset=begin, order=0)
            else:
                pos = self.fp.tell()
                self.fp.seek(begin)
                rec_array = fromstring(self.fp.read(self._recs*self._recsize), dtype=dtypes)
                rec_array.shape = (self._recs,)
                self.fp.seek(pos)

            for var in rec_vars:
                self.variables[var].__dict__['data'] = rec_array[var]
Example #36
 def __new__(cls, values):
     this = ndarray.__new__(cls, shape=values.shape, dtype=values.dtype)
     this[...] = values
     return this
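The constructor above always copies values into freshly allocated memory via this[...] = values. When sharing memory with the source is acceptable, a view (np.asarray(values).view(cls), as several other examples here use) avoids the copy. A small sketch; Wrapped is a hypothetical name that reproduces the constructor above so the copy/view difference is visible:

import numpy as np

class Wrapped(np.ndarray):   # hypothetical name for the class above
    def __new__(cls, values):
        this = np.ndarray.__new__(cls, shape=values.shape, dtype=values.dtype)
        this[...] = values           # copies the data
        return this

values = np.arange(3)
w = Wrapped(values)                   # independent copy
v = np.asarray(values).view(Wrapped)  # no-copy alternative: shares the buffer
values[0] = 99
print(w[0], v[0])                     # 0 99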
Example #37
	def __new__(cls, shape, dtype, buffer, gl_buffer, gl_offset, access):
		obj = ndarray.__new__(cls, shape, dtype, buffer)
		obj.gl_buffer = gl_buffer
		obj.offset = gl_offset
		obj.access = access
		return obj
Example #38
 def __new__(cls, red, green=None, blue=None, alpha=255):
     return ndarray.__new__(cls, 4, 'float')
Example #39
    def _read_var_array(self):
        header = self.fp.read(4)
        if not header in [ZERO, NC_VARIABLE]:
            raise ValueError("Unexpected header.")

        begin = 0
        dtypes = {'names': [], 'formats': []}
        rec_vars = []
        count = self._unpack_int()
        for var in range(count):
            (name, dimensions, shape, attributes, typecode, size, dtype_,
             begin_, vsize) = self._read_var()
            # http://www.unidata.ucar.edu/software/netcdf/docs/netcdf.html
            # Note that vsize is the product of the dimension lengths
            # (omitting the record dimension) and the number of bytes
            # per value (determined from the type), increased to the
            # next multiple of 4, for each variable. If a record
            # variable, this is the amount of space per record. The
            # netCDF "record size" is calculated as the sum of the
            # vsize's of all the record variables.
            #
            # The vsize field is actually redundant, because its value
            # may be computed from other information in the header. The
            # 32-bit vsize field is not large enough to contain the size
            # of variables that require more than 2^32 - 4 bytes, so
            # 2^32 - 1 is used in the vsize field for such variables.
            if shape and shape[0] is None:  # record variable
                rec_vars.append(name)
                # The netCDF "record size" is calculated as the sum of
                # the vsize's of all the record variables.
                self.__dict__['_recsize'] += vsize
                if begin == 0:
                    begin = begin_
                dtypes['names'].append(name)
                dtypes['formats'].append(str(shape[1:]) + dtype_)

                # Handle padding with a virtual variable.
                if typecode in 'bch':
                    actual_size = reduce(mul, (1, ) + shape[1:]) * size
                    padding = -actual_size % 4
                    if padding:
                        dtypes['names'].append('_padding_%d' % var)
                        dtypes['formats'].append('(%d,)>b' % padding)

                # Data will be set later.
                data = None
            else:  # not a record variable
                # Calculate size to avoid problems with vsize (above)
                a_size = reduce(mul, shape, 1) * size
                if self.use_mmap:
                    mm = mmap(self.fp.fileno(),
                              begin_ + a_size,
                              access=ACCESS_READ)
                    data = ndarray.__new__(ndarray,
                                           shape,
                                           dtype=dtype_,
                                           buffer=mm,
                                           offset=begin_,
                                           order=0)
                    self._fds.append(mm)
                else:
                    pos = self.fp.tell()
                    self.fp.seek(begin_)
                    data = fromstring(self.fp.read(a_size), dtype=dtype_)
                    data.shape = shape
                    self.fp.seek(pos)

            # Add variable.
            self.variables[name] = netcdf_variable(data, typecode, size, shape,
                                                   dimensions, attributes)

        if rec_vars:
            # Remove padding when only one record variable.
            if len(rec_vars) == 1:
                dtypes['names'] = dtypes['names'][:1]
                dtypes['formats'] = dtypes['formats'][:1]

            # Build rec array.
            if self.use_mmap:
                mm = mmap(self.fp.fileno(),
                          begin + self._recs * self._recsize,
                          access=ACCESS_READ)
                rec_array = ndarray.__new__(ndarray, (self._recs, ),
                                            dtype=dtypes,
                                            buffer=mm,
                                            offset=begin,
                                            order=0)
                self._fds.append(mm)
            else:
                pos = self.fp.tell()
                self.fp.seek(begin)
                rec_array = fromstring(self.fp.read(self._recs *
                                                    self._recsize),
                                       dtype=dtypes)
                rec_array.shape = (self._recs, )
                self.fp.seek(pos)

            for var in rec_vars:
                self.variables[var].__dict__['data'] = rec_array[var]
Example #40
 def __new__(cls, red, green=None, blue=None, alpha=255):
     return ndarray.__new__(cls, 4, 'float')