Example #1
    def field(self, key):
        """
        A view of a `Column`'s data as an array.
        """

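        # Resolve the column's position from `key` (a name or zero-based
        # index) and look up its on-disk record format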
        indx = _get_index(self.names, key)
        recformat = self._coldefs._recformats[indx]

        # If field's base is a FITS_rec, we can run into trouble because it
        # contains a reference to the ._coldefs object of the original data;
        # this can lead to a circular reference; see ticket #49
        base = self
        while (isinstance(base, FITS_rec) and
               isinstance(base.base, np.recarray)):
            base = base.base
        # base could still be a FITS_rec in some cases, so take care to
        # use np.recarray.field to avoid a potential infinite
        # recursion
        field = np.recarray.field(base, indx)

        if self._convert[indx] is None:
            # X format: bit-array column; unwrap the packed bits into booleans
            if isinstance(recformat, _FormatX):
                _nx = recformat._nx
                dummy = np.zeros(self.shape + (_nx,), dtype=np.bool_)
                _unwrapx(field, dummy, _nx)
                self._convert[indx] = dummy
                return self._convert[indx]

            (_str, _bool, _number, _scale, _zero, bscale, bzero, dim) = \
                self._get_scale_factors(indx)
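            # _str/_bool/_number flag the column type; _scale/_zero indicate
            # whether TSCALn/TZEROn scaling applies (bscale/bzero are the
            # factors); dim holds the TDIMn shape, if any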

            # P format: variable-length array column whose data live on the heap
            buff = None
            if isinstance(recformat, _FormatP):
                dummy = _VLF([None] * len(self), dtype=recformat.dtype)
                for i in range(len(self)):
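                    # each P-format descriptor stores (element count, byte
                    # offset into the heap) for this row's array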
                    _offset = field[i, 1] + self._heapoffset

                    if self._file is not None:
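                        # the data live in an actual file; seek to the heap
                        # offset and read directly from disk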
                        self._file.seek(_offset)
                        def get_pdata(dtype, count):
                            return _array_from_file(self._file, dtype=dtype,
                                                    count=count, sep='')
                    else:  # There must be a _buffer or something is wrong
                        # Sometimes the buffer is already a Numpy array; in
                        # particular this can occur in compressed HDUs.
                        # Hypothetically other cases as well.
                        if buff is None:
                            buff = self._buffer
                        if not isinstance(buff, np.ndarray):
                            # Go ahead and create a single ndarray from the
                            # buffer if it is not already one; we will then
                            # take slices from it.  This is more efficient than
                            # the previous approach that created separate
                            # arrays for each VLA.
                            buff = np.fromstring(buff, dtype=np.uint8)

                        def get_pdata(dtype, count):
                            dtype = np.dtype(dtype)
                            nbytes = count * dtype.itemsize
                            slc = slice(_offset, _offset + nbytes)
                            return buff[slc].view(dtype=dtype)

                    if recformat.dtype == 'a':
                        count = field[i, 0]
                        dt = recformat.dtype + str(1)
                        da = get_pdata(dt, count)
                        dummy[i] = np.char.array(da, itemsize=count)
                        dummy[i] = decode_ascii(dummy[i])
                    else:
                        count = field[i, 0]
                        dt = recformat.dtype
                        dummy[i] = get_pdata(dt, count)
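                        # FITS binary data are big-endian; relabel the dtype
                        # so the raw heap bytes are interpreted correctly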
                        dummy[i].dtype = dummy[i].dtype.newbyteorder('>')

                # scale by TSCAL and TZERO
                if _scale or _zero:
                    for i in range(len(self)):
                        dummy[i][:] = dummy[i] * bscale + bzero

                # Boolean (logical) column: FITS stores the characters 'T'/'F'
                if recformat.dtype == FITS2NUMPY['L']:
                    for i in range(len(self)):
                        dummy[i] = np.equal(dummy[i], ord('T'))

                self._convert[indx] = dummy
                return self._convert[indx]

            # ASCII table, convert strings to numbers
            if not _str and self._coldefs._tbtype == 'TableHDU':
                _fmap = {'I': np.int32, 'F': np.float32, 'E': np.float32,
                         'D': np.float64}
                _type = _fmap[self._coldefs.formats[indx][0]]

                # if the string = TNULL, return ASCIITNULL
                nullval = self._coldefs.nulls[indx].strip().encode('ascii')
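                # Fortran-style 'D' exponents are not understood by NumPy's
                # float parser, so map them to 'E' first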
                dummy = field.replace('D'.encode('ascii'),
                                      'E'.encode('ascii'))
                dummy = np.where(dummy.strip() == nullval, str(ASCIITNULL),
                                 dummy)
                dummy = np.array(dummy, dtype=_type)

                self._convert[indx] = dummy
            else:
                dummy = field

            # Test that the dimensions given in dim are sensible; otherwise
            # display a warning and ignore them
            if dim:
                # See if the dimensions already match, if not, make sure the
                # number items will fit in the specified dimensions
                if dummy.ndim > 1:
                    actual_shape = dummy[0].shape
                    if _str:
                        actual_shape = (dummy[0].itemsize,) + actual_shape
                else:
                    actual_shape = len(dummy[0])
                if dim == actual_shape:
                    # The array already has the correct dimensions, so we
                    # ignore dim and don't convert
                    dim = None
                else:
                    nitems = reduce(operator.mul, dim)
                    if _str:
                        actual_nitems = dummy.itemsize
                    else:
                        actual_nitems = dummy.shape[1]
                    if nitems != actual_nitems:
                        warnings.warn(
                            'TDIM%d value %s does not fit with the size of '
                            'the array items (%d).  TDIM%d will be ignored.'
                            % (indx + 1, self._coldefs.dims[indx],
                               actual_nitems, indx + 1))
                        dim = None

            # further conversion for both ASCII and binary tables
            if _number and (_scale or _zero):

                # only do the scaling the first time and store it in _convert
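                # FITS scaling convention: physical = stored * TSCALn + TZEROn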
                self._convert[indx] = np.array(dummy, dtype=np.float64)
                if _scale:
                    np.multiply(self._convert[indx], bscale,
                                self._convert[indx])
                if _zero:
                    self._convert[indx] += bzero
            elif _bool:
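                # logical columns store the ASCII characters 'T'/'F';
                # convert them to a boolean array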
                self._convert[indx] = np.equal(dummy, ord('T'))
            elif _str:
                try:
                    self._convert[indx] = decode_ascii(dummy)
                except UnicodeDecodeError:
                    pass

            if dim:
                if self._convert[indx] is None:
                    self._convert[indx] = dummy
                if _str:
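                    # apply TDIM to a string column: each row becomes a
                    # dim[:-1] array of strings of length dim[-1]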
                    fmt = self._convert[indx].dtype.char
                    dtype = ('|%s%d' % (fmt, dim[-1]), dim[:-1])
                    self._convert[indx].dtype = dtype
                else:
                    self._convert[indx].shape = (dummy.shape[0],) + dim

        if self._convert[indx] is not None:
            return self._convert[indx]
        else:
            return dummy
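

# A minimal usage sketch: FITS_rec.field() is normally reached through the
# .data attribute of an opened table HDU.  The import below assumes the
# astropy.io.fits (or legacy pyfits) implementation of the class shown above,
# and 'table.fits' with a 'FLUX' column is a hypothetical file.
if __name__ == '__main__':
    from astropy.io import fits

    with fits.open('table.fits') as hdul:
        data = hdul[1].data          # a FITS_rec instance
        flux = data.field('FLUX')    # select a column by name ...
        first = data.field(0)        # ... or by zero-based index
        print(flux.shape, first.dtype)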