Example #1
File: streaming.py  Project: coleb/PyFITS
    def write(self, data):
        """
        Write the given data to the stream.

        Parameters
        ----------
        data : ndarray
            Data to stream to the file.

        Returns
        -------
        writecomplete : int
            Flag that, when `True`, indicates that all of the required
            data has been written to the stream.

        Notes
        -----
        Only the amount of data specified in the header provided to
        the class constructor may be written to the stream.  If the
        provided data would cause the stream to overflow, an `IOError`
        exception is raised and the data is not written.  Once
        sufficient data has been written to the stream to satisfy the
        amount specified in the header, the stream is padded to fill a
        complete FITS block and no more data will be accepted.  An
        attempt to write more data after the stream has been filled
        will raise an `IOError` exception.  If the dtype of the input
        data does not match what is expected by the header, a
        `TypeError` exception is raised.
        """

        curDataSize = self._ffo.tell() - self._data_offset

        if self.writecomplete or curDataSize + data.nbytes > self._size:
            raise IOError('Attempt to write more data to the stream than the '
                          'header specified.')

        if _ImageBaseHDU.NumCode[self._header['BITPIX']] != data.dtype.name:
            raise TypeError('Supplied data does not match the type specified '
                            'in the header.')

        if data.dtype.str[0] != '>':
            # byteswap little-endian arrays before writing
            output = data.byteswap()
        else:
            output = data

        self._ffo.writearray(output)

        if self._ffo.tell() - self._data_offset == self._size:
            # the stream is full, so pad the data out to the next FITS block
            self._ffo.write(_pad_length(self._size) * '\0')
            self.writecomplete = True

        self._ffo.flush()

        return self.writecomplete
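
A minimal usage sketch of the streaming write described above, assuming the surrounding class is pyfits.StreamingHDU(name, header) as defined in streaming.py; the output path, image shape, and header values are made up for illustration, and the header-building calls may differ between pyfits versions:

import numpy as np
import pyfits

# Header promising a 100 x 100 int32 image (BITPIX = 32 maps to dtype int32).
header = pyfits.Header()
header['SIMPLE'] = True
header['BITPIX'] = 32
header['NAXIS'] = 2
header['NAXIS1'] = 100
header['NAXIS2'] = 100

shdu = pyfits.StreamingHDU('streamed.fits', header)

chunk = np.zeros((50, 100), dtype='int32')
done = shdu.write(chunk)   # False: only half of the promised data written
done = shdu.write(chunk)   # True: stream is full and padded to a FITS block
shdu.close()

# A further write(), or a chunk whose dtype does not match BITPIX, would
# raise IOError / TypeError as described in the Notes above.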
Example #2
    def _wasresized(self, verbose=False):
        """
        Determine if any changes to the HDUList will require a file resize
        when flushing the file.

        Side effect of setting the object's _resize attribute.
        """

        if not self._resize:

            # determine whether any of the HDUs have been resized
            for hdu in self:
                # Header:
                nbytes = len(str(hdu._header))
                if nbytes != (hdu._datLoc - hdu._hdrLoc):
                    self._resize = True
                    self._truncate = False
                    if verbose:
                        print('One or more headers have been resized.')
                    break

                # Data:
                if not hdu._data_loaded or hdu.data is None:
                    continue

                nbytes = hdu.size
                nbytes = nbytes + _pad_length(nbytes)
                if nbytes != hdu._datSpan:
                    self._resize = True
                    self._truncate = False
                    if verbose:
                        print('One or more data areas have been resized.')
                    break

            # If only truncation is needed, try to shrink the file in place to
            # just past the last HDU; fall back to a full resize if the
            # underlying file does not support truncation.
            if self._truncate:
                try:
                    self.__file.truncate(hdu._datLoc + hdu._datSpan)
                except IOError:
                    self._resize = True
                self._truncate = False

        return self._resize
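
The check above treats an HDU's data as resized when its padded in-memory size no longer matches its span on disk. A minimal sketch of the padding helper both snippets rely on, assuming _pad_length simply measures the distance to the next standard 2880-byte FITS block boundary (the helper itself is not shown in these examples):

FITS_BLOCK = 2880  # FITS files are organised in 2880-byte blocks

def _pad_length(nbytes):
    # assumed behaviour: bytes needed to reach the next block boundary
    return (FITS_BLOCK - nbytes % FITS_BLOCK) % FITS_BLOCK

# e.g. data occupying 3000 bytes spans two full blocks on disk:
# 3000 + _pad_length(3000) == 5760 == 2 * 2880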
Example #3
    def _calculate_datasum(self, blocking):
        """
        Calculate the value for the ``DATASUM`` card in the HDU.
        """

        if self._data_loaded and self.data is not None:
            # We have the data to be used.
            # We need to pad the data to a block length before calculating
            # the datasum.

            d = np.append(self.data.view(dtype='ubyte'),
                          np.full(_pad_length(self.size), ord(' '),
                                  dtype='ubyte'))

            cs = self._compute_checksum(d, blocking=blocking)
            return cs
        else:
            # This is the case where the data has not been read from the file
            # yet.  We can handle that in a generic manner so we do it in the
            # base class.  The other possibility is that there is no data at
            # all.  This can also be handled in a generic manner.
            return super(TableHDU, self)._calculate_datasum(blocking)
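
Since TableHDU is the ASCII-table HDU, the padding appended before checksumming uses ASCII blanks (0x20) rather than NUL bytes. A small self-contained sketch of just that padding step, with a toy byte array standing in for self.data.view(dtype='ubyte') and the same 2880-byte block assumption for _pad_length:

import numpy as np

FITS_BLOCK = 2880

def _pad_length(nbytes):
    # assumed behaviour: bytes needed to reach the next FITS block boundary
    return (FITS_BLOCK - nbytes % FITS_BLOCK) % FITS_BLOCK

# toy stand-in for the HDU's data viewed as raw bytes
data = np.arange(100, dtype='ubyte')

# pad with ASCII blanks up to a full FITS block before computing the checksum
padding = np.full(_pad_length(data.nbytes), ord(' '), dtype='ubyte')
padded = np.append(data, padding)

assert padded.nbytes % FITS_BLOCK == 0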