Example #1
    def data(self, data):
        if 'data' in self.__dict__ and self.__dict__['data'] is not None:
            if self.__dict__['data'] is data:
                return
            else:
                self._data_replaced = True
            was_unsigned = _is_pseudo_unsigned(self.__dict__['data'].dtype)
        else:
            self._data_replaced = True
            was_unsigned = False

        if data is not None and not isinstance(data, np.ndarray):
            # Try to coerce the data into a numpy array--this will work, on
            # some level, for most objects
            try:
                data = np.array(data)
            except Exception:
                raise TypeError('data object {!r} could not be coerced into an '
                                'ndarray'.format(data))

        self.__dict__['data'] = data
        self._modified = True

        if isinstance(data, np.ndarray):
            # Set new values of bitpix, bzero, and bscale now, but wait to
            # revise original values until header is updated.
            self._bitpix = DTYPE2BITPIX[data.dtype.name]
            self._bscale = 1
            self._bzero = 0
            self._blank = None
            self._axes = list(data.shape)
            self._axes.reverse()
        elif self.data is None:
            self._axes = []
        else:
            raise ValueError('not a valid data array')

        # Update the header, including adding BZERO/BSCALE if new data is
        # unsigned. Does not change the values of self._bitpix,
        # self._orig_bitpix, etc.
        self.update_header()
        if data is not None and was_unsigned:
            self._update_header_scale_info(data.dtype)

        # Keep _orig_bitpix as it was until header update is done, then
        # set it, to allow easier handling of the case of unsigned
        # integer data being converted to something else. Setting these here
        # is needed only for the case do_not_scale_image_data=True when
        # setting the data to unsigned int.

        # If necessary during initialization, i.e. if BSCALE and BZERO were
        # not in the header but the data was unsigned, the attributes below
        # will be updated in __init__.
        self._orig_bitpix = self._bitpix
        self._orig_bscale = self._bscale
        self._orig_bzero = self._bzero

        # returning the data signals to lazyproperty that we've already handled
        # setting self.__dict__['data']
        return data
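
A minimal sketch of the semantics the setter above assumes for the two helper
functions it calls. The names come from the code; the bodies here are an
assumption, not astropy's actual source. FITS has no unsigned BITPIX codes, so
uint16/uint32/uint64 arrays are stored as the signed type of the same width
plus a BZERO offset.

import numpy as np

def _is_pseudo_unsigned(dtype):
    # uint8 maps directly to BITPIX = 8, so only the wider unsigned
    # types need the BZERO trick
    return dtype.kind == 'u' and dtype.itemsize >= 2

def _unsigned_zero(dtype):
    # The BZERO offset that recenters the unsigned range on the signed
    # one, e.g. 32768 for uint16
    return 1 << (dtype.itemsize * 8 - 1)
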
Example #2
    def _calculate_datasum(self):
        """
        Calculate the value for the ``DATASUM`` card in the HDU.
        """

        if self._has_data:

            # We have the data to be used.
            d = self.data

            # First handle the special case where the data is unsigned integer
            # 16, 32 or 64
            if _is_pseudo_unsigned(self.data.dtype):
                d = np.array(self.data - _unsigned_zero(self.data.dtype),
                             dtype='i{}'.format(self.data.dtype.itemsize))

            # Check the byte order of the data.  If it is little endian we
            # must swap it before calculating the datasum.
            if d.dtype.str[0] != '>':
                if d.flags.writeable:
                    byteswapped = True
                    d = d.byteswap(True)
                    d.dtype = d.dtype.newbyteorder('>')
                else:
                    # If the data is not writeable, we just make a byteswapped
                    # copy and don't bother changing it back after
                    d = d.byteswap(False)
                    d.dtype = d.dtype.newbyteorder('>')
                    byteswapped = False
            else:
                byteswapped = False

            cs = self._compute_checksum(d.flatten().view(np.uint8))

            # If the data was byteswapped in this method then return it to
            # its original little-endian order.
            if byteswapped and not _is_pseudo_unsigned(self.data.dtype):
                d.byteswap(True)
                d.dtype = d.dtype.newbyteorder('<')

            return cs
        else:
            # This is the case where the data has not been read from the file
            # yet.  We can handle that in a generic manner so we do it in the
            # base class.  The other possibility is that there is no data at
            # all.  This can also be handled in a generic manner.
            return super()._calculate_datasum()
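
The datasum must be computed over big-endian bytes, which is why the method
above swaps little-endian data first. A small standalone illustration of that
byte-order bookkeeping, using the view/newbyteorder idiom rather than the
in-place dtype assignment seen above:

import numpy as np

d = np.arange(4, dtype='<i4')            # explicitly little-endian
print(d.dtype.str)                       # '<i4'; first char is the byte order

d.byteswap(True)                         # flip the bytes in place...
d = d.view(d.dtype.newbyteorder('>'))    # ...then relabel the dtype to match
print(d)                                 # [0 1 2 3] again: bytes and label agree
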
Example #3
    def _calculate_datasum(self):
        """
        Calculate the value for the ``DATASUM`` card in the HDU.
        """

        if self._has_data:

            # We have the data to be used.
            d = self.data

            # First handle the special case where the data is unsigned integer
            # 16, 32 or 64
            if _is_pseudo_unsigned(self.data.dtype):
                d = np.array(self.data - _unsigned_zero(self.data.dtype),
                             dtype=f'i{self.data.dtype.itemsize}')

            # Check the byte order of the data.  If it is little endian we
            # must swap it before calculating the datasum.
            if d.dtype.str[0] != '>':
                if d.flags.writeable:
                    byteswapped = True
                    d = d.byteswap(True)
                    d.dtype = d.dtype.newbyteorder('>')
                else:
                    # If the data is not writeable, we just make a byteswapped
                    # copy and don't bother changing it back after
                    d = d.byteswap(False)
                    d.dtype = d.dtype.newbyteorder('>')
                    byteswapped = False
            else:
                byteswapped = False

            cs = self._compute_checksum(d.flatten().view(np.uint8))

            # If the data was byteswapped in this method then return it to
            # its original little-endian order.
            if byteswapped and not _is_pseudo_unsigned(self.data.dtype):
                d.byteswap(True)
                d.dtype = d.dtype.newbyteorder('<')

            return cs
        else:
            # This is the case where the data has not been read from the file
            # yet.  We can handle that in a generic manner so we do it in the
            # base class.  The other possibility is that there is no data at
            # all.  This can also be handled in a generic manner.
            return super()._calculate_datasum()
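
What _compute_checksum receives is the flattened big-endian array viewed as
raw bytes. A toy demonstration of that input preparation; the byte sum below
is only a stand-in, since the real FITS DATASUM is a 32-bit ones' complement
sum computed inside _compute_checksum:

import numpy as np

d = np.array([[1, 2], [3, 4]], dtype='>i2')   # already big-endian
raw = d.flatten().view(np.uint8)              # exactly what the methods above
                                              # hand to _compute_checksum
print(raw)                                    # [0 1 0 2 0 3 0 4]
toy = int(raw.sum())                          # stand-in only, not the real
                                              # FITS checksum algorithm
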
Example #4
    def _writedata_internal(self, fileobj):
        """
        Basically copy/pasted from `_ImageBaseHDU._writedata_internal()`, but
        we have to get the data's byte order a different way...

        TODO: Might be nice to store some indication of the data's byte order
        as an attribute or function so that we don't have to do this.
        """

        size = 0

        if self.data is not None:
            self.data._scale_back()

            # Based on the system type, determine the byteorders that
            # would need to be swapped to get to big-endian output
            if sys.byteorder == 'little':
                swap_types = ('<', '=')
            else:
                swap_types = ('<',)
            # deal with unsigned integer 16, 32 and 64 data
            if _is_pseudo_unsigned(self.data.dtype):
                # Convert the unsigned array to signed
                output = np.array(
                    self.data - _unsigned_zero(self.data.dtype),
                    dtype='>i{}'.format(self.data.dtype.itemsize))
                should_swap = False
            else:
                output = self.data
                fname = self.data.dtype.names[0]
                byteorder = self.data.dtype.fields[fname][0].str[0]
                should_swap = (byteorder in swap_types)

            if not fileobj.simulateonly:

                if should_swap:
                    if output.flags.writeable:
                        output.byteswap(True)
                        try:
                            fileobj.writearray(output)
                        finally:
                            output.byteswap(True)
                    else:
                        # For read-only arrays, there is no way around making
                        # a byteswapped copy of the data.
                        fileobj.writearray(output.byteswap(False))
                else:
                    fileobj.writearray(output)

            size += output.size * output.itemsize
        return size
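
This variant reads the byte order from the first field of a record array
rather than from the array dtype itself. A small sketch of that lookup on a
hypothetical structured dtype (the field names here are made up):

import sys
import numpy as np

rec = np.zeros(3, dtype=[('DATA', '<f4'), ('PAR', '<i4')])
fname = rec.dtype.names[0]                      # 'DATA'
byteorder = rec.dtype.fields[fname][0].str[0]   # '<'
swap_types = ('<', '=') if sys.byteorder == 'little' else ('<',)
print(byteorder in swap_types)                  # True: would need swapping
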
Example #5
    def _writedata_internal(self, fileobj):
        size = 0

        if self.data is None:
            return size
        elif isinstance(self.data, DaskArray):
            return self._writeinternal_dask(fileobj)
        else:
            # Based on the system type, determine the byteorders that
            # would need to be swapped to get to big-endian output
            if sys.byteorder == 'little':
                swap_types = ('<', '=')
            else:
                swap_types = ('<',)
            # deal with unsigned integer 16, 32 and 64 data
            if _is_pseudo_unsigned(self.data.dtype):
                # Convert the unsigned array to signed
                output = np.array(self.data - _unsigned_zero(self.data.dtype),
                                  dtype=f'>i{self.data.dtype.itemsize}')
                should_swap = False
            else:
                output = self.data
                byteorder = output.dtype.str[0]
                should_swap = (byteorder in swap_types)

            if not fileobj.simulateonly:

                if should_swap:
                    if output.flags.writeable:
                        output.byteswap(True)
                        try:
                            fileobj.writearray(output)
                        finally:
                            output.byteswap(True)
                    else:
                        # For read-only arrays, there is no way around making
                        # a byteswapped copy of the data.
                        fileobj.writearray(output.byteswap(False))
                else:
                    fileobj.writearray(output)

            size += output.size * output.itemsize

            return size
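
The pseudo-unsigned branch above relies on unsigned-to-signed wraparound plus
a big-endian target dtype. A standalone sketch of that conversion for uint16,
where the BZERO offset is 32768:

import numpy as np

u = np.array([0, 32768, 65535], dtype=np.uint16)
# Subtracting the BZERO offset recenters the values on the signed range;
# the '>i2' target makes the result big-endian, ready to write
signed = np.array(u - 32768, dtype='>i2')
print(signed)                     # [-32768      0  32767]
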
Example #6
    def _writedata_internal(self, fileobj):
        size = 0

        if self.data is not None:
            # Based on the system type, determine the byteorders that
            # would need to be swapped to get to big-endian output
            if sys.byteorder == 'little':
                swap_types = ('<', '=')
            else:
                swap_types = ('<',)
            # deal with unsigned integer 16, 32 and 64 data
            if _is_pseudo_unsigned(self.data.dtype):
                # Convert the unsigned array to signed
                output = np.array(
                    self.data - _unsigned_zero(self.data.dtype),
                    dtype='>i{}'.format(self.data.dtype.itemsize))
                should_swap = False
            else:
                output = self.data
                byteorder = output.dtype.str[0]
                should_swap = (byteorder in swap_types)

            if not fileobj.simulateonly:

                if should_swap:
                    if output.flags.writeable:
                        output.byteswap(True)
                        try:
                            fileobj.writearray(output)
                        finally:
                            output.byteswap(True)
                    else:
                        # For read-only arrays, there is no way around making
                        # a byteswapped copy of the data.
                        fileobj.writearray(output.byteswap(False))
                else:
                    fileobj.writearray(output)

            size += output.size * output.itemsize

        return size
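
In the writable case above the caller's array is mutated, so the second
byteswap in the finally block is what guarantees the data comes back unchanged
even if the write raises. A toy version of that swap-write-restore pattern
against an in-memory buffer, with write(tobytes()) standing in for
fileobj.writearray:

import io
import numpy as np

buf = io.BytesIO()
output = np.arange(3, dtype='<i4')   # the caller's little-endian array

output.byteswap(True)                # flip to big-endian bytes in place
try:
    buf.write(output.tobytes())      # stand-in for fileobj.writearray(output)
finally:
    output.byteswap(True)            # restore even if the write failed

print(output)                        # [0 1 2]: unchanged for the caller
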
Example #7
    def _writeinternal_dask(self, fileobj):

        if sys.byteorder == 'little':
            swap_types = ('<', '=')
        else:
            swap_types = ('<',)
        # deal with unsigned integer 16, 32 and 64 data
        if _is_pseudo_unsigned(self.data.dtype):
            raise NotImplementedError(
                "This dtype isn't currently supported with dask.")
        else:
            output = self.data
            byteorder = output.dtype.str[0]
            should_swap = (byteorder in swap_types)

        if should_swap:
            from dask.utils import M
            # NOTE: the inplace flag to byteswap needs to be False; otherwise
            # the array is byteswapped in place every time it is computed,
            # which affects the input dask array.
            output = output.map_blocks(M.byteswap,
                                       False).map_blocks(M.newbyteorder, "S")

        initial_position = fileobj.tell()
        n_bytes = output.nbytes

        # Extend the file n_bytes into the future
        fileobj.seek(initial_position + n_bytes - 1)
        fileobj.write(b'\0')
        fileobj.flush()

        if fileobj.fileobj_mode not in ('rb+', 'wb+', 'ab+'):
            # Use another file handle if the current one is not in
            # read/write mode
            fp = open(fileobj.name, mode='rb+')
            should_close = True
        else:
            fp = fileobj._file
            should_close = False

        try:
            outmmap = mmap.mmap(fp.fileno(),
                                length=initial_position + n_bytes,
                                access=mmap.ACCESS_WRITE)

            outarr = np.ndarray(shape=output.shape,
                                dtype=output.dtype,
                                offset=initial_position,
                                buffer=outmmap)

            output.store(outarr, lock=True, compute=True)
        finally:
            if should_close:
                fp.close()
            outmmap.close()

        # On Windows closing the memmap causes the file pointer to return to 0, so
        # we need to go back to the end of the data (since padding may be written
        # after)
        fileobj.seek(initial_position + n_bytes)

        return n_bytes
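
The dask path grows the file to its final size up front and then exposes the
data region as an mmap-backed ndarray for output.store to fill block by block.
A minimal sketch of that mmap/ndarray wiring, using plain numpy in place of
dask (the filename is hypothetical):

import mmap
import numpy as np

data = np.arange(6, dtype='>i4').reshape(2, 3)

with open('scratch.dat', 'wb+') as fp:
    # Grow the file to its final size first, as the method above does
    fp.seek(data.nbytes - 1)
    fp.write(b'\0')
    fp.flush()

    mm = mmap.mmap(fp.fileno(), length=data.nbytes, access=mmap.ACCESS_WRITE)
    # An ndarray view over the mapped region: filling it writes straight
    # into the file (dask's output.store fills it block by block instead)
    out = np.ndarray(shape=data.shape, dtype=data.dtype, buffer=mm)
    out[:] = data
    mm.flush()
    del out                          # drop the view before closing the map
    mm.close()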