Example #1
    def _writedata_internal(self, fileobj):
        """
        Basically copy/pasted from `_ImageBaseHDU._writedata_internal()`, but
        we have to get the data's byte order a different way...

        TODO: Might be nice to store some indication of the data's byte order
        as an attribute or function so that we don't have to do this.
        """

        size = 0

        if self.data is not None:
            self.data._scale_back()

            # Based on the system type, determine the byteorders that
            # would need to be swapped to get to big-endian output
            if sys.byteorder == 'little':
                swap_types = ('<', '=')
            else:
                swap_types = ('<',)
            # deal with unsigned integer 16, 32 and 64 data
            if _is_pseudo_unsigned(self.data.dtype):
                # Convert the unsigned array to signed
                output = np.array(
                    self.data - _unsigned_zero(self.data.dtype),
                    dtype='>i{}'.format(self.data.dtype.itemsize))
                should_swap = False
            else:
                output = self.data
                fname = self.data.dtype.names[0]
                byteorder = self.data.dtype.fields[fname][0].str[0]
                should_swap = (byteorder in swap_types)

            if not fileobj.simulateonly:

                if should_swap:
                    if output.flags.writeable:
                        output.byteswap(True)
                        try:
                            fileobj.writearray(output)
                        finally:
                            output.byteswap(True)
                    else:
                        # For read-only arrays, there is no way around making
                        # a byteswapped copy of the data.
                        fileobj.writearray(output.byteswap(False))
                else:
                    fileobj.writearray(output)

            size += output.size * output.itemsize
        return size
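
The wrinkle this version handles is that for record (table) data the byte order lives on the individual column dtypes rather than on the array's top-level dtype, which is why it peeks at the first field. A minimal standalone sketch of that lookup and the resulting swap decision, using plain numpy and none of astropy's internal helpers, might look like this:

import sys
import numpy as np

def table_needs_swap(data):
    # Byte orders that must be swapped to reach big-endian output on this
    # machine ('=' is native order, so it only needs swapping when the
    # machine itself is little-endian).
    swap_types = ('<', '=') if sys.byteorder == 'little' else ('<',)

    # For a structured (record) array, read the byte order off the first
    # field's dtype; the array-level dtype does not carry it.
    fname = data.dtype.names[0]
    byteorder = data.dtype.fields[fname][0].str[0]
    return byteorder in swap_types

rec = np.zeros(3, dtype=[('flux', '<f8'), ('id', '<i4')])
print(table_needs_swap(rec))   # True: the columns are stored little-endian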
Example #2
    def _calculate_datasum(self):
        """
        Calculate the value for the ``DATASUM`` card in the HDU.
        """

        if self._has_data:

            # We have the data to be used.
            d = self.data

            # First handle the special case where the data is unsigned integer
            # 16, 32 or 64
            if _is_pseudo_unsigned(self.data.dtype):
                d = np.array(self.data - _unsigned_zero(self.data.dtype),
                             dtype='i{}'.format(self.data.dtype.itemsize))

            # Check the byte order of the data.  If it is little endian we
            # must swap it before calculating the datasum.
            if d.dtype.str[0] != '>':
                if d.flags.writeable:
                    byteswapped = True
                    d = d.byteswap(True)
                    d.dtype = d.dtype.newbyteorder('>')
                else:
                    # If the data is not writeable, we just make a byteswapped
                    # copy and don't bother changing it back after
                    d = d.byteswap(False)
                    d.dtype = d.dtype.newbyteorder('>')
                    byteswapped = False
            else:
                byteswapped = False

            cs = self._compute_checksum(d.flatten().view(np.uint8))

            # If the data was byteswapped in this method then return it to
            # its original little-endian order.
            if byteswapped and not _is_pseudo_unsigned(self.data.dtype):
                d.byteswap(True)
                d.dtype = d.dtype.newbyteorder('<')

            return cs
        else:
            # This is the case where the data has not been read from the file
            # yet.  We can handle that in a generic manner so we do it in the
            # base class.  The other possibility is that there is no data at
            # all.  This can also be handled in a generic manner.
            return super()._calculate_datasum()
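
The essential preparation here is getting the data into big-endian byte order before it is checksummed, since the checksum is defined over the bytes as they would be written to the file. A rough standalone sketch of that step, with a plain byte view standing in for astropy's internal ``_compute_checksum``, could look like:

import numpy as np

def datasum_input_bytes(data):
    # The checksum is computed over big-endian bytes, so anything else is
    # byteswapped first.  Working on a copy keeps the caller's array
    # untouched (the method above swaps in place and restores afterwards
    # to avoid the copy).
    d = data
    if d.dtype.str[0] != '>':
        d = d.byteswap(False).view(d.dtype.newbyteorder('>'))
    # Flatten and reinterpret as raw bytes; this is what gets checksummed.
    return d.flatten().view(np.uint8)

b = datasum_input_bytes(np.arange(2, dtype='<i4'))
print(b.tobytes())   # b'\x00\x00\x00\x00\x00\x00\x00\x01'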
Example #3
    def _calculate_datasum(self):
        """
        Calculate the value for the ``DATASUM`` card in the HDU.
        """

        if self._has_data:

            # We have the data to be used.
            d = self.data

            # First handle the special case where the data is unsigned integer
            # 16, 32 or 64
            if _is_pseudo_unsigned(self.data.dtype):
                d = np.array(self.data - _unsigned_zero(self.data.dtype),
                             dtype=f'i{self.data.dtype.itemsize}')

            # Check the byte order of the data.  If it is little endian we
            # must swap it before calculating the datasum.
            if d.dtype.str[0] != '>':
                if d.flags.writeable:
                    byteswapped = True
                    d = d.byteswap(True)
                    d.dtype = d.dtype.newbyteorder('>')
                else:
                    # If the data is not writeable, we just make a byteswapped
                    # copy and don't bother changing it back after
                    d = d.byteswap(False)
                    d.dtype = d.dtype.newbyteorder('>')
                    byteswapped = False
            else:
                byteswapped = False

            cs = self._compute_checksum(d.flatten().view(np.uint8))

            # If the data was byteswapped in this method then return it to
            # its original little-endian order.
            if byteswapped and not _is_pseudo_unsigned(self.data.dtype):
                d.byteswap(True)
                d.dtype = d.dtype.newbyteorder('<')

            return cs
        else:
            # This is the case where the data has not been read from the file
            # yet.  We can handle that in a generic manner so we do it in the
            # base class.  The other possibility is that there is no data at
            # all.  This can also be handled in a generic manner.
            return super()._calculate_datasum()
Example #4
    def _writedata_internal(self, fileobj):
        size = 0

        if self.data is None:
            return size
        elif isinstance(self.data, DaskArray):
            return self._writeinternal_dask(fileobj)
        else:
            # Based on the system type, determine the byteorders that
            # would need to be swapped to get to big-endian output
            if sys.byteorder == 'little':
                swap_types = ('<', '=')
            else:
                swap_types = ('<', )
            # deal with unsigned integer 16, 32 and 64 data
            if _is_pseudo_unsigned(self.data.dtype):
                # Convert the unsigned array to signed
                output = np.array(self.data - _unsigned_zero(self.data.dtype),
                                  dtype=f'>i{self.data.dtype.itemsize}')
                should_swap = False
            else:
                output = self.data
                byteorder = output.dtype.str[0]
                should_swap = (byteorder in swap_types)

            if not fileobj.simulateonly:

                if should_swap:
                    if output.flags.writeable:
                        output.byteswap(True)
                        try:
                            fileobj.writearray(output)
                        finally:
                            output.byteswap(True)
                    else:
                        # For read-only arrays, there is no way around making
                        # a byteswapped copy of the data.
                        fileobj.writearray(output.byteswap(False))
                else:
                    fileobj.writearray(output)

            size += output.size * output.itemsize

            return size
Example #5
    def _writedata_internal(self, fileobj):
        size = 0

        if self.data is not None:
            # Based on the system type, determine the byteorders that
            # would need to be swapped to get to big-endian output
            if sys.byteorder == 'little':
                swap_types = ('<', '=')
            else:
                swap_types = ('<',)
            # deal with unsigned integer 16, 32 and 64 data
            if _is_pseudo_unsigned(self.data.dtype):
                # Convert the unsigned array to signed
                output = np.array(
                    self.data - _unsigned_zero(self.data.dtype),
                    dtype='>i{}'.format(self.data.dtype.itemsize))
                should_swap = False
            else:
                output = self.data
                byteorder = output.dtype.str[0]
                should_swap = (byteorder in swap_types)

            if not fileobj.simulateonly:

                if should_swap:
                    if output.flags.writeable:
                        output.byteswap(True)
                        try:
                            fileobj.writearray(output)
                        finally:
                            output.byteswap(True)
                    else:
                        # For read-only arrays, there is no way around making
                        # a byteswapped copy of the data.
                        fileobj.writearray(output.byteswap(False))
                else:
                    fileobj.writearray(output)

            size += output.size * output.itemsize

        return size
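
The part worth calling out in these writer variants is the swap-write-restore dance: a writeable little-endian or native array is byteswapped in place (no copy), written, and then swapped back in a ``finally`` block so the caller's data is left untouched even if the write fails. A self-contained sketch of the same pattern against an ordinary file-like object follows; the real code writes through astropy's own file wrapper, and the ``simulateonly`` and unsigned-integer handling are omitted here:

import io
import sys
import numpy as np

def write_big_endian(fileobj, output):
    swap_types = ('<', '=') if sys.byteorder == 'little' else ('<',)
    should_swap = output.dtype.str[0] in swap_types

    if should_swap:
        if output.flags.writeable:
            # Swap in place (cheap, no copy), write, then swap back so the
            # caller sees the array exactly as before, even on error.
            output.byteswap(True)
            try:
                fileobj.write(output.tobytes())
            finally:
                output.byteswap(True)
        else:
            # Read-only arrays leave no choice but a byteswapped copy.
            fileobj.write(output.byteswap(False).tobytes())
    else:
        fileobj.write(output.tobytes())
    return output.size * output.itemsize

buf = io.BytesIO()
nbytes = write_big_endian(buf, np.arange(3, dtype='<i4'))
print(nbytes, buf.getvalue())
# 12 b'\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x02'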