Example #1
    def dmap_array_to_bytes(self, array: DmapArray) -> bytes:
        """
        Converts a DmapArray to the byte format.

        Byte format: name, data type, dimension, shape, data

        Parameter
        ---------
        array : DmapArray
            Dmap array to be converted to bytes

        Return
        -------
        array_total_bytes : bytes
            Total bytes of the array
        """
        # need to ensure the array is flattened
        if array.data_type_fmt == 'c' and isinstance(array.value[0], str):
            raise dmap_exceptions.DmapCharError(array.name, self.rec_num)
        elif array.data_type_fmt == 's':
            message = "Error: Trying to read array of strings."\
                " Currently not implemented."\
                " Failed at record {}".format(self.rec_num)
            raise dmap_exceptions.DmapDataError(self.filename, message)

        array_value = array.value.flatten()
        array_name = "{0}\0".format(array.name)
        array_name_format = '{0}s'.format(len(array_name))
        array_name_bytes = struct.pack(array_name_format,
                                       array_name.encode('utf-8'))

        array_type_bytes = struct.pack('c',
                                       chr(array.data_type).encode('utf-8'))

        array_dim_bytes = struct.pack('i', array.dimension)
        array_shape_bytes = bytes()
        if array.dimension > 1:
            array.shape.reverse()
        for size in array.shape:
            array_shape_bytes += struct.pack('i', size)

        array_data_bytes = array_value.tobytes()

        array_total_bytes = array_name_bytes + array_type_bytes + \
            array_dim_bytes + array_shape_bytes + array_data_bytes

        return array_total_bytes
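
The byte format described above (name, data type, dimension, shape, data) can be laid out by hand for a small array. The sketch below is a standalone illustration, not part of the original class; the type code 3 for a 32-bit integer is an assumption based on the standard DataMap definitions rather than something taken from this snippet.

# Minimal sketch of the DMap array byte layout for a 2 x 3 int32 array.
# The type code 3 for int32 is an assumed value, not taken from this file.
import struct
import numpy as np

name = "example_array\0"                          # null-terminated name
data = np.arange(6, dtype=np.int32).reshape(2, 3)

packed = struct.pack('{0}s'.format(len(name)), name.encode('utf-8'))  # name
packed += struct.pack('c', chr(3).encode('utf-8'))                    # type code (assumed int32 -> 3)
packed += struct.pack('i', data.ndim)                                 # dimension
for size in reversed(data.shape):                                     # shape, reversed as above
    packed += struct.pack('i', size)
packed += data.flatten().tobytes()                                    # raw cell data

print(len(packed))  # 14 + 1 + 4 + 2*4 + 6*4 = 51 bytes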
Example #2
    def read_scalar(self) -> DmapScalar:
        """
        Reads a scalar and stores the properties into a namedtuple DmapScalar.

        Return
        ------
        DmapScalar: namedtuple
            data structure that contains the data properties of
            the scalar read in

        Raises
        ------
        DmapDataError
            if the data type format is DMap.
            NOTE: RST allows this; if an example shows up where it is
            used, raise an issue on GitHub so the code can be revisited.

        See Also
        --------
        check_data_type : for other possible raised exceptions
        read_data : reads the data stored in the byte array
        """

        # String and char have a byte size of 1
        scalar_name = self.read_data('s', 1)
        scalar_type = self.read_data('c', 1)

        self.check_data_type(scalar_type, scalar_name)

        scalar_type_fmt = DMAP_DATA_TYPES[scalar_type][0]
        scalar_fmt_byte = DMAP_DATA_TYPES[scalar_type][1]

        if scalar_type_fmt != DMAP:
            scalar_value = self.read_data(scalar_type_fmt, scalar_fmt_byte)
        else:
            message = "Error: Trying to read DMap data type for a scalar."\
                " Failed at record {}".format(self.rec_num)
            # It is unclear when a scalar would hold a DMap data type,
            # so raise an error; if this case appears, revisit the code.
            raise dmap_exceptions.DmapDataError(self.dmap_file, message)

        return DmapScalar(scalar_name, scalar_value,
                          scalar_type, scalar_type_fmt)
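
For context, read_scalar consumes a null-terminated name, a one-byte type code, and then the value whose format the type code determines. The standalone sketch below mirrors that order using struct alone; the one-entry type table is an assumed stand-in for DMAP_DATA_TYPES and is not taken from the snippet.

# Minimal sketch of unpacking one scalar from raw bytes:
# name -> type code -> value, in the same order read_scalar reads them.
import struct

buf = b"stid\x00" + b"\x03" + struct.pack('i', 65)   # name, type code, value
cursor = 0

end = buf.index(b"\x00", cursor)                      # null-terminated name
name = buf[cursor:end].decode('utf-8')
cursor = end + 1

type_code = buf[cursor]                               # one-byte type code
cursor += 1

fmt, size = {3: ('i', 4)}[type_code]                  # assumed type table
value = struct.unpack_from(fmt, buf, cursor)[0]
cursor += size

print(name, value)   # stid 65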
Example #3
    def _empty_record_check(self):
        if self.dmap_records == []:
            raise dmap_exceptions.DmapDataError(self.filename,
                                                "DMap record is empty; "
                                                "there is nothing to write.")
Example #4
    def read_array(self, record_size) -> DmapArray:
        """
        Reads an array from a DMap record the byte arrays and
        stores the data properties in a DmapArray structure.

        Return
        ------
        DmapArray : namedtuple
             data structure that contains the data properties of
             the array read in.

        Raises
        ------
        DmapDataError
            if the array properties (like a dimension size == 0)
            are incorrect.

        See Also
        --------
        read_data : reads the data in the byte array
        read_numerical_array : reads in a numerical array from the byte array
        read_string_array: reads in a string/DMap array
        """
        array_name = self.read_data('s', 1)
        array_type = self.read_data('c', 1)

        self.check_data_type(array_type, array_name)

        array_type_fmt = DMAP_DATA_TYPES[array_type][0]
        array_fmt_bytes = DMAP_DATA_TYPES[array_type][1]

        array_dimension = self.read_data('i', 4)
        self.bytes_check(array_dimension, "array dimension",
                         record_size, "record size")
        self.zero_negative_check(array_dimension,
                                 "{name} array dimension"
                                 "".format(name=array_name))

        array_shape = [self.read_data('i', 4)
                       for i in range(0, array_dimension)]
        if array_dimension > 1:
            array_shape.reverse()

        # slist is the array that holds the range gates that have valid data
        # when qflg is 1
        if any(x <= 0 for x in array_shape) and array_name != "slist":
            message = "Error: Array shape {shape} contains "\
                "dimension size <= 0."\
                " Failed at record {rec}".format(shape=array_shape,
                                                 rec=self.rec_num)
            raise dmap_exceptions.DmapDataError(self.dmap_file, message)

        for i in range(array_dimension):
            if array_shape[i] >= record_size:
                message = "Error: Array {index}-dimension size {size}"\
                    " exceeds record size: {rec_size}. "\
                    "Failed at record {rec}"\
                    "".format(index=i,
                              size=array_shape[i],
                              rec_size=record_size,
                              rec=self.rec_num)
                raise dmap_exceptions.DmapDataError(self.dmap_file, message)

        # We could use np.prod(array_shape), but the for loop performs
        # better here. Note: depending on your background, "cells" may also
        # be read as the number of elements.
        total_num_cells = 1
        for i in array_shape:
            total_num_cells *= i
        self.bytes_check(total_num_cells, "total number of cells",
                         record_size, "record size")

        total_num_cells_bytes = total_num_cells * array_fmt_bytes
        self.bytes_check(total_num_cells_bytes,
                         "total number of cells in bytes",
                         record_size, "record size")

        # Parsing an array of strings requires a different method. Numpy
        # can't parse strings or DMaps into arrays the way it can for other
        # types because it doesn't know their sizes, so they have to be
        # read manually the slow way. Because chars are encoded as hex
        # literals, they have to be read one at a time to make sense.

        if array_type_fmt == 's':
            message = "Error: Trying to read array of strings."\
                " Currently not implemented."\
                " Failed at record {}".format(self.rec_num)
            # It is unclear when this case occurs, so raise an error;
            # if it is used, revisit the code.
            raise dmap_exceptions.DmapDataError(self.dmap_file, message)
            # FIXME: Not working
            # array_value = self.read_string_array(array_shape,
            #                                     array_type_fmt,
            #                                     array_fmt_bytes)
        elif array_type == DMAP:
            message = "Trying to read DMap array data type."\
                " Failed at record {}".format(self.rec_num)
            # It is unclear when this case occurs, so raise an error;
            # if it is used, revisit the code.
            raise dmap_exceptions.DmapDataError(self.dmap_file, message)
        else:
            array_value = self.read_numerical_array(array_shape,
                                                    array_type_fmt,
                                                    total_num_cells,
                                                    array_fmt_bytes)

        return DmapArray(array_name, array_value, array_type, array_type_fmt,
                         array_dimension, array_shape)
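
The header bookkeeping in read_array (dimension, shape stored in reverse order for multi-dimensional arrays, total cell count, then the numerical payload) can be mimicked in a short standalone sketch. Here np.frombuffer is used as an assumed stand-in for read_numerical_array; the buffer is fabricated for illustration.

# Minimal sketch of the array header bookkeeping: dimension, reversed
# shape, total cell count, then the numerical payload.
import struct
import numpy as np

payload = np.arange(6, dtype=np.float32)
buf = struct.pack('i', 2) + struct.pack('i', 3) + struct.pack('i', 2) \
    + payload.tobytes()                          # dimension, shape (reversed), data

dimension = struct.unpack_from('i', buf, 0)[0]
shape = [struct.unpack_from('i', buf, 4 + 4 * i)[0] for i in range(dimension)]
if dimension > 1:
    shape.reverse()                              # restore row-major order

total_cells = 1
for size in shape:
    total_cells *= size                          # same loop as above

data = np.frombuffer(buf, dtype=np.float32, offset=4 + 4 * dimension,
                     count=total_cells).reshape(shape)
print(data.shape)    # (2, 3)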
Example #5
    def test_initial_data_integrity(self):
        """
        Quick method for testing the integrity of the dmap data.

        Raises
        ------
        CursorError
            If the cursor is not set to an expected value.
        DmapDataError
            If the data is corrupted by some byte offset.

        See Also
        --------
        zero_check : raises ZeroByteError
        bytes_check : raises MismatchByteError
        """
        pydarn_log.debug("Testing the integrity of the DMap file/stream")
        total_block_size = 0  # unit of bytes
        if self.cursor != 0:
            raise dmap_exceptions.CursorError(self.cursor, 0, self.rec_num)

        while self.cursor < self.dmap_end_bytes:
            """
            DMap record headers contain the following:
                - encoding identifier: is a unique 32-bit integer that
                  indicates how the block was constructed.
                  It is used to differentiate between the possible future
                  changes to the DataMap format.
                - block size: 32-bit integer that represents the total size of
                  block including the header and the data.
                - Number of Scalars: number of scalar variables
                - Scalar data: the scalar data of the record.
                  Please see DmapScalar for more information on scalars.
                - Number of arrays: number of array variables
                  Please see DmapArray for more information on arrays.
            """
            # The encoding identifier is not needed here, so rather than
            # reading it the cursor is simply advanced past it to the
            # next offset.
            # TODO: Possible check that uses the encoding identifier
            # encoding_identifier = self.read_data('i',4)
            self.cursor += 4
            block_size = self.read_data('i', 4)
            self.zero_negative_check(block_size, "block size")

            total_block_size += block_size
            self.bytes_check(total_block_size, "total block size",
                             self.dmap_end_bytes, "total bytes in file")

            # The factor of 2 accounts for the encoding_identifier and the
            # block size, which are both int types and have already been
            # passed over.
            self.cursor = self.cursor + block_size - 2 *\
                DMAP_DATA_TYPES[INT][1]

        if total_block_size != self.dmap_end_bytes:
            message = "Error: Initial integrity check shows"\
                " total block size: {total_size} < end bytes {end_bytes}."\
                " Failed at record {rec}."\
                "".format(total_size=total_block_size,
                          end_bytes=self.dmap_end_bytes,
                          rec=self.rec_num)
            raise dmap_exceptions.DmapDataError(self.data_file, message)
        self.cursor = 0
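
The integrity walk above amounts to skipping the encoding identifier, reading the block size, and jumping to the next record header until the sum of block sizes covers the stream. A minimal standalone sketch of that walk follows; the stream and the identifier value are fabricated purely for illustration, and real DMap blocks carry scalar and array data after the two header integers.

# Minimal sketch of the cursor walk: skip the encoding identifier, read
# the block size, and jump to the next record header.
import struct

def make_block(payload: bytes) -> bytes:
    encoding_identifier = 65537                    # placeholder value
    block_size = 8 + len(payload)                  # two header ints + payload
    return struct.pack('ii', encoding_identifier, block_size) + payload

stream = make_block(b"\x00" * 16) + make_block(b"\x00" * 32)

cursor = 0
total_block_size = 0
while cursor < len(stream):
    cursor += 4                                    # skip encoding identifier
    block_size = struct.unpack_from('i', stream, cursor)[0]
    total_block_size += block_size
    cursor += block_size - 4                       # jump past the rest of the block

print(total_block_size == len(stream))   # True for a well-formed stream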