Example #1
    def read_prologue(self):
        """Read the prologue metadata."""
        with open(self.filename, "rb") as fp_:
            fp_.seek(self.mda['total_header_length'])
            data = np.fromfile(fp_, dtype=hrit_prologue, count=1)
            self.prologue.update(recarray2dict(data))
            try:
                impf = np.fromfile(fp_, dtype=impf_configuration, count=1)[0]
            except IndexError:
                logger.info('No IMPF configuration field found in prologue.')
            else:
                self.prologue.update(recarray2dict(impf))
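Every snippet on this page feeds a one-element NumPy structured array into recarray2dict and merges the resulting plain dict into its metadata. As a rough, self-contained sketch of that pattern (the toy dtype and values below are invented, and the import path assumes satpy's eum_base module; the assertions in Example #9 and Example #10 show the behaviour the tests actually pin down):

    import numpy as np

    from satpy.readers.eum_base import recarray2dict  # assumed import path

    # Hypothetical two-field header dtype, for illustration only.
    toy_header = np.dtype([('SatelliteId', '>u2'), ('NominalLongitude', '>f4')])
    raw = np.array([(324, 9.5)], dtype=toy_header)

    # recarray2dict flattens the record into a plain dict keyed by field name,
    # roughly {'SatelliteId': 324, 'NominalLongitude': 9.5}, which the readers
    # then merge into self.prologue / self.mda via dict.update().
    meta = recarray2dict(raw)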
Example #2
    def read_prologue(self):
        """Read the prologue metadata."""

        with open(self.filename, "rb") as fp_:
            fp_.seek(self.mda['total_header_length'])
            data = np.fromfile(fp_, dtype=hrit_prologue, count=1)
            self.prologue.update(recarray2dict(data))
            try:
                impf = np.fromfile(fp_, dtype=impf_configuration, count=1)[0]
            except IndexError:
                logger.info('No IMPF configuration field found in prologue.')
            else:
                self.prologue.update(recarray2dict(impf))
Example #3
    def read_epilogue(self):
        """Read the epilogue metadata."""

        with open(self.filename, "rb") as fp_:
            fp_.seek(self.mda['total_header_length'])
            data = np.fromfile(fp_, dtype=hrit_epilogue, count=1)
            self.epilogue.update(recarray2dict(data))
Example #4
    def read_prologue(self):
        """Read the prologue metadata."""
        with utils.generic_open(self.filename, mode="rb") as fp_:
            fp_.seek(self.mda['total_header_length'])
            data = np.frombuffer(fp_.read(hrit_prologue.itemsize),
                                 dtype=hrit_prologue,
                                 count=1)
            self.prologue.update(recarray2dict(data))
            try:
                impf = np.frombuffer(fp_.read(impf_configuration.itemsize),
                                     dtype=impf_configuration,
                                     count=1)[0]
            except ValueError:
                logger.info('No IMPF configuration field found in prologue.')
            else:
                self.prologue.update(recarray2dict(impf))
Example #5
    def read_epilogue(self):
        """Read the epilogue metadata."""

        with open(self.filename, "rb") as fp_:
            fp_.seek(self.mda['total_header_length'])
            data = np.fromfile(fp_, dtype=hrit_epilogue, count=1)
            self.epilogue.update(recarray2dict(data))
Example #6
    def read_epilogue(self):
        """Read the epilogue metadata."""
        with utils.generic_open(self.filename, mode="rb") as fp_:
            fp_.seek(self.mda['total_header_length'])
            data = np.frombuffer(fp_.read(hrit_epilogue.itemsize),
                                 dtype=hrit_epilogue,
                                 count=1)
            self.epilogue.update(recarray2dict(data))
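Examples #4 and #6 read a fixed number of bytes and decode them with np.frombuffer, instead of handing the open file to np.fromfile as the earlier examples do. The practical difference, sketched below with a made-up dtype standing in for the real HRIT definitions, is that np.frombuffer only needs bytes, so it also works with the file-like objects that utils.generic_open may return (e.g. for compressed input), whereas np.fromfile expects a plain on-disk file:

    import io

    import numpy as np

    # Toy dtype standing in for hrit_epilogue; the real definitions live in satpy.
    toy_dtype = np.dtype([('field_a', '>u2'), ('field_b', '>u4')])
    buffered = io.BytesIO(np.array([(1, 2)], dtype=toy_dtype).tobytes())

    # np.frombuffer decodes exactly itemsize bytes from any file-like source ...
    rec = np.frombuffer(buffered.read(toy_dtype.itemsize), dtype=toy_dtype, count=1)
    # ... whereas np.fromfile, as used in Examples #1-#3, needs a real file.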
Example #7
    def _read_trailer(self):

        hdr_size = native_header.itemsize
        data_size = (self._get_data_dtype().itemsize *
                     self.mda['number_of_lines'])

        with open(self.filename, "rb") as fp:
            fp.seek(hdr_size + data_size)
            data = np.fromfile(fp, dtype=native_trailer, count=1)

        self.trailer.update(recarray2dict(data))
Example #8
    def _read_trailer(self):

        hdr_size = native_header.itemsize
        data_size = (self._get_data_dtype().itemsize *
                     self.mda['number_of_lines'])

        with open(self.filename, "rb") as fp:
            fp.seek(hdr_size + data_size)
            data = np.fromfile(fp, dtype=native_trailer, count=1)

        self.trailer.update(recarray2dict(data))
Example #9
    def test_mpef_product_header(self):
        """Test function for TestRecarray2Dict and mpef product header."""
        names = ['ImageLocation', 'GsicsCalMode', 'GsicsCalValidity',
                 'Padding', 'OffsetToData', 'Padding2']
        mpef_header = np.dtype([(name, mpef_product_header.fields[name][0])
                                for name in names])
        mph_struct = np.array([('OPE', True, False, 'XX', 1000, '12345678')], dtype=mpef_header)
        test_mph = {'ImageLocation': "OPE",
                    'GsicsCalMode': True,
                    'GsicsCalValidity': False,
                    'Padding': 'XX',
                    'OffsetToData': 1000,
                    'Padding2': '12345678'
                    }
        self.assertEqual(recarray2dict(mph_struct), test_mph)
Example #10
    def test_fun(self):

        # datatype definition
        pat_dt = np.dtype([('TrueRepeatCycleStart', time_cds_expanded),
                           ('PlanForwardScanEnd', time_cds_expanded),
                           ('PlannedRepeatCycleEnd', time_cds_expanded)])

        # planned acquisition time, add extra dimensions
        # these should be removed by recarray2dict
        pat = np.array([[[((21916, 41409544, 305, 262),
                           (21916, 42160340, 659, 856),
                           (21916, 42309417, 918, 443))]]],
                       dtype=pat_dt)

        expected = {
            'TrueRepeatCycleStart': datetime(2018, 1, 2, 11, 30, 9, 544305),
            'PlanForwardScanEnd': datetime(2018, 1, 2, 11, 42, 40, 340660),
            'PlannedRepeatCycleEnd': datetime(2018, 1, 2, 11, 45, 9, 417918)
        }

        self.assertEqual(recarray2dict(pat), expected)
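The tuples in the test above are CDS "expanded" time stamps; judging by the expected datetimes, the four fields are days, milliseconds, microseconds and nanoseconds, and recarray2dict turns them into datetime objects. A quick check of the first tuple, assuming the conventional CDS epoch of 1958-01-01:

    from datetime import datetime, timedelta

    days, msecs, usecs, _nsecs = 21916, 41409544, 305, 262  # 'TrueRepeatCycleStart' above

    # Nanoseconds fall below datetime resolution and are dropped.
    t = datetime(1958, 1, 1) + timedelta(days=days, milliseconds=msecs, microseconds=usecs)
    print(t)  # 2018-01-02 11:30:09.544305, matching the expected dict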
Example #11
    def test_fun(self):

        # datatype definition
        pat_dt = np.dtype([
            ('TrueRepeatCycleStart', time_cds_expanded),
            ('PlanForwardScanEnd', time_cds_expanded),
            ('PlannedRepeatCycleEnd', time_cds_expanded)
        ])

        # planned acquisition time, add extra dimensions
        # these should be removed by recarray2dict
        pat = np.array([[[(
            (21916, 41409544, 305, 262),
            (21916, 42160340, 659, 856),
            (21916, 42309417, 918, 443))]]], dtype=pat_dt)

        expected = {
            'TrueRepeatCycleStart': datetime(2018, 1, 2, 11, 30, 9, 544305),
            'PlanForwardScanEnd': datetime(2018, 1, 2, 11, 42, 40, 340660),
            'PlannedRepeatCycleEnd': datetime(2018, 1, 2, 11, 45, 9, 417918)
        }

        self.assertEqual(recarray2dict(pat), expected)
Example #12
    def _read_header(self):
        """Read the header info."""
        data = np.fromfile(self.filename,
                           dtype=native_header, count=1)

        self.header.update(recarray2dict(data))

        data15hd = self.header['15_DATA_HEADER']
        sec15hd = self.header['15_SECONDARY_PRODUCT_HEADER']

        # Set the list of available channels:
        self.mda['available_channels'] = get_available_channels(self.header)
        self.mda['channel_list'] = [i for i in CHANNEL_NAMES.values()
                                    if self.mda['available_channels'][i]]

        self.platform_id = data15hd[
            'SatelliteStatus']['SatelliteDefinition']['SatelliteId']
        self.mda['platform_name'] = "Meteosat-" + SATNUM[self.platform_id]

        equator_radius = data15hd['GeometricProcessing'][
            'EarthModel']['EquatorialRadius'] * 1000.
        north_polar_radius = data15hd[
            'GeometricProcessing']['EarthModel']['NorthPolarRadius'] * 1000.
        south_polar_radius = data15hd[
            'GeometricProcessing']['EarthModel']['SouthPolarRadius'] * 1000.
        polar_radius = (north_polar_radius + south_polar_radius) * 0.5
        ssp_lon = data15hd['ImageDescription'][
            'ProjectionDescription']['LongitudeOfSSP']

        self.mda['projection_parameters'] = {'a': equator_radius,
                                             'b': polar_radius,
                                             'h': 35785831.00,
                                             'ssp_longitude': ssp_lon}

        north = int(sec15hd['NorthLineSelectedRectangle']['Value'])
        east = int(sec15hd['EastColumnSelectedRectangle']['Value'])
        south = int(sec15hd['SouthLineSelectedRectangle']['Value'])
        west = int(sec15hd['WestColumnSelectedRectangle']['Value'])

        ncolumns = west - east + 1
        nrows = north - south + 1

        # Check if the file has fewer rows or columns than
        # the maximum; if so, it is an area of interest file
        if (nrows < VISIR_NUM_LINES) or (ncolumns < VISIR_NUM_COLUMNS):
            self.mda['is_full_disk'] = False

        # If the number of columns in the file is not divisible by 4,
        # UMARF will add extra columns to the file
        modulo = ncolumns % 4
        padding = 0
        if modulo > 0:
            padding = 4 - modulo
        cols_visir = ncolumns + padding

        # Check the VISIR calculated column dimension against
        # the header information
        cols_visir_hdr = int(sec15hd['NumberColumnsVISIR']['Value'])
        if cols_visir_hdr != cols_visir:
            logger.warning(
                "Number of VISIR columns from the header is incorrect!")
            logger.warning("Header: %d", cols_visir_hdr)
            logger.warning("Calculated: = %d", cols_visir)

        # HRV Channel - check if the area is reduced in east west
        # direction as this affects the number of columns in the file
        cols_hrv_hdr = int(sec15hd['NumberColumnsHRV']['Value'])
        if ncolumns < VISIR_NUM_COLUMNS:
            cols_hrv = cols_hrv_hdr
        else:
            cols_hrv = int(cols_hrv_hdr / 2)

        # self.mda represents the 16bit dimensions not 10bit
        self.mda['number_of_lines'] = int(sec15hd['NumberLinesVISIR']['Value'])
        self.mda['number_of_columns'] = cols_visir
        self.mda['hrv_number_of_lines'] = int(sec15hd["NumberLinesHRV"]['Value'])
        self.mda['hrv_number_of_columns'] = cols_hrv
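The padding logic above rounds the selected region's width up to the next multiple of 4 columns before comparing it with the header value. A short walk-through with a hypothetical ROI width of 1238 columns:

    ncolumns = 1238                            # hypothetical west - east + 1
    modulo = ncolumns % 4                      # 2 -> not a multiple of 4
    padding = 4 - modulo if modulo > 0 else 0  # 2 extra columns assumed to be added by UMARF
    cols_visir = ncolumns + padding            # 1240 columns expected in the file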
Example #13
    def _read_amv_header(self):
        """Read AMV product header."""
        hdr = np.fromfile(self.filename, SegProdHeaders.amvFinal_prd_hdr, 1)
        hdr = hdr.newbyteorder('>')
        return recarray2dict(hdr)
Example #14
    def _read_mpef_header(self):
        """Read MPEF header."""
        hdr = np.fromfile(self.filename, mpef_product_header, 1)
        return recarray2dict(hdr)
Example #15
    def _read_header(self):
        """Read the header info"""

        data = np.fromfile(self.filename, dtype=native_header, count=1)

        self.header.update(recarray2dict(data))

        data15hd = self.header['15_DATA_HEADER']
        sec15hd = self.header['15_SECONDARY_PRODUCT_HEADER']

        # Set the list of available channels:
        self.available_channels = get_available_channels(self.header)
        self._channel_list = [
            i for i in CHANNEL_NAMES.values() if self.available_channels[i]
        ]

        self.platform_id = data15hd['SatelliteStatus']['SatelliteDefinition'][
            'SatelliteId']
        self.platform_name = "Meteosat-" + SATNUM[self.platform_id]

        equator_radius = data15hd['GeometricProcessing']['EarthModel'][
            'EquatorialRadius'] * 1000.
        north_polar_radius = data15hd['GeometricProcessing']['EarthModel'][
            'NorthPolarRadius'] * 1000.
        south_polar_radius = data15hd['GeometricProcessing']['EarthModel'][
            'SouthPolarRadius'] * 1000.
        polar_radius = (north_polar_radius + south_polar_radius) * 0.5
        ssp_lon = data15hd['ImageDescription']['ProjectionDescription'][
            'LongitudeOfSSP']

        self.mda['projection_parameters'] = {
            'a': equator_radius,
            'b': polar_radius,
            'h': 35785831.00,
            'ssp_longitude': ssp_lon
        }

        west = int(sec15hd['WestColumnSelectedRectangle']['Value'])
        east = int(sec15hd['EastColumnSelectedRectangle']['Value'])
        ncols_hrv_hdr = int(sec15hd['NumberColumnsHRV']['Value'])
        # We suspect the UMARF will pad out any ROI columns that
        # aren't divisible by 4, so here we work out how many pixels have
        # been added to the column.
        x = ((west - east + 1) * (10.0 / 8) % 1)
        y = int((1 - x) * 4)

        if y < 4:
            # column has been padded with y pixels
            cols_visir = int((west - east + 1 + y) * 1.25)
        else:
            # no padding has occurred
            cols_visir = int((west - east + 1) * 1.25)

        # HRV Channel - check if an ROI
        if (west - east) < 3711:
            cols_hrv = int(np.ceil(ncols_hrv_hdr * 10.0 / 8))  # 6960
        else:
            cols_hrv = int(np.ceil(5568 * 10.0 / 8))  # 6960

        # self.mda should represent the 16bit dimensions not 10bit
        self.mda['number_of_lines'] = int(sec15hd['NumberLinesVISIR']['Value'])
        self.mda['number_of_columns'] = int(cols_visir / 1.25)
        self.mda['hrv_number_of_lines'] = int(
            sec15hd["NumberLinesHRV"]['Value'])
        self.mda['hrv_number_of_columns'] = int(cols_hrv / 1.25)

        # Check the calculated row, column dimensions against the header information:
        ncols = self.mda['number_of_columns']
        ncols_hdr = int(sec15hd['NumberColumnsVISIR']['Value'])

        if ncols != ncols_hdr:
            logger.warning(
                "Number of VISIR columns from header and derived from data are not consistent!"
            )
            logger.warning("Number of columns read from header = %d",
                           ncols_hdr)
            logger.warning("Number of columns calculated from data = %d",
                           ncols)

        ncols_hrv = self.mda['hrv_number_of_columns']

        if ncols_hrv != ncols_hrv_hdr:
            logger.warning(
                "Number of HRV columns from header and derived from data are not consistent!"
            )
            logger.warning("Number of columns read from header = %d",
                           ncols_hrv_hdr)
            logger.warning("Number of columns calculated from data = %d",
                           ncols_hrv)
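Examples #15 and #16 do the same check in packed 10-bit units: each pixel occupies 10/8 = 1.25 bytes, so the code tests whether the ROI width maps to a whole number of bytes and, if not, assumes UMARF padded the line. The same hypothetical 1238-column ROI stepped through by hand:

    ncolumns = 1238                          # hypothetical west - east + 1
    x = (ncolumns * (10.0 / 8)) % 1          # 0.5 -> packed width is not a whole number of bytes
    y = int((1 - x) * 4)                     # 2 -> two padding pixels assumed
    cols_visir = int((ncolumns + y) * 1.25)  # 1550 bytes of packed data per image line
    ncols_16bit = int(cols_visir / 1.25)     # 1240 -> stored in self.mda['number_of_columns']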
Example #16
    def _read_header(self):
        """Read the header info"""

        data = np.fromfile(self.filename,
                           dtype=native_header, count=1)

        self.header.update(recarray2dict(data))

        data15hd = self.header['15_DATA_HEADER']
        sec15hd = self.header['15_SECONDARY_PRODUCT_HEADER']

        # Set the list of available channels:
        self.available_channels = get_available_channels(self.header)
        self._channel_list = [i for i in CHANNEL_NAMES.values()
                              if self.available_channels[i]]

        self.platform_id = data15hd[
            'SatelliteStatus']['SatelliteDefinition']['SatelliteId']
        self.platform_name = "Meteosat-" + SATNUM[self.platform_id]

        equator_radius = data15hd['GeometricProcessing'][
            'EarthModel']['EquatorialRadius'] * 1000.
        north_polar_radius = data15hd[
            'GeometricProcessing']['EarthModel']['NorthPolarRadius'] * 1000.
        south_polar_radius = data15hd[
            'GeometricProcessing']['EarthModel']['SouthPolarRadius'] * 1000.
        polar_radius = (north_polar_radius + south_polar_radius) * 0.5
        ssp_lon = data15hd['ImageDescription'][
            'ProjectionDescription']['LongitudeOfSSP']

        self.mda['projection_parameters'] = {'a': equator_radius,
                                             'b': polar_radius,
                                             'h': 35785831.00,
                                             'ssp_longitude': ssp_lon}

        west = int(sec15hd['WestColumnSelectedRectangle']['Value'])
        east = int(sec15hd['EastColumnSelectedRectangle']['Value'])
        ncols_hrv_hdr = int(sec15hd['NumberColumnsHRV']['Value'])
        # We suspect the UMARF will pad out any ROI columns that
        # aren't divisible by 4, so here we work out how many pixels have
        # been added to the column.
        x = ((west - east + 1) * (10.0 / 8) % 1)
        y = int((1 - x) * 4)

        if y < 4:
            # column has been padded with y pixels
            cols_visir = int((west - east + 1 + y) * 1.25)
        else:
            # no padding has occurred
            cols_visir = int((west - east + 1) * 1.25)

        # HRV Channel - check if an ROI
        if (west - east) < 3711:
            cols_hrv = int(np.ceil(ncols_hrv_hdr * 10.0 / 8))  # 6960
        else:
            cols_hrv = int(np.ceil(5568 * 10.0 / 8))  # 6960

        # self.mda should represent the 16bit dimensions not 10bit
        self.mda['number_of_lines'] = int(sec15hd['NumberLinesVISIR']['Value'])
        self.mda['number_of_columns'] = int(cols_visir / 1.25)
        self.mda['hrv_number_of_lines'] = int(sec15hd["NumberLinesHRV"]['Value'])
        self.mda['hrv_number_of_columns'] = int(cols_hrv / 1.25)

        # Check the calculated row, column dimensions against the header information:
        ncols = self.mda['number_of_columns']
        ncols_hdr = int(sec15hd['NumberColumnsVISIR']['Value'])

        if ncols != ncols_hdr:
            logger.warning(
                "Number of VISIR columns from header and derived from data are not consistent!")
            logger.warning("Number of columns read from header = %d", ncols_hdr)
            logger.warning("Number of columns calculated from data = %d", ncols)

        ncols_hrv = self.mda['hrv_number_of_columns']

        if ncols_hrv != ncols_hrv_hdr:
            logger.warning(
                "Number of HRV columns from header and derived from data are not consistent!")
            logger.warning("Number of columns read from header = %d", ncols_hrv_hdr)
            logger.warning("Number of columns calculated from data = %d", ncols_hrv)