Example #1
    def load(self, key, inputcheckpoint, xslice, yslice, scenes_dates,
             dataloc):
        # save dates for debug purposes
        self.scenes_dates = scenes_dates

        # initialise empty matrix of the right size, with nan values (this assumes that xslice.step = 1 or None)
        shape = (xslice.stop - xslice.start, yslice.stop - yslice.start)
        self.values = np.full(shape, np.nan, order='F', dtype='<f8')

        if len(scenes_dates) == 0:
            # hack : if no scenes, we still need to get the latitude to compute
            # theta_sol_midi in albedo_angular_integration.f90
            filename = inputcheckpoint['filename']
            firstdate = robust_date_parse(inputcheckpoint['firstdate'])
            filename = instanciate_datetime(deepcopy(filename), firstdate)
            logging.warning('No data. Using latitude from file ' + filename)
            try:
                with AugmentedNetcdfDataset(filename, 'r') as f:
                    self.values[:, :] = f['latitude'][xslice, yslice]
                    return self
            except FileNotFoundError:
                logging.error(
                    'Apparently there is no input data scenes for this date. There is no BRDF checkpoint file either. The algorithm cannot be initialized with no input data'
                )
                exit_status('UNABLE_TO_CONFIG')
            return

        # loop through each input scene date
        # note that we loop until one read is successful because we expect the
        # latitude to be the same for each scene date
        # in order to ensure this, we could add a security check (read each
        # date and compare to the latest one).
        for idate, d in enumerate(scenes_dates):
            filename = dataloc[d]['filename']

            # save filename for debug purposes
            self.filenames = {d: filename}
            logging.debug(str(d) + ' ' + filename)

            try:
                # actual reading of the data
                # TODO : honor the missing values and set to np.nan
                with AugmentedNetcdfDataset(filename, 'r') as f:
                    self.values[:, :] = f[key][xslice, yslice]
                    self.show_info(self.name, f[key])
                logging.debug('extract of ' + key + ' data = ' +
                              str(self.values[0, 0]))
                # return as soon as a file has been successfully read
                return self
            except KeyError as e:
                # if anything bad happened when reading the data
                logging.info('Problem reading ' + filename + '/' + key +
                             ' to get the ' + self.name + ' ' + str(e))
                # just log the problem and skip it

        # if no files could be loaded successfully, show an error message
        logging.error('Cannot read files for "' + str(key) +
                      '" : input files location are : ' + str(dataloc))
        return self
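
A minimal standalone sketch of the slice-to-shape initialisation used in load() above (the slice bounds are made up; as in the code, the step is assumed to be 1 or None):

    import numpy as np

    # hypothetical tile slices
    xslice = slice(100, 164)
    yslice = slice(200, 264)

    # Fortran-ordered float64 buffer pre-filled with NaN, same shape logic as load()
    shape = (xslice.stop - xslice.start, yslice.stop - yslice.start)
    values = np.full(shape, np.nan, order='F', dtype='<f8')
    print(values.shape)  # (64, 64)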
Example #2
 def load_brdf(self, filename, model_len, n_channels_ref, xslice, yslice):
     self.filename = filename
     self.infer_params(xslice, yslice)
     logging.info('Reading BRDF and qflag from ' +
                  self.filename)  # + str(date))
     try:
         with AugmentedNetcdfDataset(self.filename, 'r') as f:
             # TODO : honor the missing values and set to np.nan
             self.previous_date = datetime.strptime(f.getncattr('DATE'),
                                                    '%Y%m%d-%M%S')
             self.latitude = DataMatrixFloat(f['latitude'][self.xslice,
                                                           self.yslice])
             self.longitude = DataMatrixFloat(f['longitude'][self.xslice,
                                                             self.yslice])
             self.brdf = DataMatrixFloat(f['K012'][self.xslice,
                                                   self.yslice, :, :])
             self.covariance = DataMatrixFloat(
                 f['CK'][self.xslice, self.yslice, :, :, :])
             self.quality = DataMatrixInteger(f['Z-QFLAG'][self.xslice,
                                                           self.yslice, :])
             self.age_obs = DataMatrixInteger(f['Z-AGE'][self.xslice,
                                                         self.yslice, :])
             self.n_valid_obs = DataMatrixInteger(f['Z-NMOD'][self.xslice,
                                                              self.yslice])
     except Exception as e:
         logging.error('Problem reading brdf file "' + str(self.filename) +
                       '" ' + str(e))
         raise (e)
Example #3
    def load(self,
             filenames,
             key,
             xslice,
             yslice,
             scenes_dates=None,
             dataloc=None):

        shapeK = (xslice.stop - xslice.start, yslice.stop - yslice.start, 4, 3)
        shapeCK = (xslice.stop - xslice.start, yslice.stop - yslice.start, 4,
                   3, 3)

        logging.info('Reading climatic BRDF from ' + filenames)
        try:
            with AugmentedNetcdfDataset(filenames, 'r') as f:
                try:
                    scale = f.getncattr("SCALE_FACTOR")
                except Exception:
                    # the SCALE_FACTOR attribute may be absent: default to 1
                    scale = 1
                if key == 'K012':
                    self.values = np.full(shapeK,
                                          np.nan,
                                          order='F',
                                          dtype='<f8')
                    self.values[:, :, :, :] = scale * f[key][xslice,
                                                             yslice, :, :]
                    self.show_info(self.name, f[key])
                elif key in ('CK', 'CKa', 'CKb', 'CKab'):
                    self.values = np.full(shapeCK,
                                          np.nan,
                                          order='F',
                                          dtype='<f8')
                    logging.debug(
                        f'actual size of covariance matrix {f[key].shape}')
                    self.values[:, :, :, :, :] = scale * f[key][
                        xslice, yslice, :, :, :]
                    self.show_info(self.name, f[key])
                else:
                    logging.error('Wrong parameter key')

            logging.debug('extraction of climatic brdf ' +
                          str(self.values[0, 0, 0]))
            return self
        except Exception as e:
            logging.error('Problem reading BRDF climato file "' +
                          str(filenames) + '" ' + str(e))
            raise e
Example #4
    def infer_params(self, xslice=None, yslice=None):
        """ Infer brdf parameters, size, slices, from the file. If xslice and yslice are given, use them instead of infering the full size of the brdf file """
        try:
            # open the brdf file as a netcdf
            with AugmentedNetcdfDataset(self.filename, 'r') as f:
                # read the model_id from the attributes
                self.model_id = f.getncattr("BRDF_MODEL_ID")
                logging.info(
                    f'Reading brdf config from brdf file : model id = {self.model_id}'
                )

                # use the layer "CK" to get the sizes
                shape = f['CK'].shape
                if xslice is None:
                    # if xslice is unknown, define it as the whole range of the file
                    self.xslice = slice(0, shape[0])
                    logging.info(
                        f'Reading brdf config from brdf file : xslice = {self.xslice}'
                    )
                else:
                    # else, use the provided value
                    self.xslice = xslice
                if yslice is None:
                    # if yslice is unknown, define it as the whole range of the file
                    self.yslice = slice(0, shape[1])
                    logging.info(
                        f'Reading brdf config from brdf file : yslice = {self.yslice}'
                    )
                else:
                    # else, use the provided value
                    self.yslice = yslice

                # read also the number of bands in the brdf file
                self.n_channels_ref = shape[2]
                logging.info(
                    f'Reading brdf config from brdf file : n_channels_ref = {self.n_channels_ref}'
                )

                # read also the model_len from the brdf file
                self.model_len = shape[3]
                logging.info(
                    f'Reading brdf config from brdf file : model_len = {self.model_len}'
                )

        except Exception as e:
            logging.error('Problem reading brdf file "' + str(self.filename) +
                          '" ' + str(e))
            raise e
Example #5
    def load(self, key, xslice, yslice, scenes_dates, dataloc):
        # save dates for debug purposes
        self.scenes_dates = scenes_dates

        # initialise empty matrix of the right size, with nan values (this assumes that xslice.step = 1 or None)
        shape = (xslice.stop - xslice.start, yslice.stop - yslice.start,
                 max(1, len(self.scenes_dates)))
        self.values = np.full(shape, np.nan, order='F', dtype='<f4')

        # loop through each input scene date

        for idate, d in enumerate(scenes_dates):
            filename = dataloc[d]['filename']

            # save filename for debug purposes
            self.filenames[d] = filename
            logging.debug(str(d) + ' ' + filename)

            try:
                # actual reading of the data
                with AugmentedNetcdfDataset(filename, 'r') as f:
                    scale, offset = f[key].getncattr(
                        "SCALE"), f[key].getncattr("OFFSET")
                    # first dimension is the date, there is only one date per file. Therefore we have a 0 here
                    data = f[key][0, xslice, yslice]

                    # read the missing value mask
                    missing_data_mask = (data == f[key].getncattr("NO_DATA"))
                    # apply offset and scale
                    data = ((data / scale) - offset) * 100.  # we want %

                    # apply the missing value mask
                    data[missing_data_mask] = np.nan

                    # put the data in the right place in the full matrix
                    # Note : we are using the intermediate variable "data" because it may be slower to work directly with the full matrix if the data for one date is not contiguous.
                    self.values[:, :, idate] = data

                    self.show_info(self.name, f[key])
            except Exception as e:
                # if anything bad happened when reading the data
                logging.error('Problem reading ' + filename + '/' + key +
                              ' to get the ' + self.name + ' ' + str(e))
                # just log the problem and skip the date

        logging.debug('extract of ' + key + ' data = ' +
                      str(self.values[0, 0, :]))
        return self
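
A standalone numpy sketch of the scale/offset and NO_DATA handling performed in the try block above (the SCALE, OFFSET and NO_DATA values are invented for illustration):

    import numpy as np

    scale, offset, no_data = 250.0, 0.0, -1
    raw = np.array([[50, -1], [125, 250]], dtype='int16')

    missing_data_mask = (raw == no_data)     # read the missing value mask first
    data = ((raw / scale) - offset) * 100.   # apply offset and scale, convert to %
    data[missing_data_mask] = np.nan         # then blank out missing pixels
    print(data)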
Example #6
 def write_brdf(self, data, key, typ):
     """ Will write the numpy array "data", in the file defined above in "self.brdf_file", on the data layer "key".
         "typ" should be a known identifier, as the data will be processed differently according its value.
     """
     logging.debug('Writing ' + key + ':' + typ + ' to ' + self.brdf_file)
     try:
         ensure_dir(self.brdf_file)
         try:
             f = AugmentedNetcdfDataset(self.brdf_file,
                                        'a',
                                        format='NETCDF4')
         except OSError:
             f = AugmentedNetcdfDataset(self.brdf_file,
                                        'w',
                                        format='NETCDF4')
             f.createDimensionIfNotExists('X', self.xoutputsize)
             f.createDimensionIfNotExists('Y', self.youtputsize)
             f.createDimensionIfNotExists('NBAND', self.n_channels_ref)
             f.createDimensionIfNotExists('KERNEL_INDEX', self.model_len)
             f.createDimensionIfNotExists('KERNEL_INDEX2', self.model_len)
             self._set_date_and_version(f, self.date, __version__,
                                        self.model_id)
         if typ == 'brdf':
             outvar = f.createVariableIfNotExists(
                 key,
                 data.dtype, ('X', 'Y', 'NBAND', 'KERNEL_INDEX'),
                 zlib=True,
                 complevel=5,
                 fletcher32=True)
         elif typ == 'covariance':
             outvar = f.createVariableIfNotExists(
                 key,
                 data.dtype,
                 ('X', 'Y', 'NBAND', 'KERNEL_INDEX', 'KERNEL_INDEX2'),
                 zlib=True,
                 complevel=5,
                 fletcher32=True)
         elif typ == 'quality':
             outvar = f.createVariableIfNotExists(key,
                                                  data.dtype,
                                                  ('X', 'Y', 'NBAND'),
                                                  zlib=True,
                                                  complevel=5,
                                                  fletcher32=True)
         elif typ == 'age':
             outvar = f.createVariableIfNotExists(key,
                                                  data.dtype,
                                                  ('X', 'Y', 'NBAND'),
                                                  zlib=True,
                                                  complevel=5,
                                                  fletcher32=True)
         elif typ == 'latitude' or typ == 'longitude':
             outvar = f.createVariableIfNotExists(key,
                                                  data.dtype, ('X', 'Y'),
                                                  zlib=True,
                                                  complevel=5,
                                                  fletcher32=True)
         elif typ == 'n_valid_obs':
             data = data.astype('int8')
             outvar = f.createVariableIfNotExists(
                 key,
                 data.dtype, ('X', 'Y'),
                 zlib=True,
                 complevel=5,
                 fletcher32=True,
                 attributes={
                     'units': '',
                     'long_name': 'NMOD for {key}'.format(key=key)
                 })
         else:
             raise Exception('Unknown type of data to write : typ = ' +
                             str(typ))
         # here is the actual writing command
         outvar[self.xslice, self.yslice, ...] = data[...]
         f.close()
     except Exception as e:
         print(e)
         logging.error('Problem writing ' + key + ' on ' + self.brdf_file +
                       ' : ' + str(e))
         raise (e)
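
A minimal sketch with the plain netCDF4 API (not the AugmentedNetcdfDataset wrapper) of the create-file, create-dimensions, write-slice pattern used above; the file name and sizes are made up:

    import numpy as np
    from netCDF4 import Dataset

    data = np.random.rand(8, 8)
    with Dataset('brdf_sketch.nc', 'w', format='NETCDF4') as f:
        f.createDimension('X', 8)
        f.createDimension('Y', 8)
        outvar = f.createVariable('K012_sketch', data.dtype, ('X', 'Y'),
                                  zlib=True, complevel=5, fletcher32=True)
        # here is the actual writing command, restricted to a slice of the full grid
        outvar[0:8, 0:8] = data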
Example #7
    def write_albedo(self, data, key, typ):
        """ Will write the numpy array "data", in the file defined above in "self.albedo_file", on the data layer "key".
            "typ" should be a known identifier, as the data will be processed differently according its value.
        """
        logging.debug('Writing ' + key + ' to ' +
                      self.albedo_file)  # + str(self.date))
        try:
            ensure_dir(self.albedo_file)
            try:
                f = AugmentedNetcdfDataset(self.albedo_file,
                                           'a',
                                           format='NETCDF4')
            except OSError:
                f = AugmentedNetcdfDataset(self.albedo_file,
                                           'w',
                                           format='NETCDF4')
                f.createDimensionIfNotExists('longitude', self.xoutputsize)
                f.createDimensionIfNotExists('latitude', self.youtputsize)
                f.createDimensionIfNotExists('NBAND', self.n_channels_ref)
                self._set_date_and_version(f, self.date, __version__,
                                           self.model_id)
                f.setncattr('institution', 'VITO')
            if typ == 'albedo':
                #print(f'--------------')
                #print(f'DATA {self.date} :{key}: {data[0, 0]}')
                scale_factor = 1. / 10000
                missing_value = -32767
                dtype = np.int16
                outvar = f.createVariableIfNotExists(
                    key,
                    dtype, ('latitude', 'longitude'),
                    complevel=5,
                    fletcher32=True,
                    zlib=True,
                    attributes={
                        'units': '',
                        'offset': 0.,
                        'scale_factor': scale_factor,
                        'long_name': 'Albedo {key}'.format(key=key)
                    })
                missing = np.isnan(data)
                #######with numpy.warning.filterwarnings(divide='ignore'):
                #######        numpy.float64(1.0) / 0.0
                data[data < -3.0] = -3.0
                data[data > 3.0] = 3.0
                #print(f'{self.date} :{key}: m, s, l: {missing[0, 0]}')
                data = data / scale_factor
                data = data.astype(dtype)
                data[missing] = missing_value
                #print(f'DATA {self.date} :{key}: {data[0, 0]}')
                outvar[self.xslice, self.yslice] = data[:, :]
                #outvar[self.xslice, self.yslice] = 7
                ##f.close()

                ##f = AugmentedNetcdfDataset(self.albedo_file,'r', format='NETCDF4')
                ##var = f[key]
                ##var.set_auto_maskandscale(False)
                ###print(f'HERE {self.date} :{key}: {var[self.xslice, self.yslice][0,0]}')
                ##f.close()

                ##f = AugmentedNetcdfDataset(self.albedo_file,'r', format='NETCDF4')
                ##print(f'autoscale {self.date} :{key}: {f[key][self.xslice, self.yslice][0,0]}')
            elif typ == 'albedo_cov':
                scale_factor = 1. / 10000
                missing_value = -32767
                dtype = np.int16
                outvar = f.createVariableIfNotExists(
                    key,
                    dtype, ('latitude', 'longitude'),
                    complevel=5,
                    fletcher32=True,
                    zlib=True,
                    attributes={
                        'units': '',
                        'offset': 0.,
                        'scale_factor': scale_factor,
                        'long_name': 'Albedo cov {key}'.format(key=key)
                    })
                missing = np.isnan(data)
                data[data < -3.0] = -3.0
                data[data > 3.0] = 3.0
                data = data / scale_factor
                data = data.astype(dtype)
                data[missing] = missing_value
                outvar[self.xslice, self.yslice] = data[:, :]
            elif typ == 'age':
                data = data.astype('int8')
                outvar = f.createVariableIfNotExists(
                    key,
                    data.dtype, ('latitude', 'longitude'),
                    complevel=5,
                    fletcher32=True,
                    zlib=True,
                    attributes={
                        'units': 'days',
                        'long_name': 'Age {key}'.format(key=key)
                    })
                outvar[self.xslice, self.yslice] = data[:, :]
            elif typ == 'n_valid_obs':
                data = data.astype('int8')
                outvar = f.createVariableIfNotExists(
                    key,
                    data.dtype, ('latitude', 'longitude'),
                    complevel=5,
                    fletcher32=True,
                    zlib=True,
                    attributes={
                        'units': '',
                        'long_name': 'NMOD for {key}'.format(key=key)
                    })
                outvar[self.xslice, self.yslice] = data[:, :]
            elif typ == 'quality':
                data = data.astype('uint8')
                outvar = f.createVariableIfNotExists(
                    key,
                    data.dtype, ('latitude', 'longitude'),
                    complevel=5,
                    fletcher32=True,
                    zlib=True,
                    attributes={
                        'units': '',
                        'long_name': 'Quality flag {key}'.format(key=key)
                    })
                outvar[self.xslice, self.yslice] = data[:, :]
            elif typ == 'latitude':
                outvar = f.createVariableIfNotExists(key,
                                                     data.dtype, ('latitude'),
                                                     complevel=5,
                                                     fletcher32=True,
                                                     zlib=True,
                                                     attributes={
                                                         'units': 'degrees',
                                                         'title': 'latitude',
                                                         'long_name':
                                                         'latitude'
                                                     })
                outvar[self.xslice] = data[:, 0]  # as per VITO's request, take only the first column
            elif typ == 'longitude':
                outvar = f.createVariableIfNotExists(key,
                                                     data.dtype, ('longitude'),
                                                     complevel=5,
                                                     fletcher32=True,
                                                     zlib=True,
                                                     attributes={
                                                         'units': 'degrees',
                                                         'title': 'longitude',
                                                         'long_name':
                                                         'longitude'
                                                     })
                outvar[self.yslice] = data[0, :]  # as per VITO's request, take only the first row
            else:
                raise Exception('Unknown type of data to write : typ = ' +
                                str(typ))
            f.close()
        except Exception as e:
            logging.error('Problem writing ' + key + ' : ' + str(e))
            raise e
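
All of the albedo branches above share the same int16 packing step (clamp, divide by the scale factor, cast, then restore missing values); a standalone numpy sketch with toy values:

    import numpy as np

    scale_factor = 1. / 10000
    missing_value = -32767

    data = np.array([[0.37, np.nan], [5.2, -4.0]])  # toy albedo values
    missing = np.isnan(data)
    data[data < -3.0] = -3.0
    data[data > 3.0] = 3.0
    data = (data / scale_factor).astype(np.int16)  # NaN cast result is overwritten just below
    data[missing] = missing_value
    print(data)  # [[3700, -32767], [30000, -30000]]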
Example #8
    def _write_global_attributes(self, date, filename, options=None):
        """ Write hdf attributes that are global to the output hdf file """
        if options is None:
            options = {}
        common_attributes = {
            'image_reference_time': date.strftime('%Y%m%d%H%M%S'),
            'time_coverage_start': '20210801000000',
            'date_created': datetime.now().strftime('%Y%m%d%H%M%S'),
            'SAF': 'LSA',
            'CENTRE': 'IM-PT',
            'name': 'MTDAL',
            'archive_facility': 'IPMA',  # 'PARENT_PRODUCT_NAME': ['BRF', 'SAA/SZA', 'VAA/VZA', '-'],
            'SPECTRAL_CHANNEL_ID': '1 2 4 for MTG',
            'algorithm_version': '1.0.0',
            'base_algorithm_version': '1.0.0',  # should come from pyal1 code
            'product_version': '1.0.0',
            'cloud_coverage': '-',
            'OVERALL_QUALITY_FLAG': 'OK',
            'ASSOCIATED_QUALITY_INFORMATION': '-',
            'REGION_NAME': 'MTG-Disk',
            'COMPRESSION': 0,
            'FIELD_TYPE': 'Product',
            'FORECAST_STEP': 0,
            'NC': self.youtputsize,
            'NL': self.xoutputsize,
            'NB_PARAMETERS': 5,
            'platform': 'MTG1',  # should come from pyal1 code
            'sensor': 'FCI',
            'INSTRUMENT_MODE': 'STATIC_VIEW',
            'orbit_type': 'GEO',
            'PROJECTION_NAME': 'GEOS(+000.0)',
            'START_ORBIT_NUMBER': 0,
            'END_ORBIT_NUMBER': 0,
            'SUB_SATELLITE_POINT_START_LAT': 0.0,
            'SUB_SATELLITE_POINT_START_LON': 0.0,
            'SUB_SATELLITE_POINT_END_LAT': 0.0,
            'SUB_SATELLITE_POINT_END_LON': 0.0,
            'PIXEL_SIZE': '3.1km',
            'contacts': '*****@*****.**',
            'grid_mapping': 'geostationary',
            'GRANULE_TYPE': 'DP',
            'processing_level': '03',
            'PRODUCT_ACTUAL_SIZE': ' 110231552',
            'processing_mode': 'N',
            'disposition_mode': 'I',
            'DISPOSITION_FLAG': 'O',
            'product_frequency': 'daily',
            'STATISTIC_TYPE': 'recursive, timescale: 5days'
        }
        # the options dict overwrites the default values
        for k, v in options.items():
            common_attributes[k] = v

        f = AugmentedNetcdfDataset(filename, 'a', format='NETCDF4')

        for k, v in common_attributes.items():
            v = _format_recursively(v)
            f.setncattr(k, v)

        f.close()
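
The options argument is just a per-call override of the default attributes; a tiny sketch of the same merge logic (attribute values taken from the dict above):

    defaults = {'platform': 'MTG1', 'sensor': 'FCI', 'product_version': '1.0.0'}
    options = {'product_version': '1.0.1'}

    # the options dict overwrites the default values, as in _write_global_attributes()
    attributes = dict(defaults)
    for k, v in options.items():
        attributes[k] = v
    print(attributes['product_version'])  # 1.0.1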
Example #9
    def write_albedo_after_spectral_integration(self,
                                                alldata,
                                                typ,
                                                filename,
                                                missing_val=-1,
                                                scale=10000.,
                                                offset=0.):
        dtype = '<i2'  # beware there is rounding of the value below
        ensure_dir(filename)
        try:
            f = AugmentedNetcdfDataset(filename, 'a', format='NETCDF4')
        except OSError:
            f = AugmentedNetcdfDataset(filename, 'w', format='NETCDF4')
            f.createDimensionIfNotExists('longitude', self.xoutputsize)
            f.createDimensionIfNotExists('latitude', self.youtputsize)
            f.createDimensionIfNotExists('time', 1)

        for iout, outname in enumerate(self.outalbedos_names):
            for iin, inname in enumerate(self.inalbedos_names):

                fullname = outname + '-' + inname

                if typ == 'albedo':
                    outkey = 'AL-' + fullname
                    scale_factor = 1. / 10000
                    missing_value = np.int16(-32767)
                    dtype = np.int16
                    outvar = f.createVariableIfNotExists(
                        outkey,
                        dtype, ('time', 'latitude', 'longitude'),
                        complevel=5,
                        fletcher32=True,
                        zlib=True,
                        attributes={
                            'units': '',
                            'offset': 0.,
                            'scale_factor': scale_factor,
                            '_FillValue': missing_value,
                            'long_name': 'Albedo {key}'.format(key=outkey)
                        })

                    data = np.array([alldata[:, :, iin, iout]])
                    missing = np.isnan(data)
                    #######with numpy.warning.filterwarnings(divide='ignore'):
                    #######        numpy.float64(1.0) / 0.0
                    data[data < -3.0] = -3.0
                    data[data > 3.0] = 3.0
                    #print(f'{self.date} :{key}: m, s, l: {missing[0, 0]}')
                    data = data / scale_factor
                    data = data.astype(dtype)
                    data[missing] = missing_value
                    #print(f'DATA {self.date} :{key}: {data[0, 0]}')
                    outvar[:, self.xslice, self.yslice] = data[:, :, :]
                elif typ == 'albedo-err':
                    outkey = 'AL-' + fullname + '-ERR'
                    scale_factor = 1. / 10000
                    missing_value = np.int16(-32767)
                    dtype = np.int16
                    outvar = f.createVariableIfNotExists(
                        outkey,
                        dtype, ('time', 'latitude', 'longitude'),
                        complevel=5,
                        fletcher32=True,
                        zlib=True,
                        attributes={
                            'units': '',
                            'offset': 0.,
                            'scale_factor': scale_factor,
                            '_FillValue': missing_value,
                            'long_name': 'Albedo cov {key}'.format(key=outkey)
                        })

                    data = np.array([alldata[:, :, iin, iout]])
                    missing = np.isnan(data)
                    data[data < -3.0] = -3.0
                    data[data > 3.0] = 3.0
                    data = data / scale_factor
                    data = data.astype(dtype)
                    data[missing] = missing_value
                    outvar[:, self.xslice, self.yslice] = data[:, :, :]
                if outkey in ('AL-NI-BH', 'AL-NI-BH-ERR', 'AL-VI-BH', 'AL-VI-BH-ERR'):
                    continue
                data = alldata[:, :, iin, iout]
                data_int = ((data * scale) + offset).round().astype(dtype)

                data_int[np.isnan(data)] = missing_val
                #~ dataset[self.xslice,self.yslice] = data_int
        f.close()
Example #10
    def write_albedo_per_band(self,
                              alldata,
                              typ,
                              filename,
                              ichannel,
                              missing=-1,
                              scale=10000.,
                              offset=0.):
        dtype = '<i2'  # beware there is rounding of the value below
        logging.debug('Writing ' + filename + ' for albedo err channel ' +
                      str(ichannel + 1))

        ensure_dir(filename)
        try:
            f = AugmentedNetcdfDataset(filename, 'a', format='NETCDF4')
        except OSError:
            f = AugmentedNetcdfDataset(filename, 'w', format='NETCDF4')
            f.createDimensionIfNotExists('x', self.xoutputsize)
            f.createDimensionIfNotExists('y', self.youtputsize)
            f.createDimensionIfNotExists('time', 1)
            self._set_date_and_version(f, self.date, __version__,
                                       self.model_id)
            f.setncattr('institution', 'IPMA')

        for j, bhdh in enumerate(self.inalbedos_names):
            if typ == 'albedo':
                outkey = 'AL-SP-' + bhdh
                scale_factor = 1. / 10000
                missing_value = np.int16(-32768)
                dtype = np.int16

                outvar = f.createVariableIfNotExists(
                    outkey,
                    dtype, ('time', 'y', 'x'),
                    complevel=5,
                    fletcher32=True,
                    zlib=True,
                    attributes={
                        'units': '',
                        'offset': 0.,
                        '_FillValue': missing_value,
                        'scale_factor': scale_factor,
                        'long_name': 'Albedo {key}'.format(key=outkey)
                    })
                data = np.array([alldata[:, :, ichannel, j]])  # not pretty

                missing = np.isnan(data)
                #######with numpy.warning.filterwarnings(divide='ignore'):
                #######        numpy.float64(1.0) / 0.0
                data[data < -3.0] = -3.0
                data[data > 3.0] = 3.0
                #print(f'{self.date} :{key}: m, s, l: {missing[0, 0]}')
                data = data / scale_factor
                data = data.astype(dtype)
                data[missing] = missing_value
                #print(f'DATA {self.date} :{key}: {data[0, 0]}')
                outvar[0, self.xslice, self.yslice] = data[:, :, :]
            elif typ == 'albedo-err':
                outkey = 'AL-SP-' + bhdh + '-ERR'
                scale_factor = 1. / 10000
                missing_value = np.int16(-32768)
                dtype = np.int16

                outvar = f.createVariableIfNotExists(
                    outkey,
                    dtype, ('time', 'y', 'x'),
                    complevel=5,
                    fletcher32=True,
                    zlib=True,
                    attributes={
                        'units': '',
                        'offset': 0.,
                        'scale_factor': scale_factor,
                        '_FillValue': missing_value,
                        'long_name': 'Albedo cov {key}'.format(key=outkey)
                    })
                data = np.array([alldata[:, :, ichannel, j]])
                missing = np.isnan(data)
                data[data < -3.0] = -3.0
                data[data > 3.0] = 3.0
                data = data / scale_factor
                data = data.astype(dtype)
                data[missing] = missing_value
                outvar[0, self.xslice, self.yslice] = data[:, :, :]

            #~ data = alldata[:,:,ichannel,j]
            #~ data_int = ((data * scale) + offset).round().astype(dtype)
            #~ data_int[np.isnan(data)] = missing
            #~ outvar[self.xslice,self.yslice] = data_int
        f.close()
Example #11
    def write_brdf_covariance(self,
                              alldata,
                              configkey,
                              scale=10000.,
                              offset=0.,
                              missing=-32768):
        """ Write covariance in hdf file for the K012 kernel coefficients of the brdf model """
        for ichannel in range(0, self.n_channels_ref):
            for iparam in range(0, self.model_len):
                for jparam in range(iparam, self.model_len):
                    filename = self.config[configkey][f'band{ichannel+1}'][
                        'cov']
                    ensure_dir(filename)
                    outkey = f'C{iparam}{jparam}'
                    logging.debug('Writing ' + filename + ' for ' +
                                  str(outkey))

                    try:
                        f = AugmentedNetcdfDataset(filename,
                                                   'a',
                                                   format='NETCDF4')
                    except OSError:
                        f = AugmentedNetcdfDataset(filename,
                                                   'w',
                                                   format='NETCDF4')
                        f.createDimensionIfNotExists('longitude',
                                                     self.xoutputsize)
                        f.createDimensionIfNotExists('latitude',
                                                     self.youtputsize)
                        f.createDimensionIfNotExists('NBAND',
                                                     self.n_channels_ref)
                        f.createDimensionIfNotExists('KERNEL_INDEX',
                                                     self.model_len)
                        f.createDimensionIfNotExists('KERNEL_INDEX2',
                                                     self.model_len)
                        self._set_date_and_version(f, self.date, __version__,
                                                   self.model_id)

                    data = alldata[:, :, ichannel, iparam, jparam]
                    data_int = ((data * scale) + offset).round().astype(
                        self.dtype)
                    data_int[np.isnan(data)] = missing
                    missing = np.int16(missing)
                    #~ dataset[self.xslice,self.yslice] = data_int
                    outvar = f.createVariableIfNotExists(
                        outkey,
                        data_int.dtype, ('latitude', 'longitude'),
                        zlib=True,
                        complevel=5,
                        fletcher32=True,
                        attributes={
                            'units': '',
                            'offset': offset,
                            'scale_factor': scale,
                            '_FillValue': missing,
                            'long_name':
                            'BRDF covariance {key}'.format(key=outkey)
                        })

                    # actually write the data into the variable
                    outvar[self.xslice, self.yslice] = data_int
                    f.close()
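
The nested iparam/jparam loop only visits the upper triangle of the covariance matrix, so each band gets one layer per C{i}{j} pair; a one-line sketch of the layer names it produces (here with model_len = 3 for illustration):

    model_len = 3
    keys = [f'C{i}{j}' for i in range(model_len) for j in range(i, model_len)]
    print(keys)  # ['C00', 'C01', 'C02', 'C11', 'C12', 'C22']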
Example #12
    def write_qflag(self, data, outkey='QFLAGS', dtype='uint8', filename=None):
        logging.debug('Writing ' + filename + ' for ' + str(outkey))
        try:
            f = AugmentedNetcdfDataset(filename, 'a', format='NETCDF4')
            f.createDimensionIfNotExists('x', self.xoutputsize)
            f.createDimensionIfNotExists('y', self.youtputsize)
            f.createDimensionIfNotExists('NBAND', self.n_channels_ref)
        except OSError:
            f = AugmentedNetcdfDataset(filename, 'w', format='NETCDF4')
            f.createDimensionIfNotExists('x', self.xoutputsize)
            f.createDimensionIfNotExists('y', self.youtputsize)
            f.createDimensionIfNotExists('NBAND', self.n_channels_ref)
            self._set_date_and_version(f, self.date, __version__,
                                       self.model_id)
            f.setncattr('institution', 'IPMA')

        outvar = f.createVariableIfNotExists(outkey,
                                             data.dtype, ('y', 'x'),
                                             zlib=True,
                                             complevel=5,
                                             fletcher32=True)
        data = data.astype(dtype)
        outvar[self.xslice, self.yslice] = data
        f.close()
Example #13
 def write_tocr(self, data, xslice, yslice, date, key, typ):
     logging.info('Writing ' + typ + ' to ' + self.outfilename)
     try:
         ensure_dir(self.outfilename)
         try:
             f = AugmentedNetcdfDataset(self.outfilename,'a', format='NETCDF4')
         except OSError:
             f = AugmentedNetcdfDataset(self.outfilename,'w', format='NETCDF4')
             f.createDimensionIfNotExists('latitude', self.xoutputsize)
             f.createDimensionIfNotExists('longitude', self.youtputsize)
             f.createDimensionIfNotExists('NBAND', self.n_channels_ref)
             self._set_date_and_version(f, date, __version__, self.model_id)
         if typ == 'TOC-R' or typ == 'TOC-R-ERR':
             scale_factor = 1./10000.0
             missing_value = -32767
             outvar = f.createVariableIfNotExists(key, 'int16', ('latitude','longitude','NBAND'), zlib=True, complevel=5, fletcher32=True,
                     attributes = {'units': '', 'offset':0., 'scale_factor':scale_factor } )
             missing = np.isnan(data)
             data[data < 0] = 0.0
             data[data > 1.01] = 1.0
             data = data / scale_factor
             data = data.astype(np.int16)
             data[missing] = missing_value
             outvar[xslice, yslice,:] = data[:,:,:]
         elif typ=='Z-QFLAG':
             outvar=f.createVariableIfNotExists(key, 'uint8', ('latitude','longitude','NBAND'), zlib=True, complevel=5, fletcher32=True)
             outvar[xslice,yslice,:]=data[:,:,:]
         elif typ == 'solzenith':
             outvar = f.createVariableIfNotExists(key, data.dtype, ('latitude','longitude'), zlib=True, complevel=5, fletcher32=True)
             outvar[xslice, yslice] = data[:,:]
         elif typ == 'n_valid_obs':
             data = data.astype('int8')
             outvar = f.createVariableIfNotExists(key, data.dtype, ('latitude','longitude'), zlib=True, complevel=5, fletcher32=True,
                     attributes = {'units': '',
                                   'long_name' : 'NMOD for {key}'.format(key=key) } )
             outvar[xslice, yslice] = data[:,:]
         elif typ == 'latitude':
             outvar = f.createVariableIfNotExists(key, data.dtype, ('latitude'), complevel=5, fletcher32=True, zlib=True,
                     attributes = {'units': 'degrees',
                                   'title' : 'latitude',
                                   'long_name' : 'latitude' } )
             outvar[xslice] = data[:,0] # as per VITO's request, take only the first column
         elif typ == 'longitude':
             outvar = f.createVariableIfNotExists(key, data.dtype, ('longitude'), complevel=5, fletcher32=True, zlib=True,
                     attributes = {'units': 'degrees',
                                   'title' : 'longitude',
                                   'long_name' : 'longitude' } )
             outvar[yslice] = data[0,:] # as per VITO's request, take only the first row
         else:
             raise Exception('Unknown type of data to write : typ = ' + str(typ))
         f.close()
     except Exception as e:
         logging.error('Problem writing ' + key + ' on ' + self.outfilename + ' : ' + str(e))
         raise(e)
Example #14
    def load(self, key, xslice, yslice, scenes_dates, dataloc, n_channels, options):
        ignore_quality_bit = options.get('ignore_quality_bit', None)
        lwcs_mask_style = options.get('lwcs_mask_style','VGT')
        # save dates for debug purposes
        self.scenes_dates = scenes_dates

        # initialise empty matrix of the right size, with nan values (this assumes that xslice.step = 1 or None)
        shape = (xslice.stop - xslice.start, yslice.stop - yslice.start, n_channels, len(self.scenes_dates))
        self.missing = initial_missing_value
        self.values = np.full(shape, self.missing, order='F', dtype='int8')

        # loop through each input scene date
        for idate, d in enumerate(self.scenes_dates):
            filename = dataloc[d]['filename']

            # save filename for debug purposes
            self.filenames[d] = filename
            logging.debug(str(d) + ' ' + filename )

            try:
                # actual reading of the data
                with AugmentedNetcdfDataset(filename,'r') as f:
                    for iband in range(0,n_channels):
                        # first dimension is the date, there is only one date per file. Therefore we have a 0 here
                        self.values[:,:,iband,idate] = f[key][0,xslice,yslice]
                    self.show_info(self.name, f[key])
            except Exception as e:
                # if anything bad happened when reading the data
                logging.error('Problem reading ' + filename + '/' + key + ' to get the ' + self.name + ' ' + str(e))
                # just log the problem and skip it
                continue
            logging.debug('initial lwcs_mask (first pixel) ' + self.name + ' data = '+str(_binary_repr_array(self.values[0,0,:])))

        try:
            self.badquality = np.zeros(self.values.shape, order='F', dtype='bool')
            if ignore_quality_bit is None:
                if lwcs_mask_style == 'VGT':
                    quality_bits =[0b00010000,0b00100000,0b01000000,0b10000000] # SWIR BLUE RED NIR
                    quality_bits = quality_bits[0:n_channels]
                    for iband, bit in enumerate(quality_bits):
                            self.badquality[:,:,iband,:] = (np.bitwise_and(self.values[:,:,iband,:], bit) != bit)
                elif lwcs_mask_style == 'AVHRR':
                    logging.debug(f'Filtering bad reflectances : self.values[:,:,:,:] {self.values[:,:,:,:]} ')
                    self.badquality = np.zeros(self.values.shape, order='F', dtype='bool')
                    logging.debug(f'1 Filtering bad reflectances : badquality = {self.badquality}')
                    if n_channels == 2:
                        # this is how it should be, but bits 8, 9 and 10 seem to be absent
                        #self.badquality[:,:,0,:] = (np.bitwise_and(self.values[:,:,0,:], 0b10001000000) == 0b10000000000) # band b1 : bits 10 and 6
                        #self.badquality[:,:,1,:] = (np.bitwise_and(self.values[:,:,1,:], 0b01000100000) == 0b01000000000) # band b2 : bits 9  and 5
                        self.badquality[:,:,0,:] = (np.bitwise_and(self.values[:,:,0,:], 0b00001000000) == 0b00000000000) # band b1 : bits 6
                        logging.debug(f'2 Filtering bad reflectances : {self.values[:,:,0,:]} badquality = {self.badquality}')
                        self.badquality[:,:,1,:] = (np.bitwise_and(self.values[:,:,1,:], 0b00000100000) == 0b00000000000) # band b2 : bits 5
                        logging.debug(f'2 Filtering bad reflectances : {self.values[:,:,1,:]} badquality = {self.badquality}')
                    elif n_channels == 3:
                        # this is how it should be, but bits 8, 9 and 10 seem to be absent
                        #self.badquality[:,:,0,:] = (np.bitwise_and(self.values[:,:,0,:], 0b10001000000) == 0b10000000000) # band b1 : bits 10 and 6
                        #self.badquality[:,:,1,:] = (np.bitwise_and(self.values[:,:,1,:], 0b01000100000) == 0b01000000000) # band b2 : bits 9 and 5
                        #self.badquality[:,:,2,:] = (np.bitwise_and(self.values[:,:,2,:], 0b00100010000) == 0b00100000000) # band b3a : bits 8 and 4
                        self.badquality[:,:,0,:] = (np.bitwise_and(self.values[:,:,0,:], 0b00001000000) == 0b00000000000) # band b1 : bits 6
                        self.badquality[:,:,1,:] = (np.bitwise_and(self.values[:,:,1,:], 0b00000100000) == 0b00000000000) # band b2 : bits 5
                        self.badquality[:,:,2,:] = (np.bitwise_and(self.values[:,:,2,:], 0b00000010000) == 0b00000000000) # band b3a : bits 4
                        logging.debug(f'3 Filtering bad reflectances : badquality = {self.badquality}')
            else:
                logging.warning(f'Option ignore_quality_bit is set to {ignore_quality_bit} -> force value of qflag to "good quality".')
                self.badquality[:,:,:,:] = False

        except Exception:
            logging.error('Problem reading bad quality of the lwcs_mask. Ignoring it. DO NOT IGNORE THIS ERROR')

        logging.debug(f'lwcs_mask missing initial : {self.missing}')
        self.values, self.missing = adapt_mask_to_SAF(self.values, self.missing)
        logging.debug(f'lwcs_mask missing transformed : {self.missing}')
        logging.debug('initial lwcs_mask (transformed to SAF) ' + self.name + ' data = '+str(_binary_repr_array(self.values[0,0,0,:], 'saf')))

        logging.debug('extract of ' + key + ' data = '+str(self.values[0,0,:,:]))
        return self
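
A standalone sketch of the VGT-style quality-bit test used above (toy mask values; the SWIR/BLUE/RED/NIR bit layout is taken from the comment in the code):

    import numpy as np

    quality_bits = [0b00010000, 0b00100000, 0b01000000, 0b10000000]  # SWIR BLUE RED NIR
    lwcs_mask = np.array([0b11110000, 0b00110000, 0b00000000], dtype='uint8')  # one byte per pixel

    badquality = np.zeros((len(quality_bits), lwcs_mask.size), dtype='bool')
    for iband, bit in enumerate(quality_bits):
        # a band is flagged as bad when its quality bit is not set, as in load()
        badquality[iband, :] = (np.bitwise_and(lwcs_mask, bit) != bit)
    print(badquality)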