def __init__(self, context, MCsteps=1000, parallel_cores=1):
     """
     Store the processing context and build the helper objects used for
     combining spectra (uncertainty propagation, averaging, templates,
     writing and plotting).

     :param context: processing context forwarded to all helper objects
     :param MCsteps: number of Monte Carlo steps forwarded to PropagateUnc
     :param parallel_cores: number of cores forwarded to PropagateUnc
     """
     self._measurement_function_factory = CombineFactory()
     self.prop = PropagateUnc(context,
                              MCsteps,
                              parallel_cores=parallel_cores)
     self.avg = Average(context=context)
     self.templ = DataTemplates(context)
     self.writer = HypernetsWriter(context)
     self.plot = Plotting(context)
     self.context = context
예제 #2
0
 def __init__(self, context):
     """
     Store the processing context and build the helper objects used by the
     RHYMER water-network processing (templates, writing, averaging,
     interpolation, plotting and the RHYMER ancillary/processing/shared
     helpers).

     :param context: processing context forwarded to all helper objects
     """
     self.context = context
     self.templ = DataTemplates(context=context)
     self.writer = HypernetsWriter(context)
     self.avg = Average(context)
     self.intp = Interpolate(context, MCsteps=1000)
     self.plot = Plotting(context)
     self.rhymeranc = RhymerAncillary(context)
     self.rhymerproc = RhymerProcessing(context)
     self.rhymershared = RhymerShared(context)
 def __init__(self, context, MCsteps=1000, parallel_cores=1):
     """
     Store the processing context and build the helper objects used for
     surface-reflectance processing (uncertainty propagation, templates,
     writing, averaging, calibration, plotting and the RHYMER helpers).

     :param context: processing context forwarded to all helper objects
     :param MCsteps: number of Monte Carlo steps forwarded to PropagateUnc
     :param parallel_cores: number of cores forwarded to PropagateUnc
     """
     self._measurement_function_factory = ProtocolFactory(context=context)
     self.prop = PropagateUnc(context,
                              MCsteps,
                              parallel_cores=parallel_cores)
     self.templ = DataTemplates(context=context)
     self.writer = HypernetsWriter(context)
     self.avg = Average(context)
     self.calibrate = Calibrate(context)
     self.plot = Plotting(context)
     self.context = context
     self.rh = RhymerHypstar(context)
     self.rhp = RhymerProcessing(context)
     self.rhs = RhymerShared(context)
class CombineSWIR:
    """
    Combine a VNIR and a SWIR L1A dataset into a single L1B product,
    propagating random and systematic uncertainties through the combine
    measurement function selected in the configuration.
    """

    def __init__(self, context, MCsteps=1000, parallel_cores=1):
        """
        :param context: processing context forwarded to all helper objects
        :param MCsteps: number of Monte Carlo steps forwarded to PropagateUnc
        :param parallel_cores: number of cores forwarded to PropagateUnc
        """
        self._measurement_function_factory = CombineFactory()
        self.prop = PropagateUnc(context,
                                 MCsteps,
                                 parallel_cores=parallel_cores)
        self.avg = Average(context=context)
        self.templ = DataTemplates(context)
        self.writer = HypernetsWriter(context)
        self.plot = Plotting(context)
        self.context = context

    def combine(self, measurandstring, dataset_l1a, dataset_l1a_swir):
        """
        Average both L1A datasets to L1B, combine them with the configured
        "measurement_function_combine" and propagate the associated
        uncertainties; optionally write and plot the combined product.

        :param measurandstring: name of the measurand variable in the
            datasets (e.g. "radiance" or "irradiance")
        :param dataset_l1a: VNIR L1A dataset
        :param dataset_l1a_swir: SWIR L1A dataset
        :return: combined L1B dataset
        """
        dataset_l1a = self.perform_checks(dataset_l1a)
        dataset_l1b = self.avg.average_l1b(measurandstring, dataset_l1a)
        dataset_l1b_swir = self.avg.average_l1b(measurandstring,
                                                dataset_l1a_swir)
        combine_function = self._measurement_function_factory.get_measurement_function(
            self.context.get_config_value("measurement_function_combine"))
        # Inputs in the argument order expected by the combine function:
        # VNIR wavelengths and values, SWIR wavelengths and values, and the
        # crossover wavelength below/above which each sensor is used.
        input_qty = [
            dataset_l1b["wavelength"].values,
            dataset_l1b[measurandstring].values,
            dataset_l1b_swir["wavelength"].values,
            dataset_l1b_swir[measurandstring].values,
            self.context.get_config_value("combine_lim_wav")
        ]
        # Uncertainty/correlation entries are None for the inputs that carry
        # no uncertainty (the wavelength grids and the crossover limit).
        u_random_input_qty = [
            None, dataset_l1b["u_random_" + measurandstring].values, None,
            dataset_l1b_swir["u_random_" + measurandstring].values, None
        ]
        u_systematic_input_qty_indep = [
            None, dataset_l1b["u_systematic_indep_" + measurandstring].values,
            None,
            dataset_l1b_swir["u_systematic_indep_" + measurandstring].values,
            None
        ]
        u_systematic_input_qty_corr = [
            None,
            dataset_l1b["u_systematic_corr_rad_irr_" + measurandstring].values,
            None, dataset_l1b_swir["u_systematic_corr_rad_irr_" +
                                   measurandstring].values, None
        ]
        corr_systematic_input_qty_indep = [
            None,
            dataset_l1b["corr_systematic_indep_" + measurandstring].values,
            None, dataset_l1b_swir["corr_systematic_indep_" +
                                   measurandstring].values, None
        ]
        corr_systematic_input_qty_corr = [
            None, dataset_l1b["corr_systematic_corr_rad_irr_" +
                              measurandstring].values, None,
            dataset_l1b_swir["corr_systematic_corr_rad_irr_" +
                             measurandstring].values, None
        ]
        #todo do this more consistently with other modules, and do a direct copy for ranges that don't overlap
        dataset_l1b_comb = self.templ.l1b_template_from_combine(
            measurandstring, dataset_l1b, dataset_l1b_swir)

        # param_fixed marks the wavelength grids and the crossover limit as
        # fixed (non-randomised) inputs in the Monte Carlo propagation.
        self.prop.process_measurement_function_l1(
            measurandstring,
            dataset_l1b_comb,
            combine_function.function,
            input_qty,
            u_random_input_qty,
            u_systematic_input_qty_indep,
            u_systematic_input_qty_corr,
            corr_systematic_input_qty_indep,
            corr_systematic_input_qty_corr,
            param_fixed=[True, False, True, False, True])

        if self.context.get_config_value("write_l1b"):
            self.writer.write(dataset_l1b_comb, overwrite=True)

        if self.context.get_config_value("plot_l1b"):
            self.plot.plot_series_in_sequence(measurandstring,
                                              dataset_l1b_comb)

        if self.context.get_config_value("plot_uncertainty"):
            self.plot.plot_relative_uncertainty(measurandstring,
                                                dataset_l1b_comb)

        if self.context.get_config_value("plot_correlation"):
            self.plot.plot_correlation(measurandstring, dataset_l1b_comb)

        # if self.context.get_config_value("plot_diff"):
        #     self.plot.plot_diff_scans(measurandstring,dataset_l1a,dataset_l1b)

        return dataset_l1b_comb

    def perform_checks(self, dataset_l1):
        """
        Identifies and removes faulty measurements (e.g. due to cloud cover).

        Currently a placeholder that returns the dataset unchanged.

        :param dataset_l1: L1 dataset to check
        :return: the (possibly filtered) L1 dataset
        """

        return dataset_l1
class SurfaceReflectance:
    """
    Derive surface-reflectance products (L1C and L2A) from L1B/L1C datasets,
    propagating uncertainties through the surface-reflectance measurement
    function selected in the configuration.
    """

    def __init__(self, context, MCsteps=1000, parallel_cores=1):
        """
        :param context: processing context forwarded to all helper objects
        :param MCsteps: number of Monte Carlo steps forwarded to PropagateUnc
        :param parallel_cores: number of cores forwarded to PropagateUnc
        """
        self._measurement_function_factory = ProtocolFactory(context=context)
        self.prop = PropagateUnc(context,
                                 MCsteps,
                                 parallel_cores=parallel_cores)
        self.templ = DataTemplates(context=context)
        self.writer = HypernetsWriter(context)
        self.avg = Average(context)
        self.calibrate = Calibrate(context)
        self.plot = Plotting(context)
        self.context = context
        self.rh = RhymerHypstar(context)
        self.rhp = RhymerProcessing(context)
        self.rhs = RhymerShared(context)

    def process_l1c(self, dataset):
        """
        Produce an L1C product from an L1B dataset: add wind and fresnel
        reflectance, evaluate the configured surface-reflectance function
        with uncertainty propagation, apply the similarity quality check and
        optionally write/plot the result.

        :param dataset: L1B dataset
        :return: L1C dataset
        """
        dataset_l1c = self.templ.l1c_from_l1b_dataset(dataset)
        dataset_l1c = self.rh.get_wind(dataset_l1c)
        dataset_l1c = self.rh.get_fresnelrefl(dataset_l1c)

        l1ctol1b_function = self._measurement_function_factory.get_measurement_function(
            self.context.get_config_value(
                "measurement_function_surface_reflectance"))

        input_vars = l1ctol1b_function.get_argument_names()
        input_qty = self.prop.find_input(input_vars, dataset_l1c)
        u_random_input_qty = self.prop.find_u_random_input(
            input_vars, dataset_l1c)
        u_systematic_input_qty, corr_systematic_input_qty = \
            self.prop.find_u_systematic_input(input_vars, dataset_l1c)

        L1c = self.prop.process_measurement_function_l2(
            [
                "water_leaving_radiance", "reflectance_nosc", "reflectance",
                "epsilon"
            ],
            dataset_l1c,
            l1ctol1b_function.function,
            input_qty,
            u_random_input_qty,
            u_systematic_input_qty,
            corr_systematic_input_qty,
            param_fixed=[False, False, False, False, True])

        # Flag scans that fail the similarity check.
        failSimil = self.rh.qc_similarity(L1c)
        L1c["quality_flag"][np.where(failSimil == 1)] = DatasetUtil.set_flag(
            L1c["quality_flag"][np.where(failSimil == 1)],
            "simil_fail")  # for i in range(len(mask))]

        if self.context.get_config_value("write_l1c"):
            self.writer.write(L1c, overwrite=True)
        for measurandstring in [
                "water_leaving_radiance", "reflectance_nosc", "reflectance",
                "epsilon"
        ]:
            try:
                if self.context.get_config_value("plot_l1c"):
                    self.plot.plot_series_in_sequence(measurandstring, L1c)

                if self.context.get_config_value("plot_uncertainty"):
                    self.plot.plot_relative_uncertainty(measurandstring,
                                                        L1c,
                                                        L2=True)
            # Plotting is best-effort: narrowed from a bare "except:" so
            # KeyboardInterrupt/SystemExit are not swallowed.
            except Exception:
                print("not plotting ", measurandstring)
        return L1c

    def process_l2(self, dataset):
        """
        Produce an L2A product from an L1C dataset, either by averaging
        (water network, "w") or by evaluating the configured reflectance
        function with uncertainty propagation (land network, "l").

        :param dataset: L1C dataset
        :return: L2A dataset
        """
        dataset = self.perform_checks(dataset)
        l1tol2_function = self._measurement_function_factory.get_measurement_function(
            self.context.get_config_value(
                "measurement_function_surface_reflectance"))
        input_vars = l1tol2_function.get_argument_names()
        input_qty = self.prop.find_input(input_vars, dataset)
        u_random_input_qty = self.prop.find_u_random_input(input_vars, dataset)
        u_systematic_input_qty, cov_systematic_input_qty = \
            self.prop.find_u_systematic_input(input_vars, dataset)

        if self.context.get_config_value("network").lower() == "w":

            dataset_l2a = self.avg.average_L2(dataset)

            for measurandstring in [
                    "water_leaving_radiance", "reflectance_nosc",
                    "reflectance", "epsilon"
            ]:
                try:
                    if self.context.get_config_value("plot_l2a"):
                        self.plot.plot_series_in_sequence(
                            measurandstring, dataset_l2a)

                    if self.context.get_config_value("plot_uncertainty"):
                        self.plot.plot_relative_uncertainty(measurandstring,
                                                            dataset_l2a,
                                                            L2=True)

                    if self.context.get_config_value("plot_correlation"):
                        self.plot.plot_correlation(measurandstring,
                                                   dataset_l2a,
                                                   L2=True)
                # Plotting is best-effort: narrowed from a bare "except:" so
                # KeyboardInterrupt/SystemExit are not swallowed.
                except Exception:
                    print("not plotting ", measurandstring)

        elif self.context.get_config_value("network").lower() == "l":
            dataset_l2a = self.templ.l2_from_l1c_dataset(dataset)
            dataset_l2a = self.prop.process_measurement_function_l2(
                ["reflectance"], dataset_l2a, l1tol2_function.function,
                input_qty, u_random_input_qty, u_systematic_input_qty,
                cov_systematic_input_qty)
            if self.context.get_config_value("plot_l2a"):
                self.plot.plot_series_in_sequence("reflectance", dataset_l2a)

            if self.context.get_config_value("plot_uncertainty"):
                self.plot.plot_relative_uncertainty("reflectance",
                                                    dataset_l2a,
                                                    L2=True)

            if self.context.get_config_value("plot_correlation"):
                self.plot.plot_correlation("reflectance", dataset_l2a, L2=True)
        else:
            # NOTE(review): when the network is neither "w" nor "l",
            # dataset_l2a is never assigned and the code below raises
            # NameError after logging — confirm whether an early return or
            # exception is intended here.
            self.context.logger.error("network is not correctly defined")

        if self.context.get_config_value("write_l2a"):
            self.writer.write(dataset_l2a, overwrite=True)

        return dataset_l2a

    def perform_checks(self, dataset_l1):
        """
        Identifies and removes faulty measurements (e.g. due to cloud cover).

        Currently a placeholder that returns the dataset unchanged.

        :param dataset_l1: L1 dataset to check
        :return: the (possibly filtered) L1 dataset
        """

        return dataset_l1
예제 #6
0
class RhymerHypstar:
    """
    Quality control and L1C-interpolation processing for water-network
    (RHYMER) HYPSTAR data: scan-level variability checks, cycle parsing,
    wind retrieval, fresnel reflectance and similarity checks.
    """

    def __init__(self, context):
        """
        Store the processing context and build the helper objects used by
        the methods below.

        :param context: processing context forwarded to all helper objects
        """
        self.context = context
        self.templ = DataTemplates(context=context)
        self.writer = HypernetsWriter(context)
        self.avg = Average(context)
        self.intp = Interpolate(context, MCsteps=1000)
        self.plot = Plotting(context)
        self.rhymeranc = RhymerAncillary(context)
        self.rhymerproc = RhymerProcessing(context)
        self.rhymershared = RhymerShared(context)

    def qc_scan(self, dataset, measurandstring, dataset_l1b):
        """
        Flag scans with excessive temporal variability at the configured
        reference wavelength ("diff_wave"): each scan's value is compared to
        its neighbouring scans in the same series and flagged when the
        relative difference exceeds "diff_threshold". For irradiance the
        values are first normalised by cos(solar zenith angle).

        :param dataset: L1A dataset containing the scans to check
        :param measurandstring: "irradiance" (cos-sza-normalised check) or
            any other value (treated as radiance)
        :param dataset_l1b: L1B dataset whose "quality_flag" is updated with
            "temp_variability_ed"/"temp_variability_lu" when a scan fails
        :return: tuple (dataset_l1b, flags) with flags a per-scan array
            holding 1 for flagged scans
        """
        ## no inclination
        ## difference at 550 nm < 25% with neighbours
        ##
        ## QV July 2018
        ## Last modifications: 2019-07-10 (QV) renamed from PANTR, integrated in rhymer
        # Modified 10/09/2020 by CG for the PANTHYR
        verbosity = self.context.get_config_value("verbosity")
        series_id = np.unique(dataset['series_id'])
        wave = dataset['wavelength'].values
        flags = np.zeros(shape=len(dataset['scan']))
        # NOTE(review): "id" shadows the builtin of the same name; it indexes
        # flags per scan across all series.
        id = 0
        for s in series_id:

            scans = dataset['scan'][dataset['series_id'] == s]

            ##
            n = len(scans)
            ## get pixel index for wavelength
            iref, wref = self.rhymershared.closest_idx(
                wave, self.context.get_config_value("diff_wave"))

            cos_sza = []
            for i in dataset['solar_zenith_angle'].sel(scan=scans).values:
                cos_sza.append(math.cos(math.radians(i)))

            ## go through the current set of scans
            for i in range(n):
                ## test inclination
                ## not done

                if measurandstring == 'irradiance':
                    data = dataset['irradiance'].sel(scan=scans).T.values

                    ## test variability at 550 nm
                    # First/last scans have only one neighbour; interior
                    # scans use the worse (max) of the two neighbours.
                    if i == 0:
                        v = abs(1 - ((data[i][iref] / cos_sza[i]) /
                                     (data[i + 1][iref] / cos_sza[i + 1])))
                    elif i < n - 1:
                        v = max(
                            abs(1 - ((data[i][iref] / cos_sza[i]) /
                                     (data[i + 1][iref] / cos_sza[i + 1]))),
                            abs(1 - ((data[i][iref] / cos_sza[i]) /
                                     (data[i - 1][iref] / cos_sza[i - 1]))))
                    else:
                        v = abs(1 - ((data[i][iref] / cos_sza[i]) /
                                     (data[i - 1][iref] / cos_sza[i - 1])))
                else:
                    data = dataset['radiance'].sel(scan=scans).T.values
                    ## test variability at 550 nm
                    if i == 0:
                        v = abs(1 - (data[i][iref] / data[i + 1][iref]))
                    elif i < n - 1:
                        v = max(abs(1 - (data[i][iref] / data[i + 1][iref])),
                                abs(1 - (data[i][iref] / data[i - 1][iref])))
                    else:
                        v = abs(1 - (data[i][iref] / data[i - 1][iref]))

                ## continue if value exceeds the cv threshold
                if v > self.context.get_config_value("diff_threshold"):
                    # get flag value for the temporal variability
                    if measurandstring == 'irradiance':
                        flags[id] = 1
                        dataset_l1b['quality_flag'][range(
                            len(dataset_l1b['scan']))] = du.set_flag(
                                dataset_l1b["quality_flag"][range(
                                    len(dataset_l1b['scan']))],
                                "temp_variability_ed")
                    else:
                        flags[id] = 1
                        dataset_l1b['quality_flag'][range(
                            len(dataset_l1b['scan']))] = du.set_flag(
                                dataset_l1b["quality_flag"][range(
                                    len(dataset_l1b['scan']))],
                                "temp_variability_lu")

                    seq = dataset.attrs["sequence_id"]
                    ts = datetime.utcfromtimestamp(
                        dataset['acquisition_time'][i])

                    if verbosity > 2:
                        self.context.logger.info(
                            'Temporal jump: in {}:  Aquisition time {}, {}'.
                            format(
                                seq, ts, ', '.join([
                                    '{}:{}'.format(k,
                                                   dataset[k][scans[i]].values)
                                    for k in ['scan', 'quality_flag']
                                ])))
                id += 1

            # NOTE(review): this return sits inside the "for s in series_id"
            # loop, so only the first series is processed before returning —
            # confirm whether it should be dedented to cover all series.
            return dataset_l1b, flags

    def cycleparse(self, rad, irr, dataset_l1b):
        """
        Split radiance scans into upwelling/downwelling sets, validate the
        viewing geometry (azimuth equivalence and fresnel zenith angle of
        the downwelling scans) and check the minimum number of valid scans
        per cycle, flagging dataset_l1b accordingly.

        :param rad: L1A radiance dataset
        :param irr: L1A irradiance dataset
        :param dataset_l1b: L1B dataset whose "quality_flag" is updated
        :return: tuple (lu, lsky, irr, dataset_l1b) with lu the upwelling
            and lsky the downwelling radiance scans (only when the protocol
            is 'WaterNetworkProtocol'; otherwise returns None implicitly)
        """

        protocol = self.context.get_config_value(
            "measurement_function_surface_reflectance")
        self.context.logger.debug(protocol)
        nbrlu = self.context.get_config_value("n_upwelling_rad")
        nbred = self.context.get_config_value("n_upwelling_irr")
        nbrlsky = self.context.get_config_value("n_downwelling_rad")

        if protocol != 'WaterNetworkProtocol':
            # here we should simply provide surface reflectance?
            # what about a non-standard protocol but that includes the required standard series?
            self.context.logger.error(
                'Unknown measurement protocol: {}'.format(protocol))
        else:
            uprad = []
            downrad = []
            # Scans with viewing zenith < 90 deg look up(welling); >= 90 deg
            # look down(welling).
            for i in rad['scan']:
                scani = rad.sel(scan=i)
                senz = scani["viewing_zenith_angle"].values
                if senz < 90:
                    measurement = 'upwelling_radiance'
                    uprad.append(int(i))
                if senz >= 90:
                    measurement = 'downwelling_radiance'
                    downrad.append(int(i))
                # NOTE(review): "measurement" is only unbound here if senz is
                # NaN on the first scan (neither comparison true) — confirm
                # whether that case can occur.
                if measurement is None: continue

            lu = rad.sel(scan=uprad)
            lsky = rad.sel(scan=downrad)

            # Flag upwelling scans with missing ('NULL') viewing angles.
            for i in lu['scan']:
                scani = lu.sel(scan=i)
                sena = scani["viewing_azimuth_angle"].values
                senz = scani["viewing_zenith_angle"].values
                self.context.logger.debug(scani['acquisition_time'].values)
                ts = datetime.utcfromtimestamp(
                    int(scani['acquisition_time'].values))
                # not fromtimestamp?

                if (senz != 'NULL') & (sena != 'NULL'):
                    senz = float(senz)
                    sena = abs(float(sena))
                else:
                    dataset_l1b['quality_flag'] = du.set_flag(
                        dataset_l1b.sel(scan=i)['quality_flag'],
                        "angles_missing")
                    self.context.logger.info(
                        'NULL angles: Aquisition time {}, {}'.format(
                            ts, ', '.join([
                                '{}:{}'.format(k, scani[k].values)
                                for k in ['scan', 'quality_flag']
                            ])))
                    continue

            # check if we have the same azimuth for lu and lsky
            sena_lu = np.unique(lu["viewing_azimuth_angle"].values)
            sena_lsky = np.unique(lsky["viewing_azimuth_angle"].values)
            for i in sena_lu:
                if i not in sena_lsky:
                    dataset_l1b["quality_flag"][
                        dataset_l1b["viewing_azimuth_angle"] ==
                        i] = du.set_flag(
                            dataset_l1b["quality_flag"][
                                dataset_l1b["viewing_azimuth_angle"] == i],
                            "lu_eq_missing")
                    if self.context.get_config_value("verbosity") > 2:
                        ts = [
                            datetime.utcfromtimestamp(x)
                            for x in lu['acquisition_time'][
                                lu["viewing_azimuth_angle"] == i].values
                        ]
                        self.context.logger.info(
                            'No azimuthal equivalent downwelling radiance measurement: Aquisition time {}, {}'
                            .format(
                                ts, ', '.join([
                                    '{}:{}'.format(
                                        k, lu[k][lu["viewing_azimuth_angle"] ==
                                                 i].values)
                                    for k in ['scan', 'quality_flag']
                                ])))

            # check if we have the required fresnel angle for lsky
            senz_lu = np.unique(lu["viewing_zenith_angle"].values)
            senz_lsky = 180 - np.unique(lsky["viewing_zenith_angle"].values)
            for i in senz_lu:
                if i not in senz_lsky:
                    dataset_l1b["quality_flag"][
                        dataset_l1b["viewing_azimuth_angle"] ==
                        i] = du.set_flag(
                            dataset_l1b["quality_flag"][
                                dataset_l1b["viewing_azimuth_angle"] == i],
                            "fresnel_angle_missing")
                    ts = [
                        datetime.utcfromtimestamp(x)
                        for x in lu['acquisition_time'][
                            lu["viewing_zenith_angle"] == i].values
                    ]
                    self.context.logger.info(
                        'No downwelling radiance measurement at appropriate fresnel angle: Aquisition time {}, {}'
                        .format(
                            ts, ', '.join([
                                '{}:{}'.format(
                                    k, lu[k][lu["viewing_azimuth_angle"] ==
                                             i].values)
                                for k in ['scan', 'quality_flag']
                            ])))

            # check if correct number of radiance and irradiance data

            if lu.scan[lu['quality_flag'] <= 0].count() < nbrlu:
                for i in range(len(dataset_l1b["scan"])):
                    dataset_l1b["quality_flag"][
                        dataset_l1b["scan"] == i] = du.set_flag(
                            dataset_l1b["quality_flag"][dataset_l1b["scan"] ==
                                                        i], "min_nbrlu")
                self.context.logger.info(
                    "No enough upwelling radiance data for sequence {}".format(
                        lu.attrs['sequence_id']))
            if lsky.scan[lsky['quality_flag'] <= 1].count() < nbrlsky:
                for i in range(len(dataset_l1b["scan"])):
                    dataset_l1b["quality_flag"][
                        dataset_l1b["scan"] == i] = du.set_flag(
                            dataset_l1b["quality_flag"][dataset_l1b["scan"] ==
                                                        i], "min_nbrlsky")
                self.context.logger.info(
                    "No enough downwelling radiance data for sequence {}".
                    format(lsky.attrs['sequence_id']))
            if irr.scan[irr['quality_flag'] <= 1].count() < nbred:
                for i in range(len(dataset_l1b["scan"])):
                    dataset_l1b["quality_flag"][
                        dataset_l1b["scan"] == i] = du.set_flag(
                            dataset_l1b["quality_flag"][dataset_l1b["scan"] ==
                                                        i], "min_nbred")
                self.context.logger.info(
                    "No enough downwelling irradiance data for sequence {}".
                    format(irr.attrs['sequence_id']))

            return lu, lsky, irr, dataset_l1b

    def get_wind(self, l1b):
        """
        Fill the "fresnel_wind" variable of l1b, either with the configured
        default wind speed (flagging "def_wind_flag") or with ancillary wind
        data looked up per scan acquisition time.

        :param l1b: L1B/L1C dataset with 'acquisition_time' per scan
        :return: l1b with 'fresnel_wind' values set
        """

        lat = l1b.attrs['site_latitude']
        # NOTE(review): lon is read from 'site_latitude' — this looks like a
        # copy-paste bug and presumably should be 'site_longitude'; confirm.
        lon = l1b.attrs['site_latitude']
        wind = []
        for i in range(len(l1b.scan)):
            wa = self.context.get_config_value("wind_ancillary")
            if not wa:
                l1b["quality_flag"][l1b["scan"] == i] = du.set_flag(
                    l1b["quality_flag"][l1b["scan"] == i], "def_wind_flag")
                self.context.logger.info("Default wind speed {}".format(
                    self.context.get_config_value("wind_default")))
                wind.append(self.context.get_config_value("wind_default"))
            else:
                isodate = datetime.utcfromtimestamp(
                    l1b['acquisition_time'].values[i]).strftime('%Y-%m-%d')
                isotime = datetime.utcfromtimestamp(
                    l1b['acquisition_time'].values[i]).strftime('%H:%M:%S')
                anc_wind = self.rhymeranc.get_wind(isodate,
                                                   lon,
                                                   lat,
                                                   isotime=isotime)
                # NOTE(review): when anc_wind is None nothing is appended, so
                # "wind" can end up shorter than the scan dimension and the
                # assignment below may fail — confirm intended handling.
                if anc_wind is not None:
                    wind.append(anc_wind)
        l1b['fresnel_wind'].values = wind
        return l1b

    def get_fresnelrefl(self, l1b):
        """
        Compute the fresnel reflectance coefficient ("rhof") per scan, from
        either the Mobley LUT or the Ruddick et al. (2006) wind-dependent
        parameterisation, depending on the "fresnel_option" config value.
        Also stores the fresnel viewing/solar geometry per scan.

        :param l1b: L1B/L1C dataset with viewing/solar angles and
            'fresnel_wind' already filled (see get_wind)
        :return: l1b with 'rhof', 'fresnel_vza', 'fresnel_raa' and
            'fresnel_sza' values set
        """

        ## read mobley rho lut
        fresnel_coeff = np.zeros(len(l1b.scan))
        fresnel_vza = np.zeros(len(l1b.scan))
        fresnel_raa = np.zeros(len(l1b.scan))
        fresnel_sza = np.zeros(len(l1b.scan))

        wind = l1b["fresnel_wind"].values
        for i in range(len(l1b.scan)):
            fresnel_vza[i] = l1b['viewing_zenith_angle'][i].values
            fresnel_sza[i] = l1b['solar_zenith_angle'][i].values

            diffa = l1b['viewing_azimuth_angle'][i].values - l1b[
                'solar_azimuth_angle'][i].values

            # Normalise the azimuth difference into [0, 360).
            if diffa >= 360:
                diffa = diffa - 360
            elif 0 <= diffa < 360:
                # NOTE(review): no-op branch kept for readability of the
                # three normalisation cases.
                diffa = diffa
            else:
                diffa = diffa + 360
            fresnel_raa[i] = abs((diffa - 180))

            ## get fresnel reflectance
            if self.context.get_config_value("fresnel_option") == 'Mobley':
                if (fresnel_sza[i] is not None) & (fresnel_raa[i] is not None):
                    # LUT is limited to sza < 80 deg.
                    sza = min(fresnel_sza[i], 79.999)
                    rhof = self.rhymerproc.mobley_lut_interp(sza,
                                                             fresnel_vza[i],
                                                             fresnel_raa[i],
                                                             wind=wind[i])
                else:
                    l1b["quality_flag"][l1b["scan"] == i] = du.set_flag(
                        l1b["quality_flag"][l1b["scan"] == i],
                        "fresnel_default")
                    rhof = self.context.get_config_value("rhof_default")
            # NOTE(review): if "fresnel_option" is neither 'Mobley' nor
            # 'Ruddick2006', rhof is unbound on the first iteration and the
            # assignment below raises NameError — confirm intended.
            if self.context.get_config_value(
                    "fresnel_option") == 'Ruddick2006':
                rhof = self.context.get_config_value("rhof_default")
                self.context.logger.info("Apply Ruddick et al., 2006")
                if wind[i] is not None:
                    rhof = rhof + 0.00039 * wind[i] + 0.000034 * wind[i]**2

            fresnel_coeff[i] = rhof

        l1b["rhof"].values = fresnel_coeff
        l1b["fresnel_vza"].values = fresnel_vza
        l1b["fresnel_raa"].values = fresnel_raa
        l1b["fresnel_sza"].values = fresnel_sza

        return l1b

    def qc_similarity(self, L1c):
        """
        Similarity quality check: per scan, compare abs(epsilon) against the
        similarity threshold times reflectance_nosc at the similarity
        reference wavelength.

        :param L1c: L1C dataset with 'epsilon', 'reflectance_nosc' and the
            'similarity_waveref'/'similarity_wavethres' attributes
        :return: list with 1 for failing scans and 0 otherwise
        """

        wave = L1c["wavelength"]
        wr = L1c.attrs["similarity_waveref"]
        wp = L1c.attrs["similarity_wavethres"]

        epsilon = L1c["epsilon"]
        ## get pixel index for wavelength
        irefr, wrefr = self.rhymershared.closest_idx(wave, wr)

        failSimil = []
        scans = L1c['scan']
        for i in range(len(scans)):
            data = L1c['reflectance_nosc'].sel(scan=i).values
            if abs(epsilon[i]) > wp * data[irefr]:
                failSimil.append(1)
            else:
                failSimil.append(0)
        return failSimil

    def process_l1c_int(self, l1a_rad, l1a_irr):
        """
        Build the intermediate L1C product from L1A radiance and irradiance:
        run the temporal-variability QC, drop flagged scans, parse the cycle
        into up/downwelling radiance, average, and interpolate Lsky and Ed
        to each Lu scan.

        :param l1a_rad: L1A radiance dataset
        :param l1a_irr: L1A irradiance dataset
        :return: interpolated intermediate L1C dataset
        """

        # because we average to Lu scan we propagate values from radiance!
        dataset_l1b = self.templ.l1c_int_template_from_l1a_dataset_water(
            l1a_rad)
        # QUALITY CHECK: TEMPORAL VARIABILITY IN ED AND LSKY -> ASSIGN FLAG
        dataset_l1b, flags_rad = self.qc_scan(l1a_rad, "radiance", dataset_l1b)
        dataset_l1b, flags_irr = self.qc_scan(l1a_irr, "irradiance",
                                              dataset_l1b)
        # QUALITY CHECK: MIN NBR OF SCANS -> ASSIGN FLAG
        # remove temporal variability scans before average
        l1a_rad = l1a_rad.sel(scan=np.where(np.array(flags_rad) != 1)[0])
        l1a_irr = l1a_irr.sel(scan=np.where(np.array(flags_irr) != 1)[0])

        # check number of scans per cycle for up, down radiance and irradiance
        L1a_uprad, L1a_downrad, L1a_irr, dataset_l1b = self.cycleparse(
            l1a_rad, l1a_irr, dataset_l1b)

        L1b_downrad = self.avg.average_l1b("radiance", L1a_downrad)
        L1b_irr = self.avg.average_l1b("irradiance", L1a_irr)
        # INTERPOLATE Lsky and Ed FOR EACH Lu SCAN! Threshold in time -> ASSIGN FLAG
        # interpolate_l1b_w calls interpolate_irradiance which includes interpolation of the
        # irradiance wavelength to the radiance wavelength
        L1c_int = self.intp.interpolate_l1b_w(dataset_l1b, L1a_uprad,
                                              L1b_downrad, L1b_irr)
        return L1c_int
예제 #7
0
    def test_end_to_end(self):
        """
        End-to-end land-network processing test: calibrate raw test data to
        L1A, average to L1B, interpolate to L1C, derive L2A surface
        reflectance, and compare against the reference L2A products.
        """
        this_directory_path = os.path.abspath(os.path.dirname(__file__))
        # Use os.pardir instead of a hard-coded "..\\" so the test also runs
        # on non-Windows platforms.
        this_directory_path = os.path.join(this_directory_path, os.pardir)

        tmpdir = "tmp_" + "".join(random.choices(string.ascii_lowercase, k=6))

        context = setup_test_context(
            raw_data_directory=os.path.join(tmpdir, "data"),
            archive_directory=os.path.join(tmpdir, "out"),
            metadata_db_url="sqlite:///" + tmpdir + "/metadata.db",
            anomaly_db_url="sqlite:///" + tmpdir + "/anomoly.db",
            archive_db_url="sqlite:///" + tmpdir + "/archive.db",
            create_directories=True,
            create_dbs=False)

        try:
            context.set_config_value('network', 'l')
            context.set_config_value('measurement_function_calibrate', 'StandardMeasurementFunction')
            context.set_config_value('measurement_function_interpolate', 'LandNetworkInterpolationIrradianceLinear')
            context.set_config_value('measurement_function_surface_reflectance', 'LandNetworkProtocol')
            context.set_config_value("processor_directory", this_directory_path)
            # Portable equivalent of the previous "..\\calibration_files\\HYPSTAR_cal\\".
            context.set_config_value(
                "calibration_directory",
                os.path.join(this_directory_path, os.pardir,
                             "calibration_files", "HYPSTAR_cal"))
            context.set_config_value("archive_directory", os.path.join(tmpdir, "out"))

            context.set_config_value('version', '0.1')
            context.set_config_value('site_abbr', 'test')
            context.set_config_value('product_format', 'netcdf4')
            context.set_config_value('hypstar_cal_number', '220241')
            context.set_config_value('cal_date', '200728')
            context.set_config_value('outliers', 7)

            context.set_config_value('write_l2a', True)
            context.set_config_value('write_l1a', True)
            context.set_config_value('write_l1b', True)
            context.set_config_value('plot_l2a', True)
            context.set_config_value('plot_l1a', True)
            context.set_config_value('plot_l1b', True)
            context.set_config_value('plot_diff', True)
            context.set_config_value('plotting_directory', os.path.join(tmpdir, "out/plots/"))
            context.set_config_value('plotting_format', "png")

            cal = Calibrate(context, MCsteps=100)
            avg = Average(context)
            intp = Interpolate(context, MCsteps=1000)
            surf = SurfaceReflectance(context, MCsteps=1000)

            test_l0_rad, test_l0_irr, test_l0_bla, test_l1a_rad, test_l1a_irr, \
                test_l1b_rad, test_l1b_irr, test_l2a, test_l2a_avg = setup_test_files(context)

            L1a_rad = cal.calibrate_l1a("radiance", test_l0_rad, test_l0_bla)
            L1a_irr = cal.calibrate_l1a("irradiance", test_l0_irr, test_l0_bla)
            L1b_rad = avg.average_l1b("radiance", L1a_rad)
            L1b_irr = avg.average_l1b("irradiance", L1a_irr)
            L1c = intp.interpolate_l1c(L1b_rad, L1b_irr)
            L2a = surf.process_l2(L1c)

            np.testing.assert_allclose(test_l2a["reflectance"].values,
                                       L2a["reflectance"].values,
                                       rtol=0.19, equal_nan=True)
            np.testing.assert_allclose(np.nansum(test_l2a["reflectance"].values),
                                       np.nansum(L2a["reflectance"].values),
                                       rtol=0.05, equal_nan=True)
            np.testing.assert_allclose(np.nansum(test_l2a_avg["reflectance"].values),
                                       np.nansum(L2a["reflectance"].values),
                                       rtol=0.001, equal_nan=True)
        finally:
            # Clean up the temporary working directory even when the test
            # fails, so repeated runs do not accumulate tmp_* directories.
            shutil.rmtree(tmpdir)
# Example #8
    def process_sequence(self, sequence_path):
        """
        Process a single HYPSTAR sequence from raw (L0) data up to surface
        reflectance (L2a).

        The processing chain depends on the configured network:

        - ``"w"`` (water): calibrate to L1a, average to L1b, Rhymer L1c
          processing, then L2a surface reflectance.
        - ``"l"`` (land): calibrate VNIR and SWIR separately to L1a, combine
          the two ranges to L1b, interpolate to L1c, then L2a.

        Products are written to disk by the writers according to the
        ``write_l1b``/``write_l2a`` configuration; nothing is returned.
        NOTE(review): L2a is computed but not explicitly written here —
        presumably ``surf.process_l2`` writes it internally; confirm.

        :param sequence_path: path to the sequence directory; its basename
            and parsed datetime are stored in the processing context so
            downstream writers/plotters can derive product names.
        :raises NameError: if the configured network is neither "w" nor "l".
        :return: None
        """

        # Update the shared context with sequence identity/time so that
        # every downstream component names its outputs consistently.
        self.context.set_config_value(
            "time",
            parse_sequence_path(sequence_path)["datetime"])
        self.context.set_config_value("sequence_path", sequence_path)
        self.context.set_config_value("sequence_name",
                                      os.path.basename(sequence_path))

        # Processing components shared by both networks.
        reader = HypernetsReader(self.context)
        calcon = CalibrationConverter(self.context)
        cal = Calibrate(self.context, MCsteps=100)
        surf = SurfaceReflectance(self.context, MCsteps=1000)
        avg = Average(self.context)
        rhymer = RhymerHypstar(self.context)
        writer = HypernetsWriter(self.context)

        if self.context.get_config_value("network") == "w":

            calibration_data_rad, calibration_data_irr = calcon.read_calib_files(
            )
            # Read L0
            self.context.logger.info("Reading raw data...")
            l0_irr, l0_rad, l0_bla = reader.read_sequence(
                sequence_path, calibration_data_rad, calibration_data_irr)
            self.context.logger.info("Done")

            # Calibrate to L1a (radiance and irradiance separately, both
            # using the same black/dark measurements).
            self.context.logger.info("Processing to L1a...")
            L1a_rad = cal.calibrate_l1a("radiance", l0_rad, l0_bla,
                                        calibration_data_rad)
            L1a_irr = cal.calibrate_l1a("irradiance", l0_irr, l0_bla,
                                        calibration_data_irr)
            self.context.logger.info("Done")

            self.context.logger.info("Processing to L1b radiance...")
            L1b_rad = avg.average_l1b("radiance", L1a_rad)
            if self.context.get_config_value("write_l1b"):
                writer.write(L1b_rad, overwrite=True)
            self.context.logger.info("Done")

            self.context.logger.info("Processing to L1b irradiance...")
            L1b_irr = avg.average_l1b("irradiance", L1a_irr)
            if self.context.get_config_value("write_l1b"):
                writer.write(L1b_irr, overwrite=True)
            self.context.logger.info("Done")

            # Water network: Rhymer works on the per-scan L1a products, not
            # the averaged L1b ones.
            self.context.logger.info("Processing to L1c...")
            L1c_int = rhymer.process_l1c_int(L1a_rad, L1a_irr)
            L1c = surf.process_l1c(L1c_int)
            self.context.logger.info("Done")

            self.context.logger.info("Processing to L2a...")
            L2a = surf.process_l2(L1c)
            self.context.logger.info("Done")

        elif self.context.get_config_value("network") == "l":
            # Land network adds SWIR handling: combine VNIR+SWIR ranges and
            # interpolate irradiance to the radiance timestamps/wavelengths.
            comb = CombineSWIR(self.context, MCsteps=100)
            intp = Interpolate(self.context, MCsteps=1000)

            # Read L0 (four calibration sets: VNIR/SWIR x rad/irr).
            self.context.logger.info("Reading raw data...")
            (calibration_data_rad, calibration_data_irr,
             calibration_data_swir_rad,
             calibration_data_swir_irr) = calcon.read_calib_files()
            l0_irr, l0_rad, l0_bla, l0_swir_irr, l0_swir_rad, l0_swir_bla = reader.read_sequence(
                sequence_path, calibration_data_rad, calibration_data_irr,
                calibration_data_swir_rad, calibration_data_swir_irr)
            self.context.logger.info("Done")

            # Calibrate to L1a
            self.context.logger.info("Processing to L1a...")
            L1a_rad = cal.calibrate_l1a("radiance", l0_rad, l0_bla,
                                        calibration_data_rad)
            L1a_irr = cal.calibrate_l1a("irradiance", l0_irr, l0_bla,
                                        calibration_data_irr)

            L1a_swir_rad = cal.calibrate_l1a("radiance",
                                             l0_swir_rad,
                                             l0_swir_bla,
                                             calibration_data_swir_rad,
                                             swir=True)
            L1a_swir_irr = cal.calibrate_l1a("irradiance",
                                             l0_swir_irr,
                                             l0_swir_bla,
                                             calibration_data_swir_irr,
                                             swir=True)
            self.context.logger.info("Done")

            self.context.logger.info("Processing to L1b radiance...")
            L1b_rad = comb.combine("radiance", L1a_rad, L1a_swir_rad)
            self.context.logger.info("Done")

            self.context.logger.info("Processing to L1b irradiance...")
            L1b_irr = comb.combine("irradiance", L1a_irr, L1a_swir_irr)
            self.context.logger.info("Done")

            self.context.logger.info("Processing to L1c...")
            L1c = intp.interpolate_l1c(L1b_rad, L1b_irr)
            self.context.logger.info("Done")

            self.context.logger.info("Processing to L2a...")
            L2a = surf.process_l2(L1c)
            self.context.logger.info("Done")

        else:
            raise NameError("Invalid network: " +
                            self.context.get_config_value("network"))

        return None