Code example #1
 def __init__(self, context, MCsteps=1000, parallel_cores=1):
     self._measurement_function_factory = InterpolationFactory()
     self.prop = PropagateUnc(context, MCsteps, parallel_cores=parallel_cores)
     self.templ = DataTemplates(context=context)
     self.writer = HypernetsWriter(context)
     self.plot = Plotting(context)
     self.context = context
Code example #2
 def __init__(self, context, MCsteps=1000, parallel_cores=1):
     self._measurement_function_factory = ProtocolFactory(context=context)
     self.prop = PropagateUnc(context,
                              MCsteps,
                              parallel_cores=parallel_cores)
     self.templ = DataTemplates(context=context)
     self.writer = HypernetsWriter(context)
     self.avg = Average(context)
     self.calibrate = Calibrate(context)
     self.plot = Plotting(context)
     self.context = context
     self.rh = RhymerHypstar(context)
     self.rhp = RhymerProcessing(context)
     self.rhs = RhymerShared(context)
Code example #3
class CombineSWIR:
    def __init__(self, context, MCsteps=1000, parallel_cores=1):
        self._measurement_function_factory = CombineFactory()
        self.prop = PropagateUnc(context,
                                 MCsteps,
                                 parallel_cores=parallel_cores)
        self.avg = Average(context=context)
        self.templ = DataTemplates(context)
        self.writer = HypernetsWriter(context)
        self.plot = Plotting(context)
        self.context = context

    def combine(self, measurandstring, dataset_l1a, dataset_l1a_swir):
        dataset_l1a = self.perform_checks(dataset_l1a)
        dataset_l1b = self.avg.average_l1b(measurandstring, dataset_l1a)
        dataset_l1b_swir = self.avg.average_l1b(measurandstring,
                                                dataset_l1a_swir)
        combine_function = self._measurement_function_factory.get_measurement_function(
            self.context.get_config_value("measurement_function_combine"))
        input_vars = combine_function.get_argument_names()
        input_qty = [
            dataset_l1b["wavelength"].values,
            dataset_l1b[measurandstring].values,
            dataset_l1b_swir["wavelength"].values,
            dataset_l1b_swir[measurandstring].values,
            self.context.get_config_value("combine_lim_wav")
        ]
        u_random_input_qty = [
            None, dataset_l1b["u_random_" + measurandstring].values, None,
            dataset_l1b_swir["u_random_" + measurandstring].values, None
        ]
        u_systematic_input_qty_indep = [
            None, dataset_l1b["u_systematic_indep_" + measurandstring].values,
            None,
            dataset_l1b_swir["u_systematic_indep_" + measurandstring].values,
            None
        ]
        u_systematic_input_qty_corr = [
            None,
            dataset_l1b["u_systematic_corr_rad_irr_" + measurandstring].values,
            None, dataset_l1b_swir["u_systematic_corr_rad_irr_" +
                                   measurandstring].values, None
        ]
        corr_systematic_input_qty_indep = [
            None,
            dataset_l1b["corr_systematic_indep_" + measurandstring].values,
            None, dataset_l1b_swir["corr_systematic_indep_" +
                                   measurandstring].values, None
        ]
        corr_systematic_input_qty_corr = [
            None, dataset_l1b["corr_systematic_corr_rad_irr_" +
                              measurandstring].values, None,
            dataset_l1b_swir["corr_systematic_corr_rad_irr_" +
                             measurandstring].values, None
        ]
        # TODO: do this more consistently with other modules, and do a direct copy for ranges that don't overlap
        dataset_l1b_comb = self.templ.l1b_template_from_combine(
            measurandstring, dataset_l1b, dataset_l1b_swir)

        self.prop.process_measurement_function_l1(
            measurandstring,
            dataset_l1b_comb,
            combine_function.function,
            input_qty,
            u_random_input_qty,
            u_systematic_input_qty_indep,
            u_systematic_input_qty_corr,
            corr_systematic_input_qty_indep,
            corr_systematic_input_qty_corr,
            param_fixed=[True, False, True, False, True])

        if self.context.get_config_value("write_l1b"):
            self.writer.write(dataset_l1b_comb, overwrite=True)

        if self.context.get_config_value("plot_l1b"):
            self.plot.plot_series_in_sequence(measurandstring,
                                              dataset_l1b_comb)

        if self.context.get_config_value("plot_uncertainty"):
            self.plot.plot_relative_uncertainty(measurandstring,
                                                dataset_l1b_comb)

        if self.context.get_config_value("plot_correlation"):
            self.plot.plot_correlation(measurandstring, dataset_l1b_comb)

        # if self.context.get_config_value("plot_diff"):
        #     self.plot.plot_diff_scans(measurandstring,dataset_l1a,dataset_l1b)

        return dataset_l1b_comb

    def perform_checks(self, dataset_l1):
        """
        Identifies and removes faulty measurements (e.g. due to cloud cover).

        :param dataset_l0:
        :type dataset_l0:
        :return:
        :rtype:
        """

        return dataset_l1
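
A note on the combine step above: the actual combine function is looked up at runtime from CombineFactory via the "measurement_function_combine" config value, so its body does not appear in this example. Purely for orientation, a hypothetical combine function might splice the VNIR and SWIR spectra at the configured limit wavelength; everything below (names and logic) is an illustrative assumption, not code from hypernets_processor.

import numpy as np

def combine_vnir_swir(wav_vnir, meas_vnir, wav_swir, meas_swir, lim_wav):
    # Hypothetical sketch: keep VNIR values below lim_wav and SWIR values
    # from lim_wav upwards, returning the measurand on the combined grid.
    keep_vnir = wav_vnir < lim_wav
    keep_swir = wav_swir >= lim_wav
    return np.concatenate([meas_vnir[keep_vnir], meas_swir[keep_swir]])

# synthetic example: VNIR up to 1000 nm, SWIR from 900 nm, spliced at 1000 nm
wav_vnir = np.linspace(400.0, 1000.0, 7)
wav_swir = np.linspace(900.0, 1700.0, 9)
combined = combine_vnir_swir(wav_vnir, np.ones(7), wav_swir, np.zeros(9), 1000.0)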
Code example #4
class SurfaceReflectance:
    def __init__(self, context, MCsteps=1000, parallel_cores=1):
        self._measurement_function_factory = ProtocolFactory(context=context)
        self.prop = PropagateUnc(context,
                                 MCsteps,
                                 parallel_cores=parallel_cores)
        self.templ = DataTemplates(context=context)
        self.writer = HypernetsWriter(context)
        self.avg = Average(context)
        self.calibrate = Calibrate(context)
        self.plot = Plotting(context)
        self.context = context
        self.rh = RhymerHypstar(context)
        self.rhp = RhymerProcessing(context)
        self.rhs = RhymerShared(context)

    def process_l1c(self, dataset):
        dataset_l1c = self.templ.l1c_from_l1b_dataset(dataset)
        dataset_l1c = self.rh.get_wind(dataset_l1c)
        dataset_l1c = self.rh.get_fresnelrefl(dataset_l1c)

        l1ctol1b_function = self._measurement_function_factory.get_measurement_function(
            self.context.get_config_value(
                "measurement_function_surface_reflectance"))

        input_vars = l1ctol1b_function.get_argument_names()
        input_qty = self.prop.find_input(input_vars, dataset_l1c)
        u_random_input_qty = self.prop.find_u_random_input(
            input_vars, dataset_l1c)
        u_systematic_input_qty, corr_systematic_input_qty = \
            self.prop.find_u_systematic_input(input_vars, dataset_l1c)

        L1c = self.prop.process_measurement_function_l2(
            [
                "water_leaving_radiance", "reflectance_nosc", "reflectance",
                "epsilon"
            ],
            dataset_l1c,
            l1ctol1b_function.function,
            input_qty,
            u_random_input_qty,
            u_systematic_input_qty,
            corr_systematic_input_qty,
            param_fixed=[False, False, False, False, True])

        failSimil = self.rh.qc_similarity(L1c)
        L1c["quality_flag"][np.where(failSimil == 1)] = DatasetUtil.set_flag(
            L1c["quality_flag"][np.where(failSimil == 1)],
            "simil_fail")  # for i in range(len(mask))]

        if self.context.get_config_value("write_l1c"):
            self.writer.write(L1c, overwrite=True)
        for measurandstring in [
                "water_leaving_radiance", "reflectance_nosc", "reflectance",
                "epsilon"
        ]:
            try:
                if self.context.get_config_value("plot_l1c"):
                    self.plot.plot_series_in_sequence(measurandstring, L1c)

                if self.context.get_config_value("plot_uncertainty"):
                    self.plot.plot_relative_uncertainty(measurandstring,
                                                        L1c,
                                                        L2=True)
            except:
                print("not plotting ", measurandstring)
        return L1c

    def process_l2(self, dataset):
        dataset = self.perform_checks(dataset)
        l1tol2_function = self._measurement_function_factory.get_measurement_function(
            self.context.get_config_value(
                "measurement_function_surface_reflectance"))
        input_vars = l1tol2_function.get_argument_names()
        input_qty = self.prop.find_input(input_vars, dataset)
        u_random_input_qty = self.prop.find_u_random_input(input_vars, dataset)
        u_systematic_input_qty, cov_systematic_input_qty = \
            self.prop.find_u_systematic_input(input_vars, dataset)

        if self.context.get_config_value("network").lower() == "w":

            dataset_l2a = self.avg.average_L2(dataset)

            for measurandstring in [
                    "water_leaving_radiance", "reflectance_nosc",
                    "reflectance", "epsilon"
            ]:
                try:
                    if self.context.get_config_value("plot_l2a"):
                        self.plot.plot_series_in_sequence(
                            measurandstring, dataset_l2a)

                    if self.context.get_config_value("plot_uncertainty"):
                        self.plot.plot_relative_uncertainty(measurandstring,
                                                            dataset_l2a,
                                                            L2=True)

                    if self.context.get_config_value("plot_correlation"):
                        self.plot.plot_correlation(measurandstring,
                                                   dataset_l2a,
                                                   L2=True)
                except:
                    print("not plotting ", measurandstring)

        elif self.context.get_config_value("network").lower() == "l":
            dataset_l2a = self.templ.l2_from_l1c_dataset(dataset)
            dataset_l2a = self.prop.process_measurement_function_l2(
                ["reflectance"], dataset_l2a, l1tol2_function.function,
                input_qty, u_random_input_qty, u_systematic_input_qty,
                cov_systematic_input_qty)
            if self.context.get_config_value("plot_l2a"):
                self.plot.plot_series_in_sequence("reflectance", dataset_l2a)

            if self.context.get_config_value("plot_uncertainty"):
                self.plot.plot_relative_uncertainty("reflectance",
                                                    dataset_l2a,
                                                    L2=True)

            if self.context.get_config_value("plot_correlation"):
                self.plot.plot_correlation("reflectance", dataset_l2a, L2=True)
        else:
            self.context.logger.error("network is not correctly defined")

        if self.context.get_config_value("write_l2a"):
            self.writer.write(dataset_l2a, overwrite=True)

        return dataset_l2a

    def perform_checks(self, dataset_l1):
        """
        Identifies and removes faulty measurements (e.g. due to cloud cover).

        :param dataset_l0:
        :type dataset_l0:
        :return:
        :rtype:
        """

        return dataset_l1
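
For orientation only: process_l1c and process_l2 delegate the actual arithmetic to whichever measurement function ProtocolFactory returns for the "measurement_function_surface_reflectance" config value, which is not shown in this example. Below is a minimal sketch of the kind of above-water calculation such a function might perform; the variable names and the formula are assumptions, not taken from the module.

import numpy as np

def surface_reflectance_sketch(upwelling_radiance, downwelling_radiance,
                               irradiance, rhof):
    # Hypothetical: remove reflected sky radiance using the Fresnel factor
    # rhof, then normalise by downwelling irradiance to get a reflectance.
    water_leaving_radiance = upwelling_radiance - rhof * downwelling_radiance
    reflectance = np.pi * water_leaving_radiance / irradiance
    return water_leaving_radiance, reflectance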
Code example #5
class Interpolate:
    def __init__(self, context, MCsteps=1000, parallel_cores=1):
        self._measurement_function_factory = InterpolationFactory()
        self.prop = PropagateUnc(context, MCsteps, parallel_cores=parallel_cores)
        self.templ = DataTemplates(context=context)
        self.writer = HypernetsWriter(context)
        self.plot = Plotting(context)
        self.context = context

    def interpolate_l1b_w(self, dataset_l1b, dataset_l1a_uprad,dataset_l1b_downrad, dataset_l1b_irr):

        # check for upwelling radiance
        upscan = [i for i, e in enumerate(dataset_l1a_uprad['viewing_zenith_angle'].values) if e < 90]

        dataset_l1b=self.templ.l1c_int_template_from_l1a_dataset_water(dataset_l1a_uprad)

        dataset_l1b["wavelength"] = dataset_l1a_uprad["wavelength"]
        dataset_l1b["upwelling_radiance"] = dataset_l1a_uprad["radiance"].sel(scan=upscan)
        dataset_l1b["acquisition_time"] = dataset_l1a_uprad["acquisition_time"].sel(scan=upscan)
        # is this correct????
        dataset_l1b["u_random_upwelling_radiance"] = dataset_l1a_uprad["u_random_radiance"].sel(scan=upscan)
        dataset_l1b["u_systematic_indep_upwelling_radiance"] = dataset_l1a_uprad["u_systematic_indep_radiance"].sel(scan=upscan)
        dataset_l1b["u_systematic_corr_rad_irr_upwelling_radiance"] = dataset_l1a_uprad["u_systematic_corr_rad_irr_radiance"].sel(scan=upscan)
        dataset_l1b["corr_random_upwelling_radiance"] = dataset_l1a_uprad["corr_random_radiance"]
        dataset_l1b["corr_systematic_indep_upwelling_radiance"] = dataset_l1a_uprad["corr_systematic_indep_radiance"]
        dataset_l1b["corr_systematic_corr_rad_irr_upwelling_radiance"] = dataset_l1a_uprad["corr_systematic_corr_rad_irr_radiance"]

        self.context.logger.info("interpolate sky radiance")
        dataset_l1b=self.interpolate_skyradiance(dataset_l1b, dataset_l1b_downrad)
        self.context.logger.info("interpolate irradiances")
        dataset_l1b=self.interpolate_irradiance(dataset_l1b, dataset_l1b_irr)
        return dataset_l1b

    def interpolate_l1c(self,dataset_l1b_rad,dataset_l1b_irr):


        dataset_l1c=self.templ.l1c_from_l1b_dataset(dataset_l1b_rad)
        dataset_l1c["acquisition_time"].values = dataset_l1b_rad["acquisition_time"].values

        dataset_l1c=self.interpolate_irradiance(dataset_l1c,dataset_l1b_irr)

        if self.context.get_config_value("write_l1c"):
            self.writer.write(dataset_l1c,overwrite=True)

        if self.context.get_config_value("plot_l1c"):
            self.plot.plot_series_in_sequence("irradiance",dataset_l1c)

        if self.context.get_config_value("plot_uncertainty"):
            self.plot.plot_relative_uncertainty("irradiance",dataset_l1c)

        if self.context.get_config_value("plot_correlation"):
            self.plot.plot_correlation("irradiance",dataset_l1c)

        return dataset_l1c

    def interpolate_irradiance(self,dataset_l1c,dataset_l1b_irr):
        measurement_function_interpolate_wav = self.context.get_config_value(
            'measurement_function_interpolate_wav')
        interpolation_function_wav = self._measurement_function_factory\
            .get_measurement_function(measurement_function_interpolate_wav)

        measurement_function_interpolate_time = self.context.get_config_value(
            'measurement_function_interpolate_time')
        interpolation_function_time = self._measurement_function_factory\
            .get_measurement_function(measurement_function_interpolate_time)

        # Interpolate in wavelength to radiance wavelengths
        wavs_rad=dataset_l1c["wavelength"].values
        wavs_irr=dataset_l1b_irr["wavelength"].values

        dataset_l1c_temp = self.templ.l1ctemp_dataset(dataset_l1c,dataset_l1b_irr)

        dataset_l1c_temp = self.prop.process_measurement_function_l1("irradiance",
            dataset_l1c_temp,interpolation_function_wav.function,
            [wavs_rad,wavs_irr,dataset_l1b_irr['irradiance'].values],
            [None,None,dataset_l1b_irr['u_random_irradiance'].values],
            [None,None,dataset_l1b_irr['u_systematic_indep_irradiance'].values],
            [None,None,dataset_l1b_irr['u_systematic_corr_rad_irr_irradiance'].values],
            [None,None,dataset_l1b_irr["corr_systematic_indep_irradiance"].values],
            [None,None,dataset_l1b_irr["corr_systematic_corr_rad_irr_irradiance"].values],
            )

        # Interpolate in time to radiance times
        acqui_irr = dataset_l1b_irr['acquisition_time'].values
        acqui_rad = dataset_l1c['acquisition_time'].values

        dataset_l1c = self.prop.process_measurement_function_l1("irradiance",
            dataset_l1c,interpolation_function_time.function,
            [acqui_rad,acqui_irr,dataset_l1c_temp['irradiance'].values],
            [None,None,dataset_l1c_temp['u_random_irradiance'].values],
            [None,None,dataset_l1c_temp['u_systematic_indep_irradiance'].values],
            [None,None,dataset_l1c_temp['u_systematic_corr_rad_irr_irradiance'].values],
            [None,None,dataset_l1c_temp["corr_systematic_indep_irradiance"].values],
            [None,None,dataset_l1c_temp["corr_systematic_corr_rad_irr_irradiance"].values],
            param_fixed=[False,True,True])
        return dataset_l1c

    def interpolate_skyradiance(self,dataset_l1c,dataset_l1a_skyrad):
        measurement_function_interpolate_time = self.context.get_config_value(
            'measurement_function_interpolate_time')
        interpolation_function_time = self._measurement_function_factory\
            .get_measurement_function(measurement_function_interpolate_time)

        acqui_irr = dataset_l1a_skyrad['acquisition_time'].values
        acqui_rad = dataset_l1c['acquisition_time'].values

        dataset_l1c = self.prop.process_measurement_function_l1("downwelling_radiance",dataset_l1c,
                                                        interpolation_function_time.function,
                                                        [acqui_rad,acqui_irr,
                                                         dataset_l1a_skyrad[
                                                             'radiance'].values],
                                                        [None,None,dataset_l1a_skyrad[
                                                            'u_random_radiance'].values],
                                                        [None,None,dataset_l1a_skyrad[
                                                            'u_systematic_indep_radiance'].values],
                                                        [None,None,dataset_l1a_skyrad[
                                                            'u_systematic_corr_rad_irr_radiance'].values],
                                                        [None,None,dataset_l1a_skyrad["corr_systematic_indep_radiance"].values],
                                                        [None,None,dataset_l1a_skyrad["corr_systematic_corr_rad_irr_radiance"].values],
                                                        param_fixed=[False,True,True])
        return dataset_l1c
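
The interpolation functions used above are resolved from InterpolationFactory via the "measurement_function_interpolate_wav" and "measurement_function_interpolate_time" config values, and PropagateUnc propagates the uncertainties through them (the MCsteps argument suggests a Monte Carlo approach). As a rough standalone illustration of the time-interpolation idea only (an assumption, not the actual factory function), each wavelength of the irradiance scans could be linearly interpolated to the radiance acquisition times:

import numpy as np

def interpolate_in_time(t_target, t_source, values_source):
    # values_source has shape (wavelength, scan); the result has shape
    # (wavelength, len(t_target)). Linear interpolation per wavelength row.
    order = np.argsort(t_source)
    return np.stack([np.interp(t_target, t_source[order], row[order])
                     for row in values_source], axis=0)

# synthetic example: 3 irradiance scans interpolated to 2 radiance times
acqui_irr = np.array([0.0, 60.0, 120.0])
acqui_rad = np.array([30.0, 90.0])
irradiance = np.random.rand(5, 3)  # 5 wavelengths x 3 scans
irr_at_rad_times = interpolate_in_time(acqui_rad, acqui_irr, irradiance)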
Code example #6
class Calibrate:
    def __init__(self, context, MCsteps=1000, parallel_cores=0):
        self._measurement_function_factory = MeasurementFunctionFactory()
        self.prop = PropagateUnc(context,
                                 MCsteps,
                                 parallel_cores=parallel_cores)
        self.templ = DataTemplates(context)
        self.writer = HypernetsWriter(context)
        self.plot = Plotting(context)
        self.context = context

    def calibrate_l1a(self,
                      measurandstring,
                      dataset_l0,
                      dataset_l0_bla,
                      calibration_data,
                      swir=False):
        if measurandstring != "radiance" and measurandstring != "irradiance":
            self.context.logger.error(
                "the measurandstring needs to be either 'radiance' or 'irradiance"
            )
            exit()

        if self.context.get_config_value("plot_l0"):
            self.plot.plot_scans_in_series("digital_number", dataset_l0)

        calibrate_function = self._measurement_function_factory.get_measurement_function(
            self.context.get_config_value("measurement_function_calibrate"))
        input_vars = calibrate_function.get_argument_names()

        dataset_l0 = self.preprocess_l0(dataset_l0, dataset_l0_bla,
                                        calibration_data)
        dataset_l1a = self.templ.l1a_template_from_l0_dataset(
            measurandstring, dataset_l0, swir)
        input_qty = self.prop.find_input_l1a(input_vars, dataset_l0,
                                             calibration_data)
        u_random_input_qty = self.prop.find_u_random_input_l1a(
            input_vars, dataset_l0, calibration_data)
        u_systematic_input_qty_indep, u_systematic_input_qty_corr, \
            corr_systematic_input_qty_indep, corr_systematic_input_qty_corr = \
            self.prop.find_u_systematic_input_l1a(input_vars, dataset_l0,
                                                  calibration_data)
        dataset_l1a = self.prop.process_measurement_function_l1a(
            measurandstring, dataset_l1a, calibrate_function.function,
            input_qty, u_random_input_qty, u_systematic_input_qty_indep,
            u_systematic_input_qty_corr, corr_systematic_input_qty_indep,
            corr_systematic_input_qty_corr)

        if self.context.get_config_value("write_l1a"):
            self.writer.write(dataset_l1a, overwrite=True)

        if self.context.get_config_value("plot_l1a"):
            self.plot.plot_scans_in_series(measurandstring, dataset_l1a)

        if self.context.get_config_value("plot_l1a_diff"):
            self.plot.plot_diff_scans(measurandstring, dataset_l1a)

        if self.context.get_config_value("plot_uncertainty"):
            self.plot.plot_relative_uncertainty(measurandstring, dataset_l1a)

        if self.context.get_config_value("plot_correlation"):
            self.plot.plot_correlation(measurandstring, dataset_l1a)

        return dataset_l1a

    def find_nearest_black(self, dataset, acq_time, int_time):
        ids = np.where((abs(dataset['acquisition_time'] - acq_time) == min(
            abs(dataset['acquisition_time'] - acq_time)))
                       & (dataset['integration_time'] == int_time))
        # TODO: check if the integration time always has to be the same

        return np.mean(dataset["digital_number"].values[:, ids], axis=2)[:, 0]

    def preprocess_l0(self, datasetl0, datasetl0_bla, dataset_calib):
        """
        Identifies and removes faulty measurements (e.g. due to cloud cover).

        :param dataset_l0:
        :type dataset_l0:
        :return:
        :rtype:
        """
        wavs = dataset_calib["wavelength"].values
        wavpix = dataset_calib["wavpix"].values

        datasetl0 = datasetl0.isel(wavelength=slice(int(wavpix[0]),
                                                    int(wavpix[-1]) + 1))
        datasetl0_bla = datasetl0_bla.isel(
            wavelength=slice(int(wavpix[0]),
                             int(wavpix[-1]) + 1))
        mask = self.clip_and_mask(datasetl0, datasetl0_bla)

        datasetl0 = datasetl0.assign_coords(wavelength=wavs)
        datasetl0_bla = datasetl0_bla.assign_coords(wavelength=wavs)

        datasetl0["quality_flag"][np.where(mask == 1)] = DatasetUtil.set_flag(
            datasetl0["quality_flag"][np.where(mask == 1)],
            "outliers")  #for i in range(len(mask))]

        DN_rand = DatasetUtil.create_variable(
            [len(datasetl0["wavelength"]),
             len(datasetl0["scan"])],
            dim_names=["wavelength", "scan"],
            dtype=np.uint32,
            fill_value=0)

        datasetl0["u_random_digital_number"] = DN_rand

        rand = np.zeros_like(DN_rand.values)
        series_ids = np.unique(datasetl0['series_id'])
        for i in range(len(series_ids)):
            ids = np.where(datasetl0['series_id'] == series_ids[i])[0]
            ids_masked = np.where((datasetl0['series_id'] == series_ids[i])
                                  & (mask == 0))[0]
            dark_signals = np.zeros_like(
                datasetl0['digital_number'].values[:, ids_masked])
            for ii, id in enumerate(ids_masked):
                dark_signals[:, ii] = self.find_nearest_black(
                    datasetl0_bla, datasetl0['acquisition_time'].values[id],
                    datasetl0['integration_time'].values[id])
            std = np.std((datasetl0['digital_number'].values[:, ids_masked] -
                          dark_signals),
                         axis=1)
            for ii, id in enumerate(ids):
                rand[:, id] = std

        datasetl0["u_random_digital_number"].values = rand

        DN_dark = DatasetUtil.create_variable(
            [len(datasetl0["wavelength"]),
             len(datasetl0["scan"])],
            dim_names=["wavelength", "scan"],
            dtype=np.uint32,
            fill_value=0)

        datasetl0["dark_signal"] = DN_dark

        dark_signals = []
        acqui = datasetl0['acquisition_time'].values
        inttimes = datasetl0['integration_time'].values
        for i in range(len(acqui)):
            dark_signals.append(
                self.find_nearest_black(datasetl0_bla, acqui[i], inttimes[i]))

        datasetl0["dark_signal"].values = np.array(dark_signals).T

        return datasetl0

    def clip_and_mask(self, dataset, dataset_bla, k_unc=3):
        mask = []

        # check if zeros, max, fillvalue:

        # check if integrated signal is outlier
        series_ids = np.unique(dataset['series_id'])
        for i in range(len(series_ids)):
            ids = np.where(dataset['series_id'] == series_ids[i])
            dark_signals = self.find_nearest_black(
                dataset_bla, np.mean(dataset['acquisition_time'].values[ids]),
                np.mean(dataset['integration_time'].values[ids]))
            intsig = np.nanmean((dataset["digital_number"].values[:, ids] -
                                 dark_signals[:, None, None]),
                                axis=0)[0]
            noisestd, noiseavg = self.sigma_clip(
                intsig)  # calculate std and avg for non NaN columns
            maski = np.zeros_like(intsig)  # mask the columns that have NaN
            maski[np.where(np.abs(intsig - noiseavg) >= k_unc * noisestd)] = 1
            mask = np.append(mask, maski)

        # check if 10% of pixels are outiers

        # mask_wvl = np.zeros((len(datasetl0["wavelength"]),len(datasetl0["scan"])))
        # for i in range(len(dataset["wavelength"])):

        return mask

    def sigma_clip(self,
                   values,
                   tolerance=0.01,
                   median=True,
                   sigma_thresh=3.0):
        # Remove NaNs from input values
        values = np.array(values)
        values = values[~np.isnan(values)]
        values_original = np.copy(values)

        # Continue loop until result converges
        diff = 10E10
        while diff > tolerance:
            # Assess current input iteration
            if median:
                average = np.median(values)
            else:
                average = np.mean(values)
            sigma_old = np.std(values)

            # Mask those pixels that lie more than 3 stdev away from mean
            check = np.zeros([len(values)])
            check[np.where(values > (average +
                                     (sigma_thresh * sigma_old)))] = 1
            # check[ np.where( values<(average-(sigma_thresh*sigma_old)) ) ] = 1
            values = values[np.where(check < 1)]

            # Re-measure sigma and test for convergence
            sigma_new = np.std(values)
            diff = abs(sigma_old - sigma_new) / sigma_old

        # Perform final mask
        check = np.zeros([len(values)])
        check[np.where(values > (average + (sigma_thresh * sigma_old)))] = 1
        check[np.where(values < (average - (sigma_thresh * sigma_old)))] = 1
        values = values[np.where(check < 1)]

        # Return results
        return sigma_new, average
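
A usage sketch for the outlier screening above, on synthetic data only: clip_and_mask flags scans whose dark-corrected integrated signal deviates from the sigma-clipped average by more than k_unc (default 3) clipped standard deviations. The snippet below reproduces that idea with the same defaults; it is a standalone illustration, not part of the module.

import numpy as np

rng = np.random.default_rng(0)
intsig = rng.normal(1000.0, 5.0, size=50)   # integrated signal per scan
intsig[[3, 17]] = 2000.0                    # e.g. two cloud-contaminated scans

# iterative sigma clipping, same idea as sigma_clip() above (median, 3 sigma)
vals = intsig[~np.isnan(intsig)]
diff = np.inf
while diff > 0.01:
    average = np.median(vals)
    sigma_old = np.std(vals)
    vals = vals[vals <= average + 3.0 * sigma_old]
    diff = abs(sigma_old - np.std(vals)) / sigma_old

# flag outliers on both sides of the clipped average, as in clip_and_mask()
mask = (np.abs(intsig - average) >= 3.0 * sigma_old).astype(int)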