Example #1
def test_adapters_with_ascat():
    ascat_data_folder = os.path.join(
        os.path.dirname(__file__),
        "..",
        "test-data",
        "sat",
        "ascat",
        "netcdf",
        "55R22",
    )
    ascat_grid_folder = os.path.join(
        os.path.dirname(__file__),
        "..",
        "test-data",
        "sat",
        "ascat",
        "netcdf",
        "grid",
    )
    grid_fname = os.path.join(ascat_grid_folder, "TUW_WARP5_grid_info_2_1.nc")

    ascat_reader = AscatGriddedNcTs(
        ascat_data_folder,
        "TUW_METOP_ASCAT_WARP55R22_{:04d}",
        grid_filename=grid_fname,
    )

    ascat_anom = AnomalyAdapter(ascat_reader, window_size=35, columns=["sm"])
    data = ascat_anom.read_ts(12.891455, 45.923004)
    assert data is not None
    assert np.any(data["sm"].values != 0)
    data = ascat_anom.read(12.891455, 45.923004)
    assert data is not None
    assert np.any(data["sm"].values != 0)

    ascat_self = SelfMaskingAdapter(ascat_reader, ">", 0, "sm")
    data2 = ascat_self.read_ts(12.891455, 45.923004)
    assert data2 is not None
    assert np.all(data2["sm"].values > 0)
    data2 = ascat_self.read(12.891455, 45.923004)
    assert data2 is not None
    assert np.all(data2["sm"].values > 0)

    ascat_mask = MaskingAdapter(ascat_reader, ">", 0, "sm")
    data3 = ascat_mask.read_ts(12.891455, 45.923004)
    assert data3 is not None
    assert np.any(data3["sm"].values)
    data3 = ascat_mask.read(12.891455, 45.923004)
    assert data3 is not None
    assert np.any(data3["sm"].values)

    ascat_clim = AnomalyClimAdapter(ascat_reader, columns=["sm"])
    data4 = ascat_clim.read_ts(12.891455, 45.923004)
    assert data4 is not None
    assert np.any(data4["sm"].values != 0)
    data4 = ascat_clim.read(12.891455, 45.923004)
    assert data4 is not None
    assert np.any(data4["sm"].values != 0)
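For readers without the ASCAT test data, the same pattern can be tried on an in-memory reader. The sketch below is an assumption, not part of the original tests: DummyReader and its synthetic series are invented, but the adapter calls mirror the ones above, including stacking one adapter on another (as in Example #5 further down).

# Minimal sketch (assumption): any object whose read()/read_ts() returns a
# pandas DataFrame with a DatetimeIndex can be wrapped, and adapters stack.
import numpy as np
import pandas as pd
from pytesmo.validation_framework.adapters import (
    AnomalyAdapter, SelfMaskingAdapter)


class DummyReader:
    def read_ts(self, *args):
        index = pd.date_range("2007-01-01", periods=365, freq="D")
        sm = np.sin(np.arange(365) / 58.0) + 0.5  # synthetic soil moisture
        return pd.DataFrame({"sm": sm}, index=index)

    read = read_ts


# Drop non-positive values first, then take 35-day moving-average anomalies.
stacked = AnomalyAdapter(
    SelfMaskingAdapter(DummyReader(), ">", 0, "sm"),
    window_size=35, columns=["sm"])
print(stacked.read_ts().head())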
Example #2
def test_anomaly_adapter():
    ds = TestDataset('', n=20)
    ds_anom = AnomalyAdapter(ds)
    data_anom = ds_anom.read_ts()
    data_anom2 = ds_anom.read()
    nptest.assert_almost_equal(data_anom['x'].values[0], -8.5)
    nptest.assert_almost_equal(data_anom['y'].values[0], -4.25)
    nptest.assert_almost_equal(data_anom2['x'].values[0], -8.5)
    nptest.assert_almost_equal(data_anom2['y'].values[0], -4.25)
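TestDataset is a fixture from pytesmo's own test suite and is not shown on this page. A stand-in reconstructed from the asserted values (daily timestamps, x = 0..n-1, y = x / 2) could look like the following; the real fixture may differ:

# Hypothetical stand-in for the TestDataset fixture, reconstructed from the
# assertions above (x[0] anomaly -8.5, y[0] anomaly -4.25 for n=20).
import numpy as np
import pandas as pd


class TestDataset:
    def __init__(self, filename, n=20):
        self.n = n

    def read_ts(self, *args):
        index = pd.date_range("2007-01-01", periods=self.n, freq="D")
        x = np.arange(self.n, dtype=float)
        return pd.DataFrame({"x": x, "y": x / 2.0}, index=index)

    read = read_ts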
Example #3
def test_timezone_removal():
    tz_reader = TestTimezoneReader()

    reader_anom = AnomalyAdapter(tz_reader, window_size=35, columns=["data"])
    assert reader_anom.read_ts(0) is not None

    reader_self = SelfMaskingAdapter(tz_reader, ">", 0, "data")
    assert reader_self.read_ts(0) is not None

    reader_mask = MaskingAdapter(tz_reader, ">", 0, "data")
    assert reader_mask.read_ts(0) is not None

    reader_clim = AnomalyClimAdapter(tz_reader, columns=["data"])
    assert reader_clim.read_ts(0) is not None
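TestTimezoneReader is likewise not shown here. The point of the test is that every adapter accepts a timezone-aware time series; a plausible stand-in (an assumption, not the actual fixture) is:

# Plausible stand-in (assumption): a reader whose DatetimeIndex carries a
# UTC timezone, which the adapters are expected to strip before processing.
import numpy as np
import pandas as pd


class TestTimezoneReader:
    def read_ts(self, gpi):
        index = pd.date_range("2007-01-01", periods=100, freq="D", tz="UTC")
        return pd.DataFrame({"data": np.arange(100)}, index=index)

    read = read_ts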
Example #4
def test_adapters_with_ascat():
    ascat_data_folder = os.path.join(os.path.dirname(__file__), '..',
                                     'test-data', 'sat', 'ascat', 'netcdf',
                                     '55R22')
    ascat_grid_folder = os.path.join(os.path.dirname(__file__), '..',
                                     'test-data', 'sat', 'ascat', 'netcdf',
                                     'grid')

    ascat_reader = AscatSsmCdr(ascat_data_folder,
                               ascat_grid_folder,
                               grid_filename='TUW_WARP5_grid_info_2_1.nc')

    ascat_anom = AnomalyAdapter(ascat_reader, window_size=35, columns=['sm'])
    data = ascat_anom.read_ts(12.891455, 45.923004)
    assert data is not None
    assert np.any(data['sm'].values != 0)
    data = ascat_anom.read(12.891455, 45.923004)
    assert data is not None
    assert np.any(data['sm'].values != 0)

    ascat_self = SelfMaskingAdapter(ascat_reader, '>', 0, 'sm')
    data2 = ascat_self.read_ts(12.891455, 45.923004)
    assert data2 is not None
    assert np.all(data2['sm'].values > 0)
    data2 = ascat_self.read(12.891455, 45.923004)
    assert data2 is not None
    assert np.all(data2['sm'].values > 0)

    ascat_mask = MaskingAdapter(ascat_reader, '>', 0, 'sm')
    data3 = ascat_mask.read_ts(12.891455, 45.923004)
    assert data3 is not None
    assert np.any(data3['sm'].values)
    data3 = ascat_mask.read(12.891455, 45.923004)
    assert data3 is not None
    assert np.any(data3['sm'].values)

    ascat_clim = AnomalyClimAdapter(ascat_reader, columns=['sm'])
    data4 = ascat_clim.read_ts(12.891455, 45.923004)
    assert data4 is not None
    assert np.any(data4['sm'].values != 0)
    data4 = ascat_clim.read(12.891455, 45.923004)
    assert data4 is not None
    assert np.any(data4['sm'].values != 0)
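Note the different assertions for the two masking adapters, here and in Example #1: SelfMaskingAdapter returns the time series filtered to the rows where the condition holds (so the 'sm' values themselves are > 0), while MaskingAdapter returns the boolean result of the comparison (so the values are only checked for truthiness).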
Example #5
def test_adapters_custom_fct_name():
    def assert_all_read_fcts(reader):
        assert np.all(reader.read() == reader.read_ts())
        assert np.all(reader.read() == reader.alias_read())

    base = TestDataset("", n=20)
    assert_all_read_fcts(base)
    sma = SelfMaskingAdapter(base,
                             '>=',
                             5,
                             column_name='y',
                             read_name='alias_read')
    assert_all_read_fcts(sma)
    smanom = AnomalyAdapter(sma, read_name='alias_read')
    assert_all_read_fcts(smanom)
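As the test shows, when read_name is given the adapter not only calls that method on the wrapped reader but also exposes a method of the same name itself (reader.alias_read() works on both adapters), so assert_all_read_fcts can be applied unchanged to the base dataset and to each adapted reader.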
Example #6
def test_anomaly_adapter_one_column():
    ds = TestDataset('', n=20)
    ds_anom = AnomalyAdapter(ds, columns=['x'])
    data_anom = ds_anom.read_ts()
    nptest.assert_almost_equal(data_anom['x'].values[0], -8.5)
    nptest.assert_almost_equal(data_anom['y'].values[0], 0)
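Compared to Example #2, columns=['x'] restricts the anomaly calculation to that column: 'y' is passed through unchanged, so its first value is the raw 0 rather than the -4.25 anomaly.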
Example #7
def test_anomaly_adapter_one_column():
    ds = TestDataset("", n=20)
    ds_anom = AnomalyAdapter(ds, columns=["x"])
    data_anom = ds_anom.read()
    nptest.assert_almost_equal(data_anom["x"].values[0], -8.5)
    nptest.assert_almost_equal(data_anom["y"].values[0], 0)
Example #8
def create_pytesmo_validation(validation_run):
    ds_list = []
    ref_name = None
    ref_short_name = None
    scaling_ref_name = None

    ds_num = 1
    for dataset_config in validation_run.dataset_configurations.all():
        reader = create_reader(dataset_config.dataset, dataset_config.version)
        reader = setup_filtering(
            reader, list(dataset_config.filters.all()),
            list(dataset_config.parametrisedfilter_set.all()),
            dataset_config.dataset, dataset_config.variable)

        if validation_run.anomalies == ValidationRun.MOVING_AVG_35_D:
            reader = AnomalyAdapter(
                reader,
                window_size=35,
                columns=[dataset_config.variable.pretty_name])
        if validation_run.anomalies == ValidationRun.CLIMATOLOGY:
            # make sure our baseline period is in UTC and without timezone information
            anomalies_baseline = [
                validation_run.anomalies_from.astimezone(tz=pytz.UTC).replace(
                    tzinfo=None),
                validation_run.anomalies_to.astimezone(tz=pytz.UTC).replace(
                    tzinfo=None)
            ]
            reader = AnomalyClimAdapter(
                reader,
                columns=[dataset_config.variable.pretty_name],
                timespan=anomalies_baseline)

        if (validation_run.reference_configuration and
            (dataset_config.id == validation_run.reference_configuration.id)):
            # reference is always named "0-..."
            dataset_name = '{}-{}'.format(0, dataset_config.dataset.short_name)
        else:
            dataset_name = '{}-{}'.format(ds_num,
                                          dataset_config.dataset.short_name)
            ds_num += 1

        ds_list.append((dataset_name, {
            'class': reader,
            'columns': [dataset_config.variable.pretty_name]
        }))

        if (validation_run.reference_configuration and
            (dataset_config.id == validation_run.reference_configuration.id)):
            ref_name = dataset_name
            ref_short_name = validation_run.reference_configuration.dataset.short_name

        if (validation_run.scaling_ref
                and (dataset_config.id == validation_run.scaling_ref.id)):
            scaling_ref_name = dataset_name

    datasets = dict(ds_list)
    ds_num = len(ds_list)

    period = None
    if validation_run.interval_from is not None and validation_run.interval_to is not None:
        # while pytesmo can't deal with timezones, normalise the validation period to utc; can be removed once pytesmo can do timezones
        startdate = validation_run.interval_from.astimezone(UTC).replace(
            tzinfo=None)
        enddate = validation_run.interval_to.astimezone(UTC).replace(
            tzinfo=None)
        period = [startdate, enddate]

    upscale_parms = None
    if validation_run.upscaling_method != "none":
        __logger.debug("Upscaling option is active")
        upscale_parms = {
            "upscaling_method": validation_run.upscaling_method,
            "temporal_stability": validation_run.temporal_stability,
        }
        upscaling_lut = create_upscaling_lut(
            validation_run=validation_run,
            datasets=datasets,
            ref_name=ref_name,
        )
        upscale_parms["upscaling_lut"] = upscaling_lut
        __logger.debug("Lookup table for non-reference datasets " +
                       ", ".join(upscaling_lut.keys()) + " created")
        __logger.debug("{}".format(upscaling_lut))

    datamanager = DataManager(
        datasets,
        ref_name=ref_name,
        period=period,
        read_ts_names='read',
        upscale_parms=upscale_parms,
    )
    ds_names = get_dataset_names(datamanager.reference_name,
                                 datamanager.datasets,
                                 n=ds_num)

    # set value of the metadata template according to what reference dataset is used
    if ref_short_name == 'ISMN':
        metadata_template = METADATA_TEMPLATE['ismn_ref']
    else:
        metadata_template = METADATA_TEMPLATE['other_ref']

    pairwise_metrics = PairwiseIntercomparisonMetrics(
        metadata_template=metadata_template,
        calc_kendall=False,
    )

    metric_calculators = {(ds_num, 2): pairwise_metrics.calc_metrics}

    if (len(ds_names) >= 3) and (validation_run.tcol is True):
        tcol_metrics = TripleCollocationMetrics(
            ref_name,
            metadata_template=metadata_template,
        )
        metric_calculators.update({(ds_num, 3): tcol_metrics.calc_metrics})

    if validation_run.scaling_method == validation_run.NO_SCALING:
        scaling_method = None
    else:
        scaling_method = validation_run.scaling_method

    __logger.debug(f"Scaling method: {scaling_method}")
    __logger.debug(f"Scaling dataset: {scaling_ref_name}")

    val = Validation(datasets=datamanager,
                     temporal_matcher=make_combined_temporal_matcher(
                         pd.Timedelta(12, "H")),
                     spatial_ref=ref_name,
                     scaling=scaling_method,
                     scaling_ref=scaling_ref_name,
                     metrics_calculators=metric_calculators,
                     period=period)

    return val
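The astimezone(...).replace(tzinfo=None) idiom used twice above converts an aware datetime to naive UTC, which pytesmo requires. In isolation (the input datetime is an arbitrary example):

# Stand-alone illustration of the UTC-normalisation idiom used above.
from datetime import datetime

import pytz

berlin = pytz.timezone("Europe/Berlin")
aware = berlin.localize(datetime(2010, 7, 1, 14, 30))
naive_utc = aware.astimezone(pytz.UTC).replace(tzinfo=None)
print(naive_utc)  # prints 2010-07-01 12:30:00 (CEST is UTC+2 in July)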
Example #9
def create_pytesmo_validation(validation_run):
    ds_list = []
    ref_name = None
    scaling_ref_name = None

    ds_num = 1
    for dataset_config in validation_run.dataset_configurations.all():
        reader = create_reader(dataset_config.dataset, dataset_config.version)
        reader = setup_filtering(
            reader, list(dataset_config.filters.all()),
            list(dataset_config.parametrisedfilter_set.all()),
            dataset_config.dataset, dataset_config.variable)

        if validation_run.anomalies == ValidationRun.MOVING_AVG_35_D:
            reader = AnomalyAdapter(
                reader,
                window_size=35,
                columns=[dataset_config.variable.pretty_name])
        if validation_run.anomalies == ValidationRun.CLIMATOLOGY:
            # make sure our baseline period is in UTC and without timezone information
            anomalies_baseline = [
                validation_run.anomalies_from.astimezone(tz=pytz.UTC).replace(
                    tzinfo=None),
                validation_run.anomalies_to.astimezone(tz=pytz.UTC).replace(
                    tzinfo=None)
            ]
            reader = AnomalyClimAdapter(
                reader,
                columns=[dataset_config.variable.pretty_name],
                timespan=anomalies_baseline)

        if ((validation_run.reference_configuration) and
            (dataset_config.id == validation_run.reference_configuration.id)):
            # reference is always named "0-..."
            dataset_name = '{}-{}'.format(0, dataset_config.dataset.short_name)
        else:
            dataset_name = '{}-{}'.format(ds_num,
                                          dataset_config.dataset.short_name)
            ds_num += 1

        ds_list.append((dataset_name, {
            'class': reader,
            'columns': [dataset_config.variable.pretty_name]
        }))

        if ((validation_run.reference_configuration) and
            (dataset_config.id == validation_run.reference_configuration.id)):
            ref_name = dataset_name
        if ((validation_run.scaling_ref)
                and (dataset_config.id == validation_run.scaling_ref.id)):
            scaling_ref_name = dataset_name

    datasets = dict(ds_list)
    ds_num = len(ds_list)

    period = None
    if validation_run.interval_from is not None and validation_run.interval_to is not None:
        # while pytesmo can't deal with timezones, normalise the validation period to utc; can be removed once pytesmo can do timezones
        startdate = validation_run.interval_from.astimezone(UTC).replace(
            tzinfo=None)
        enddate = validation_run.interval_to.astimezone(UTC).replace(
            tzinfo=None)
        period = [startdate, enddate]

    datamanager = DataManager(datasets,
                              ref_name=ref_name,
                              period=period,
                              read_ts_names='read')
    ds_names = get_dataset_names(datamanager.reference_name,
                                 datamanager.datasets,
                                 n=ds_num)

    if (len(ds_names) >= 3) and (validation_run.tcol is True):
        # if there are 3 or more datasets, do TC and exclude metrics for the reference
        metrics = TCMetrics(
            dataset_names=ds_names,
            tc_metrics_for_ref=False,
            other_names=['k{}'.format(i + 1) for i in range(ds_num - 1)])
    else:
        metrics = IntercomparisonMetrics(
            dataset_names=ds_names,
            other_names=['k{}'.format(i + 1) for i in range(ds_num - 1)])

    if validation_run.scaling_method == validation_run.NO_SCALING:
        scaling_method = None
    else:
        scaling_method = validation_run.scaling_method

    __logger.debug(f"Scaling method: {scaling_method}")
    __logger.debug(f"Scaling dataset: {scaling_ref_name}")

    val = Validation(datasets=datamanager,
                     spatial_ref=ref_name,
                     temporal_window=0.5,
                     scaling=scaling_method,
                     scaling_ref=scaling_ref_name,
                     metrics_calculators={
                         (ds_num, ds_num): metrics.calc_metrics
                     },
                     period=period)

    return val
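Both versions of create_pytesmo_validation build dataset names with the same convention: the reference is always prefixed '0-' and the other datasets are numbered in configuration order. Stripped of the Django models (the short names below are invented for illustration):

# Naming convention in isolation; the dataset short names are invented.
configs = ["C3S", "ASCAT", "ISMN"]  # suppose ISMN is the reference
ref = "ISMN"

ds_num = 1
names = []
for short_name in configs:
    if short_name == ref:
        names.append("0-{}".format(short_name))  # reference is always "0-..."
    else:
        names.append("{}-{}".format(ds_num, short_name))
        ds_num += 1

print(names)  # ['1-C3S', '2-ASCAT', '0-ISMN']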