Example 1
def test_force_dataset_regeneration(run_generate, summary_store: SummaryStore,
                                    module_index: Index):
    """
    We should be able to force-replace dataset extents with the "--recreate-dataset-extents" option
    """
    run_generate("ls8_nbar_albers")
    [example_dataset] = summary_store.index.datasets.search_eager(
        product="ls8_nbar_albers", limit=1
    )

    original_footprint = summary_store.get_dataset_footprint_region(
        example_dataset.id)
    assert original_footprint is not None

    # Now let's break the footprint!
    alchemy_engine(module_index).execute(
        f"update {CUBEDASH_SCHEMA}.dataset_spatial "
        "    set footprint="
        "        ST_SetSRID("
        "            ST_GeomFromText("
        "                'POLYGON((-71.1776585052917 42.3902909739571,-71.1776820268866 42.3903701743239,"
        "                          -71.1776063012595 42.3903825660754,-71.1775826583081 42.3903033653531,"
        "                          -71.1776585052917 42.3902909739571))'"
        "            ),"
        "            4326"
        "        )"
        "    where id=%s",
        example_dataset.id,
    )
    # Make sure it worked
    footprint = summary_store.get_dataset_footprint_region(example_dataset.id)
    assert footprint != original_footprint, "Test data didn't successfully override"

    # Now force-recreate dataset extents
    run_generate("-v", "ls8_nbar_albers", "--recreate-dataset-extents")

    # ... and they should be correct again
    footprint = summary_store.get_dataset_footprint_region(example_dataset.id)
    assert footprint == original_footprint, "Dataset extent was not regenerated"
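
The run_generate fixture used above is not shown in this excerpt. As a rough sketch (assuming cubedash-gen is exposed as the click command cubedash.generate:cli, as in the datacube-explorer entry points, and that the test database is already configured for the process), such a fixture could invoke the CLI in-process with click's CliRunner:

# Hypothetical fixture sketch: wiring of the test database/config is omitted here.
import pytest
from click.testing import CliRunner

from cubedash import generate


@pytest.fixture
def run_generate(module_index):
    # Depend on module_index so the test index exists before generation runs.
    def _run(*args):
        runner = CliRunner()
        # Run cubedash-gen in-process; fail the test on a non-zero exit code.
        result = runner.invoke(generate.cli, list(args), catch_exceptions=False)
        assert result.exit_code == 0, result.output
        return result

    return _run

Running the command in-process keeps the test fast and surfaces the CLI output directly when an assertion fails.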
Example 2
def test_computed_regions_match_those_summarised(summary_store: SummaryStore):
    """
    The region code for all datasets should be computed identically when
    done in both SQL and Python.
    """
    summary_store.refresh_all_products()

    # Loop through all datasets in the test data to check that the DB and Python
    # functions give identical region codes.
    for product in summary_store.index.products.get_all():
        region_info = GridRegionInfo.for_product(product, None)
        for dataset in summary_store.index.datasets.search(
                product=product.name):
            (
                footprint,
                alchemy_calculated_region_code,
            ) = summary_store.get_dataset_footprint_region(dataset.id)

            python_calculated_region_code = region_info.dataset_region_code(
                dataset)
            assert python_calculated_region_code == alchemy_calculated_region_code, (
                "Python and DB calculated region codes didn't produce the same value. "
                f"{python_calculated_region_code!r} != {alchemy_calculated_region_code!r} "
                f"for product {dataset.type.name!r}, dataset {dataset!r}")