예제 #1
0
    def test_crosscheck_pq8_fc8(self):
        """Verify metadata consistency between the Landsat 8 PQ and FC scenes."""

        pq_path = os.path.join(self.INPUT_DIR, self.PQ8_DIR, self.PQ8_SCENE)
        fc_path = os.path.join(self.INPUT_DIR, self.FC8_DIR, self.FC8_SCENE)

        self.cross_check(LandsatDataset(pq_path),
                         LandsatDataset(fc_path),
                         self.CROSSCHECK_KEYS_TWO)
예제 #2
0
    def test_crosscheck_ortho_pq(self):
        """Verify metadata consistency between the ortho and pq scenes."""

        ortho_path = os.path.join(
            self.INPUT_DIR, self.ORTHO_DIR, self.ORTHO_SCENE)
        pq_path = os.path.join(self.INPUT_DIR, self.PQ_DIR, self.PQ_SCENE)

        self.cross_check(LandsatDataset(ortho_path),
                         LandsatDataset(pq_path),
                         self.CROSSCHECK_KEYS_TWO)
예제 #3
0
    def test_crosscheck_ortho8_fc8(self):
        """Verify metadata consistency between the Landsat 8 ortho and fc scenes."""

        ortho_path = os.path.join(
            self.INPUT_DIR, self.ORTHO8_DIR, self.ORTHO8_SCENE)
        fc_path = os.path.join(self.INPUT_DIR, self.FC8_DIR, self.FC8_SCENE)

        self.cross_check(LandsatDataset(ortho_path),
                         LandsatDataset(fc_path),
                         self.CROSSCHECK_KEYS_TWO)
예제 #4
0
    def test_crosscheck_ortho8_nbar8(self):
        """Verify metadata consistency between the Landsat 8 ortho and nbar scenes."""

        ortho_path = os.path.join(
            self.INPUT_DIR, self.ORTHO8_DIR, self.ORTHO8_SCENE)
        nbar_path = os.path.join(
            self.INPUT_DIR, self.NBAR8_DIR, self.NBAR8_SCENE)

        self.cross_check(LandsatDataset(ortho_path),
                         LandsatDataset(nbar_path),
                         self.CROSSCHECK_KEYS_TWO)
예제 #5
0
    def test_fc8_scene(self):
        """Check metadata extraction for a Landsat 8 Fractional Cover scene."""

        scene_path = os.path.join(self.INPUT_DIR, self.FC8_DIR, self.FC8_SCENE)
        mdd = LandsatDataset(scene_path).metadata_dict

        # The scene supplies XML metadata text; MTL text is expected absent.
        self.dump_metadata('fc8_metadata.txt', mdd, self.SMALL_METADATA_KEYS)
        self.dump_string('fc8_xml.xml', mdd['xml_text'])
        self.assertIsNone(mdd['mtl_text'])

        self.check_file('fc8_metadata.txt')
        self.check_file('fc8_xml.xml')
예제 #6
0
    def test_nbar8_scene(self):
        """Check metadata extraction for a Landsat 8 NBAR scene."""

        scene_path = os.path.join(
            self.INPUT_DIR, self.NBAR8_DIR, self.NBAR8_SCENE)
        mdd = LandsatDataset(scene_path).metadata_dict

        # The scene supplies XML metadata text; MTL text is expected absent.
        self.dump_metadata('nbar8_metadata.txt', mdd, self.SMALL_METADATA_KEYS)
        self.dump_string('nbar8_xml.xml', mdd['xml_text'])
        self.assertIsNone(mdd['mtl_text'])

        self.check_file('nbar8_metadata.txt')
        self.check_file('nbar8_xml.xml')
예제 #7
0
    def test_pq8_scene(self):
        """Check metadata extraction for a Landsat 8 Pixel Quality scene."""

        scene_path = os.path.join(self.INPUT_DIR, self.PQ8_DIR, self.PQ8_SCENE)
        mdd = LandsatDataset(scene_path).metadata_dict

        # The scene supplies XML metadata text; MTL text is expected absent.
        self.dump_metadata('pq8_metadata.txt', mdd, self.SMALL_METADATA_KEYS)
        self.dump_string('pq8_xml.xml', mdd['xml_text'])
        self.assertIsNone(mdd['mtl_text'])

        self.check_file('pq8_metadata.txt')
        self.check_file('pq8_xml.xml')
    def check_buildvrt(self, idataset):
        """Test LandsatBandstack.buildvrt() by diffing its output VRT
        against a benchmark file on disk.

        :param idataset: index into DATASETS_TO_INGEST selecting the scene.
        Fails the test if the generated VRT differs from the benchmark
        (file-name lines excluded) or if the diff command itself errors.
        """

        assert idataset in range(len(DATASETS_TO_INGEST))

        # print() form runs under both Python 2 and Python 3.
        print('Testing Dataset %s' % DATASETS_TO_INGEST[idataset])
        dset = LandsatDataset(DATASETS_TO_INGEST[idataset])
        # Create a DatasetRecord instance so that we can access its
        # list_tile_types() method. In doing this we need to create a
        # collection object and entries on the acquisition and dataset
        # tables of the database.
        self.collection.begin_transaction()
        acquisition = \
            self.collection.create_acquisition_record(dset)
        dset_record = acquisition.create_dataset_record(dset)
        self.collection.commit_transaction()
        # dummy_ prefix marks the deliberately unused return value,
        # matching the convention used elsewhere in this suite.
        dummy_tile_type_list = dset_record.list_tile_types()
        # Assume dataset has tile_type = 1 only:
        tile_type_id = 1
        dataset_bands_dict = dset_record.get_tile_bands(tile_type_id)
        ls_bandstack = dset.stack_bands(dataset_bands_dict)
        temp_dir = self.collection.get_temp_tile_directory()
        ls_bandstack.buildvrt(temp_dir)
        # Get benchmark vrt for comparison; '-I [Ff]ilename' ignores lines
        # containing file names, which legitimately differ between runs.
        vrt_benchmark = os.path.join(self.BENCHMARK_DIR,
                                     os.path.basename(ls_bandstack.vrt_name))
        diff_cmd = ["diff",
                    "-I",
                    "[Ff]ilename",
                    str(vrt_benchmark),
                    str(ls_bandstack.vrt_name)
                    ]
        result = execute(diff_cmd, shell=False)
        if result['stdout'] != '':
            self.fail("Differences between vrt files:\n" + result['stdout'])
        if result['stderr'] != '':
            self.fail("Error in system diff command:\n" + result['stderr'])
예제 #9
0
    def test_ortho_scene(self):
        """Check metadata extraction for an ORTHO (level 1) scene."""

        scene_path = os.path.join(
            self.INPUT_DIR, self.ORTHO_DIR, self.ORTHO_SCENE)
        mdd = LandsatDataset(scene_path).metadata_dict

        # ORTHO scenes supply both XML and MTL metadata text.
        self.dump_metadata('ortho_metadata.txt', mdd, self.SMALL_METADATA_KEYS)
        self.dump_string('ortho_xml.xml', mdd['xml_text'])
        self.dump_string('ortho_mtl.txt', mdd['mtl_text'])

        for dump_name in ('ortho_metadata.txt', 'ortho_xml.xml',
                          'ortho_mtl.txt'):
            self.check_file(dump_name)
예제 #10
0
    def check_buildvrt(self, idataset):
        """Test LandsatBandstack.buildvrt() by diffing its output VRT
        against a benchmark file on disk.

        :param idataset: index into DATASETS_TO_INGEST selecting the scene.
        Fails the test if the generated VRT differs from the benchmark
        (file-name lines excluded) or if the diff command itself errors.
        """

        assert idataset in range(len(DATASETS_TO_INGEST))

        # print() form runs under both Python 2 and Python 3.
        print('Testing Dataset %s' % DATASETS_TO_INGEST[idataset])
        dset = LandsatDataset(DATASETS_TO_INGEST[idataset])
        # Create a DatasetRecord instance so that we can access its
        # list_tile_types() method. In doing this we need to create a
        # collection object and entries on the acquisition and dataset
        # tables of the database.
        self.collection.begin_transaction()
        acquisition = \
            self.collection.create_acquisition_record(dset)
        dset_record = acquisition.create_dataset_record(dset)
        self.collection.commit_transaction()
        # dummy_ prefix marks the deliberately unused return value,
        # matching the convention used elsewhere in this suite.
        dummy_tile_type_list = dset_record.list_tile_types()
        # Assume dataset has tile_type = 1 only:
        tile_type_id = 1
        dataset_bands_dict = dset_record.get_tile_bands(tile_type_id)
        ls_bandstack = dset.stack_bands(dataset_bands_dict)
        temp_dir = self.collection.get_temp_tile_directory()
        ls_bandstack.buildvrt(temp_dir)
        # Get benchmark vrt for comparison; '-I [Ff]ilename' ignores lines
        # containing file names, which legitimately differ between runs.
        vrt_benchmark = os.path.join(self.BENCHMARK_DIR,
                                     os.path.basename(ls_bandstack.vrt_name))
        diff_cmd = [
            "diff", "-I", "[Ff]ilename",
            str(vrt_benchmark),
            str(ls_bandstack.vrt_name)
        ]
        result = execute(diff_cmd, shell=False)
        if result['stdout'] != '':
            self.fail("Differences between vrt files:\n" + result['stdout'])
        if result['stderr'] != '':
            self.fail("Error in system diff command:\n" + result['stderr'])
예제 #11
0
    def test_build_metadata_dict(self):
        """Exercise AbstractDataset.build_metadata_dict via a LandsatDataset.

        AbstractDataset cannot be instantiated directly, so the inherited
        method is tested through this concrete subclass.
        """

        ortho_ds = LandsatDataset(
            os.path.join(self.INPUT_DIR, self.ORTHO_DIR, self.ORTHO_SCENE))
        mdd = ortho_ds.metadata_dict

        # The dict must expose exactly the expected metadata keys.
        self.assertEqual(set(self.METADATA_KEYS), set(mdd.keys()))

        # Every entry must agree with the matching get_<key>() accessor.
        for key in mdd.keys():
            accessor_value = getattr(ortho_ds, 'get_' + key)()
            self.assertEqual(mdd[key], accessor_value)
    def test_reproject(self):
        """Compare Landsat tiling (reprojection) output to benchmark tiles.

        For each acquisition and processing level, a scene VRT is built and
        reprojected onto the tile coverage; each resulting tile with data is
        compared array-by-array against the corresponding benchmark tile
        produced by the old ingester.
        """
        # pylint: disable=too-many-locals
        # For each processing_level and dataset, keep a record of the
        # tile footprints present in the benchmark set.
        bench_footprints = {}
        for iacquisition in range(len(TestIngest.DATASETS_TO_INGEST['PQA'])):
            for processing_level in ['PQA', 'NBAR', 'ORTHO']:
                # Skip all but PQA and ORTHO for the first dataset.
                # TODO: program this in as a parameter of the suite
                if iacquisition > 0:
                    continue
                if processing_level in ['NBAR']:
                    continue
                dataset_path =  \
                    TestIngest.DATASETS_TO_INGEST[processing_level]\
                    [iacquisition]
                LOGGER.info('Testing Dataset %s', dataset_path)
                dset = LandsatDataset(dataset_path)
                #return
                # Create a DatasetRecord instance so that we can access its
                # list_tile_types() method. In doing this we need to create a
                # collection object and entries on the acquisition and dataset
                # tables of the database.
                self.collection.begin_transaction()
                acquisition = \
                    self.collection.create_acquisition_record(dset)
                dset_record = acquisition.create_dataset_record(dset)
                self.collection.commit_transaction()
                # List the benchmark footprints associated with this dataset
                ftprints = \
                    self.get_benchmark_footprints(dset_record.mdd,
                                                  TestIngest.BENCHMARK_DIR)
                bench_footprints.setdefault(processing_level, {})
                bench_footprints[processing_level].setdefault(iacquisition, {})
                bench_footprints[processing_level][iacquisition] = ftprints
                LOGGER.info('bench_footprints=%s', str(ftprints))
                # Get tile types (return value deliberately unused)
                dummy_tile_type_list = dset_record.list_tile_types()
                # Assume dataset has tile_type = 1 only:
                tile_type_id = 1
                dataset_bands_dict = dset_record.get_tile_bands(tile_type_id)
                ls_bandstack = dset.stack_bands(dataset_bands_dict)
                temp_dir = os.path.join(self.ingester.datacube.tile_root,
                                    'ingest_temp')
                # Form scene vrt
                ls_bandstack.buildvrt(temp_dir)
                # Reproject scene data onto selected tile coverage
                tile_footprint_list = dset_record.get_coverage(tile_type_id)
                LOGGER.info('coverage=%s', str(tile_footprint_list))
                for tile_footprint in tile_footprint_list:
                    # Only two known footprints are checked for now.
                    # TODO: program this in as a parameter of the suite
                    if tile_footprint not in [(117, -35), (115, -34)]:
                        continue
                    tile_contents = \
                        self.collection.create_tile_contents(tile_type_id,
                                                             tile_footprint,
                                                             ls_bandstack)
                    LOGGER.info('reprojecting for %s tile %s',
                                processing_level, str(tile_footprint))
                    tile_contents.reproject()
                    # Because the date-time of PQA datasets comes directly
                    # from the PQA dataset, rather than NBAR, match on the
                    # ymd string of the datetime, rather than the
                    # microseconds version in the NBAR data.
                    tile_benchmark = \
                        self.get_benchmark_tile(dset_record.mdd,
                                                TestIngest.BENCHMARK_DIR,
                                                tile_footprint)
                    LOGGER.info('tile_benchmark is %s', tile_benchmark)
                    if tile_contents.has_data():
                        LOGGER.info('Tile %s has data', str(tile_footprint))
                        # The tile might have data but, if PQA does not, then
                        # the benchmark tile will not exist
                        if tile_footprint not in bench_footprints \
                                [processing_level][iacquisition]:
                            assert tile_footprint not in \
                            bench_footprints['PQA'][iacquisition], \
                                "Old ingester found PQA tile and should have "\
                                "found cooresponding tile for %s"\
                                %processing_level

                            LOGGER.info('%s tile %s has data in new ingester',
                                        processing_level, str(tile_footprint))
                            continue
                        # Tile exists in old ingester and new ingester
                        LOGGER.info('Calling load and check ...')
                        ([data1, data2], dummy_nlayers) = \
                            TestLandsatTiler.load_and_check(
                            tile_benchmark,
                            tile_contents.temp_tile_output_path,
                            tile_contents.band_stack.band_dict,
                            tile_contents.band_stack.band_dict)
                        LOGGER.info('Checking arrays ...')
                        assert (data1 == data2).all(), \
                            "Reprojected tile differs " \
                            "from %s" %tile_benchmark
                        LOGGER.info('...OK')
                    else:
                        # No data: the benchmark must not contain this tile.
                        LOGGER.info('No data in %s', str(tile_footprint))
                        assert tile_footprint not in \
                            bench_footprints[processing_level][iacquisition], \
                            "%s tile %s does not have data " \
                            %(processing_level, str(tile_footprint))
                    LOGGER.info('-' * 80)
예제 #13
0
    def test_reproject(self):
        """Compare Landsat tiling (reprojection) output to expected tiles.

        For each acquisition and processing level, a scene VRT is built and
        reprojected onto the tile coverage; each resulting tile with data is
        compared array-by-array against the 'expected' directory.  When
        POPULATE_EXPECTED is set the comparison is skipped so the expected
        results can be (re)generated.
        """
        # pylint: disable=too-many-locals
        # Build tiles for every acquisition and processing level, then
        # compare each against its counterpart in the 'expected' directory.
        for iacquisition in range(len(TestIngest.DATASETS_TO_INGEST['PQA'])):
            for processing_level in ['PQA', 'NBAR', 'ORTHO']:
                # Skip all but PQA and ORTHO for the first dataset.
                # TODO: program this in as a parameter of the suite
                #if iacquisition > 0:
                #    continue
                #if processing_level in ['NBAR']:
                #    continue
                dataset_path =  \
                    TestIngest.DATASETS_TO_INGEST[processing_level]\
                    [iacquisition]
                LOGGER.info('Testing Dataset %s', dataset_path)
                dset = LandsatDataset(dataset_path)
                #return
                # Create a DatasetRecord instance so that we can access its
                # list_tile_types() method. In doing this we need to create a
                # collection object and entries on the acquisition and dataset
                # tables of the database.
                self.collection.begin_transaction()
                acquisition = \
                    self.collection.create_acquisition_record(dset)
                dset_record = acquisition.create_dataset_record(dset)

                # Get tile types (return value deliberately unused)
                dummy_tile_type_list = dset_record.list_tile_types()
                # Assume dataset has tile_type = 1 only:
                tile_type_id = 1
                dataset_bands_dict = dset_record.get_tile_bands(tile_type_id)
                ls_bandstack = dset.stack_bands(dataset_bands_dict)
                # Form scene vrt
                ls_bandstack.buildvrt(self.collection.
                                      get_temp_tile_directory())
                # Reproject scene data onto selected tile coverage
                tile_footprint_list = dset_record.get_coverage(tile_type_id)
                LOGGER.info('coverage=%s', str(tile_footprint_list))
                for tile_footprint in tile_footprint_list:
                    # Optionally restrict to known footprints.
                    # TODO: program this in as a parameter of the suite
                    #if tile_footprint not in [(117, -35), (115, -34)]:
                    #    continue
                    tile_contents = \
                        self.collection.create_tile_contents(tile_type_id,
                                                             tile_footprint,
                                                             ls_bandstack)
                    LOGGER.info('reprojecting for %s tile %s...',
                                processing_level, str(tile_footprint))
                    tile_contents.reproject()
                    LOGGER.info('...done')

                    # In populate mode, just generate output; no comparison.
                    if self.POPULATE_EXPECTED:
                        continue
                    # Do comparison with expected results
                    tile_benchmark = self.swap_dir_in_path(tile_contents.
                                                           tile_output_path,
                                                           'output',
                                                           'expected')
                    if tile_contents.has_data():
                        LOGGER.info('Tile %s has data', str(tile_footprint))
                        LOGGER.info("Comparing test output with benchmark:\n"\
                                        "benchmark: %s\ntest output: %s",
                                    tile_benchmark,
                                    tile_contents.temp_tile_output_path)
                        # Do comparison with expected directory
                        LOGGER.info('Calling load and check ...')
                        ([data1, data2], dummy_nlayers) = \
                            TestLandsatTiler.load_and_check(
                            tile_benchmark,
                            tile_contents.temp_tile_output_path,
                            tile_contents.band_stack.band_dict,
                            tile_contents.band_stack.band_dict)
                        LOGGER.info('Checking arrays ...')
                        if not (data1 == data2).all():
                            self.fail("Reprojected tile differs " \
                                          "from %s" %tile_benchmark)
                        LOGGER.info('...OK')
                    else:
                        LOGGER.info('No data in %s', str(tile_footprint))
                    LOGGER.info('-' * 80)
                self.collection.commit_transaction()
예제 #14
0
    def test_reproject(self):
        """Compare Landsat tiling (reprojection) output to expected tiles.

        For each acquisition and processing level, a scene VRT is built and
        reprojected onto the tile coverage; each resulting tile with data is
        compared array-by-array against the 'expected' directory.  When
        POPULATE_EXPECTED is set the comparison is skipped so the expected
        results can be (re)generated.
        """
        # pylint: disable=too-many-locals
        # Build tiles for every acquisition and processing level, then
        # compare each against its counterpart in the 'expected' directory.
        for iacquisition in range(len(TestIngest.DATASETS_TO_INGEST['PQA'])):
            for processing_level in ['PQA', 'NBAR', 'ORTHO']:
                # Skip all but PQA and ORTHO for the first dataset.
                # TODO: program this in as a parameter of the suite
                #if iacquisition > 0:
                #    continue
                #if processing_level in ['NBAR']:
                #    continue
                dataset_path =  \
                    TestIngest.DATASETS_TO_INGEST[processing_level]\
                    [iacquisition]
                LOGGER.info('Testing Dataset %s', dataset_path)
                dset = LandsatDataset(dataset_path)
                #return
                # Create a DatasetRecord instance so that we can access its
                # list_tile_types() method. In doing this we need to create a
                # collection object and entries on the acquisition and dataset
                # tables of the database.
                self.collection.begin_transaction()
                acquisition = \
                    self.collection.create_acquisition_record(dset)
                dset_record = acquisition.create_dataset_record(dset)

                # Get tile types (return value deliberately unused)
                dummy_tile_type_list = dset_record.list_tile_types()
                # Assume dataset has tile_type = 1 only:
                tile_type_id = 1
                dataset_bands_dict = dset_record.get_tile_bands(tile_type_id)
                ls_bandstack = dset.stack_bands(dataset_bands_dict)
                # Form scene vrt
                ls_bandstack.buildvrt(
                    self.collection.get_temp_tile_directory())
                # Reproject scene data onto selected tile coverage
                tile_footprint_list = dset_record.get_coverage(tile_type_id)
                LOGGER.info('coverage=%s', str(tile_footprint_list))
                for tile_footprint in tile_footprint_list:
                    # Optionally restrict to known footprints.
                    # TODO: program this in as a parameter of the suite
                    #if tile_footprint not in [(117, -35), (115, -34)]:
                    #    continue
                    tile_contents = \
                        self.collection.create_tile_contents(tile_type_id,
                                                             tile_footprint,
                                                             ls_bandstack)
                    LOGGER.info('reprojecting for %s tile %s...',
                                processing_level, str(tile_footprint))
                    tile_contents.reproject()
                    LOGGER.info('...done')

                    # In populate mode, just generate output; no comparison.
                    if self.POPULATE_EXPECTED:
                        continue
                    # Do comparison with expected results
                    tile_benchmark = self.swap_dir_in_path(
                        tile_contents.tile_output_path, 'output', 'expected')
                    if tile_contents.has_data():
                        LOGGER.info('Tile %s has data', str(tile_footprint))
                        LOGGER.info("Comparing test output with benchmark:\n"\
                                        "benchmark: %s\ntest output: %s",
                                    tile_benchmark,
                                    tile_contents.temp_tile_output_path)
                        # Do comparison with expected directory
                        LOGGER.info('Calling load and check ...')
                        ([data1, data2], dummy_nlayers) = \
                            TestLandsatTiler.load_and_check(
                            tile_benchmark,
                            tile_contents.temp_tile_output_path,
                            tile_contents.band_stack.band_dict,
                            tile_contents.band_stack.band_dict)
                        LOGGER.info('Checking arrays ...')
                        if not (data1 == data2).all():
                            self.fail("Reprojected tile differs " \
                                          "from %s" %tile_benchmark)
                        LOGGER.info('...OK')
                    else:
                        LOGGER.info('No data in %s', str(tile_footprint))
                    LOGGER.info('-' * 80)
                self.collection.commit_transaction()
예제 #15
0
    def test_reproject(self):
        """Compare Landsat tiling (reprojection) output to benchmark tiles.

        For each acquisition and processing level, a scene VRT is built and
        reprojected onto the tile coverage; each resulting tile with data is
        compared array-by-array against the corresponding benchmark tile
        produced by the old ingester.
        """
        # pylint: disable=too-many-locals
        # For each processing_level and dataset, keep a record of the
        # tile footprints present in the benchmark set.
        bench_footprints = {}
        for iacquisition in range(len(TestIngest.DATASETS_TO_INGEST['PQA'])):
            for processing_level in ['PQA', 'NBAR', 'ORTHO']:
                # Skip all but PQA and ORTHO for the first dataset.
                # TODO: program this in as a parameter of the suite
                if iacquisition > 0:
                    continue
                if processing_level in ['NBAR']:
                    continue
                dataset_path =  \
                    TestIngest.DATASETS_TO_INGEST[processing_level]\
                    [iacquisition]
                LOGGER.info('Testing Dataset %s', dataset_path)
                dset = LandsatDataset(dataset_path)
                #return
                # Create a DatasetRecord instance so that we can access its
                # list_tile_types() method. In doing this we need to create a
                # collection object and entries on the acquisition and dataset
                # tables of the database.
                self.collection.begin_transaction()
                acquisition = \
                    self.collection.create_acquisition_record(dset)
                dset_record = acquisition.create_dataset_record(dset)
                self.collection.commit_transaction()
                # List the benchmark footprints associated with this dataset
                ftprints = \
                    self.get_benchmark_footprints(dset_record.mdd,
                                                  TestIngest.BENCHMARK_DIR)
                bench_footprints.setdefault(processing_level, {})
                bench_footprints[processing_level].setdefault(iacquisition, {})
                bench_footprints[processing_level][iacquisition] = ftprints
                LOGGER.info('bench_footprints=%s', str(ftprints))
                # Get tile types (return value deliberately unused)
                dummy_tile_type_list = dset_record.list_tile_types()
                # Assume dataset has tile_type = 1 only:
                tile_type_id = 1
                dataset_bands_dict = dset_record.get_tile_bands(tile_type_id)
                ls_bandstack = dset.stack_bands(dataset_bands_dict)
                temp_dir = os.path.join(self.ingester.datacube.tile_root,
                                        'ingest_temp')
                # Form scene vrt
                ls_bandstack.buildvrt(temp_dir)
                # Reproject scene data onto selected tile coverage
                tile_footprint_list = dset_record.get_coverage(tile_type_id)
                LOGGER.info('coverage=%s', str(tile_footprint_list))
                for tile_footprint in tile_footprint_list:
                    # Only two known footprints are checked for now.
                    # TODO: program this in as a parameter of the suite
                    if tile_footprint not in [(117, -35), (115, -34)]:
                        continue
                    tile_contents = \
                        self.collection.create_tile_contents(tile_type_id,
                                                             tile_footprint,
                                                             ls_bandstack)
                    LOGGER.info('reprojecting for %s tile %s',
                                processing_level, str(tile_footprint))
                    tile_contents.reproject()
                    # Because the date-time of PQA datasets comes directly
                    # from the PQA dataset, rather than NBAR, match on the
                    # ymd string of the datetime, rather than the
                    # microseconds version in the NBAR data.
                    tile_benchmark = \
                        self.get_benchmark_tile(dset_record.mdd,
                                                TestIngest.BENCHMARK_DIR,
                                                tile_footprint)
                    LOGGER.info('tile_benchmark is %s', tile_benchmark)
                    if tile_contents.has_data():
                        LOGGER.info('Tile %s has data', str(tile_footprint))
                        # The tile might have data but, if PQA does not, then
                        # the benchmark tile will not exist
                        if tile_footprint not in bench_footprints \
                                [processing_level][iacquisition]:
                            assert tile_footprint not in \
                            bench_footprints['PQA'][iacquisition], \
                                "Old ingester found PQA tile and should have "\
                                "found cooresponding tile for %s"\
                                %processing_level

                            LOGGER.info('%s tile %s has data in new ingester',
                                        processing_level, str(tile_footprint))
                            continue
                        # Tile exists in old ingester and new ingester
                        LOGGER.info('Calling load and check ...')
                        ([data1, data2], dummy_nlayers) = \
                            TestLandsatTiler.load_and_check(
                            tile_benchmark,
                            tile_contents.temp_tile_output_path,
                            tile_contents.band_stack.band_dict,
                            tile_contents.band_stack.band_dict)
                        LOGGER.info('Checking arrays ...')
                        assert (data1 == data2).all(), \
                            "Reprojected tile differs " \
                            "from %s" %tile_benchmark
                        LOGGER.info('...OK')
                    else:
                        # No data: the benchmark must not contain this tile.
                        LOGGER.info('No data in %s', str(tile_footprint))
                        assert tile_footprint not in \
                            bench_footprints[processing_level][iacquisition], \
                            "%s tile %s does not have data " \
                            %(processing_level, str(tile_footprint))
                    LOGGER.info('-' * 80)