def test_insert_tile_record(self):
        """Test the Landsat tiling process method by comparing output to a
        file on disk."""
        # pylint: disable=too-many-locals
        # Test a single dataset for tile_record creation
        processing_level = 'PQA'
        dataset_path = TestIngest.DATASETS_TO_INGEST[processing_level][0]
        LOGGER.info('Testing Dataset %s', dataset_path)
        dset = LandsatDataset(dataset_path)
        # Create a DatasetRecord instance so that we can access its
        # list_tile_types() method. In doing this we need to create a
        # collection object and entries on the acquisition and dataset
        # tables of the database.
        self.collection.begin_transaction()
        acquisition = \
            self.collection.create_acquisition_record(dset)
        dset_record = acquisition.create_dataset_record(dset)

        # List the benchmark footprints associated with this dataset
        ftprints = \
            TestTileContents.get_benchmark_footprints(dset_record.mdd,
                                                      TestIngest.BENCHMARK_DIR)
        LOGGER.info('bench_footprints=%s', str(ftprints))
        # Get tile types
        dummy_tile_type_list = dset_record.list_tile_types()
        # Assume dataset has tile_type = 1 only:
        tile_type_id = 1
        dataset_bands_dict = dset_record.get_tile_bands(tile_type_id)
        ls_bandstack = dset.stack_bands(dataset_bands_dict)
        temp_dir = os.path.join(self.ingester.datacube.tile_root,
                                'ingest_temp')
        # Form scene vrt
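        # (buildvrt is understood to write a GDAL virtual raster (VRT) that
        # presents the stacked dataset bands as a single multi-band source
        # for the reprojection step below)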
        ls_bandstack.buildvrt(temp_dir)
        # Reproject scene data onto selected tile coverage
        tile_footprint_list = dset_record.get_coverage(tile_type_id)
        LOGGER.info('coverage=%s', str(tile_footprint_list))
        for tile_footprint in tile_footprint_list:
            tile_contents = \
                self.collection.create_tile_contents(tile_type_id,
                                                     tile_footprint,
                                                     ls_bandstack)
            LOGGER.info('reprojecting for %s tile %s',
                        processing_level, str(tile_footprint))
            #Need to call reproject to set tile_contents.tile_extents
            tile_contents.reproject()
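            # create_tile_record (called below when the tile has data) is
            # expected to insert the row for this tile into the database tile
            # table, which is what this test exercises.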
            if tile_contents.has_data():
                dummy_tile_record = \
                    dset_record.create_tile_record(tile_contents)
        self.collection.commit_transaction()
    def test_get_bbox_dataset(self, tile_type_id=1):
        """Test the DatasetRecord class get_bbox() method on six Landsat
        datasets."""
        # pylint: disable=too-many-locals
        cube_tile_size = \
            (self.ingester.datacube.tile_type_dict[tile_type_id]['x_size'],
             self.ingester.datacube.tile_type_dict[tile_type_id]['y_size'])
        cube_pixels = \
            (self.ingester.datacube.tile_type_dict[tile_type_id]['x_pixels'],
             self.ingester.datacube.tile_type_dict[tile_type_id]['y_pixels'])
        tile_crs = \
            self.ingester.datacube.tile_type_dict[tile_type_id]['crs']
        for idataset in range(len(DATASETS_TO_INGEST)):
            # Get information required for calculating the bounding box.
            dset = LandsatDataset(DATASETS_TO_INGEST[idataset])
            dataset_crs = dset.get_projection()
            geotrans = dset.get_geo_transform()
            pixels = dset.get_x_pixels()
            lines = dset.get_y_pixels()
            # Create a DatasetRecord instance so that we can test its
            # get_bbox() method. In doing this we need to create a
            # collection object and entries on the acquisition and dataset
            # tables of the database.
            self.collection.begin_transaction()
            acquisition = \
                self.collection.create_acquisition_record(dset)
            dset_record = acquisition.create_dataset_record(dset)
            self.collection.commit_transaction()
            # Test the DatasetRecord get_bbox() method
            # Determine the bounding quadrilateral of the dataset extent
            transformation = \
                dset_record.define_transformation(dataset_crs, tile_crs)
            bbox = dset_record.get_bbox(transformation, geotrans,
                                        pixels, lines)
            reference_dataset_bbox = DATASET_BBOX[idataset]
            # Check the bounding box is as expected
            print 'Checking bbox for Dataset %d' % idataset
            residual_in_pixels = \
                [((x2 - x1) * cube_pixels[0] / cube_tile_size[0],
                  (y2 - y1) * cube_pixels[1] / cube_tile_size[1])
                 for ((x1, y1), (x2, y2)) in zip(reference_dataset_bbox, bbox)]
            assert all(abs(dx) < TOLERANCE and abs(dy) < TOLERANCE
                       for (dx, dy) in residual_in_pixels), \
                "bounding box calculation incorrect"
    def check_buildvrt(self, idataset):
        """Test the LandsatBandstack.buildvrt() method by comparing output to a
        file on disk"""

        assert idataset in range(len(DATASETS_TO_INGEST))

        print 'Testing Dataset %s' % DATASETS_TO_INGEST[idataset]
        dset = LandsatDataset(DATASETS_TO_INGEST[idataset])
        # Create a DatasetRecord instance so that we can access its
        # list_tile_types() method. In doing this we need to create a
        # collection object and entries on the acquisition and dataset
        # tables of the database.
        self.collection.begin_transaction()
        acquisition = \
            self.collection.create_acquisition_record(dset)
        dset_record = acquisition.create_dataset_record(dset)
        self.collection.commit_transaction()
        tile_type_list = dset_record.list_tile_types()
        #Assume dataset has tile_type = 1 only:
        tile_type_id = 1
        dataset_bands_dict = dset_record.get_tile_bands(tile_type_id)
        ls_bandstack = dset.stack_bands(dataset_bands_dict)
        temp_dir = self.collection.get_temp_tile_directory()
        ls_bandstack.buildvrt(temp_dir)
        # Get benchmark vrt for comparison
        vrt_benchmark = os.path.join(self.BENCHMARK_DIR,
                                     os.path.basename(ls_bandstack.vrt_name))
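        # diff -I "[Ff]ilename" ignores lines matching that pattern; the VRT
        # embeds source file paths, which legitimately differ between the
        # benchmark and this test run.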
        diff_cmd = ["diff",
                    "-I",
                    "[Ff]ilename",
                    "%s" %vrt_benchmark,
                    "%s" %ls_bandstack.vrt_name
                    ]
        result = execute(diff_cmd, shell=False)
        if result['stdout'] != '':
            self.fail("Differences between vrt files:\n" + result['stdout'])
        if result['stderr'] != '':
            self.fail("Error in system diff command:\n" + result['stderr'])
    def test_reproject(self):
        """Test the Landsat tiling process method by comparing output to a
        file on disk."""
        # pylint: disable=too-many-locals
        # For each processing_level and dataset, keep a record of the
        # tile footprints in the benchmark set.
        bench_footprints = {}
        for iacquisition in range(len(TestIngest.DATASETS_TO_INGEST['PQA'])):
            for processing_level in ['PQA', 'NBAR', 'ORTHO']:
                # Skip all but PQA and ORTHO for the first dataset.
                # TODO: make this a parameter of the suite
                if iacquisition > 0:
                    continue
                if processing_level in ['NBAR']:
                    continue
                dataset_path =  \
                    TestIngest.DATASETS_TO_INGEST[processing_level]\
                    [iacquisition]
                LOGGER.info('Testing Dataset %s', dataset_path)
                dset = LandsatDataset(dataset_path)
                #return
                # Create a DatasetRecord instance so that we can access its
                # list_tile_types() method. In doing this we need to create a
                # collection object and entries on the acquisition and dataset
                # tables of the database.
                self.collection.begin_transaction()
                acquisition = \
                    self.collection.create_acquisition_record(dset)
                dset_record = acquisition.create_dataset_record(dset)
                self.collection.commit_transaction()
                # List the benchmark footprints associated with this dataset
                ftprints = \
                    self.get_benchmark_footprints(dset_record.mdd,
                                                  TestIngest.BENCHMARK_DIR)
                bench_footprints.setdefault(processing_level, {})
                bench_footprints[processing_level].setdefault(iacquisition, {})
                bench_footprints[processing_level][iacquisition] = ftprints
                LOGGER.info('bench_footprints=%s', str(ftprints))
                # Get tile types
                dummy_tile_type_list = dset_record.list_tile_types()
                # Assume dataset has tile_type = 1 only:
                tile_type_id = 1
                dataset_bands_dict = dset_record.get_tile_bands(tile_type_id)
                ls_bandstack = dset.stack_bands(dataset_bands_dict)
                temp_dir = os.path.join(self.ingester.datacube.tile_root,
                                        'ingest_temp')
                # Form scene vrt
                ls_bandstack.buildvrt(temp_dir)
                # Reproject scene data onto selected tile coverage
                tile_footprint_list = dset_record.get_coverage(tile_type_id)
                LOGGER.info('coverage=%s', str(tile_footprint_list))
                for tile_footprint in tile_footprint_list:
                    # Only process the tile footprints in the benchmark set.
                    # TODO: make this a parameter of the suite
                    if tile_footprint not in [(117, -35), (115, -34)]:
                        continue
                    tile_contents = \
                        self.collection.create_tile_contents(tile_type_id,
                                                             tile_footprint,
                                                             ls_bandstack)
                    LOGGER.info('reprojecting for %s tile %s',
                                processing_level, str(tile_footprint))
                    tile_contents.reproject()
                    # Because the date-time of PQA datasets comes directly
                    # from the PQA dataset rather than from NBAR, match on
                    # the ymd string of the datetime rather than the
                    # microseconds version in the NBAR data.
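                    # (Illustrative only: a PQA timestamp of
                    # 2007-02-03 23:58:15 and an NBAR timestamp of
                    # 2007-02-03 23:58:15.123456 both reduce to the same
                    # '20070203' key when matched on the %Y%m%d string.)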
                    tile_benchmark = \
                        self.get_benchmark_tile(dset_record.mdd,
                                                TestIngest.BENCHMARK_DIR,
                                                tile_footprint)
                    LOGGER.info('tile_benchmark is %s', tile_benchmark)
                    if tile_contents.has_data():
                        LOGGER.info('Tile %s has data', str(tile_footprint))
                        # The tile might have data but, if PQA does not, then
                        # the benchmark tile will not exist
                        if tile_footprint not in bench_footprints \
                                [processing_level][iacquisition]:
                            assert tile_footprint not in \
                                bench_footprints['PQA'][iacquisition], \
                                "Old ingester found PQA tile and should have "\
                                "found corresponding tile for %s"\
                                % processing_level

                            LOGGER.info('%s tile %s has data in the new '
                                        'ingester but no benchmark tile '
                                        'exists', processing_level,
                                        str(tile_footprint))
                            continue
                        # Tile exists in old ingester and new ingester
                        LOGGER.info('Calling load and check ...')
                        ([data1, data2], dummy_nlayers) = \
                            TestLandsatTiler.load_and_check(
                            tile_benchmark,
                            tile_contents.temp_tile_output_path,
                            tile_contents.band_stack.band_dict,
                            tile_contents.band_stack.band_dict)
                        LOGGER.info('Checking arrays ...')
                        assert (data1 == data2).all(), \
                            "Reprojected tile differs " \
                            "from %s" %tile_benchmark
                        LOGGER.info('...OK')
                    else:
                        LOGGER.info('No data in %s', str(tile_footprint))
                        assert tile_footprint not in \
                            bench_footprints[processing_level][iacquisition], \
                            "%s tile %s has no data in the new ingester but " \
                            "is in the benchmark set" \
                            % (processing_level, str(tile_footprint))
                    LOGGER.info('-' * 80)
    def test_make_mosaics(self):
        """Make mosaic tiles from two adjoining scenes."""
        # pylint: disable=too-many-locals
        nbar1, nbar2 = TestIngest.MOSAIC_SOURCE_NBAR
        ortho1, ortho2 = TestIngest.MOSAIC_SOURCE_ORTHO
        pqa1, pqa2 = TestIngest.MOSAIC_SOURCE_PQA
        # Set the list of dataset paths which should result in mosaic tiles
        dataset_list = [nbar1, nbar2, ortho1, ortho2, pqa1, pqa2]
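        # Restrict to the PQA pair only; this overrides the full list above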
        dataset_list = [pqa1, pqa2]
        for dataset_path in dataset_list:
            dset = LandsatDataset(dataset_path)
            self.collection.begin_transaction()
            acquisition = \
                self.collection.create_acquisition_record(dset)
            dset_record = acquisition.create_dataset_record(dset)
            # Get tile types
            dummy_tile_type_list = dset_record.list_tile_types()
            # Assume dataset has tile_type = 1 only:
            tile_type_id = 1
            dataset_bands_dict = dset_record.get_tile_bands(tile_type_id)
            ls_bandstack = dset.stack_bands(dataset_bands_dict)
            temp_dir = os.path.join(self.ingester.datacube.tile_root,
                                    'ingest_temp')
            # Form scene vrt
            ls_bandstack.buildvrt(temp_dir)
            # Reproject scene data onto selected tile coverage
            tile_footprint_list = dset_record.get_coverage(tile_type_id)
            LOGGER.info('coverage=%s', str(tile_footprint_list))
            for tile_ftprint in tile_footprint_list:
                # Only process the footprint for which we have benchmark
                # mosaics
                if tile_ftprint not in [(150, -26)]:
                    continue
                tile_contents = \
                    self.collection.create_tile_contents(tile_type_id,
                                                         tile_ftprint,
                                                         ls_bandstack)
                LOGGER.info('Calling reproject for %s tile %s...',
                            dset_record.mdd['processing_level'], tile_ftprint)
                tile_contents.reproject()
                LOGGER.info('...finished')
                if tile_contents.has_data():
                    LOGGER.info('tile %s has data',
                                tile_contents.temp_tile_output_path)
                    tile_record = dset_record.create_tile_record(tile_contents)
                    mosaic_required = tile_record.make_mosaics()
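                    # make_mosaics() is expected to build any required mosaic
                    # and report whether one was needed (i.e. an overlapping
                    # tile from the adjoining scene exists); if not, there is
                    # nothing to compare against the benchmark.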

                    if not mosaic_required:
                        continue
                    #Test mosaic tiles against benchmark
                    mosaic_benchmark = TestTileContents.get_benchmark_tile(
                        dset_record.mdd,
                        os.path.join(TestIngest.BENCHMARK_DIR,
                                     'mosaic_cache'),
                        tile_ftprint)
                    mosaic_new = TestTileContents.get_benchmark_tile(
                        dset_record.mdd,
                        os.path.join(os.path.dirname(
                                tile_contents.temp_tile_output_path),
                                     'mosaic_cache'),
                        tile_ftprint)
                    LOGGER.info("Calling load_and_check...")
                    ([data1, data2], dummy_nlayers) = \
                        TestLandsatTiler.load_and_check(
                        mosaic_benchmark,
                        mosaic_new,
                        tile_contents.band_stack.band_dict,
                        tile_contents.band_stack.band_dict)
                    LOGGER.info('Checking arrays ...')
                    if dset_record.mdd['processing_level'] == 'PQA':
                        ind = (data1 == data2)
                        # Check that differences are due to differing treatment
                        # of contiguity bit.
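                        # (In the 16-bit PQA word each quality test sets one
                        # bit; bit 8, i.e. 1 << 8, is the band-contiguity
                        # flag, so a mismatch is accepted below only when at
                        # least one of the two values has that bit cleared.)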
                        data1_diff = data1[~ind]
                        data2_diff = data2[~ind]
                        contiguity_diff =  \
                            np.logical_or(
                            np.bitwise_and(data1_diff, 1 << 8) == 0,
                            np.bitwise_and(data2_diff, 1 << 8) == 0)
                        assert contiguity_diff.all(), \
                            "mosaicked tile %s differs from benchmark %s" \
                            % (mosaic_new, mosaic_benchmark)
                    else:
                        diff_cmd = ["diff",
                                    "-I",
                                    "[Ff]ilename",
                                    "%s" %mosaic_benchmark,
                                    "%s" %mosaic_new
                                    ]
                        result = execute(diff_cmd, shell=False)
                        assert result['stdout'] == '', \
                            "Differences between vrt files"
                        assert result['stderr'] == '', \
                            "Error in system diff command"
                else:
                    LOGGER.info('... tile has no data')
                    tile_contents.remove()
            self.collection.commit_transaction()
    def test_get_coverage(self, tile_type_id=1):
        # pylint: disable=too-many-locals
        """Test the methods called by the dataset_record.get_coverage() method.

        The constants at the top of this file provide test data expected to be
        returned by the tested get_coverage methods:
        1. TILE_XLL, TILE_YLL,... : dataset bounding box in tile projection
                                    coordinates TILE_CRS
        2. DEFINITE_TILES: tiles in inner rectangle
        3. POSSIBLE_TILES: tiles in outer rectangle
        4. INTERSECTED_TILES: those tiles from the outer rectangle that
        intersect the dataset bounding box
        5. CONTAINED_TILES: those tiles from outer rectangle wholly contained
        in the dataset bounding box
        6. COVERAGE: the tiles to be returned from DatasetRecord.get_coverage()
        """
        total_definite_tiles = set()
        total_possible_tiles = set()
        total_intersected_tiles = set()
        total_contained_tiles = set()
        total_touched_tiles = set()
        total_coverage = set()
        cube_origin = \
            (self.ingester.datacube.tile_type_dict[tile_type_id]['x_origin'],
             self.ingester.datacube.tile_type_dict[tile_type_id]['y_origin'])
        cube_tile_size = \
            (self.ingester.datacube.tile_type_dict[tile_type_id]['x_size'],
             self.ingester.datacube.tile_type_dict[tile_type_id]['y_size'])
        tile_crs = \
            self.ingester.datacube.tile_type_dict[tile_type_id]['crs']
        for idataset in range(len(DATASETS_TO_INGEST)):
            print 'Getting the coverage from Dataset %d' % idataset
            dset = LandsatDataset(DATASETS_TO_INGEST[idataset])
            dataset_crs = dset.get_projection()
            geotrans = dset.get_geo_transform()
            pixels = dset.get_x_pixels()
            lines = dset.get_y_pixels()
            # Create a DatasetRecord instance so that we can test its
            # get_coverage() method. In doing this we need to create a
            # collection object and entries on the acquisition and dataset
            # tables of the database.
            self.collection.begin_transaction()
            acquisition = \
                self.collection.create_acquisition_record(dset)
            dset_record = acquisition.create_dataset_record(dset)
            self.collection.commit_transaction()
            # Determine the bounding quadrilateral of the dataset extent
            # using the DatasetRecord get_bbox() method
            transformation = \
                dset_record.define_transformation(dataset_crs, tile_crs)
            bbox = dset_record.get_bbox(transformation, geotrans,
                                        pixels, lines)
            #Get the definite and possible tiles from this dataset and
            #accumulate in running total
            definite_tiles, possible_tiles = \
                dset_record.get_definite_and_possible_tiles(bbox, cube_origin,
                                                            cube_tile_size)
            total_definite_tiles = \
                total_definite_tiles.union(definite_tiles)
            total_possible_tiles = \
                total_possible_tiles.union(possible_tiles)
            #Get intersected tiles and accumulate in running total
            intersected_tiles = \
                dset_record.get_intersected_tiles(possible_tiles,
                                                  bbox,
                                                  cube_origin,
                                                  cube_tile_size)
            total_intersected_tiles = \
                total_intersected_tiles.union(intersected_tiles)
            # Remove the intersected tiles from the possible tiles, then get
            # the contained tiles
            possible_tiles = possible_tiles.difference(intersected_tiles)
            contained_tiles = \
                dset_record.get_contained_tiles(possible_tiles,
                                                bbox,
                                                cube_origin,
                                                cube_tile_size)
            total_contained_tiles = \
                total_contained_tiles.union(contained_tiles)
            #Use parent method to get touched tiles
            touched_tiles = \
                dset_record.get_touched_tiles(bbox,
                                              cube_origin,
                                              cube_tile_size)
            total_touched_tiles = total_touched_tiles.union(touched_tiles)
            #use parent method get_coverage to get coverage
            coverage = dset_record.get_coverage(tile_type_id)
            total_coverage = total_coverage.union(coverage)

        #Check definite and possible tiles are as expected
        assert total_definite_tiles == DEFINITE_TILES, \
            "Set of definite tiles disagrees with test data"
        assert total_possible_tiles == POSSIBLE_TILES, \
            "Set of possible tiles disagrees with test data"
        #Check intersected tiles are as expected
        assert total_intersected_tiles == INTERSECTED_TILES, \
            "Set of intersected tiles disagrees with test data"
        #Check contained tiles are as expected
        assert total_contained_tiles == CONTAINED_TILES, \
            "Set of tiles not in the definite set but wholly contained " \
            "within the dataset bbox does not agree with test data"
        #Check results of get_touched_tiles against expectations
        assert total_touched_tiles == COVERAGE, \
            "Set of tiles returned by get_touched_tiles does not agree " \
            "with test data"
        assert total_coverage == COVERAGE, \
            "Set of tiles returned by get_coverage does not agree " \
            "with test data"
    def test_reproject(self):
        """Test the Landsat tiling process method by comparing output to a
        file on disk."""
        # pylint: disable=too-many-locals
        # For each processing_level and dataset, reproject the scene onto the
        # tile coverage and compare the output against the 'expected'
        # benchmark directory.
        for iacquisition in range(len(TestIngest.DATASETS_TO_INGEST['PQA'])):
            for processing_level in ['PQA', 'NBAR', 'ORTHO']:
                # Skip all but PQA and ORTHO for the first dataset (disabled).
                # TODO: make this a parameter of the suite
                #if iacquisition > 0:
                #    continue
                #if processing_level in ['NBAR']:
                #    continue
                dataset_path =  \
                    TestIngest.DATASETS_TO_INGEST[processing_level]\
                    [iacquisition]
                LOGGER.info('Testing Dataset %s', dataset_path)
                dset = LandsatDataset(dataset_path)
                #return
                # Create a DatasetRecord instance so that we can access its
                # list_tile_types() method. In doing this we need to create a
                # collection object and entries on the acquisition and dataset
                # tables of the database.
                self.collection.begin_transaction()
                acquisition = \
                    self.collection.create_acquisition_record(dset)
                dset_record = acquisition.create_dataset_record(dset)

                # Get tile types
                dummy_tile_type_list = dset_record.list_tile_types()
                # Assume dataset has tile_type = 1 only:
                tile_type_id = 1
                dataset_bands_dict = dset_record.get_tile_bands(tile_type_id)
                ls_bandstack = dset.stack_bands(dataset_bands_dict)
                # Form scene vrt
                ls_bandstack.buildvrt(
                    self.collection.get_temp_tile_directory())
                # Reproject scene data onto selected tile coverage
                tile_footprint_list = dset_record.get_coverage(tile_type_id)
                LOGGER.info('coverage=%s', str(tile_footprint_list))
                for tile_footprint in tile_footprint_list:
                    # Skip all but the benchmark tile footprints (disabled).
                    # TODO: make this a parameter of the suite
                    #if tile_footprint not in [(117, -35), (115, -34)]:
                    #    continue
                    tile_contents = \
                        self.collection.create_tile_contents(tile_type_id,
                                                             tile_footprint,
                                                             ls_bandstack)
                    LOGGER.info('reprojecting for %s tile %s...',
                                processing_level, str(tile_footprint))
                    tile_contents.reproject()
                    LOGGER.info('...done')
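                    # POPULATE_EXPECTED is taken here to mean this run only
                    # regenerates output destined for the 'expected' benchmark
                    # directory, so the comparison below is skipped.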

                    if self.POPULATE_EXPECTED:
                        continue
                    # Do comparison with expected results
                    tile_benchmark = \
                        self.swap_dir_in_path(tile_contents.tile_output_path,
                                              'output', 'expected')
                    if tile_contents.has_data():
                        LOGGER.info('Tile %s has data', str(tile_footprint))
                        LOGGER.info("Comparing test output with benchmark:\n"\
                                        "benchmark: %s\ntest output: %s",
                                    tile_benchmark,
                                    tile_contents.temp_tile_output_path)
                        # Do comparison with the expected directory
                        LOGGER.info('Calling load and check ...')
                        ([data1, data2], dummy_nlayers) = \
                            TestLandsatTiler.load_and_check(
                            tile_benchmark,
                            tile_contents.temp_tile_output_path,
                            tile_contents.band_stack.band_dict,
                            tile_contents.band_stack.band_dict)
                        LOGGER.info('Checking arrays ...')
                        if not (data1 == data2).all():
                            self.fail("Reprojected tile differs " \
                                          "from %s" %tile_benchmark)
                        LOGGER.info('...OK')
                    else:
                        LOGGER.info('No data in %s', str(tile_footprint))
                    LOGGER.info('-' * 80)
                self.collection.commit_transaction()