def test_make_mosaics(self):
    """Make mosaic tiles from two adjoining scenes.

    Ingests a shuffled mix of single-scene datasets plus the
    NBAR/PQA/ORTHO mosaic-source scenes.  For the one footprint with
    benchmark mosaics, (141, -38), each dataset is reprojected onto its
    tiles and mosaic creation is triggered; the resulting mosaic is then
    compared against the expected output on disk (PQA mosaics by array
    equality, other levels by a textual diff of the vrt files).
    """
    # pylint: disable=too-many-locals
    dataset_list = \
        [TestIngest.DATASETS_TO_INGEST[level][i] for i in range(6)
         for level in ['PQA', 'NBAR', 'ORTHO']]
    dataset_list.extend(TestIngest.MOSAIC_SOURCE_NBAR)
    dataset_list.extend(TestIngest.MOSAIC_SOURCE_PQA)
    dataset_list.extend(TestIngest.MOSAIC_SOURCE_ORTHO)
    # Shuffle so the test does not depend on ingest order.
    random.shuffle(dataset_list)
    LOGGER.info("Ingesting following datasets:")
    # enumerate() rather than list.index(): index() is O(n) per call and
    # reports the first occurrence, which is wrong if a path appears twice.
    for dataset_number, dset in enumerate(dataset_list):
        LOGGER.info('%d) %s', dataset_number, dset)
    for dataset_number, dataset_path in enumerate(dataset_list):
        LOGGER.info('Ingesting Dataset %d:\n%s', dataset_number,
                    dataset_path)
        dset = LandsatDataset(dataset_path)
        # All records for one dataset are created inside a single
        # transaction; its tiles (including any mosaics) remain in the
        # temporary location until the commit at the bottom of this loop.
        self.collection.begin_transaction()
        acquisition = \
            self.collection.create_acquisition_record(dset)
        dset_record = acquisition.create_dataset_record(dset)
        # Result unused; presumably called for its side effects --
        # TODO confirm whether this call is still required.
        dummy_tile_type_list = dset_record.list_tile_types()
        # Assume dataset has tile_type = 1 only:
        tile_type_id = 1
        dataset_bands_dict = dset_record.get_tile_bands(tile_type_id)
        ls_bandstack = dset.stack_bands(dataset_bands_dict)
        temp_dir = os.path.join(self.ingester.datacube.tile_root,
                                'ingest_temp')
        # Form scene vrt
        ls_bandstack.buildvrt(temp_dir)
        # Reproject scene data onto selected tile coverage
        tile_footprint_list = dset_record.get_coverage(tile_type_id)
        LOGGER.info('coverage=%s', str(tile_footprint_list))
        for tile_ftprint in tile_footprint_list:
            # Only do that footprint for which we have benchmark mosaics
            if tile_ftprint not in [(141, -38)]:
                continue
            tile_contents = \
                self.collection.create_tile_contents(tile_type_id,
                                                     tile_ftprint,
                                                     ls_bandstack)
            LOGGER.info('Calling reproject for %s tile %s...',
                        dset_record.mdd['processing_level'],
                        tile_ftprint)
            tile_contents.reproject()
            LOGGER.info('...finished')
            if tile_contents.has_data():
                LOGGER.info('tile %s has data',
                            tile_contents.temp_tile_output_path)
                tile_record = \
                    dset_record.create_tile_record(tile_contents)
                mosaic_required = tile_record.make_mosaics()
                if not mosaic_required:
                    continue
                # Test mosaic tiles against benchmark
                # At this stage, transaction for this dataset not yet
                # commited and so the tiles from this dataset, including
                # any mosaics are still in the temporary location.
                if self.POPULATE_EXPECTED:
                    continue
                mosaic_benchmark = \
                    TestTileContents.swap_dir_in_path(
                        tile_contents.mosaic_final_pathname,
                        'output', 'expected')
                mosaic_new = tile_contents.mosaic_temp_pathname
                LOGGER.info("Comparing test output with benchmark:\n"
                            "benchmark: %s\ntest output: %s",
                            mosaic_benchmark, mosaic_new)
                if dset_record.mdd['processing_level'] == 'PQA':
                    LOGGER.info("For PQA mosaic, calling "
                                "load_and_check...")
                    ([data1, data2], dummy_nlayers) = \
                        TestLandsatTiler.load_and_check(
                            mosaic_benchmark,
                            mosaic_new,
                            tile_contents.band_stack.band_dict,
                            tile_contents.band_stack.band_dict)
                    LOGGER.info('Checking arrays ...')
                    # BUGFIX: was "if ~(data1 == data2).all():".  Bitwise
                    # NOT on a plain Python bool is always truthy
                    # (~True == -2, ~False == -1); the original only
                    # behaved because numpy's .all() returns numpy.bool_,
                    # for which ~ happens to negate.  "not" is correct
                    # for either type.
                    if not (data1 == data2).all():
                        self.fail("Difference in PQA mosaic "
                                  "from expected result: %s and %s"
                                  % (mosaic_benchmark, mosaic_new))
                else:
                    # Non-PQA mosaics are vrt files: diff them textually,
                    # ignoring the embedded source-filename lines.
                    diff_cmd = ["diff",
                                "-I",
                                "[Ff]ilename",
                                "%s" % mosaic_benchmark,
                                "%s" % mosaic_new
                                ]
                    result = execute(diff_cmd, shell=False)
                    assert result['stdout'] == '', \
                        "Differences between vrt files"
                    assert result['stderr'] == '', \
                        "Error in system diff command"
            else:
                LOGGER.info('... tile has no data')
                tile_contents.remove()
        self.collection.commit_transaction()
def test_make_mosaics(self):
    """Make mosaic tiles from two adjoining scenes.

    Ingests the mosaic-source scenes, reprojects them onto the single
    footprint with cached benchmark mosaics, (150, -26), triggers mosaic
    creation, and compares each resulting mosaic against the benchmark
    in TestIngest.BENCHMARK_DIR/mosaic_cache.
    """
    # pylint: disable=too-many-locals
    nbar1, nbar2 = TestIngest.MOSAIC_SOURCE_NBAR
    ortho1, ortho2 = TestIngest.MOSAIC_SOURCE_ORTHO
    pqa1, pqa2 = TestIngest.MOSAIC_SOURCE_PQA
    # Set the list of dataset paths which should result in mosaic tiles
    dataset_list = [nbar1, nbar2, ortho1, ortho2, pqa1, pqa2]
    # NOTE(review): the line below discards the full list and restricts
    # the test to the two PQA scenes -- looks like a debugging leftover;
    # confirm whether the full six-dataset run should be restored.
    dataset_list = [pqa1, pqa2]
    for dataset_path in dataset_list:
        dset = LandsatDataset(dataset_path)
        # One transaction per dataset; tiles stay in the temporary
        # location until the commit at the bottom of this loop.
        self.collection.begin_transaction()
        acquisition = \
            self.collection.create_acquisition_record(dset)
        dset_record = acquisition.create_dataset_record(dset)
        # Get tile types (result unused; presumably called for its side
        # effects -- TODO confirm)
        dummy_tile_type_list = dset_record.list_tile_types()
        # Assume dataset has tile_type = 1 only:
        tile_type_id = 1
        dataset_bands_dict = dset_record.get_tile_bands(tile_type_id)
        ls_bandstack = dset.stack_bands(dataset_bands_dict)
        temp_dir = os.path.join(self.ingester.datacube.tile_root,
                                'ingest_temp')
        # Form scene vrt
        ls_bandstack.buildvrt(temp_dir)
        # Reproject scene data onto selected tile coverage
        tile_footprint_list = dset_record.get_coverage(tile_type_id)
        LOGGER.info('coverage=%s', str(tile_footprint_list))
        for tile_ftprint in tile_footprint_list:
            # Only do that footprint for which we have benchmark mosaics
            if tile_ftprint not in [(150, -26)]:
                continue
            tile_contents = \
                self.collection.create_tile_contents(tile_type_id,
                                                     tile_ftprint,
                                                     ls_bandstack)
            LOGGER.info('Calling reproject for %s tile %s...',
                        dset_record.mdd['processing_level'],
                        tile_ftprint)
            tile_contents.reproject()
            LOGGER.info('...finished')
            if tile_contents.has_data():
                LOGGER.info('tile %s has data',
                            tile_contents.temp_tile_output_path)
                tile_record = \
                    dset_record.create_tile_record(tile_contents)
                # make_mosaics() returns whether a mosaic was produced;
                # only the second scene of an overlapping pair should
                # require one.
                mosaic_required = tile_record.make_mosaics()
                if not mosaic_required:
                    continue
                # Test mosaic tiles against benchmark
                mosaic_benchmark = TestTileContents.get_benchmark_tile(
                    dset_record.mdd,
                    os.path.join(TestIngest.BENCHMARK_DIR,
                                 'mosaic_cache'),
                    tile_ftprint)
                # The freshly-made mosaic is located the same way, but
                # under the temporary tile output directory.
                mosaic_new = TestTileContents.get_benchmark_tile(
                    dset_record.mdd,
                    os.path.join(
                        os.path.dirname(
                            tile_contents.temp_tile_output_path),
                        'mosaic_cache'),
                    tile_ftprint)
                LOGGER.info("Calling load_and_check...")
                ([data1, data2], dummy_nlayers) = \
                    TestLandsatTiler.load_and_check(
                        mosaic_benchmark,
                        mosaic_new,
                        tile_contents.band_stack.band_dict,
                        tile_contents.band_stack.band_dict)
                LOGGER.info('Checking arrays ...')
                if dset_record.mdd['processing_level'] == 'PQA':
                    ind = (data1 == data2)
                    # Check that differences are due to differing
                    # treatment of contiguity bit: every differing pixel
                    # must have bit 8 clear in at least one of the two
                    # mosaics.
                    data1_diff = data1[~ind]
                    data2_diff = data2[~ind]
                    contiguity_diff = \
                        np.logical_or(
                            np.bitwise_and(data1_diff, 1 << 8) == 0,
                            np.bitwise_and(data2_diff, 1 << 8) == 0)
                    assert contiguity_diff.all(), \
                        "mosaiced tile %s differs from benchmark %s" \
                        % (mosaic_new, mosaic_benchmark)
                else:
                    # Non-PQA mosaics are vrt files: diff textually,
                    # ignoring embedded source-filename lines.
                    diff_cmd = ["diff",
                                "-I",
                                "[Ff]ilename",
                                "%s" % mosaic_benchmark,
                                "%s" % mosaic_new
                                ]
                    result = execute(diff_cmd, shell=False)
                    assert result['stdout'] == '', \
                        "Differences between vrt files"
                    assert result['stderr'] == '', \
                        "Error in system diff command"
            else:
                LOGGER.info('... tile has no data')
                tile_contents.remove()
        self.collection.commit_transaction()
def test_make_mosaics(self):
    """Make mosaic tiles from two adjoining scenes.

    NOTE(review): this appears to be a duplicate definition of
    test_make_mosaics; if it shares a class with an earlier definition,
    this later one silently shadows it -- confirm which revision should
    be kept.
    """
    # pylint: disable=too-many-locals
    nbar1, nbar2 = TestIngest.MOSAIC_SOURCE_NBAR
    ortho1, ortho2 = TestIngest.MOSAIC_SOURCE_ORTHO
    pqa1, pqa2 = TestIngest.MOSAIC_SOURCE_PQA
    # Set the list of dataset paths which should result in mosaic tiles
    dataset_list = [nbar1, nbar2, ortho1, ortho2, pqa1, pqa2]
    # NOTE(review): the line below discards the full list and restricts
    # the test to the two PQA scenes -- looks like a debugging leftover.
    dataset_list = [pqa1, pqa2]
    for dataset_path in dataset_list:
        dset = LandsatDataset(dataset_path)
        # One transaction per dataset; tiles stay in the temporary
        # location until the commit at the bottom of this loop.
        self.collection.begin_transaction()
        acquisition = \
            self.collection.create_acquisition_record(dset)
        dset_record = acquisition.create_dataset_record(dset)
        # Get tile types (result unused; presumably called for its side
        # effects -- TODO confirm)
        dummy_tile_type_list = dset_record.list_tile_types()
        # Assume dataset has tile_type = 1 only:
        tile_type_id = 1
        dataset_bands_dict = dset_record.get_tile_bands(tile_type_id)
        ls_bandstack = dset.stack_bands(dataset_bands_dict)
        temp_dir = os.path.join(self.ingester.datacube.tile_root,
                                'ingest_temp')
        # Form scene vrt
        ls_bandstack.buildvrt(temp_dir)
        # Reproject scene data onto selected tile coverage
        tile_footprint_list = dset_record.get_coverage(tile_type_id)
        LOGGER.info('coverage=%s', str(tile_footprint_list))
        for tile_ftprint in tile_footprint_list:
            # Only do that footprint for which we have benchmark mosaics
            if tile_ftprint not in [(150, -26)]:
                continue
            tile_contents = \
                self.collection.create_tile_contents(tile_type_id,
                                                     tile_ftprint,
                                                     ls_bandstack)
            LOGGER.info('Calling reproject for %s tile %s...',
                        dset_record.mdd['processing_level'],
                        tile_ftprint)
            tile_contents.reproject()
            LOGGER.info('...finished')
            if tile_contents.has_data():
                LOGGER.info('tile %s has data',
                            tile_contents.temp_tile_output_path)
                tile_record = \
                    dset_record.create_tile_record(tile_contents)
                # make_mosaics() returns whether a mosaic was produced.
                mosaic_required = tile_record.make_mosaics()
                if not mosaic_required:
                    continue
                # Test mosaic tiles against benchmark
                mosaic_benchmark = TestTileContents.get_benchmark_tile(
                    dset_record.mdd,
                    os.path.join(TestIngest.BENCHMARK_DIR,
                                 'mosaic_cache'),
                    tile_ftprint)
                # The freshly-made mosaic is located the same way, but
                # under the temporary tile output directory.
                mosaic_new = TestTileContents.get_benchmark_tile(
                    dset_record.mdd,
                    os.path.join(os.path.dirname(
                        tile_contents.temp_tile_output_path),
                        'mosaic_cache'),
                    tile_ftprint)
                LOGGER.info("Calling load_and_check...")
                ([data1, data2], dummy_nlayers) = \
                    TestLandsatTiler.load_and_check(
                        mosaic_benchmark,
                        mosaic_new,
                        tile_contents.band_stack.band_dict,
                        tile_contents.band_stack.band_dict)
                LOGGER.info('Checking arrays ...')
                if dset_record.mdd['processing_level'] == 'PQA':
                    ind = (data1 == data2)
                    # Check that differences are due to differing
                    # treatment of contiguity bit: every differing pixel
                    # must have bit 8 clear in at least one mosaic.
                    data1_diff = data1[~ind]
                    data2_diff = data2[~ind]
                    contiguity_diff = \
                        np.logical_or(
                            np.bitwise_and(data1_diff, 1 << 8) == 0,
                            np.bitwise_and(data2_diff, 1 << 8) == 0)
                    assert contiguity_diff.all(), \
                        "mosaiced tile %s differs from benchmark %s" \
                        % (mosaic_new, mosaic_benchmark)
                else:
                    # Non-PQA mosaics are vrt files: diff textually,
                    # ignoring embedded source-filename lines.
                    diff_cmd = ["diff",
                                "-I",
                                "[Ff]ilename",
                                "%s" % mosaic_benchmark,
                                "%s" % mosaic_new
                                ]
                    result = execute(diff_cmd, shell=False)
                    assert result['stdout'] == '', \
                        "Differences between vrt files"
                    assert result['stderr'] == '', \
                        "Error in system diff command"
            else:
                LOGGER.info('... tile has no data')
                tile_contents.remove()
        self.collection.commit_transaction()
def test_reproject(self): """Test the Landsat tiling process method by comparing output to a file on disk.""" # pylint: disable=too-many-locals #For each processing_level, and dataset keep record of those #tile footprints in the benchmark set. for iacquisition in range(len(TestIngest.DATASETS_TO_INGEST['PQA'])): for processing_level in ['PQA', 'NBAR', 'ORTHO']: #Skip all but PQA and ORTHO for first dataset. #TODO program this in as a paramter of the suite #if iacquisition > 0: # continue #if processing_level in ['NBAR']: # continue dataset_path = \ TestIngest.DATASETS_TO_INGEST[processing_level]\ [iacquisition] LOGGER.info('Testing Dataset %s', dataset_path) dset = LandsatDataset(dataset_path) #return # Create a DatasetRecord instance so that we can access its # list_tile_types() method. In doing this we need to create a # collection object and entries on the acquisition and dataset # tables of the database. self.collection.begin_transaction() acquisition = \ self.collection.create_acquisition_record(dset) dset_record = acquisition.create_dataset_record(dset) # Get tile types dummy_tile_type_list = dset_record.list_tile_types() # Assume dataset has tile_type = 1 only: tile_type_id = 1 dataset_bands_dict = dset_record.get_tile_bands(tile_type_id) ls_bandstack = dset.stack_bands(dataset_bands_dict) # Form scene vrt ls_bandstack.buildvrt(self.collection. get_temp_tile_directory()) # Reproject scene data onto selected tile coverage tile_footprint_list = dset_record.get_coverage(tile_type_id) LOGGER.info('coverage=%s', str(tile_footprint_list)) for tile_footprint in tile_footprint_list: #Skip all but PQA and ORTHO for first dataset. 
#TODO program this in as a paramter of the suite #if tile_footprint not in [(117, -35), (115, -34)]: # continue tile_contents = \ self.collection.create_tile_contents(tile_type_id, tile_footprint, ls_bandstack) LOGGER.info('reprojecting for %s tile %s...', processing_level, str(tile_footprint)) tile_contents.reproject() LOGGER.info('...done') if self.POPULATE_EXPECTED: continue #Do comparision with expected results tile_benchmark = self.swap_dir_in_path(tile_contents. tile_output_path, 'output', 'expected') if tile_contents.has_data(): LOGGER.info('Tile %s has data', str(tile_footprint)) LOGGER.info("Comparing test output with benchmark:\n"\ "benchmark: %s\ntest output: %s", tile_benchmark, tile_contents.temp_tile_output_path) # Do comparision with expected directory LOGGER.info('Calling load and check ...') ([data1, data2], dummy_nlayers) = \ TestLandsatTiler.load_and_check( tile_benchmark, tile_contents.temp_tile_output_path, tile_contents.band_stack.band_dict, tile_contents.band_stack.band_dict) LOGGER.info('Checking arrays ...') if not (data1 == data2).all(): self.fail("Reprojected tile differs " \ "from %s" %tile_benchmark) LOGGER.info('...OK') else: LOGGER.info('No data in %s', str(tile_footprint)) LOGGER.info('-' * 80) self.collection.commit_transaction()
def test_reproject(self):
    """Test the Landsat tiling process method by comparing output to a
    file on disk.

    Variant that cross-checks against the benchmark footprints recorded
    per processing level and acquisition: a tile with data must have a
    benchmark (unless PQA also lacks one), and a tile without data must
    not appear in the benchmark set.
    """
    # pylint: disable=too-many-locals
    # For each processing_level, and dataset keep record of those
    # tile footprints in the benchmark set.
    bench_footprints = {}
    for iacquisition in range(len(TestIngest.DATASETS_TO_INGEST['PQA'])):
        for processing_level in ['PQA', 'NBAR', 'ORTHO']:
            # Skip all but PQA and ORTHO for first dataset.
            # TODO program this in as a parameter of the suite
            if iacquisition > 0:
                continue
            if processing_level in ['NBAR']:
                continue
            dataset_path = \
                TestIngest.DATASETS_TO_INGEST[processing_level]\
                [iacquisition]
            LOGGER.info('Testing Dataset %s', dataset_path)
            dset = LandsatDataset(dataset_path)
            #return
            # Create a DatasetRecord instance so that we can access its
            # list_tile_types() method. In doing this we need to create a
            # collection object and entries on the acquisition and dataset
            # tables of the database.
            self.collection.begin_transaction()
            acquisition = \
                self.collection.create_acquisition_record(dset)
            dset_record = acquisition.create_dataset_record(dset)
            # NOTE(review): unlike the sibling variant, this commits
            # immediately after record creation rather than after tiling.
            self.collection.commit_transaction()
            # List the benchmark footprints associated with this dataset
            ftprints = \
                self.get_benchmark_footprints(dset_record.mdd,
                                              TestIngest.BENCHMARK_DIR)
            bench_footprints.setdefault(processing_level, {})
            # NOTE(review): this setdefault is redundant -- the value is
            # overwritten by the direct assignment on the next line.
            bench_footprints[processing_level].setdefault(iacquisition,
                                                          {})
            bench_footprints[processing_level][iacquisition] = ftprints
            LOGGER.info('bench_footprints=%s', str(ftprints))
            # Get tile types (result unused; called for its side effects)
            dummy_tile_type_list = dset_record.list_tile_types()
            # Assume dataset has tile_type = 1 only:
            tile_type_id = 1
            dataset_bands_dict = dset_record.get_tile_bands(tile_type_id)
            ls_bandstack = dset.stack_bands(dataset_bands_dict)
            temp_dir = os.path.join(self.ingester.datacube.tile_root,
                                    'ingest_temp')
            # Form scene vrt
            ls_bandstack.buildvrt(temp_dir)
            # Reproject scene data onto selected tile coverage
            tile_footprint_list = dset_record.get_coverage(tile_type_id)
            LOGGER.info('coverage=%s', str(tile_footprint_list))
            for tile_footprint in tile_footprint_list:
                # Skip all but PQA and ORTHO for first dataset.
                # TODO program this in as a parameter of the suite
                if tile_footprint not in [(117, -35), (115, -34)]:
                    continue
                tile_contents = \
                    self.collection.create_tile_contents(tile_type_id,
                                                         tile_footprint,
                                                         ls_bandstack)
                LOGGER.info('reprojecting for %s tile %s',
                            processing_level, str(tile_footprint))
                tile_contents.reproject()
                # Because date-time of PQA datasets is coming directly
                # from the PQA dataset, rather NBAR, match on ymd string
                # of datetime, rather than the microseconds version in
                # the NBAR data.
                tile_benchmark = \
                    self.get_benchmark_tile(dset_record.mdd,
                                            TestIngest.BENCHMARK_DIR,
                                            tile_footprint)
                LOGGER.info('tile_benchmark is %s', tile_benchmark)
                if tile_contents.has_data():
                    LOGGER.info('Tile %s has data', str(tile_footprint))
                    # The tile might have data but, if PQA does not, then
                    # the benchmark tile will not exist
                    if tile_footprint not in bench_footprints \
                            [processing_level][iacquisition]:
                        assert tile_footprint not in \
                            bench_footprints['PQA'][iacquisition], \
                            "Old ingester found PQA tile and should have "\
                            "found cooresponding tile for %s"\
                            %processing_level
                        LOGGER.info('%s tile %s has data in new ingester',
                                    processing_level, str(tile_footprint))
                        continue
                    # Tile exists in old ingester and new ingester
                    LOGGER.info('Calling load and check ...')
                    ([data1, data2], dummy_nlayers) = \
                        TestLandsatTiler.load_and_check(
                            tile_benchmark,
                            tile_contents.temp_tile_output_path,
                            tile_contents.band_stack.band_dict,
                            tile_contents.band_stack.band_dict)
                    LOGGER.info('Checking arrays ...')
                    assert (data1 == data2).all(), \
                        "Reprojected tile differs " \
                        "from %s" %tile_benchmark
                    LOGGER.info('...OK')
                else:
                    LOGGER.info('No data in %s', str(tile_footprint))
                    # A tile with no data must not be in the benchmark
                    # footprint set for this level/acquisition.
                    assert tile_footprint not in \
                        bench_footprints[processing_level][iacquisition], \
                        "%s tile %s does not have data " \
                        %(processing_level, str(tile_footprint))
                LOGGER.info('-' * 80)
def test_make_mosaics(self):
    """Make mosaic tiles from two adjoining scenes.

    Ingests a shuffled mix of single-scene datasets plus the
    NBAR/PQA/ORTHO mosaic-source scenes.  For the one footprint with
    benchmark mosaics, (141, -38), each dataset is reprojected onto its
    tiles and mosaic creation is triggered; the resulting mosaic is then
    compared against the expected output on disk (PQA mosaics by array
    equality, other levels by a textual diff of the vrt files).
    """
    # pylint: disable=too-many-locals
    dataset_list = \
        [TestIngest.DATASETS_TO_INGEST[level][i] for i in range(6)
         for level in ['PQA', 'NBAR', 'ORTHO']]
    dataset_list.extend(TestIngest.MOSAIC_SOURCE_NBAR)
    dataset_list.extend(TestIngest.MOSAIC_SOURCE_PQA)
    dataset_list.extend(TestIngest.MOSAIC_SOURCE_ORTHO)
    # Shuffle so the test does not depend on ingest order.
    random.shuffle(dataset_list)
    LOGGER.info("Ingesting following datasets:")
    # enumerate() rather than list.index(): index() is O(n) per call and
    # reports the first occurrence, which is wrong if a path appears twice.
    for dataset_number, dset in enumerate(dataset_list):
        LOGGER.info('%d) %s', dataset_number, dset)
    for dataset_number, dataset_path in enumerate(dataset_list):
        LOGGER.info('Ingesting Dataset %d:\n%s', dataset_number,
                    dataset_path)
        dset = LandsatDataset(dataset_path)
        # All records for one dataset are created inside a single
        # transaction; its tiles (including any mosaics) remain in the
        # temporary location until the commit at the bottom of this loop.
        self.collection.begin_transaction()
        acquisition = \
            self.collection.create_acquisition_record(dset)
        dset_record = acquisition.create_dataset_record(dset)
        # Result unused; presumably called for its side effects --
        # TODO confirm whether this call is still required.
        dummy_tile_type_list = dset_record.list_tile_types()
        # Assume dataset has tile_type = 1 only:
        tile_type_id = 1
        dataset_bands_dict = dset_record.get_tile_bands(tile_type_id)
        ls_bandstack = dset.stack_bands(dataset_bands_dict)
        temp_dir = os.path.join(self.ingester.datacube.tile_root,
                                'ingest_temp')
        # Form scene vrt
        ls_bandstack.buildvrt(temp_dir)
        # Reproject scene data onto selected tile coverage
        tile_footprint_list = dset_record.get_coverage(tile_type_id)
        LOGGER.info('coverage=%s', str(tile_footprint_list))
        for tile_ftprint in tile_footprint_list:
            # Only do that footprint for which we have benchmark mosaics
            if tile_ftprint not in [(141, -38)]:
                continue
            tile_contents = \
                self.collection.create_tile_contents(tile_type_id,
                                                     tile_ftprint,
                                                     ls_bandstack)
            LOGGER.info('Calling reproject for %s tile %s...',
                        dset_record.mdd['processing_level'],
                        tile_ftprint)
            tile_contents.reproject()
            LOGGER.info('...finished')
            if tile_contents.has_data():
                LOGGER.info('tile %s has data',
                            tile_contents.temp_tile_output_path)
                tile_record = \
                    dset_record.create_tile_record(tile_contents)
                mosaic_required = tile_record.make_mosaics()
                if not mosaic_required:
                    continue
                # Test mosaic tiles against benchmark
                # At this stage, transaction for this dataset not yet
                # commited and so the tiles from this dataset, including
                # any mosaics are still in the temporary location.
                if self.POPULATE_EXPECTED:
                    continue
                mosaic_benchmark = \
                    TestTileContents.swap_dir_in_path(
                        tile_contents.mosaic_final_pathname,
                        'output', 'expected')
                mosaic_new = tile_contents.mosaic_temp_pathname
                LOGGER.info("Comparing test output with benchmark:\n"
                            "benchmark: %s\ntest output: %s",
                            mosaic_benchmark, mosaic_new)
                if dset_record.mdd['processing_level'] == 'PQA':
                    LOGGER.info(
                        "For PQA mosaic, calling load_and_check...")
                    ([data1, data2], dummy_nlayers) = \
                        TestLandsatTiler.load_and_check(
                            mosaic_benchmark,
                            mosaic_new,
                            tile_contents.band_stack.band_dict,
                            tile_contents.band_stack.band_dict)
                    LOGGER.info('Checking arrays ...')
                    # BUGFIX: was "if ~(data1 == data2).all():".  Bitwise
                    # NOT on a plain Python bool is always truthy
                    # (~True == -2, ~False == -1); the original only
                    # behaved because numpy's .all() returns numpy.bool_,
                    # for which ~ happens to negate.  "not" is correct
                    # for either type.
                    if not (data1 == data2).all():
                        self.fail("Difference in PQA mosaic "
                                  "from expected result: %s and %s"
                                  % (mosaic_benchmark, mosaic_new))
                else:
                    # Non-PQA mosaics are vrt files: diff them textually,
                    # ignoring the embedded source-filename lines.
                    diff_cmd = ["diff",
                                "-I",
                                "[Ff]ilename",
                                "%s" % mosaic_benchmark,
                                "%s" % mosaic_new
                                ]
                    result = execute(diff_cmd, shell=False)
                    assert result['stdout'] == '', \
                        "Differences between vrt files"
                    assert result['stderr'] == '', \
                        "Error in system diff command"
            else:
                LOGGER.info('... tile has no data')
                tile_contents.remove()
        self.collection.commit_transaction()
def test_reproject(self): """Test the Landsat tiling process method by comparing output to a file on disk.""" # pylint: disable=too-many-locals #For each processing_level, and dataset keep record of those #tile footprints in the benchmark set. for iacquisition in range(len(TestIngest.DATASETS_TO_INGEST['PQA'])): for processing_level in ['PQA', 'NBAR', 'ORTHO']: #Skip all but PQA and ORTHO for first dataset. #TODO program this in as a paramter of the suite #if iacquisition > 0: # continue #if processing_level in ['NBAR']: # continue dataset_path = \ TestIngest.DATASETS_TO_INGEST[processing_level]\ [iacquisition] LOGGER.info('Testing Dataset %s', dataset_path) dset = LandsatDataset(dataset_path) #return # Create a DatasetRecord instance so that we can access its # list_tile_types() method. In doing this we need to create a # collection object and entries on the acquisition and dataset # tables of the database. self.collection.begin_transaction() acquisition = \ self.collection.create_acquisition_record(dset) dset_record = acquisition.create_dataset_record(dset) # Get tile types dummy_tile_type_list = dset_record.list_tile_types() # Assume dataset has tile_type = 1 only: tile_type_id = 1 dataset_bands_dict = dset_record.get_tile_bands(tile_type_id) ls_bandstack = dset.stack_bands(dataset_bands_dict) # Form scene vrt ls_bandstack.buildvrt( self.collection.get_temp_tile_directory()) # Reproject scene data onto selected tile coverage tile_footprint_list = dset_record.get_coverage(tile_type_id) LOGGER.info('coverage=%s', str(tile_footprint_list)) for tile_footprint in tile_footprint_list: #Skip all but PQA and ORTHO for first dataset. 
#TODO program this in as a paramter of the suite #if tile_footprint not in [(117, -35), (115, -34)]: # continue tile_contents = \ self.collection.create_tile_contents(tile_type_id, tile_footprint, ls_bandstack) LOGGER.info('reprojecting for %s tile %s...', processing_level, str(tile_footprint)) tile_contents.reproject() LOGGER.info('...done') if self.POPULATE_EXPECTED: continue #Do comparision with expected results tile_benchmark = self.swap_dir_in_path( tile_contents.tile_output_path, 'output', 'expected') if tile_contents.has_data(): LOGGER.info('Tile %s has data', str(tile_footprint)) LOGGER.info("Comparing test output with benchmark:\n"\ "benchmark: %s\ntest output: %s", tile_benchmark, tile_contents.temp_tile_output_path) # Do comparision with expected directory LOGGER.info('Calling load and check ...') ([data1, data2], dummy_nlayers) = \ TestLandsatTiler.load_and_check( tile_benchmark, tile_contents.temp_tile_output_path, tile_contents.band_stack.band_dict, tile_contents.band_stack.band_dict) LOGGER.info('Checking arrays ...') if not (data1 == data2).all(): self.fail("Reprojected tile differs " \ "from %s" %tile_benchmark) LOGGER.info('...OK') else: LOGGER.info('No data in %s', str(tile_footprint)) LOGGER.info('-' * 80) self.collection.commit_transaction()