def check_buildvrt(self, idataset):
    """Test the LandsatBandstack.buildvrt() method by comparing output to a
    file on disk.

    idataset is an index into DATASETS_TO_INGEST selecting the scene whose
    band stack is built into a vrt. Fails the enclosing test (via
    self.fail) if the generated vrt differs from the benchmark copy in
    BENCHMARK_DIR, ignoring lines containing a filename.
    """
    assert idataset in range(len(DATASETS_TO_INGEST))
    # Use the module LOGGER (as the other tests in this file do) rather
    # than a Python-2-only print statement.
    LOGGER.info('Testing Dataset %s', DATASETS_TO_INGEST[idataset])
    dset = LandsatDataset(DATASETS_TO_INGEST[idataset])
    # Create a DatasetRecord instance so that we can access its
    # list_tile_types() method. In doing this we need to create a
    # collection object and entries on the acquisition and dataset
    # tables of the database.
    self.collection.begin_transaction()
    acquisition = \
        self.collection.create_acquisition_record(dset)
    dset_record = acquisition.create_dataset_record(dset)
    self.collection.commit_transaction()
    # Result is unused here; dummy_ prefix matches the convention used by
    # the sibling tests in this file to satisfy pylint.
    dummy_tile_type_list = dset_record.list_tile_types()
    # Assume dataset has tile_type = 1 only:
    tile_type_id = 1
    dataset_bands_dict = dset_record.get_tile_bands(tile_type_id)
    ls_bandstack = dset.stack_bands(dataset_bands_dict)
    temp_dir = self.collection.get_temp_tile_directory()
    ls_bandstack.buildvrt(temp_dir)
    # Get benchmark vrt for comparison. diff's -I option ignores lines
    # matching the regex, so embedded source-file paths (which differ
    # between runs) do not cause a spurious mismatch. The paths are
    # already strings, so no "%s" interpolation is needed.
    vrt_benchmark = os.path.join(self.BENCHMARK_DIR,
                                 os.path.basename(ls_bandstack.vrt_name))
    diff_cmd = ["diff",
                "-I",
                "[Ff]ilename",
                vrt_benchmark,
                ls_bandstack.vrt_name
                ]
    result = execute(diff_cmd, shell=False)
    if result['stdout'] != '':
        self.fail("Differences between vrt files:\n" + result['stdout'])
    if result['stderr'] != '':
        self.fail("Error in system diff command:\n" + result['stderr'])
def test_reproject(self): """Test the Landsat tiling process method by comparing output to a file on disk.""" # pylint: disable=too-many-locals #For each processing_level, and dataset keep record of those #tile footprints in the benchmark set. for iacquisition in range(len(TestIngest.DATASETS_TO_INGEST['PQA'])): for processing_level in ['PQA', 'NBAR', 'ORTHO']: #Skip all but PQA and ORTHO for first dataset. #TODO program this in as a paramter of the suite #if iacquisition > 0: # continue #if processing_level in ['NBAR']: # continue dataset_path = \ TestIngest.DATASETS_TO_INGEST[processing_level]\ [iacquisition] LOGGER.info('Testing Dataset %s', dataset_path) dset = LandsatDataset(dataset_path) #return # Create a DatasetRecord instance so that we can access its # list_tile_types() method. In doing this we need to create a # collection object and entries on the acquisition and dataset # tables of the database. self.collection.begin_transaction() acquisition = \ self.collection.create_acquisition_record(dset) dset_record = acquisition.create_dataset_record(dset) # Get tile types dummy_tile_type_list = dset_record.list_tile_types() # Assume dataset has tile_type = 1 only: tile_type_id = 1 dataset_bands_dict = dset_record.get_tile_bands(tile_type_id) ls_bandstack = dset.stack_bands(dataset_bands_dict) # Form scene vrt ls_bandstack.buildvrt(self.collection. get_temp_tile_directory()) # Reproject scene data onto selected tile coverage tile_footprint_list = dset_record.get_coverage(tile_type_id) LOGGER.info('coverage=%s', str(tile_footprint_list)) for tile_footprint in tile_footprint_list: #Skip all but PQA and ORTHO for first dataset. 
#TODO program this in as a paramter of the suite #if tile_footprint not in [(117, -35), (115, -34)]: # continue tile_contents = \ self.collection.create_tile_contents(tile_type_id, tile_footprint, ls_bandstack) LOGGER.info('reprojecting for %s tile %s...', processing_level, str(tile_footprint)) tile_contents.reproject() LOGGER.info('...done') if self.POPULATE_EXPECTED: continue #Do comparision with expected results tile_benchmark = self.swap_dir_in_path(tile_contents. tile_output_path, 'output', 'expected') if tile_contents.has_data(): LOGGER.info('Tile %s has data', str(tile_footprint)) LOGGER.info("Comparing test output with benchmark:\n"\ "benchmark: %s\ntest output: %s", tile_benchmark, tile_contents.temp_tile_output_path) # Do comparision with expected directory LOGGER.info('Calling load and check ...') ([data1, data2], dummy_nlayers) = \ TestLandsatTiler.load_and_check( tile_benchmark, tile_contents.temp_tile_output_path, tile_contents.band_stack.band_dict, tile_contents.band_stack.band_dict) LOGGER.info('Checking arrays ...') if not (data1 == data2).all(): self.fail("Reprojected tile differs " \ "from %s" %tile_benchmark) LOGGER.info('...OK') else: LOGGER.info('No data in %s', str(tile_footprint)) LOGGER.info('-' * 80) self.collection.commit_transaction()
def test_reproject(self):
    """Test the Landsat tiling process method by comparing output to a
    file on disk.

    For each ingestible dataset and processing level (restricted below to
    the first acquisition, PQA and ORTHO only), builds a scene vrt,
    reprojects it onto selected tile footprints, and asserts that each
    tile's presence and pixel data agree with the benchmark tiles
    produced by the old ingester in BENCHMARK_DIR.
    """
    # pylint: disable=too-many-locals
    #For each processing_level, and dataset keep record of those
    #tile footprints in the benchmark set.
    bench_footprints = {}
    for iacquisition in range(len(TestIngest.DATASETS_TO_INGEST['PQA'])):
        for processing_level in ['PQA', 'NBAR', 'ORTHO']:
            #Skip all but PQA and ORTHO for first dataset.
            #TODO program this in as a parameter of the suite
            if iacquisition > 0:
                continue
            if processing_level in ['NBAR']:
                continue
            dataset_path = \
                TestIngest.DATASETS_TO_INGEST[processing_level]\
                [iacquisition]
            LOGGER.info('Testing Dataset %s', dataset_path)
            dset = LandsatDataset(dataset_path)
            #return
            # Create a DatasetRecord instance so that we can access its
            # list_tile_types() method. In doing this we need to create a
            # collection object and entries on the acquisition and dataset
            # tables of the database.
            self.collection.begin_transaction()
            acquisition = \
                self.collection.create_acquisition_record(dset)
            dset_record = acquisition.create_dataset_record(dset)
            self.collection.commit_transaction()
            # List the benchmark footprints associated with this dataset
            ftprints = \
                self.get_benchmark_footprints(dset_record.mdd,
                                              TestIngest.BENCHMARK_DIR)
            # NOTE(review): the setdefault on the inner dict is redundant
            # since the next line assigns the same key unconditionally.
            bench_footprints.setdefault(processing_level, {})
            bench_footprints[processing_level].setdefault(iacquisition,
                                                          {})
            bench_footprints[processing_level][iacquisition] = ftprints
            LOGGER.info('bench_footprints=%s', str(ftprints))
            # Get tile types (result unused; dummy_ satisfies pylint)
            dummy_tile_type_list = dset_record.list_tile_types()
            # Assume dataset has tile_type = 1 only:
            tile_type_id = 1
            dataset_bands_dict = dset_record.get_tile_bands(tile_type_id)
            ls_bandstack = dset.stack_bands(dataset_bands_dict)
            temp_dir = os.path.join(self.ingester.datacube.tile_root,
                                    'ingest_temp')
            # Form scene vrt
            ls_bandstack.buildvrt(temp_dir)
            # Reproject scene data onto selected tile coverage
            tile_footprint_list = dset_record.get_coverage(tile_type_id)
            LOGGER.info('coverage=%s', str(tile_footprint_list))
            for tile_footprint in tile_footprint_list:
                #Skip all but PQA and ORTHO for first dataset.
                #TODO program this in as a parameter of the suite
                if tile_footprint not in [(117, -35), (115, -34)]:
                    continue
                tile_contents = \
                    self.collection.create_tile_contents(tile_type_id,
                                                         tile_footprint,
                                                         ls_bandstack)
                LOGGER.info('reprojecting for %s tile %s',
                            processing_level, str(tile_footprint))
                tile_contents.reproject()
                # Because date-time of PQA datasets is coming directly from
                # the PQA dataset, rather NBAR, match on ymd string of
                # datetime, rather than the micorseconds version in the
                # NBAR data.
                tile_benchmark = \
                    self.get_benchmark_tile(dset_record.mdd,
                                            TestIngest.BENCHMARK_DIR,
                                            tile_footprint)
                LOGGER.info('tile_benchmark is %s', tile_benchmark)
                if tile_contents.has_data():
                    LOGGER.info('Tile %s has data', str(tile_footprint))
                    # The tile might have data but, if PQA does not, then
                    # the benchmark tile will not exist
                    if tile_footprint not in bench_footprints \
                            [processing_level][iacquisition]:
                        assert tile_footprint not in \
                            bench_footprints['PQA'][iacquisition], \
                            "Old ingester found PQA tile and should have "\
                            "found cooresponding tile for %s"\
                            %processing_level
                        LOGGER.info('%s tile %s has data in new ingester',
                                    processing_level, str(tile_footprint))
                        continue
                    # Tile exists in old ingester and new ingester
                    LOGGER.info('Calling load and check ...')
                    ([data1, data2], dummy_nlayers) = \
                        TestLandsatTiler.load_and_check(
                            tile_benchmark,
                            tile_contents.temp_tile_output_path,
                            tile_contents.band_stack.band_dict,
                            tile_contents.band_stack.band_dict)
                    LOGGER.info('Checking arrays ...')
                    # Any per-pixel difference from the benchmark tile
                    # fails the test.
                    assert (data1 == data2).all(), \
                        "Reprojected tile differs " \
                        "from %s" %tile_benchmark
                    LOGGER.info('...OK')
                else:
                    LOGGER.info('No data in %s', str(tile_footprint))
                    # An empty tile must also be absent from the
                    # benchmark footprint set for this level/acquisition.
                    assert tile_footprint not in \
                        bench_footprints[processing_level][iacquisition], \
                        "%s tile %s does not have data " \
                        %(processing_level, str(tile_footprint))
                LOGGER.info('-' * 80)