def analyze_protected_lands(result, area_of_interest=None):
    """
    Convert a raw protected-lands geoprocessing result into a survey dict.

    :param result: dict of raw geoprocessing output; may contain an 'error' key
    :param area_of_interest: optional AoI geometry used to derive pixel size;
        when omitted, areas are reported in raw pixel counts
    :returns: {'survey': ...} with one category per PROTECTED_LANDS class
    :raises Exception: if the geoprocessing result contains an error
    """
    if 'error' in result:
        raise Exception('[analyze_protected_lands] {}'.format(result['error']))

    # Without an AoI we cannot derive ground resolution, so use 1 (pixels)
    pixel_width = aoi_resolution(area_of_interest) if area_of_interest else 1

    result = parse(result)
    histogram = {}
    total_count = 0
    categories = []

    # Aggregate counts per class and compute the overall total.
    # .iteritems() was Python 2 only and would raise AttributeError here,
    # since the file already uses Python 3 features (f-strings).
    for key, count in result.items():
        total_count += count
        histogram[key] = count + histogram.get(key, 0)

    for class_id, (code, name) in layer_classmaps.PROTECTED_LANDS.items():
        categories.append({
            'area': histogram.get(class_id, 0) * pixel_width * pixel_width,
            'class_id': class_id,
            'code': code,
            'coverage': float(histogram.get(class_id, 0)) / total_count,
            'type': name,
        })

    return {
        'survey': {
            'name': 'protected_lands',
            'displayName': 'Protected Lands',
            'categories': categories,
        }
    }
def analyze_soil(result, area_of_interest=None):
    """
    Convert a raw soil geoprocessing result into a survey dict.

    :param result: dict of {'List(n)': count} raw geoprocessing output;
        may contain an 'error' key
    :param area_of_interest: optional AoI geometry used to derive pixel size;
        when omitted, areas are reported in raw pixel counts
    :returns: {'survey': ...} with one category per SOIL class
    :raises Exception: if the geoprocessing result contains an error
    """
    if 'error' in result:
        raise Exception('[analyze_soil] {}'.format(result['error']))

    # Without an AoI we cannot derive ground resolution, so use 1 (pixels)
    pixel_width = aoi_resolution(area_of_interest) if area_of_interest else 1

    histogram = {}
    total_count = 0
    categories = []

    # Convert results to histogram, calculate total.
    # .iteritems() was Python 2 only and would raise AttributeError here,
    # since the file already uses Python 3 features (f-strings).
    for key, count in result.items():
        total_count += count
        s = make_tuple(key[4:])  # Change {"List(1)":5} to {1:5}
        s = s if s != settings.NODATA else 3  # Map NODATA to 3
        histogram[s] = count + histogram.get(s, 0)

    for soil, (code, name) in layer_classmaps.SOIL.items():
        categories.append({
            'area': histogram.get(soil, 0) * pixel_width * pixel_width,
            'code': code,
            'coverage': float(histogram.get(soil, 0)) / total_count,
            'type': name,
        })

    return {
        'survey': {
            'name': 'soil',
            'displayName': 'Soil',
            'categories': categories,
        }
    }
def analyze_nlcd(result, area_of_interest=None):
    """
    Convert a raw NLCD geoprocessing result into a survey dict.

    NOTE(review): a second `analyze_nlcd` is defined later in this file and
    shadows this one at import time — confirm which definition callers need.

    :param result: dict of {'List(n)': count} raw geoprocessing output;
        may contain an 'error' key
    :param area_of_interest: optional AoI geometry used to derive pixel size;
        when omitted, areas are reported in raw pixel counts
    :returns: {'survey': ...} with one category per NLCD_MAPPING class
    :raises Exception: if the geoprocessing result contains an error
    """
    if 'error' in result:
        raise Exception('[analyze_nlcd] {}'.format(result['error']))

    # Without an AoI we cannot derive ground resolution, so use 1 (pixels)
    pixel_width = aoi_resolution(area_of_interest) if area_of_interest else 1

    histogram = {}
    total_count = 0
    categories = []

    # Convert results to histogram, calculate total.
    # .iteritems() was Python 2 only and would raise AttributeError here,
    # since the file already uses Python 3 features (f-strings).
    for key, count in result.items():
        total_count += count
        histogram[make_tuple(key[4:])] = count  # Change {"List(1)":5} to {1:5}

    for nlcd, (code, name) in settings.NLCD_MAPPING.items():
        categories.append({
            'area': histogram.get(nlcd, 0) * pixel_width * pixel_width,
            'code': code,
            'coverage': float(histogram.get(nlcd, 0)) / total_count,
            'nlcd': nlcd,
            'type': name,
        })

    return {
        'survey': {
            'name': 'land',
            'displayName': 'Land',
            'categories': categories,
        }
    }
def collect_nlcd(histogram, geojson=None):
    """
    Convert raw NLCD geoprocessing result to area dictionary.

    :param histogram: dict of {nlcd_class: pixel count}
    :param geojson: optional AoI geometry used to derive pixel size;
        when omitted, areas are reported in raw pixel counts
    :returns: {'categories': [...]} with one entry per NLCD class
    """
    # Without an AoI we cannot derive ground resolution, so use 1 (pixels)
    pixel_width = aoi_resolution(geojson) if geojson else 1

    # .iteritems() was Python 2 only and would raise AttributeError here,
    # since the file already uses Python 3 features (f-strings).
    categories = [{
        'area': histogram.get(nlcd, 0) * pixel_width * pixel_width,
        'code': code,
        'nlcd': nlcd,
        'type': name,
    } for nlcd, (code, name) in layer_classmaps.NLCD.items()]

    return {'categories': categories}
def analyze_nlcd(result, area_of_interest=None, nlcd_year='2011_2011'):
    """
    Convert a raw NLCD + Active River Area (ARA) geoprocessing result into
    a survey dict for the given NLCD year.

    :param result: raw geoprocessing output; after parse(), keyed by
        (nlcd_class, ara_flag) tuples; may contain an 'error' key
    :param area_of_interest: optional AoI geometry used to derive pixel
        size; when omitted, areas are reported in raw pixel counts
    :param nlcd_year: dataset/year identifier, e.g. '2011_2011'
    :returns: {'survey': ...} with one category per NLCD class
    :raises Exception: if the geoprocessing result contains an error
    """
    if 'error' in result:
        raise Exception(f'[analyze_nlcd_{nlcd_year}] {result["error"]}')

    # Without an AoI we cannot derive ground resolution, so use 1 (pixels)
    cell_width = aoi_resolution(area_of_interest) if area_of_interest else 1

    result = parse(result)

    def to_area(counts, key, default=0):
        # Pixel count -> area in the AoI's units
        return counts.get(key, default) * cell_width * cell_width

    # Collapse (nlcd, ara) keys into a per-class histogram, tracking how
    # many pixels fall inside the Active River Area along the way.
    histogram = {}
    ara_total = 0
    pixel_total = 0
    for (nlcd, ara), count in result.items():
        pixel_total += count
        if ara == 1:
            ara_total += count
        histogram[nlcd] = histogram.get(nlcd, 0) + count

    # Only report ARA breakdowns when any ARA pixels were present
    has_ara = ara_total > 0

    categories = [{
        'area': to_area(histogram, nlcd),
        'active_river_area': to_area(result, (nlcd, 1)) if has_ara else None,
        'code': code,
        'coverage': float(histogram.get(nlcd, 0)) / pixel_total,
        'nlcd': nlcd,
        'type': name,
    } for nlcd, (code, name) in layer_classmaps.NLCD.items()]

    return {
        'survey': {
            'name': f'land_{nlcd_year}',
            'displayName': f'Land Use/Cover {nlcd_year[5:]} (NLCD{nlcd_year[2:4]})',
            'categories': categories,
        }
    }
def run_tr55(censuses, aoi, model_input, cached_aoi_census=None):
    """
    A Celery wrapper around our TR55 implementation.
    censuses is either output from previous tasks in the job
    chain or are provided directly (in the case where the AoI
    census and modification censuses are cached).

    If cached_aoi_census is provided, censuses will only contain
    the modification_censuses, which were generated in the
    previous task. If cached_aoi_census isn't provided, the AoI
    census will be the first census in censuses, and everything
    else is a modification census.

    :returns: dict with hashes, censuses, and formatted runoff/quality
        results (empty on bad census input)
    :raises Exception: if no precipitation value is defined, or if
        modifications are present without matching censuses
    """
    # Get precipitation and cell resolution
    precip = precipitation(model_input)

    # Normalize AOI to handle single-ring multipolygon
    # inputs sent from RWD as well as shapes sent from the front-end
    aoi = to_one_ring_multipolygon(aoi)

    width = aoi_resolution(aoi)
    resolution = width * width

    if precip is None:
        raise Exception('No precipitation value defined')

    # Modification/BMP fragments and their censuses
    # The original modifications are not POSTed. We only
    # send the altered modifications/modification pieces.
    # Default to [] so a missing or null 'modification_pieces' key
    # doesn't raise a TypeError when iterated below.
    modification_pieces = model_input.get('modification_pieces') or []

    modification_censuses = (censuses[1:] if cached_aoi_census is None
                             else censuses[0:])

    # Calculate total areas for each type of modification
    area_sums = {}
    for piece in modification_pieces:
        kinds = piece['value']
        area = piece['area']

        # Each piece is either a BMP or a reclassification
        kind = kinds['bmp'] if 'bmp' in kinds else kinds['reclass']
        area_sums[kind] = area_sums.get(kind, 0) + area

    # The area of interest census
    aoi_census = cached_aoi_census if cached_aoi_census else censuses[0]

    if modification_pieces and not modification_censuses:
        raise Exception('Missing censuses for modifications')
    elif modification_censuses and not modification_pieces:
        # Censuses with no matching pieces are stale; discard them
        modification_censuses = []

    modifications = apply_modifications_to_census(modification_pieces,
                                                  modification_censuses)
    aoi_census['modifications'] = modifications
    aoi_census['BMPs'] = area_sums

    # Run the model under both current conditions and Pre-Columbian
    # conditions.
    try:
        model_output = simulate_day(aoi_census, precip,
                                    cell_res=resolution)

        precolumbian_output = simulate_day(aoi_census, precip,
                                           cell_res=resolution,
                                           precolumbian=True)

        model_output['pc_unmodified'] = precolumbian_output['unmodified']
        model_output['pc_modified'] = precolumbian_output['modified']

        runoff = format_runoff(model_output)
        quality = format_quality(model_output)

    except KeyError as e:
        # Malformed census data: return empty results and log instead of
        # failing the whole task
        runoff = {}
        quality = []
        logger.error('Bad input data to TR55: %s' % e)

    # Modifications were added to aoi_census for TR-55, but we do
    # not want to persist it since we have it stored separately
    # and it may cause problems when sharing the aoi_census
    # for other model runs and scenarios.
    aoi_census.pop('modifications', None)

    # Return all results
    return {
        'inputmod_hash': model_input['inputmod_hash'],
        'modification_hash': model_input['modification_hash'],
        'aoi_census': aoi_census,
        'modification_censuses': modification_censuses,
        'runoff': runoff,
        'quality': quality
    }