def bridge_damage_analysis_bulk_input(self, bridges, hazard_type, hazard_dataset_id):
    """Run analysis for multiple bridges.

    Args:
        bridges (list): Multiple bridges from input inventory set.
        hazard_type (str): Hazard type, either earthquake, tornado, tsunami, or hurricane.
        hazard_dataset_id (str): An id of the hazard exposure.

    Returns:
        list: A list of ordered dictionaries with bridge damage values and other data/metadata.

    """
    # Get fragility key
    fragility_key = self.get_parameter("fragility_key")
    if fragility_key is None:
        fragility_key = BridgeUtil.DEFAULT_TSUNAMI_HMAX_FRAGILITY_KEY if hazard_type == 'tsunami' else \
            BridgeUtil.DEFAULT_FRAGILITY_KEY
        self.set_parameter("fragility_key", fragility_key)

    # Hazard uncertainty is only supported for earthquake
    use_hazard_uncertainty = False
    if hazard_type == "earthquake" and self.get_parameter("use_hazard_uncertainty") is not None:
        use_hazard_uncertainty = self.get_parameter("use_hazard_uncertainty")

    # Liquefaction is only supported for earthquake
    use_liquefaction = False
    if hazard_type == "earthquake" and self.get_parameter("use_liquefaction") is not None:
        use_liquefaction = self.get_parameter("use_liquefaction")

    fragility_set = self.fragilitysvc.match_inventory(
        self.get_input_dataset("dfr3_mapping_set"), bridges, fragility_key)

    bridge_results = []

    # Convert the list of bridges into a dictionary keyed by id for ease of reference
    list_bridges = bridges
    bridges = dict()
    for br in list_bridges:
        bridges[br["id"]] = br
    list_bridges = None  # Clear as it's not needed anymore

    processed_bridges = []
    grouped_bridges = AnalysisUtil.group_by_demand_type(bridges, fragility_set)

    for demand, grouped_brs in grouped_bridges.items():
        input_demand_type = demand[0]
        input_demand_units = demand[1]

        # For every group of unique demand and demand unit, call the end-point once
        br_chunks = list(AnalysisUtil.chunks(grouped_brs, 50))  # TODO: Move chunk size to globals?
        for brs in br_chunks:
            points = []
            for br_id in brs:
                location = GeoUtil.get_location(bridges[br_id])
                points.append(str(location.y) + "," + str(location.x))

            if hazard_type == "earthquake":
                hazard_vals = self.hazardsvc.get_earthquake_hazard_values(
                    hazard_dataset_id, input_demand_type, input_demand_units, points)
            elif hazard_type == "tsunami":
                hazard_vals = self.hazardsvc.get_tsunami_hazard_values(
                    hazard_dataset_id, input_demand_type, input_demand_units, points)
            elif hazard_type == "tornado":
                hazard_vals = self.hazardsvc.get_tornado_hazard_values(
                    hazard_dataset_id, input_demand_units, points)
            elif hazard_type == "hurricane":
                hazard_vals = self.hazardsvc.get_hurricanewf_values(
                    hazard_dataset_id, input_demand_type, input_demand_units, points)
            else:
                raise ValueError("We only support earthquake, tornado, tsunami, and hurricane at the moment!")

            # Parse the batch hazard value results and map them back to the bridge and fragility.
            # This is a potential pitfall: we rely on the order of the returned results
            # matching the order of the points we sent.
            i = 0
            for br_id in brs:
                bridge_result = collections.OrderedDict()
                bridge = bridges[br_id]
                selected_fragility_set = fragility_set[br_id]

                hazard_val = hazard_vals[i]['hazardValue']
                hazard_std_dev = 0.0
                if use_hazard_uncertainty:
                    # TODO: get this from the hazard API once implemented
                    raise ValueError("Uncertainty Not Implemented!")

                adjusted_fragility_set = copy.deepcopy(selected_fragility_set)
                if use_liquefaction and 'liq' in bridge['properties']:
                    for fragility in adjusted_fragility_set.fragility_curves:
                        fragility.adjust_fragility_for_liquefaction(bridge['properties']['liq'])

                dmg_probability = adjusted_fragility_set.calculate_limit_state(
                    hazard_val, std_dev=hazard_std_dev)
                retrofit_cost = BridgeUtil.get_retrofit_cost(fragility_key)
                retrofit_type = BridgeUtil.get_retrofit_type(fragility_key)
                dmg_intervals = AnalysisUtil.calculate_damage_interval(dmg_probability)

                bridge_result['guid'] = bridge['properties']['guid']
                bridge_result.update(dmg_probability)
                bridge_result.update(dmg_intervals)
                bridge_result["retrofit"] = retrofit_type
                bridge_result["retrocost"] = retrofit_cost
                bridge_result["demandtype"] = input_demand_type
                bridge_result["demandunits"] = input_demand_units
                bridge_result["hazardtype"] = hazard_type
                bridge_result["hazardval"] = hazard_val

                # Add spans to the bridge output so mean damage calculation can use that info
                if "spans" in bridge["properties"] and bridge["properties"]["spans"] is not None \
                        and bridge["properties"]["spans"].isdigit():
                    bridge_result['spans'] = int(bridge["properties"]["spans"])
                elif "SPANS" in bridge["properties"] and bridge["properties"]["SPANS"] is not None \
                        and bridge["properties"]["SPANS"].isdigit():
                    bridge_result['spans'] = int(bridge["properties"]["SPANS"])
                else:
                    bridge_result['spans'] = 1

                bridge_results.append(bridge_result)
                processed_bridges.append(br_id)  # Track bridges that received a result
                i = i + 1

    # Bridges that could not be mapped to a fragility get zeroed-out results
    unmapped_dmg_probability = {"ls-slight": 0.0, "ls-moderat": 0.0,
                                "ls-extensi": 0.0, "ls-complet": 0.0}
    unmapped_dmg_intervals = AnalysisUtil.calculate_damage_interval(unmapped_dmg_probability)
    for br_id, br in bridges.items():
        if br_id not in processed_bridges:
            unmapped_br_result = collections.OrderedDict()
            unmapped_br_result['guid'] = br['properties']['guid']
            unmapped_br_result.update(unmapped_dmg_probability)
            unmapped_br_result.update(unmapped_dmg_intervals)
            unmapped_br_result["retrofit"] = "Non-Retrofit"
            unmapped_br_result["retrocost"] = 0.0
            unmapped_br_result["demandtype"] = "None"
            unmapped_br_result['demandunits'] = "None"
            unmapped_br_result["hazardtype"] = "None"
            unmapped_br_result['hazardval'] = 0.0
            bridge_results.append(unmapped_br_result)

    return bridge_results
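
# --- Illustrative sketch (hypothetical helper, not part of the analyses in this file) ---
# The bulk methods here batch their hazard-service calls: inventory items are grouped
# by (demand type, demand units) and then split into chunks of 50 points per request
# via AnalysisUtil.chunks. A chunker equivalent in spirit would be:
def _example_chunks(items, size):
    """Yield successive `size`-length chunks of a list (illustrative only)."""
    for start in range(0, len(items), size):
        yield items[start:start + size]

# Usage: list(_example_chunks(['br-1', 'br-2', 'br-3'], 2)) == [['br-1', 'br-2'], ['br-3']]
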
def epf_damage_analysis_bulk_input(self, epfs, hazard_type, hazard_dataset_id,
                                   use_hazard_uncertainty, use_liquefaction,
                                   liq_geology_dataset_id):
    """Run analysis for multiple epfs.

    Args:
        epfs (list): Multiple epfs from input inventory set.
        hazard_type (str): A type of hazard exposure (earthquake, tsunami, tornado, or hurricane).
        hazard_dataset_id (str): An id of the hazard exposure.
        use_hazard_uncertainty (bool): True to use uncertainty when computing damage, False otherwise.
        use_liquefaction (bool): True to use liquefaction information to modify the damage, False otherwise.
        liq_geology_dataset_id (str): A dataset id of the geology dataset for liquefaction.

    Returns:
        list: A list of ordered dictionaries with epf damage values and other data/metadata.

    """
    fragility_key = self.get_parameter("fragility_key")
    fragility_set = self.fragilitysvc.match_inventory(
        self.get_input_dataset("dfr3_mapping_set"), epfs, fragility_key)

    epf_results = []

    # Convert the list of epfs into a dictionary keyed by id for ease of reference
    list_epfs = epfs
    epfs = dict()
    for epf in list_epfs:
        epfs[epf["id"]] = epf
    del list_epfs  # Clear as it's not needed anymore

    processed_epf = []
    grouped_epfs = AnalysisUtil.group_by_demand_type(epfs, fragility_set)

    for demand, grouped_epf_items in grouped_epfs.items():
        input_demand_type = demand[0]
        input_demand_units = demand[1]

        # For every group of unique demand and demand unit, call the end-point once
        epf_chunks = list(AnalysisUtil.chunks(grouped_epf_items, 50))
        for epf_chunk in epf_chunks:
            points = []
            for epf_id in epf_chunk:
                location = GeoUtil.get_location(epfs[epf_id])
                points.append(str(location.y) + "," + str(location.x))

            if hazard_type == 'earthquake':
                hazard_vals = self.hazardsvc.get_earthquake_hazard_values(
                    hazard_dataset_id, input_demand_type, input_demand_units, points)
            elif hazard_type == 'tornado':
                hazard_vals = self.hazardsvc.get_tornado_hazard_values(
                    hazard_dataset_id, input_demand_units, points)
            elif hazard_type == 'hurricane':
                # TODO: implement hurricane
                raise ValueError('Hurricane hazard has not yet been implemented!')
            elif hazard_type == 'tsunami':
                hazard_vals = self.hazardsvc.get_tsunami_hazard_values(
                    hazard_dataset_id, input_demand_type, input_demand_units, points)
            else:
                raise ValueError("Missing hazard type.")

            # Parse the batch hazard value results and map them back to the epf and fragility.
            # This is a potential pitfall: we rely on the order of the returned results
            # matching the order of the points we sent.
            i = 0
            for epf_id in epf_chunk:
                epf_result = collections.OrderedDict()
                epf = epfs[epf_id]
                hazard_val = hazard_vals[i]['hazardValue']

                # Sometimes the geotiffs give large negative values for out of bounds instead of 0
                if hazard_val <= 0.0:
                    hazard_val = 0.0

                std_dev = 0.0
                if use_hazard_uncertainty:
                    raise ValueError("Uncertainty Not Implemented!")

                selected_fragility_set = fragility_set[epf_id]
                limit_states = selected_fragility_set.calculate_limit_state(hazard_val, std_dev=std_dev)
                dmg_interval = AnalysisUtil.calculate_damage_interval(limit_states)

                epf_result['guid'] = epf['properties']['guid']
                epf_result.update(limit_states)
                epf_result.update(dmg_interval)
                epf_result['demandtype'] = input_demand_type
                epf_result['demandunits'] = input_demand_units
                epf_result['hazardtype'] = hazard_type
                epf_result['hazardval'] = hazard_val

                epf_results.append(epf_result)
                processed_epf.append(epf_id)
                i = i + 1

    # When there is liquefaction, the limit states need to be modified
    if hazard_type == 'earthquake' and use_liquefaction and liq_geology_dataset_id is not None:
        liq_fragility_key = self.get_parameter("liquefaction_fragility_key")
        if liq_fragility_key is None:
            liq_fragility_key = self.DEFAULT_LIQ_FRAGILITY_KEY

        liq_fragility_set = self.fragilitysvc.match_inventory(
            self.get_input_dataset("dfr3_mapping_set"), epfs, liq_fragility_key)
        grouped_liq_epfs = AnalysisUtil.group_by_demand_type(epfs, liq_fragility_set)

        for liq_demand, grouped_liq_epf_items in grouped_liq_epfs.items():
            liq_input_demand_type = liq_demand[0]
            liq_input_demand_units = liq_demand[1]

            # For every group of unique demand and demand unit, call the end-point once
            liq_epf_chunks = list(AnalysisUtil.chunks(grouped_liq_epf_items, 50))
            for liq_epf_chunk in liq_epf_chunks:
                points = []
                for liq_epf_id in liq_epf_chunk:
                    location = GeoUtil.get_location(epfs[liq_epf_id])
                    points.append(str(location.y) + "," + str(location.x))

                liquefaction_vals = self.hazardsvc.get_liquefaction_values(
                    hazard_dataset_id, liq_geology_dataset_id,
                    liq_input_demand_units, points)

                # Parse the batch hazard value results and map them back to the epf and fragility.
                # This is a potential pitfall: we rely on the order of the returned results
                # matching the order of the points we sent.
                i = 0
                for liq_epf_id in liq_epf_chunk:
                    liq_hazard_val = liquefaction_vals[i][liq_input_demand_type]

                    std_dev = 0.0
                    if use_hazard_uncertainty:
                        raise ValueError("Uncertainty Not Implemented!")

                    liquefaction_prob = liquefaction_vals[i]['liqProbability']
                    selected_liq_fragility = liq_fragility_set[liq_epf_id]
                    pgd_limit_states = selected_liq_fragility.calculate_limit_state(
                        liq_hazard_val, std_dev=std_dev)

                    # Match by guid, add the liqhaztype, liqhazval, and liqprobability fields,
                    # and rewrite the limit states and damage intervals
                    for epf_result in epf_results:
                        if epf_result['guid'] == epfs[liq_epf_id]['properties']['guid']:
                            limit_states = {
                                "ls-slight": epf_result['ls-slight'],
                                "ls-moderat": epf_result['ls-moderat'],
                                "ls-extensi": epf_result['ls-extensi'],
                                "ls-complet": epf_result['ls-complet']
                            }
                            liq_limit_states = AnalysisUtil.adjust_limit_states_for_pgd(
                                limit_states, pgd_limit_states)
                            liq_dmg_interval = AnalysisUtil.calculate_damage_interval(liq_limit_states)
                            epf_result.update(liq_limit_states)
                            epf_result.update(liq_dmg_interval)
                            epf_result['liqhaztype'] = liq_input_demand_type
                            epf_result['liqhazval'] = liq_hazard_val
                            epf_result['liqprobability'] = liquefaction_prob
                    i = i + 1

    # Epfs that could not be mapped to a fragility get zeroed-out results
    unmapped_limit_states = {"ls-slight": 0.0, "ls-moderat": 0.0,
                             "ls-extensi": 0.0, "ls-complet": 0.0}
    unmapped_dmg_intervals = AnalysisUtil.calculate_damage_interval(unmapped_limit_states)
    for epf_id, epf in epfs.items():
        if epf_id not in processed_epf:
            unmapped_epf_result = collections.OrderedDict()
            unmapped_epf_result['guid'] = epf['properties']['guid']
            unmapped_epf_result.update(unmapped_limit_states)
            unmapped_epf_result.update(unmapped_dmg_intervals)
            unmapped_epf_result["demandtype"] = "None"
            unmapped_epf_result['demandunits'] = "None"
            unmapped_epf_result["hazardtype"] = "None"
            unmapped_epf_result['hazardval'] = 0.0
            unmapped_epf_result['liqhaztype'] = "NA"
            unmapped_epf_result['liqhazval'] = "NA"
            unmapped_epf_result['liqprobability'] = "NA"
            epf_results.append(unmapped_epf_result)

    return epf_results
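
# --- Illustrative sketch (hypothetical helper, not part of the analyses in this file) ---
# The loops above walk the returned hazard values with a manual counter `i`, which
# silently assumes the service preserves point order. Pairing ids and values with
# zip makes that positional assumption explicit and avoids off-by-one mistakes:
def _example_pair_hazard_values(item_ids, hazard_vals):
    """Map inventory ids to their returned hazard values positionally (illustrative only)."""
    return {item_id: val['hazardValue'] for item_id, val in zip(item_ids, hazard_vals)}

# Usage (hypothetical values):
# _example_pair_hazard_values(['epf-1', 'epf-2'], [{'hazardValue': 0.3}, {'hazardValue': 0.5}])
# == {'epf-1': 0.3, 'epf-2': 0.5}
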
def building_damage_analysis_bulk_input(self, buildings, hazard_type, hazard_dataset_id):
    """Run analysis for multiple buildings.

    Args:
        buildings (list): Multiple buildings from input inventory set.
        hazard_type (str): Hazard type, either earthquake, tornado, or tsunami.
        hazard_dataset_id (str): An id of the hazard exposure.

    Returns:
        list: A list of ordered dictionaries with building damage values and other data/metadata.

    """
    fragility_key = self.get_parameter("fragility_key")
    fragility_sets = self.fragilitysvc.match_inventory(
        self.get_input_dataset("dfr3_mapping_set"), buildings, fragility_key)

    bldg_results = []

    # Convert the list of buildings into a dictionary keyed by id for ease of reference
    list_buildings = buildings
    buildings = dict()
    for b in list_buildings:
        buildings[b["id"]] = b
    list_buildings = None  # Clear as it's not needed anymore

    grouped_buildings = AnalysisUtil.group_by_demand_type(
        buildings, fragility_sets, hazard_type, is_building=True)

    for demand, grouped_bldgs in grouped_buildings.items():
        input_demand_type = demand[0]
        input_demand_units = demand[1]

        # For every group of unique demand and demand unit, call the end-point once
        bldg_chunks = list(AnalysisUtil.chunks(grouped_bldgs, 50))  # TODO: Move chunk size to globals?
        for bldgs in bldg_chunks:
            points = []
            for bldg_id in bldgs:
                location = GeoUtil.get_location(buildings[bldg_id])
                points.append(str(location.y) + "," + str(location.x))

            if hazard_type == 'earthquake':
                hazard_vals = self.hazardsvc.get_earthquake_hazard_values(
                    hazard_dataset_id, input_demand_type, input_demand_units, points)
            elif hazard_type == 'tornado':
                hazard_vals = self.hazardsvc.get_tornado_hazard_values(
                    hazard_dataset_id, input_demand_units, points)
            elif hazard_type == 'tsunami':
                hazard_vals = self.hazardsvc.get_tsunami_hazard_values(
                    hazard_dataset_id, input_demand_type, input_demand_units, points)
            elif hazard_type == 'hurricane':
                # TODO: implement hurricane; raising here keeps hazard_vals from
                # being read while undefined further down
                raise ValueError('Hurricane hazard has not yet been implemented!')
            else:
                raise ValueError("Missing hazard type.")

            # Parse the batch hazard value results and map them back to the building and fragility.
            # This is a potential pitfall: we rely on the order of the returned results
            # matching the order of the points we sent.
            i = 0
            for bldg_id in bldgs:
                bldg_result = collections.OrderedDict()
                building = buildings[bldg_id]
                hazard_val = hazard_vals[i]['hazardValue']
                output_demand_type = hazard_vals[i]['demand']
                if hazard_type == 'earthquake':
                    period = float(hazard_vals[i]['period'])
                    if period > 0:
                        output_demand_type = str(hazard_vals[i]['period']) + " " + output_demand_type

                num_stories = building['properties']['no_stories']
                selected_fragility_set = fragility_sets[bldg_id]
                building_period = selected_fragility_set.fragility_curves[0].get_building_period(num_stories)
                dmg_probability = selected_fragility_set.calculate_limit_state(hazard_val, building_period)
                dmg_interval = AnalysisUtil.calculate_damage_interval(dmg_probability)

                bldg_result['guid'] = building['properties']['guid']
                bldg_result.update(dmg_probability)
                bldg_result.update(dmg_interval)
                bldg_result['demandtype'] = output_demand_type
                bldg_result['demandunits'] = input_demand_units
                bldg_result['hazardval'] = hazard_val

                bldg_results.append(bldg_result)
                del buildings[bldg_id]  # Only unmapped buildings remain afterwards
                i = i + 1

    # Buildings that could not be mapped to a fragility get zeroed-out results
    unmapped_hazard_val = 0.0
    unmapped_output_demand_type = "None"
    unmapped_output_demand_unit = "None"
    for unmapped_bldg_id, unmapped_bldg in buildings.items():
        unmapped_bldg_result = collections.OrderedDict()
        unmapped_bldg_result['guid'] = unmapped_bldg['properties']['guid']
        unmapped_bldg_result['demandtype'] = unmapped_output_demand_type
        unmapped_bldg_result['demandunits'] = unmapped_output_demand_unit
        unmapped_bldg_result['hazardval'] = unmapped_hazard_val
        bldg_results.append(unmapped_bldg_result)

    return bldg_results
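
# --- Illustrative sketch (hypothetical helper, not part of the analyses in this file) ---
# AnalysisUtil.group_by_demand_type, as used throughout, buckets inventory ids by the
# (demand type, demand units) pair of their matched fragility so that each bucket
# needs only one hazard-service endpoint call. A minimal equivalent, assuming each
# fragility set exposes `demand_type` and `demand_units` attributes:
def _example_group_by_demand(inventory_ids, fragility_sets):
    """Group inventory ids by their fragility's demand type and units (illustrative only)."""
    groups = {}
    for item_id in inventory_ids:
        if item_id in fragility_sets:  # unmapped items are handled separately
            fragility = fragility_sets[item_id]
            key = (fragility.demand_type, fragility.demand_units)
            groups.setdefault(key, []).append(item_id)
    return groups
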
def cumulative_building_damage(self, eq_building_damage, tsunami_building_damage):
    """Combine earthquake and tsunami damage results for a single building.

    Args:
        eq_building_damage (obj): A JSON description of an earthquake building damage result.
        tsunami_building_damage (pd.DataFrame): Set of all tsunami building damage results.

    Returns:
        OrderedDict: A dictionary with building damage values and other data/metadata.

    """
    guid = eq_building_damage['guid']
    tsunami_building = tsunami_building_damage.loc[tsunami_building_damage['guid'] == guid]

    for idy, tsunami_building in tsunami_building.iterrows():
        eq_limit_states = collections.OrderedDict()
        eq_limit_states['immocc'] = float(eq_building_damage["immocc"])
        eq_limit_states['lifesfty'] = float(eq_building_damage["lifesfty"])
        eq_limit_states['collprev'] = float(eq_building_damage["collprev"])

        tsunami_limit_states = collections.OrderedDict()
        tsunami_limit_states['immocc'] = float(tsunami_building["immocc"])
        tsunami_limit_states['lifesfty'] = float(tsunami_building["lifesfty"])
        tsunami_limit_states['collprev'] = float(tsunami_building["collprev"])

        limit_states = collections.OrderedDict()

        limit_states["immocc"] = \
            eq_limit_states["immocc"] + tsunami_limit_states["immocc"] - \
            eq_limit_states["immocc"] * tsunami_limit_states["immocc"]

        limit_states["lifesfty"] = \
            eq_limit_states["lifesfty"] + tsunami_limit_states["lifesfty"] - \
            eq_limit_states["lifesfty"] * tsunami_limit_states["lifesfty"] + \
            ((eq_limit_states["immocc"] - eq_limit_states["lifesfty"]) *
             (tsunami_limit_states["immocc"] - tsunami_limit_states["lifesfty"]))

        limit_states["collprev"] = \
            eq_limit_states["collprev"] + tsunami_limit_states["collprev"] - \
            eq_limit_states["collprev"] * tsunami_limit_states["collprev"] + \
            ((eq_limit_states["lifesfty"] - eq_limit_states["collprev"]) *
             (tsunami_limit_states["lifesfty"] - tsunami_limit_states["collprev"]))

        damage_state = AnalysisUtil.calculate_damage_interval(limit_states)

        bldg_results = collections.OrderedDict()
        bldg_results["guid"] = guid
        bldg_results.update(limit_states)
        bldg_results.update(damage_state)
        bldg_results["hazard"] = "Earthquake+Tsunami"

        return bldg_results
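
# Worked example of the combination rule above (hypothetical values): treating the
# earthquake and tsunami limit-state exceedance probabilities as independent, the
# base rule is P(EQ or TS) = P(EQ) + P(TS) - P(EQ) * P(TS); the extra product terms
# for lifesfty and collprev account for buildings pushed into a higher damage state
# by the combination of the two hazards.
#
#   eq = {'immocc': 0.4, 'lifesfty': 0.2, 'collprev': 0.1}
#   ts = {'immocc': 0.5, 'lifesfty': 0.3, 'collprev': 0.2}
#
#   immocc   = 0.4 + 0.5 - 0.4 * 0.5                              # = 0.70
#   lifesfty = 0.2 + 0.3 - 0.2 * 0.3 + (0.4 - 0.2) * (0.5 - 0.3)  # = 0.48
#   collprev = 0.1 + 0.2 - 0.1 * 0.2 + (0.2 - 0.1) * (0.3 - 0.2)  # = 0.29
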
def waterfacility_damage_analysis(self, facility, fragility, liq_fragility,
                                  hazard_type, hazard_dataset_id,
                                  liq_geology_dataset_id, uncertainty):
    """Compute the damage analysis for a single water facility.

    Args:
        facility (obj): A JSON mapping of a facility based on mapping attributes.
        fragility (obj): A JSON description of the fragility mapped to the facility.
        liq_fragility (obj): A JSON description of the liquefaction fragility mapped to the facility.
        hazard_type (str): A string that indicates the hazard type.
        hazard_dataset_id (str): Hazard id from the hazard service.
        liq_geology_dataset_id (str): Geology dataset id from the data service to use for
            liquefaction calculation, if applicable.
        uncertainty (bool): Whether to use hazard standard deviation values for uncertainty.

    Returns:
        OrderedDict: A dictionary with water facility damage values and other data/metadata.

    """
    std_dev = 0
    if uncertainty:
        std_dev = random.random()

    hazard_demand_type = fragility.demand_type
    demand_units = fragility.demand_units
    liq_hazard_type = ""
    liq_hazard_val = 0.0
    liquefaction_prob = 0.0
    location = GeoUtil.get_location(facility)
    point = str(location.y) + "," + str(location.x)

    if hazard_type == "earthquake":
        hazard_val_set = self.hazardsvc.get_earthquake_hazard_values(
            hazard_dataset_id, hazard_demand_type, demand_units, [point])
    elif hazard_type == "tsunami":
        hazard_val_set = self.hazardsvc.get_tsunami_hazard_values(
            hazard_dataset_id, hazard_demand_type, demand_units, [point])
    else:
        raise ValueError("Hazard types other than earthquake and tsunami are not currently supported.")

    hazard_val = hazard_val_set[0]['hazardValue']
    if hazard_val < 0:
        hazard_val = 0

    limit_states = fragility.calculate_limit_state(hazard_val, std_dev)

    if liq_fragility is not None and liq_geology_dataset_id:
        liq_hazard_type = liq_fragility.demand_type
        pgd_demand_units = liq_fragility.demand_units
        point = str(location.y) + "," + str(location.x)

        liquefaction = self.hazardsvc.get_liquefaction_values(
            hazard_dataset_id, liq_geology_dataset_id, pgd_demand_units, [point])
        liq_hazard_val = liquefaction[0][liq_hazard_type]
        liquefaction_prob = liquefaction[0]['liqProbability']
        pgd_limit_states = liq_fragility.calculate_limit_state(liq_hazard_val, std_dev)
        limit_states = AnalysisUtil.adjust_limit_states_for_pgd(limit_states, pgd_limit_states)

    dmg_intervals = AnalysisUtil.calculate_damage_interval(limit_states)

    result = {**limit_states, **dmg_intervals}  # Needs py 3.5+

    metadata = collections.OrderedDict()
    metadata['guid'] = facility['properties']['guid']
    metadata['hazardtype'] = hazard_type
    metadata['demandtype'] = hazard_demand_type
    metadata['hazardval'] = hazard_val
    metadata['liqhaztype'] = liq_hazard_type
    metadata['liqhazval'] = liq_hazard_val
    metadata['liqprobability'] = liquefaction_prob

    result = {**metadata, **result}
    return result
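
# --- Illustrative note on the result assembly above ---
# The method builds its output with the Python 3.5+ dict-unpacking merge, where later
# mappings win on key collisions and insertion order is preserved; merging metadata
# first keeps 'guid' and the hazard fields at the front of each record. With
# hypothetical values:
#
#   metadata = {'guid': 'wf-1', 'hazardval': 0.8}
#   damage = {'ls-slight': 0.9, 'ls-moderat': 0.6}
#   record = {**metadata, **damage}
#   # {'guid': 'wf-1', 'hazardval': 0.8, 'ls-slight': 0.9, 'ls-moderat': 0.6}
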
def building_damage_analysis(self, building, fragility_set_as, fragility_set_ds):
    """Calculates damage results for a single building.

    Args:
        building (obj): A JSON mapping of a geometric object from the inventory: current building.
        fragility_set_as (obj): A JSON description of the acceleration-sensitive (AS) fragility
            assigned to the building.
        fragility_set_ds (obj): A JSON description of the drift-sensitive (DS) fragility
            assigned to the building.

    Returns:
        OrderedDict: A dictionary with building damage values and other data/metadata.

    """
    building_results = collections.OrderedDict()
    dmg_probability_as = collections.OrderedDict()
    dmg_probability_ds = collections.OrderedDict()
    hazard_demand_type_as = None
    hazard_demand_type_ds = None
    hazard_val_as = 0.0
    hazard_val_ds = 0.0

    # Read static parameters from the analysis object
    hazard_dataset_id = self.get_parameter("hazard_id")
    liq_geology_dataset_id = self.get_parameter("liq_geology_dataset_id")
    use_liquefaction = self.get_parameter("use_liquefaction")
    use_hazard_uncertainty = self.get_parameter("use_hazard_uncertainty")

    # Acceleration-sensitive fragility
    if fragility_set_as is not None:
        hazard_demand_type_as = AnalysisUtil.get_hazard_demand_type(
            building, fragility_set_as, 'earthquake')
        demand_units_as = fragility_set_as.demand_units
        location = GeoUtil.get_location(building)
        point = str(location.y) + "," + str(location.x)

        hazard_val_as = self.hazardsvc.get_earthquake_hazard_values(
            hazard_dataset_id, hazard_demand_type_as,
            demand_units_as, points=[point])[0]['hazardValue']
        dmg_probability_as = fragility_set_as.calculate_limit_state(hazard_val_as)

        # Adjust the damage probability for liquefaction
        if use_liquefaction:
            if liq_geology_dataset_id is not None:
                liquefaction_dmg = self.hazardsvc.get_liquefaction_values(
                    hazard_dataset_id, liq_geology_dataset_id, 'in',
                    points=[point])[0]['groundFailureProb']
            else:
                raise ValueError('Hazard does not support liquefaction! Check to make sure you '
                                 'defined the liquefaction portion of your scenario earthquake.')
            dmg_probability_as = NonStructBuildingUtil.adjust_damage_for_liquefaction(
                dmg_probability_as, liquefaction_dmg)

        # TODO: this value needs to come from the hazard service
        # Adjust the damage probability for hazard uncertainty
        if use_hazard_uncertainty:
            raise ValueError('Uncertainty has not yet been implemented!')
    else:
        dmg_probability_as['immocc'] = 0.0
        dmg_probability_as['lifesfty'] = 0.0
        dmg_probability_as['collprev'] = 0.0

    dmg_interval_as = AnalysisUtil.calculate_damage_interval(dmg_probability_as)

    # Drift-sensitive fragility
    if fragility_set_ds is not None:
        hazard_demand_type_ds = AnalysisUtil.get_hazard_demand_type(
            building, fragility_set_ds, 'earthquake')
        demand_units_ds = fragility_set_ds.demand_units
        location = GeoUtil.get_location(building)
        point = str(location.y) + "," + str(location.x)

        hazard_val_ds = self.hazardsvc.get_earthquake_hazard_values(
            hazard_dataset_id, hazard_demand_type_ds,
            demand_units_ds, points=[point])[0]['hazardValue']
        dmg_probability_ds = fragility_set_ds.calculate_limit_state(hazard_val_ds)

        # Adjust the damage probability for liquefaction
        if use_liquefaction:
            if liq_geology_dataset_id is not None:
                liquefaction_dmg = self.hazardsvc.get_liquefaction_values(
                    hazard_dataset_id, liq_geology_dataset_id, 'in',
                    points=[point])[0]['groundFailureProb']
            else:
                raise ValueError('Hazard does not support liquefaction! Check to make sure you '
                                 'defined the liquefaction portion of your scenario earthquake.')
            dmg_probability_ds = NonStructBuildingUtil.adjust_damage_for_liquefaction(
                dmg_probability_ds, liquefaction_dmg)

        # TODO: this value needs to come from the hazard service
        # Adjust the damage probability for hazard uncertainty
        if use_hazard_uncertainty:
            raise ValueError('Uncertainty has not yet been implemented!')
    else:
        dmg_probability_ds['immocc'] = 0.0
        dmg_probability_ds['lifesfty'] = 0.0
        dmg_probability_ds['collprev'] = 0.0

    dmg_interval_ds = AnalysisUtil.calculate_damage_interval(dmg_probability_ds)

    # Put results in dictionary
    building_results['guid'] = building['properties']['guid']
    building_results['immocc_as'] = dmg_probability_as['immocc']
    building_results['lifsfty_as'] = dmg_probability_as['lifesfty']
    building_results['collpre_as'] = dmg_probability_as['collprev']
    building_results['insig_as'] = dmg_interval_as['insignific']
    building_results['mod_as'] = dmg_interval_as['moderate']
    building_results['heavy_as'] = dmg_interval_as['heavy']
    building_results['comp_as'] = dmg_interval_as['complete']
    building_results['immocc_ds'] = dmg_probability_ds['immocc']
    building_results['lifsfty_ds'] = dmg_probability_ds['lifesfty']
    building_results['collpre_ds'] = dmg_probability_ds['collprev']
    building_results['insig_ds'] = dmg_interval_ds['insignific']
    building_results['mod_ds'] = dmg_interval_ds['moderate']
    building_results['heavy_ds'] = dmg_interval_ds['heavy']
    building_results['comp_ds'] = dmg_interval_ds['complete']
    building_results["hzrdtyp_as"] = hazard_demand_type_as
    building_results["hzrdval_as"] = hazard_val_as
    building_results["hzrdtyp_ds"] = hazard_demand_type_ds
    building_results["hzrdval_ds"] = hazard_val_ds

    return building_results
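
# --- Illustrative sketch (hypothetical helper, not part of the analyses in this file) ---
# NonStructBuildingUtil.adjust_damage_for_liquefaction, used above, folds the
# ground-failure probability into each limit state. Assuming the intent is the usual
# union of two independent failure probabilities, an equivalent in spirit would be:
def _example_adjust_for_liquefaction(limit_states, ground_failure_prob):
    """Combine shaking and ground-failure exceedance probabilities (illustrative only)."""
    return {state: prob + ground_failure_prob - prob * ground_failure_prob
            for state, prob in limit_states.items()}
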
def road_damage_analysis_bulk_input(self, roads, hazard_type, hazard_dataset_id,
                                    use_hazard_uncertainty, geology_dataset_id,
                                    fragility_key, use_liquefaction):
    """Run analysis for multiple roads.

    Args:
        roads (list): Multiple roads from input inventory set.
        hazard_type (str): A hazard type of the hazard exposure (earthquake or tsunami).
        hazard_dataset_id (str): An id of the hazard exposure.
        use_hazard_uncertainty (bool): Flag that indicates whether to use uncertainty.
        geology_dataset_id (str): An id of the geology for use in liquefaction.
        fragility_key (str): Fragility key describing the type of fragility.
        use_liquefaction (bool): True to use liquefaction information to modify the damage, False otherwise.

    Returns:
        list: A list of ordered dictionaries with road damage values and other data/metadata.

    """
    road_results = []
    fragility_sets = self.fragilitysvc.match_inventory(
        self.get_input_dataset("dfr3_mapping_set"), roads, fragility_key)

    # Convert the list of roads into a dictionary keyed by id for ease of reference
    list_roads = roads
    roads = dict()
    for rd in list_roads:
        roads[rd["id"]] = rd
    del list_roads

    processed_roads = []
    grouped_roads = AnalysisUtil.group_by_demand_type(roads, fragility_sets)

    for demand, grouped_road_items in grouped_roads.items():
        input_demand_type = demand[0]
        input_demand_units = demand[1]

        # For every group of unique demand and demand unit, call the end-point once
        road_chunks = list(AnalysisUtil.chunks(grouped_road_items, 50))
        for road_chunk in road_chunks:
            points = []
            for road_id in road_chunk:
                location = GeoUtil.get_location(roads[road_id])
                points.append(str(location.y) + "," + str(location.x))

            liquefaction = []
            if hazard_type == 'earthquake':
                hazard_vals = self.hazardsvc.get_earthquake_hazard_values(
                    hazard_dataset_id, input_demand_type, input_demand_units, points)

                if input_demand_type.lower() == 'pgd' and use_liquefaction \
                        and geology_dataset_id is not None:
                    liquefaction = self.hazardsvc.get_liquefaction_values(
                        hazard_dataset_id, geology_dataset_id, input_demand_units, points)
            elif hazard_type in ('tornado', 'hurricane'):
                raise ValueError('Earthquake and tsunami are the only hazards supported for road damage')
            elif hazard_type == 'tsunami':
                hazard_vals = self.hazardsvc.get_tsunami_hazard_values(
                    hazard_dataset_id, input_demand_type, input_demand_units, points)
            else:
                raise ValueError("Missing hazard type.")

            # Parse the batch hazard value results and map them back to the road and fragility.
            # This is a potential pitfall: we rely on the order of the returned results
            # matching the order of the points we sent.
            i = 0
            for road_id in road_chunk:
                road_result = collections.OrderedDict()
                road = roads[road_id]
                hazard_val = hazard_vals[i]['hazardValue']

                # Sometimes the geotiffs give large negative values for out of bounds instead of 0
                if hazard_val <= 0.0:
                    hazard_val = 0.0

                std_dev = 0.0
                if use_hazard_uncertainty:
                    raise ValueError("Uncertainty Not Implemented Yet.")

                selected_fragility_set = fragility_sets[road_id]
                dmg_probability = selected_fragility_set.calculate_limit_state(hazard_val, std_dev=std_dev)
                dmg_interval = AnalysisUtil.calculate_damage_interval(dmg_probability)

                road_result['guid'] = road['properties']['guid']
                road_result.update(dmg_probability)
                road_result.update(dmg_interval)
                road_result['demandtype'] = input_demand_type
                road_result['demandunits'] = input_demand_units
                road_result['hazardtype'] = hazard_type
                road_result['hazardval'] = hazard_val

                # If there is liquefaction, overwrite the hazard value with the liquefaction
                # value and recalculate dmg_probability and dmg_interval
                if len(liquefaction) > 0:
                    if input_demand_type in liquefaction[i]:
                        liquefaction_val = liquefaction[i][input_demand_type]
                    elif input_demand_type.lower() in liquefaction[i]:
                        liquefaction_val = liquefaction[i][input_demand_type.lower()]
                    elif input_demand_type.upper() in liquefaction[i]:
                        liquefaction_val = liquefaction[i][input_demand_type.upper()]
                    else:
                        liquefaction_val = 0.0

                    dmg_probability = selected_fragility_set.calculate_limit_state(
                        liquefaction_val, std_dev=std_dev)
                    dmg_interval = AnalysisUtil.calculate_damage_interval(dmg_probability)

                    road_result['hazardval'] = liquefaction_val
                    road_result.update(dmg_probability)
                    road_result.update(dmg_interval)

                road_results.append(road_result)
                processed_roads.append(road_id)
                i = i + 1

    # Roads that could not be mapped to a fragility get zeroed-out results
    unmapped_dmg_probability = {"ls-slight": 0.0, "ls-moderat": 0.0,
                                "ls-extensi": 0.0, "ls-complet": 0.0}
    unmapped_dmg_intervals = AnalysisUtil.calculate_damage_interval(unmapped_dmg_probability)
    for road_id, rd in roads.items():
        if road_id not in processed_roads:
            unmapped_rd_result = collections.OrderedDict()
            unmapped_rd_result['guid'] = rd['properties']['guid']
            unmapped_rd_result.update(unmapped_dmg_probability)
            unmapped_rd_result.update(unmapped_dmg_intervals)
            unmapped_rd_result['demandtype'] = "None"
            unmapped_rd_result['demandunits'] = "None"
            unmapped_rd_result['hazardtype'] = "None"
            unmapped_rd_result['hazardval'] = 0.0
            road_results.append(unmapped_rd_result)

    return road_results
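
# --- Illustrative sketch (hypothetical helper, not part of the analyses in this file) ---
# The liquefaction branch above tries the demand type as-is, lower-case, and
# upper-case before falling back to 0.0. A small helper states that intent once:
def _example_lookup_any_case(mapping, key, default=0.0):
    """Return the value for `key`, trying it as-is, lower, then upper case (illustrative only)."""
    for candidate in (key, key.lower(), key.upper()):
        if candidate in mapping:
            return mapping[candidate]
    return default
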