def get_average_daily_traffic(bridges, NBI_shapefile):
    """Map each bridge to the average daily traffic (ADT) of its nearest NBI feature.

    Args:
        bridges (list): Bridge features from the input inventory set.
        NBI_shapefile (str): Filename of the NBI shapefile.

    Returns:
        dict: Bridge guid mapped to its ADT value.

    """
    NBI = InventoryDataset(NBI_shapefile)
    NBI_features = list(NBI.inventory_set)

    ADT = {}
    for bridge in bridges:
        # convert lon and lat to the right format
        bridge_coord = GeoUtil.get_location(bridge)
        nearest_feature, distance = GeoUtil.find_nearest_feature(NBI_features, bridge_coord)
        ADT[bridge['properties']['guid']] = nearest_feature['properties']['ADT_029']

    return ADT
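
# Illustrative sketch only (not the GeoUtil implementation): find_nearest_feature
# can be thought of as a brute-force minimum-distance scan over the candidate
# features. The feature layout ({"geometry": ..., "properties": ...}) follows the
# fiona-style records used above; the function name here is hypothetical.
from shapely.geometry import shape, Point

def find_nearest_feature_sketch(features, point: Point):
    """Return (feature, distance) for the feature whose geometry is closest to point."""
    best_feature, best_distance = None, float("inf")
    for feature in features:
        d = point.distance(shape(feature["geometry"]))
        if d < best_distance:
            best_feature, best_distance = feature, d
    return best_feature, best_distance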
def NBI_coordinate_mapping(NBI_file):
    """Coordinates in NBI are in the format xx(degrees)xx(minutes)xx.xx(seconds);
    map them to the traditional xx.xxxx decimal form in order to create a shapefile.

    Args:
        NBI_file (str): Filename of an NBI file.

    Returns:
        pd.DataFrame: NBI data with LONG_017 and LAT_016 converted to decimal degrees.

    """
    NBI = pd.read_csv(NBI_file)
    NBI['LONG_017'] = NBI['LONG_017'].apply(lambda x: -1 * GeoUtil.degree_to_decimal(x))
    NBI['LAT_016'] = NBI['LAT_016'].apply(lambda x: GeoUtil.degree_to_decimal(x))

    return NBI
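
# Illustrative sketch only of the packed degree-minute-second conversion that
# GeoUtil.degree_to_decimal performs, assuming the DDMMSS.SS layout described in
# the docstring above (e.g., 39221530.0 -> 39 deg 22 min 15.30 sec).
def degree_to_decimal_sketch(packed_value: float) -> float:
    degrees = int(packed_value // 1_000_000)
    minutes = int((packed_value // 10_000) % 100)
    seconds = (packed_value % 10_000) / 100.0  # last two digits are decimal seconds
    return degrees + minutes / 60.0 + seconds / 3600.0

# degree_to_decimal_sketch(39221530.0) ~= 39.370917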
def road_damage_analysis(self, road, distance, hazard_type, fragility_set, hazard_dataset_id):
    """Run road damage analysis for a single road segment.

    Args:
        road (obj): A single road feature.
        distance (float): Distance to shore from the road.
        hazard_type (str): Hazard type.
        fragility_set (obj): A JSON description of fragility assigned to the road.
        hazard_dataset_id (str): A hazard dataset to use.

    Returns:
        OrderedDict: A dictionary with probability of failure values and other data/metadata.

    """
    road_results = collections.OrderedDict()

    if fragility_set is not None:
        demand_type = fragility_set.demand_type.lower()
        demand_units = fragility_set.demand_units
        location = GeoUtil.get_location(road)
        point = str(location.y) + "," + str(location.x)

        if hazard_type == 'hurricane':
            hazard_resp = self.hazardsvc.get_hurricane_values(
                hazard_dataset_id, "inundationDuration", demand_units, [point])
        else:
            raise ValueError("Hazard types other than hurricane are not currently supported.")

        dur_q = hazard_resp[0]['hazardValue']
        if dur_q <= 0.0:
            dur_q = 0.0

        fragility_vars = {'x': dur_q, 'y': distance}
        pf = fragility_set.calculate_custom_limit_state(fragility_vars)['failure']

        road_results['guid'] = road['properties']['guid']
        road_results['failprob'] = pf
        road_results['demandtype'] = demand_type
        road_results['demandunits'] = demand_units
        road_results['hazardtype'] = hazard_type
        road_results['hazardval'] = dur_q

    return road_results
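
# The road fragility above is a custom two-variable expression evaluated with
# x = inundation duration and y = distance to shore. A hypothetical stand-in for
# calculate_custom_limit_state (the real expression comes from the mapped DFR3
# fragility set; the logistic form and coefficients below are made up):
import math

def road_failure_probability_sketch(duration_hr: float, distance_to_shore: float,
                                    a: float = 0.4, b: float = -0.002) -> float:
    return 1.0 / (1.0 + math.exp(-(a * duration_hr + b * distance_to_shore)))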
def bridge_damage_analysis_bulk_input(self, bridges, hazard_type, hazard_dataset_id):
    """Run analysis for multiple bridges.

    Args:
        bridges (list): Multiple bridges from input inventory set.
        hazard_type (str): Hazard type, either earthquake, tornado, tsunami, or hurricane.
        hazard_dataset_id (str): An id of the hazard exposure.

    Returns:
        list: A list of ordered dictionaries with bridge damage values and other data/metadata.

    """
    # Get the fragility key
    fragility_key = self.get_parameter("fragility_key")
    if fragility_key is None:
        fragility_key = BridgeUtil.DEFAULT_TSUNAMI_HMAX_FRAGILITY_KEY if hazard_type == 'tsunami' else \
            BridgeUtil.DEFAULT_FRAGILITY_KEY
        self.set_parameter("fragility_key", fragility_key)

    # Hazard uncertainty
    use_hazard_uncertainty = False
    if hazard_type == "earthquake" and self.get_parameter("use_hazard_uncertainty") is not None:
        use_hazard_uncertainty = self.get_parameter("use_hazard_uncertainty")

    # Liquefaction
    use_liquefaction = False
    if hazard_type == "earthquake" and self.get_parameter("use_liquefaction") is not None:
        use_liquefaction = self.get_parameter("use_liquefaction")

    fragility_set = self.fragilitysvc.match_inventory(
        self.get_input_dataset("dfr3_mapping_set"), bridges, fragility_key)

    bridge_results = []

    # Convert the list of bridges into a dictionary for ease of reference
    list_bridges = bridges
    bridges = dict()
    for br in list_bridges:
        bridges[br["id"]] = br
    list_bridges = None  # clear, as it's not needed anymore

    processed_bridges = []
    grouped_bridges = AnalysisUtil.group_by_demand_type(bridges, fragility_set)

    for demand, grouped_brs in grouped_bridges.items():
        input_demand_type = demand[0]
        input_demand_units = demand[1]

        # For every group of unique demand and demand unit, call the endpoint once
        br_chunks = list(AnalysisUtil.chunks(grouped_brs, 50))  # TODO: move to globals?
        for brs in br_chunks:
            points = []
            for br_id in brs:
                location = GeoUtil.get_location(bridges[br_id])
                points.append(str(location.y) + "," + str(location.x))

            if hazard_type == "earthquake":
                hazard_vals = self.hazardsvc.get_earthquake_hazard_values(
                    hazard_dataset_id, input_demand_type, input_demand_units, points)
            elif hazard_type == "tsunami":
                hazard_vals = self.hazardsvc.get_tsunami_hazard_values(
                    hazard_dataset_id, input_demand_type, input_demand_units, points)
            elif hazard_type == "tornado":
                hazard_vals = self.hazardsvc.get_tornado_hazard_values(
                    hazard_dataset_id, input_demand_units, points)
            elif hazard_type == "hurricane":
                hazard_vals = self.hazardsvc.get_hurricanewf_values(
                    hazard_dataset_id, input_demand_type, input_demand_units, points)
            else:
                raise ValueError("We only support earthquake, tornado, tsunami, and hurricane at the moment!")

            # Parse the batch hazard value results and map them back to the bridge and fragility.
            # This is a potential pitfall, as we are relying on the order of the returned results.
            i = 0
            for br_id in brs:
                bridge_result = collections.OrderedDict()
                bridge = bridges[br_id]
                selected_fragility_set = fragility_set[br_id]
                hazard_val = hazard_vals[i]['hazardValue']

                hazard_std_dev = 0.0
                if use_hazard_uncertainty:
                    # TODO: get this from the API once implemented
                    raise ValueError("Uncertainty not implemented!")

                adjusted_fragility_set = copy.deepcopy(selected_fragility_set)
                if use_liquefaction and 'liq' in bridge['properties']:
                    for fragility in adjusted_fragility_set.fragility_curves:
                        fragility.adjust_fragility_for_liquefaction(bridge['properties']['liq'])

                dmg_probability = adjusted_fragility_set.calculate_limit_state(hazard_val, std_dev=hazard_std_dev)
                retrofit_cost = BridgeUtil.get_retrofit_cost(fragility_key)
                retrofit_type = BridgeUtil.get_retrofit_type(fragility_key)
                dmg_intervals = AnalysisUtil.calculate_damage_interval(dmg_probability)

                bridge_result['guid'] = bridge['properties']['guid']
                bridge_result.update(dmg_probability)
                bridge_result.update(dmg_intervals)
                bridge_result["retrofit"] = retrofit_type
                bridge_result["retrocost"] = retrofit_cost
                bridge_result["demandtype"] = input_demand_type
                bridge_result["demandunits"] = input_demand_units
                bridge_result["hazardtype"] = hazard_type
                bridge_result["hazardval"] = hazard_val

                # Add spans to the bridge output so the mean damage calculation can use that info
                if "spans" in bridge["properties"] and bridge["properties"]["spans"] is not None \
                        and bridge["properties"]["spans"].isdigit():
                    bridge_result['spans'] = int(bridge["properties"]["spans"])
                elif "SPANS" in bridge["properties"] and bridge["properties"]["SPANS"] is not None \
                        and bridge["properties"]["SPANS"].isdigit():
                    bridge_result['spans'] = int(bridge["properties"]["SPANS"])
                else:
                    bridge_result['spans'] = 1

                bridge_results.append(bridge_result)
                processed_bridges.append(br_id)  # track processed bridges
                i = i + 1

    unmapped_dmg_probability = {"ls-slight": 0.0, "ls-moderat": 0.0, "ls-extensi": 0.0, "ls-complet": 0.0}
    unmapped_dmg_intervals = AnalysisUtil.calculate_damage_interval(unmapped_dmg_probability)
    for br_id, br in bridges.items():
        if br_id not in processed_bridges:
            unmapped_br_result = collections.OrderedDict()
            unmapped_br_result['guid'] = br['properties']['guid']
            unmapped_br_result.update(unmapped_dmg_probability)
            unmapped_br_result.update(unmapped_dmg_intervals)
            unmapped_br_result["retrofit"] = "Non-Retrofit"
            unmapped_br_result["retrocost"] = 0.0
            unmapped_br_result["demandtype"] = "None"
            unmapped_br_result['demandunits'] = "None"
            unmapped_br_result["hazardtype"] = "None"
            unmapped_br_result['hazardval'] = 0.0
            bridge_results.append(unmapped_br_result)

    return bridge_results
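
# The ls-* values used for unmapped bridges above are cumulative limit-state
# exceedance probabilities; AnalysisUtil.calculate_damage_interval converts them
# to mutually exclusive damage-state probabilities. A sketch of that standard
# cumulative-to-interval conversion (output key names here are illustrative):
def damage_intervals_sketch(ls):
    return {
        "ds-none": 1.0 - ls["ls-slight"],
        "ds-slight": ls["ls-slight"] - ls["ls-moderat"],
        "ds-moderat": ls["ls-moderat"] - ls["ls-extensi"],
        "ds-extensi": ls["ls-extensi"] - ls["ls-complet"],
        "ds-complet": ls["ls-complet"],
    }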
def building_damage_analysis_bulk_input(self, buildings, hazard_type, hazard_dataset_id):
    """Run analysis for multiple buildings.

    Args:
        buildings (list): Multiple buildings from input inventory set.
        hazard_type (str): Hazard type, either earthquake, tornado, or tsunami.
        hazard_dataset_id (str): An id of the hazard exposure.

    Returns:
        list: A list of ordered dictionaries with building damage values and other data/metadata.

    """
    fragility_key = self.get_parameter("fragility_key")
    fragility_sets = self.fragilitysvc.match_inventory(
        self.get_input_dataset("dfr3_mapping_set"), buildings, fragility_key)

    bldg_results = []

    # Convert the list of buildings into a dictionary for ease of reference
    list_buildings = buildings
    buildings = dict()
    for b in list_buildings:
        buildings[b["id"]] = b
    list_buildings = None  # clear, as it's not needed anymore

    grouped_buildings = AnalysisUtil.group_by_demand_type(buildings, fragility_sets, hazard_type,
                                                          is_building=True)

    for demand, grouped_bldgs in grouped_buildings.items():
        input_demand_type = demand[0]
        input_demand_units = demand[1]

        # For every group of unique demand and demand unit, call the endpoint once
        bldg_chunks = list(AnalysisUtil.chunks(grouped_bldgs, 50))  # TODO: move to globals?
        for bldgs in bldg_chunks:
            points = []
            for bldg_id in bldgs:
                location = GeoUtil.get_location(buildings[bldg_id])
                points.append(str(location.y) + "," + str(location.x))

            if hazard_type == 'earthquake':
                hazard_vals = self.hazardsvc.get_earthquake_hazard_values(
                    hazard_dataset_id, input_demand_type, input_demand_units, points)
            elif hazard_type == 'tornado':
                hazard_vals = self.hazardsvc.get_tornado_hazard_values(
                    hazard_dataset_id, input_demand_units, points)
            elif hazard_type == 'tsunami':
                hazard_vals = self.hazardsvc.get_tsunami_hazard_values(
                    hazard_dataset_id, input_demand_type, input_demand_units, points)
            elif hazard_type == 'hurricane':
                # TODO: implement hurricane
                raise ValueError("Hurricane hazard has not yet been implemented!")
            else:
                raise ValueError("The provided hazard type is not supported yet by this analysis.")

            # Parse the batch hazard value results and map them back to the building and fragility.
            # This is a potential pitfall, as we are relying on the order of the returned results.
            i = 0
            for bldg_id in bldgs:
                bldg_result = collections.OrderedDict()
                building = buildings[bldg_id]
                hazard_val = hazard_vals[i]['hazardValue']
                output_demand_type = hazard_vals[i]['demand']
                if hazard_type == 'earthquake':
                    period = float(hazard_vals[i]['period'])
                    if period > 0:
                        output_demand_type = str(hazard_vals[i]['period']) + " " + output_demand_type

                num_stories = building['properties']['no_stories']
                selected_fragility_set = fragility_sets[bldg_id]
                building_period = selected_fragility_set.fragility_curves[0].get_building_period(num_stories)
                dmg_probability = selected_fragility_set.calculate_limit_state(hazard_val, building_period)
                dmg_interval = AnalysisUtil.calculate_damage_interval(dmg_probability)

                bldg_result['guid'] = building['properties']['guid']
                bldg_result.update(dmg_probability)
                bldg_result.update(dmg_interval)
                bldg_result['demandtype'] = output_demand_type
                bldg_result['demandunits'] = input_demand_units
                bldg_result['hazardval'] = hazard_val

                bldg_results.append(bldg_result)
                del buildings[bldg_id]  # the remaining entries are the unmapped buildings
                i = i + 1

    unmapped_hazard_val = 0.0
    unmapped_output_demand_type = "None"
    unmapped_output_demand_unit = "None"
    for unmapped_bldg_id, unmapped_bldg in buildings.items():
        unmapped_bldg_result = collections.OrderedDict()
        unmapped_bldg_result['guid'] = unmapped_bldg['properties']['guid']
        unmapped_bldg_result['demandtype'] = unmapped_output_demand_type
        unmapped_bldg_result['demandunits'] = unmapped_output_demand_unit
        unmapped_bldg_result['hazardval'] = unmapped_hazard_val
        bldg_results.append(unmapped_bldg_result)

    return bldg_results
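
# Both bulk analyses above batch their inventory with AnalysisUtil.chunks so that
# each hazard-service call carries at most 50 points. A minimal sketch of such a
# generator, assuming a list (or any sliceable sequence) as input:
def chunks_sketch(items, size):
    for start in range(0, len(items), size):
        yield items[start:start + size]

# list(chunks_sketch([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]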
def pipeline_damage_analysis(self, pipeline, hazard_type, fragility_set, fragility_set_liq,
                             hazard_dataset_id, geology_dataset_id, use_liquefaction):
    """Run pipeline damage analysis for a single pipeline.

    Args:
        pipeline (obj): A single pipeline.
        hazard_type (str): Hazard type.
        fragility_set (obj): A JSON description of fragility assigned to the pipeline.
        fragility_set_liq (obj): A JSON description of fragility assigned to the pipeline
            with liquefaction.
        hazard_dataset_id (str): A hazard dataset to use.
        geology_dataset_id (str): A geology dataset id for liquefaction.
        use_liquefaction (bool): True to use liquefaction information to modify the damage,
            False otherwise.

    Returns:
        OrderedDict: A dictionary with pipeline damage values and other data/metadata.

    """
    pipeline_results = collections.OrderedDict()
    pgv_repairs = 0.0
    pgd_repairs = 0.0
    liq_hazard_type = ""
    liq_hazard_val = 0.0
    liquefaction_prob = 0.0

    if fragility_set is not None:
        demand_type = fragility_set.demand_type.lower()
        demand_units = fragility_set.demand_units
        location = GeoUtil.get_location(pipeline)
        point = str(location.y) + "," + str(location.x)

        if hazard_type == 'earthquake':
            hazard_resp = self.hazardsvc.get_earthquake_hazard_values(
                hazard_dataset_id, demand_type, demand_units, [point])
        elif hazard_type == 'tsunami':
            hazard_resp = self.hazardsvc.get_tsunami_hazard_values(
                hazard_dataset_id, demand_type, demand_units, [point])
        elif hazard_type == 'tornado':
            hazard_resp = self.hazardsvc.get_tornado_hazard_values(
                hazard_dataset_id, demand_units, [point])
        elif hazard_type == 'hurricane':
            hazard_resp = self.hazardsvc.get_hurricanewf_values(
                hazard_dataset_id, demand_type, demand_units, [point])
        else:
            raise ValueError("The provided hazard type is not currently supported.")

        hazard_val = hazard_resp[0]['hazardValue']
        if hazard_val <= 0.0:
            hazard_val = 0.0

        diameter = PipelineUtil.get_pipe_diameter(pipeline)
        fragility_vars = {'x': hazard_val, 'y': diameter}
        fragility_curve = fragility_set.fragility_curves[0]

        # TODO: this assumes that a custom fragility set only has one limit state
        pgv_repairs = fragility_set.calculate_custom_limit_state(fragility_vars)['failure']
        # Convert PGV repairs to SI units
        pgv_repairs = PipelineUtil.convert_result_unit(fragility_curve.description, pgv_repairs)

        if use_liquefaction is True and fragility_set_liq is not None and geology_dataset_id is not None:
            liq_fragility_curve = fragility_set_liq.fragility_curves[0]
            liq_hazard_type = fragility_set_liq.demand_type
            pgd_demand_units = fragility_set_liq.demand_units

            # Get the PGD hazard value from the hazard service
            location_str = str(location.y) + "," + str(location.x)
            liquefaction = self.hazardsvc.get_liquefaction_values(
                hazard_dataset_id, geology_dataset_id, pgd_demand_units, [location_str])
            liq_hazard_val = liquefaction[0]['pgd']
            liquefaction_prob = liquefaction[0]['liqProbability']

            liq_fragility_vars = {'x': liq_hazard_val, 'y': liquefaction_prob}
            pgd_repairs = liq_fragility_curve.compute_custom_limit_state_probability(liq_fragility_vars)
            # Convert PGD repairs to SI units
            pgd_repairs = PipelineUtil.convert_result_unit(liq_fragility_curve.description, pgd_repairs)

        total_repair_rate = pgd_repairs + pgv_repairs
        break_rate = 0.2 * pgv_repairs + 0.8 * pgd_repairs
        leak_rate = 0.8 * pgv_repairs + 0.2 * pgd_repairs
        length = PipelineUtil.get_pipe_length(pipeline)
        failure_probability = 1 - math.exp(-1.0 * break_rate * length)
        num_pgd_repairs = pgd_repairs * length
        num_pgv_repairs = pgv_repairs * length
        num_repairs = num_pgd_repairs + num_pgv_repairs

        pipeline_results['guid'] = pipeline['properties']['guid']
        if 'pipetype' in pipeline['properties']:
            pipeline_results['pipeclass'] = pipeline['properties']['pipetype']
        elif 'pipelinesc' in pipeline['properties']:
            pipeline_results['pipeclass'] = pipeline['properties']['pipelinesc']
        else:
            pipeline_results['pipeclass'] = ""

        pipeline_results['pgvrepairs'] = pgv_repairs
        pipeline_results['pgdrepairs'] = pgd_repairs
        pipeline_results['repairspkm'] = total_repair_rate
        pipeline_results['breakrate'] = break_rate
        pipeline_results['leakrate'] = leak_rate
        pipeline_results['failprob'] = failure_probability
        pipeline_results['demandtype'] = demand_type
        pipeline_results['hazardtype'] = hazard_type
        pipeline_results['hazardval'] = hazard_val
        pipeline_results['liqhaztype'] = liq_hazard_type
        pipeline_results['liqhazval'] = liq_hazard_val
        pipeline_results['liqprobability'] = liquefaction_prob
        pipeline_results['numpgvrpr'] = num_pgv_repairs
        pipeline_results['numpgdrpr'] = num_pgd_repairs
        pipeline_results['numrepairs'] = num_repairs

    return pipeline_results
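
# Worked sketch of the repair-rate bookkeeping above with made-up numbers,
# assuming repair rates in repairs/km and pipe length in km. Breaks and leaks
# split the PGV/PGD rates 20/80 and 80/20, and the failure probability follows
# the Poisson form 1 - exp(-break_rate * length) used in the function.
import math

pgv_rate, pgd_rate, length_km = 0.05, 0.02, 3.0
break_rate_ex = 0.2 * pgv_rate + 0.8 * pgd_rate      # 0.026 breaks/km
leak_rate_ex = 0.8 * pgv_rate + 0.2 * pgd_rate       # 0.044 leaks/km
fail_prob_ex = 1 - math.exp(-break_rate_ex * length_km)  # ~0.075
num_repairs_ex = (pgv_rate + pgd_rate) * length_km       # 0.21 expected repairs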
def waterfacilityset_damage_analysis_bulk_input(self, facilities, hazard_type, hazard_dataset_id):
    """Gets applicable fragilities and calculates damage.

    Args:
        facilities (list): Multiple water facilities from input inventory set.
        hazard_type (str): A hazard type of the hazard exposure (earthquake, tsunami,
            tornado, or hurricane).
        hazard_dataset_id (str): An id of the hazard exposure.

    Returns:
        list: A list of ordered dictionaries with water facility damage values.
        list: A list of ordered dictionaries with other water facility data/metadata.

    """
    # Liquefaction-related variables
    use_liquefaction = False
    liquefaction_available = False
    fragility_sets_liq = None
    liquefaction_resp = None
    geology_dataset_id = None
    liq_hazard_vals = None
    liq_demand_types = None
    liq_demand_units = None
    liquefaction_prob = None
    loc = None

    # Obtain the fragility key
    fragility_key = self.get_parameter("fragility_key")
    if fragility_key is None:
        if hazard_type == 'tsunami':
            fragility_key = self.DEFAULT_TSU_FRAGILITY_KEY
        elif hazard_type == 'earthquake':
            fragility_key = self.DEFAULT_EQ_FRAGILITY_KEY
        else:
            raise ValueError("Hazard types other than earthquake and tsunami are not currently supported.")
        self.set_parameter("fragility_key", fragility_key)

    # Obtain the fragility set
    fragility_sets = self.fragilitysvc.match_inventory(
        self.get_input_dataset("dfr3_mapping_set"), facilities, fragility_key)

    # Obtain the liquefaction fragility key
    liquefaction_fragility_key = self.get_parameter("liquefaction_fragility_key")

    if hazard_type == "earthquake":
        if self.get_parameter("use_liquefaction") is True:
            if liquefaction_fragility_key is None:
                liquefaction_fragility_key = self.DEFAULT_LIQ_FRAGILITY_KEY
            use_liquefaction = self.get_parameter("use_liquefaction")

            # Obtain the geology dataset
            geology_dataset_id = self.get_parameter("liquefaction_geology_dataset_id")
            if geology_dataset_id is not None:
                fragility_sets_liq = self.fragilitysvc.match_inventory(
                    self.get_input_dataset("dfr3_mapping_set"), facilities, liquefaction_fragility_key)
                if fragility_sets_liq is not None:
                    liquefaction_available = True

    # Determine whether to use hazard uncertainty
    uncertainty = self.get_parameter("use_hazard_uncertainty")

    # Set up the hazard value payloads
    values_payload = []
    values_payload_liq = []
    unmapped_waterfacilities = []
    mapped_waterfacilities = []

    for facility in facilities:
        if facility["id"] in fragility_sets.keys():
            # Fill in generic details
            fragility_set = fragility_sets[facility["id"]]
            location = GeoUtil.get_location(facility)
            loc = str(location.y) + "," + str(location.x)
            demands = fragility_set.demand_types
            units = fragility_set.demand_units
            value = {"demands": demands, "units": units, "loc": loc}
            values_payload.append(value)
            mapped_waterfacilities.append(facility)

            # Fill in liquefaction parameters
            if liquefaction_available and facility["id"] in fragility_sets_liq:
                fragility_set_liq = fragility_sets_liq[facility["id"]]
                demands_liq = fragility_set_liq.demand_types
                units_liq = fragility_set_liq.demand_units
                value_liq = {"demands": demands_liq, "units": units_liq, "loc": loc}
                values_payload_liq.append(value_liq)
        else:
            unmapped_waterfacilities.append(facility)

    del facilities

    if hazard_type == 'earthquake':
        hazard_resp = self.hazardsvc.post_earthquake_hazard_values(hazard_dataset_id, values_payload)
    elif hazard_type == 'tsunami':
        hazard_resp = self.hazardsvc.post_tsunami_hazard_values(hazard_dataset_id, values_payload)
    else:
        raise ValueError("The provided hazard type is not supported yet by this analysis.")

    # Check if liquefaction is applicable
    if liquefaction_available:
        liquefaction_resp = self.hazardsvc.post_liquefaction_values(
            hazard_dataset_id, geology_dataset_id, values_payload_liq)

    # Calculate limit states and damage states
    facility_results = []
    damage_results = []

    for i, facility in enumerate(mapped_waterfacilities):
        fragility_set = fragility_sets[facility["id"]]
        limit_states = dict()
        dmg_intervals = dict()

        # Set up conditions for the analysis
        hazard_std_dev = 0
        if uncertainty:
            hazard_std_dev = random.random()

        if isinstance(fragility_set.fragility_curves[0], DFR3Curve):
            hazard_vals = AnalysisUtil.update_precision_of_lists(hazard_resp[i]["hazardValues"])
            demand_types = hazard_resp[i]["demands"]
            demand_units = hazard_resp[i]["units"]

            hval_dict = dict()
            for j, d in enumerate(fragility_set.demand_types):
                hval_dict[d] = hazard_vals[j]

            if not AnalysisUtil.do_hazard_values_have_errors(hazard_resp[i]["hazardValues"]):
                facility_args = fragility_set.construct_expression_args_from_inventory(facility)
                limit_states = fragility_set.calculate_limit_state(
                    hval_dict, std_dev=hazard_std_dev, inventory_type='water_facility', **facility_args)

                # Evaluate liquefaction: if the response is not None, liquefaction is available
                if liquefaction_resp is not None:
                    fragility_set_liq = fragility_sets_liq[facility["id"]]

                    if isinstance(fragility_set_liq.fragility_curves[0], DFR3Curve):
                        liq_hazard_vals = AnalysisUtil.update_precision_of_lists(
                            liquefaction_resp[i]["pgdValues"])
                        liq_demand_types = liquefaction_resp[i]["demands"]
                        liq_demand_units = liquefaction_resp[i]["units"]
                        liquefaction_prob = liquefaction_resp[i]['liqProbability']

                        hval_dict_liq = dict()
                        for j, d in enumerate(fragility_set_liq.demand_types):
                            hval_dict_liq[d] = liq_hazard_vals[j]

                        facility_liq_args = fragility_set_liq.construct_expression_args_from_inventory(facility)
                        pgd_limit_states = fragility_set_liq.calculate_limit_state(
                            hval_dict_liq, std_dev=hazard_std_dev,
                            inventory_type="water_facility", **facility_liq_args)
                    else:
                        raise ValueError("One of the fragilities is in a deprecated format. "
                                         "This should not happen. If you are seeing this, "
                                         "please report the issue.")

                    limit_states = AnalysisUtil.adjust_limit_states_for_pgd(limit_states, pgd_limit_states)

                dmg_intervals = fragility_set.calculate_damage_interval(
                    limit_states, hazard_type=hazard_type, inventory_type='water_facility')
        else:
            raise ValueError("One of the fragilities is in a deprecated format. "
                             "This should not happen. If you are seeing this, please report the issue.")

        # TODO: ideally, this goes into a single variable declaration section
        facility_result = {'guid': facility['properties']['guid'], **limit_states, **dmg_intervals}
        facility_result['haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(hazard_vals, hazard_type)

        damage_result = dict()
        damage_result['guid'] = facility['properties']['guid']
        damage_result['fragility_id'] = fragility_set.id
        damage_result['demandtypes'] = demand_types
        damage_result['demandunits'] = demand_units
        damage_result['hazardtype'] = hazard_type
        damage_result['hazardvals'] = hazard_vals

        if use_liquefaction and fragility_sets_liq and geology_dataset_id:
            damage_result['liq_fragility_id'] = fragility_sets_liq[facility["id"]].id
            damage_result['liqdemandtypes'] = liq_demand_types
            damage_result['liqdemandunits'] = liq_demand_units
            damage_result['liqhazval'] = liq_hazard_vals
            damage_result['liqprobability'] = liquefaction_prob
        else:
            damage_result['liq_fragility_id'] = None
            damage_result['liqdemandtypes'] = None
            damage_result['liqdemandunits'] = None
            damage_result['liqhazval'] = None
            damage_result['liqprobability'] = None

        facility_results.append(facility_result)
        damage_results.append(damage_result)

    for facility in unmapped_waterfacilities:
        facility_result = dict()
        damage_result = dict()

        facility_result['guid'] = facility['properties']['guid']
        damage_result['guid'] = facility['properties']['guid']
        damage_result['fragility_id'] = None
        damage_result['demandtypes'] = None
        damage_result['demandunits'] = None
        damage_result['hazardtype'] = None
        damage_result['hazardvals'] = None
        damage_result['liq_fragility_id'] = None
        damage_result['liqdemandtypes'] = None
        damage_result['liqdemandunits'] = None
        damage_result['liqhazval'] = None
        damage_result['liqprobability'] = None

        facility_results.append(facility_result)
        damage_results.append(damage_result)

    return facility_results, damage_results
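
# AnalysisUtil.adjust_limit_states_for_pgd combines the shaking limit states with
# the ground-failure (PGD) limit states. A plausible sketch, assuming the
# combination is the probabilistic union of independent failure modes (the exact
# rule lives in AnalysisUtil and may differ):
def adjust_for_pgd_sketch(limit_states, pgd_limit_states):
    return {k: limit_states[k] + pgd_limit_states[k] - limit_states[k] * pgd_limit_states[k]
            for k in limit_states}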
def building_damage_analysis_bulk_input(self, buildings, retrofit_strategy, hazard_type, hazard_dataset_id):
    """Run analysis for multiple buildings.

    Args:
        buildings (list): Multiple buildings from input inventory set.
        retrofit_strategy (list): Building guid and its retrofit level (0, 1, 2, etc.). Optional.
        hazard_type (str): Hazard type, either earthquake, tornado, or tsunami.
        hazard_dataset_id (str): An id of the hazard exposure.

    Returns:
        list: A list of ordered dictionaries with building damage values and other data/metadata.

    """
    fragility_key = self.get_parameter("fragility_key")
    fragility_sets = self.fragilitysvc.match_inventory(
        self.get_input_dataset("dfr3_mapping_set"), buildings, fragility_key, retrofit_strategy)

    values_payload = []
    unmapped_buildings = []
    mapped_buildings = []

    for b in buildings:
        bldg_id = b["id"]
        if bldg_id in fragility_sets:
            location = GeoUtil.get_location(b)
            loc = str(location.y) + "," + str(location.x)
            demands = AnalysisUtil.get_hazard_demand_types(b, fragility_sets[bldg_id], hazard_type)
            units = fragility_sets[bldg_id].demand_units
            value = {"demands": demands, "units": units, "loc": loc}
            values_payload.append(value)
            mapped_buildings.append(b)
        else:
            unmapped_buildings.append(b)

    # Not needed anymore, as the buildings are already split into mapped and unmapped
    del buildings

    if hazard_type == 'earthquake':
        hazard_vals = self.hazardsvc.post_earthquake_hazard_values(hazard_dataset_id, values_payload)
    elif hazard_type == 'tornado':
        hazard_vals = self.hazardsvc.post_tornado_hazard_values(hazard_dataset_id, values_payload,
                                                                self.get_parameter('seed'))
    elif hazard_type == 'tsunami':
        hazard_vals = self.hazardsvc.post_tsunami_hazard_values(hazard_dataset_id, values_payload)
    elif hazard_type == 'hurricane':
        hazard_vals = self.hazardsvc.post_hurricane_hazard_values(hazard_dataset_id, values_payload)
    elif hazard_type == 'flood':
        hazard_vals = self.hazardsvc.post_flood_hazard_values(hazard_dataset_id, values_payload)
    else:
        raise ValueError("The provided hazard type is not supported yet by this analysis.")

    ds_results = []
    damage_results = []

    i = 0
    for b in mapped_buildings:
        ds_result = dict()
        damage_result = dict()
        dmg_probability = dict()
        dmg_interval = dict()
        b_id = b["id"]
        selected_fragility_set = fragility_sets[b_id]

        # TODO: Once all fragilities are migrated to the new format, we can remove this condition
        if isinstance(selected_fragility_set.fragility_curves[0], DFR3Curve):
            # Supports multiple demand types in the same fragility
            b_haz_vals = AnalysisUtil.update_precision_of_lists(hazard_vals[i]["hazardValues"])
            b_demands = hazard_vals[i]["demands"]
            b_units = hazard_vals[i]["units"]

            hval_dict = dict()
            j = 0
            # To calculate damage, use the demand type names from the fragility (the ones used in
            # the expression) instead of what the hazard service returns. There could be a
            # difference, e.g., "SA" in DFR3 vs. "1.07 SA" from the hazard service.
            for d in selected_fragility_set.demand_types:
                hval_dict[d] = b_haz_vals[j]
                j += 1

            if not AnalysisUtil.do_hazard_values_have_errors(hazard_vals[i]["hazardValues"]):
                building_args = selected_fragility_set.construct_expression_args_from_inventory(b)
                building_period = selected_fragility_set.fragility_curves[0].get_building_period(
                    selected_fragility_set.curve_parameters, **building_args)
                dmg_probability = selected_fragility_set.calculate_limit_state(
                    hval_dict, **building_args, period=building_period)
                dmg_interval = selected_fragility_set.calculate_damage_interval(
                    dmg_probability, hazard_type=hazard_type, inventory_type="building")
        else:
            raise ValueError("One of the fragilities is in a deprecated format. "
                             "This should not happen. If you are seeing this, please report the issue.")

        ds_result['guid'] = b['properties']['guid']
        damage_result['guid'] = b['properties']['guid']

        ds_result.update(dmg_probability)
        ds_result.update(dmg_interval)
        ds_result['haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(b_haz_vals, hazard_type)

        damage_result['fragility_id'] = selected_fragility_set.id
        damage_result['demandtype'] = b_demands
        damage_result['demandunits'] = b_units
        damage_result['hazardval'] = b_haz_vals

        ds_results.append(ds_result)
        damage_results.append(damage_result)
        i += 1

    for b in unmapped_buildings:
        ds_result = dict()
        damage_result = dict()

        ds_result['guid'] = b['properties']['guid']
        damage_result['guid'] = b['properties']['guid']
        damage_result['fragility_id'] = None
        damage_result['demandtype'] = None
        damage_result['demandunits'] = None
        damage_result['hazardval'] = None

        ds_results.append(ds_result)
        damage_results.append(damage_result)

    return ds_results, damage_results
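
# Why the loop above keys hval_dict by the fragility's own demand names: the
# hazard service can echo a demand like "1.07 SA" while the fragility expression
# references plain "SA", so the positional values are re-keyed to the names the
# expression expects. Minimal illustration (values are made up):
fragility_demand_types = ["SA"]
service_response = {"demands": ["1.07 SA"], "hazardValues": [0.62]}
hval_dict_example = {d: service_response["hazardValues"][j]
                     for j, d in enumerate(fragility_demand_types)}  # {"SA": 0.62}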
def get_damage(self, node_dataset, link_dataset, tornado_dataset, tornado_id):
    """Calculate tornado damage to the electric power network.

    Args:
        node_dataset (obj): Node dataset.
        link_dataset (obj): Link dataset.
        tornado_dataset (obj): Tornado dataset.
        tornado_id (str): Tornado id.

    Returns:
        list: A list of ordered dictionaries with damage values.
        list: A list of ordered dictionaries with other data/metadata.

    """
    self.set_tornado_variables(tornado_dataset)
    self.set_node_variables(node_dataset)

    # Get the fragility curve sets - tower for transmission, pole for distribution
    fragility_set_tower = FragilityCurveSet(self.fragilitysvc.get_dfr3_set(self.fragility_tower_id))
    assert fragility_set_tower.id == self.fragility_tower_id
    fragility_set_pole = FragilityCurveSet(self.fragilitysvc.get_dfr3_set(self.fragility_pole_id))
    assert fragility_set_pole.id == self.fragility_pole_id

    # Network test
    node_id_validation = NetworkUtil.validate_network_node_ids(
        node_dataset, link_dataset, self.fromnode_fld_name, self.tonode_fld_name, self.nodenwid_fld_name)
    if node_id_validation is False:
        print("ID in from or to node field doesn't exist in the node dataset")
        sys.exit(0)

    # Get the network graph and node coordinates
    is_directed_graph = True
    graph, node_coords = NetworkUtil.create_network_graph_from_field(
        link_dataset, self.fromnode_fld_name, self.tonode_fld_name, is_directed_graph)

    # Reverse the graph to accumulate the damage to the next "to" node
    graph = nx.DiGraph.reverse(graph, copy=True)

    # Check the connections as a list
    if is_directed_graph:
        connection_sets = list(nx.weakly_connected_components(graph))
    else:
        connection_sets = list(nx.connected_components(graph))

    # Check the first node of each network line; this first node should lead each
    # separated network. Also convert the connection sets to lists.
    first_node_list = []
    connection_list = []
    for c in connection_sets:
        connection_list.append(list(c))
        first_node_list.append(list(c)[0])

    intersection_list = []
    poly_list = []
    totalcost2repair = []
    totalpoles2repair = []
    totaltime2repair = []

    # Construct guid and nodenwid lists from the node dataset
    guid_list = []
    nodenwid_list = []
    for node_feature in node_dataset:
        # Get the guid column
        guid_fld_val = ''
        if self.guid_fldname.lower() in node_feature['properties']:
            guid_fld_val = node_feature['properties'][self.guid_fldname.lower()]
        elif self.guid_fldname in node_feature['properties']:
            guid_fld_val = node_feature['properties'][self.guid_fldname]
        guid_list.append(guid_fld_val)

        # Get the nodenwid column
        nodenwid_fld_val = ''
        if self.nodenwid_fld_name.lower() in node_feature['properties']:
            nodenwid_fld_val = int(node_feature['properties'][self.nodenwid_fld_name.lower()])
        elif self.nodenwid_fld_name in node_feature['properties']:
            nodenwid_fld_val = int(node_feature['properties'][self.nodenwid_fld_name])
        nodenwid_list.append(nodenwid_fld_val)

    for z in range(self.nmcs):
        nodedam = [0] * self.nnode  # number of damaged poles for each node
        noderepair = [0] * self.nnode  # repair cost for each node
        poles2repair = [0] * self.nnode  # total number of poles to repair
        cost2repairpath = [0] * self.nnode  # total repair cost for the network
        time2repairpath = [0] * self.nnode  # total repair time for the network
        nodetimerep = [0] * self.nnode
        hazardval = [[0]] * self.nnode  # hazard values
        demandtypes = [[""]] * self.nnode  # demand types
        demandunits = [[""]] * self.nnode  # demand units

        # Iterate over the links
        for line_feature in link_dataset:
            ndamage = 0  # number of damaged poles in each link
            repaircost = 0  # repair cost value
            repairtime = 0  # repair time value
            to_node_val = ""
            linetype_val = ""
            tor_hazard_values = [0]  # random wind speed in EF
            demand_types = [""]
            demand_units = [""]

            if self.tonode_fld_name.lower() in line_feature['properties']:
                to_node_val = line_feature['properties'][self.tonode_fld_name.lower()]
            elif self.tonode_fld_name in line_feature['properties']:
                to_node_val = line_feature['properties'][self.tonode_fld_name]

            if self.linetype_fld_name in line_feature['properties']:
                linetype_val = line_feature['properties'][self.linetype_fld_name]
            elif self.linetype_fld_name.lower() in line_feature['properties']:
                linetype_val = line_feature['properties'][self.linetype_fld_name.lower()]

            line = shape(line_feature['geometry'])

            # Iterate over the tornado features
            for tornado_feature in tornado_dataset:
                resistivity_probability = 0  # resistivity value at the point of wind speed
                random_resistivity = 0  # random resistivity value between 0 and 1
                sim_fld_val = ""
                ef_fld_val = ""

                # Get the EF rating and simulation number columns
                if self.tornado_sim_field_name.lower() in tornado_feature['properties']:
                    sim_fld_val = int(tornado_feature['properties'][self.tornado_sim_field_name.lower()])
                elif self.tornado_sim_field_name in tornado_feature['properties']:
                    sim_fld_val = int(tornado_feature['properties'][self.tornado_sim_field_name])

                if self.tornado_ef_field_name.lower() in tornado_feature['properties']:
                    ef_fld_val = tornado_feature['properties'][self.tornado_ef_field_name.lower()]
                elif self.tornado_ef_field_name in tornado_feature['properties']:
                    ef_fld_val = tornado_feature['properties'][self.tornado_ef_field_name]

                if sim_fld_val == "" or ef_fld_val == "":
                    print("unable to convert tornado simulation field value to integer")
                    sys.exit(0)

                # Get the tornado EF polygon
                # (assumes that the polygon is not a multipolygon)
                poly = shape(tornado_feature['geometry'])
                poly_list.append(poly)

                # Loop over the EF ranges
                for f in range(self.tornado_ef_rate):
                    npoles = 0  # number of poles in the tornado EF box
                    poleresist = 0  # pole's resistance value
                    # Set the EF rate value string to match the tornado dataset's attribute table
                    ef_content = "EF" + str(f)

                    # Compute the intersection between the link line and the EF polygon,
                    # and figure out the length of the line that overlaps the EF box
                    if sim_fld_val == z and ef_fld_val.lower() == ef_content.lower():
                        if poly is not None and line is not None:
                            if poly.intersects(line):
                                intersection = poly.intersection(line)
                                any_point = None
                                intersection_length = intersection.length
                                if intersection.length > 0:
                                    # Calculate the length of the intersected line. Since this is
                                    # geographic, it has to be projected to meters to be calculated.
                                    inter_length_meter = GeoUtil.calc_geog_distance_from_linestring(intersection)
                                    if isinstance(intersection, MultiLineString):
                                        intersection_list.append(intersection)
                                        for inter_line in intersection.geoms:
                                            any_point = inter_line.centroid
                                            break
                                    elif isinstance(intersection, LineString):
                                        intersection_list.append(intersection)
                                        any_point = intersection.centroid
                                        # A random point is also possible by changing the value 0.5:
                                        # any_point = intersection.interpolate(0.5, normalized=True)

                                if any_point is not None:
                                    # Check that any_point is in the polygon
                                    if poly.contains(any_point) is False:
                                        # This rarely happens, but is needed just in case
                                        any_point = poly.centroid

                                # Check whether the line is a tower or a pole
                                if linetype_val.lower() == self.line_transmission:
                                    fragility_set_used = fragility_set_tower
                                else:
                                    fragility_set_used = fragility_set_pole

                                values_payload = [{
                                    "demands": [x.lower() for x in fragility_set_used.demand_types],
                                    "units": [x.lower() for x in fragility_set_used.demand_units],
                                    "loc": str(any_point.coords[0][1]) + "," + str(any_point.coords[0][0])
                                }]
                                h_vals = self.hazardsvc.post_tornado_hazard_values(
                                    tornado_id, values_payload, self.get_parameter('seed'))
                                tor_hazard_values = AnalysisUtil.update_precision_of_lists(
                                    h_vals[0]["hazardValues"])
                                demand_types = h_vals[0]["demands"]
                                demand_units = h_vals[0]["units"]
                                hval_dict = dict()
                                j = 0
                                for d in h_vals[0]["demands"]:
                                    hval_dict[d] = tor_hazard_values[j]
                                    j += 1

                                if isinstance(fragility_set_used.fragility_curves[0], DFR3Curve):
                                    inventory_args = fragility_set_used.construct_expression_args_from_inventory(
                                        tornado_feature)
                                    resistivity_probability = fragility_set_used.calculate_limit_state(
                                        hval_dict, inventory_type=fragility_set_used.inventory_type,
                                        **inventory_args)
                                else:
                                    raise ValueError("One of the fragilities is in a deprecated format. "
                                                     "This should not happen. If you are seeing this, "
                                                     "please report the issue.")

                                # Randomly generated capacity of each pole; 1 m/s is 2.23694 mph
                                poleresist = resistivity_probability.get('LS_0') * 2.23694
                                npoles = int(round(inter_length_meter / self.pole_distance))
                                repairtime_list = []

                                for k in range(npoles):
                                    random_resistivity = random.uniform(0, 1)

                                    if random_resistivity <= poleresist:
                                        ndamage += 1
                                        # The following code couldn't be converted from MATLAB to Python;
                                        # however, the cross product <= 3 or == 24 almost never happens.
                                        # Since the time and cost differ between pole and tower, this
                                        # could be changed by checking whether it is a tower or a pole.
                                        # if numpy.cross(k, z) <= 3 or numpy.cross(k, z) == 24:
                                        if linetype_val.lower() == self.line_transmission:
                                            mu = self.mut
                                            sigma = self.sigmat
                                            tmu = self.tmut
                                            tsigma = self.tsigmat
                                        else:
                                            mu = self.mud
                                            sigma = self.sigmad
                                            tmu = self.tmud
                                            tsigma = self.tsigmad

                                        repairtime_list.append(numpy.random.normal(tmu, tsigma))

                                for k in range(ndamage):
                                    repaircost += numpy.random.lognormal(mu, sigma)

                                # The max of the repair times among the different poles is taken
                                # as the repair time for that line
                                if len(repairtime_list) > 0:
                                    repairtime = max(repairtime_list)

            noderepair[to_node_val - 1] = repaircost
            nodedam[to_node_val - 1] = ndamage
            nodetimerep[to_node_val - 1] = repairtime
            hazardval[to_node_val - 1] = tor_hazard_values
            demandtypes[to_node_val - 1] = demand_types
            demandunits[to_node_val - 1] = demand_units

        # Calculate damage and repair cost based on the network
        for i in range(len(first_node_list)):
            for j in range(len(connection_list[i])):
                pathij = list(nx.all_simple_paths(graph, connection_list[i][j], first_node_list[i]))
                poler = 0
                coster = 0
                timer = []
                if len(pathij) > 0:
                    for k in range(len(pathij)):
                        for var1 in range(len(pathij[k])):
                            poler = poler + nodedam[pathij[k][var1]]
                            coster = coster + noderepair[pathij[k][var1]]
                            # The max of the times for the different lines is taken as the repair
                            # time for that path (a path is constituted of different lines).
                            timer.append(nodetimerep[pathij[k][var1]])
                poles2repair[connection_list[i][j]] = poler
                cost2repairpath[connection_list[i][j]] = coster
                if len(timer) > 0:
                    time2repairpath[connection_list[i][j]] = max(timer)
                else:
                    time2repairpath[connection_list[i][j]] = 0

        totalcost2repair.append(cost2repairpath)
        totalpoles2repair.append(poles2repair)
        totaltime2repair.append(time2repairpath)

    # Calculate the means and standard deviations across simulations
    meanpoles = numpy.mean(numpy.asarray(totalpoles2repair), axis=0)
    stdpoles = numpy.std(numpy.asarray(totalpoles2repair), axis=0)
    meancost = numpy.mean(numpy.asarray(totalcost2repair), axis=0)
    stdcost = numpy.std(numpy.asarray(totalcost2repair), axis=0)
    meantime = numpy.mean(numpy.asarray(totaltime2repair), axis=0)
    stdtime = numpy.std(numpy.asarray(totaltime2repair), axis=0)

    # Create the results
    ds_results = []
    damage_results = []
    for i in range(len(meanpoles)):
        ds_result = dict()
        damage_result = dict()

        ds_result['guid'] = guid_list[i]
        ds_result["meanpoles"] = meanpoles[i]
        ds_result["stdpoles"] = stdpoles[i]
        ds_result["meancost"] = meancost[i]
        ds_result["stdcost"] = stdcost[i]
        ds_result["meantime"] = meantime[i]
        ds_result["stdtime"] = stdtime[i]
        ds_result['haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(hazardval[i], "tornado")

        damage_result['guid'] = guid_list[i]
        damage_result["fragility_tower_id"] = self.fragility_tower_id
        damage_result["fragility_pole_id"] = self.fragility_pole_id
        damage_result["hazardtype"] = "Tornado"
        damage_result['hazardvals'] = hazardval[i]
        damage_result['demandtypes'] = demandtypes[i]
        damage_result['demandunits'] = demandunits[i]

        ds_results.append(ds_result)
        damage_results.append(damage_result)

    return ds_results, damage_results
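
# The Monte Carlo loop above appends one per-node repair vector per simulation;
# the summary statistics then reduce across the simulation axis. A minimal numpy
# illustration with made-up numbers (two simulations, three nodes):
import numpy

totalpoles2repair_example = [[2, 0, 5], [3, 1, 4]]
meanpoles_example = numpy.mean(numpy.asarray(totalpoles2repair_example), axis=0)  # [2.5, 0.5, 4.5]
stdpoles_example = numpy.std(numpy.asarray(totalpoles2repair_example), axis=0)    # [0.5, 0.5, 0.5]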
def epf_damage_analysis_bulk_input(self, epfs, hazard_type, hazard_dataset_id):
    """Run analysis for multiple epfs.

    Args:
        epfs (list): Multiple epfs from input inventory set.
        hazard_type (str): A type of hazard exposure (earthquake, tsunami, tornado, or hurricane).
        hazard_dataset_id (str): An id of the hazard exposure.

    Returns:
        list: A list of ordered dictionaries with epf damage values and other data/metadata.

    """
    use_liquefaction = False
    liquefaction_available = False

    fragility_key = self.get_parameter("fragility_key")

    fragility_set = self.fragilitysvc.match_inventory(
        self.get_input_dataset("dfr3_mapping_set"), epfs, fragility_key)

    if hazard_type == "earthquake":
        liquefaction_fragility_key = self.get_parameter("liquefaction_fragility_key")
        if self.get_parameter("use_liquefaction") is True:
            if liquefaction_fragility_key is None:
                liquefaction_fragility_key = self.DEFAULT_LIQ_FRAGILITY_KEY
            use_liquefaction = self.get_parameter("use_liquefaction")

            # Obtain the geology dataset
            geology_dataset_id = self.get_parameter("liquefaction_geology_dataset_id")
            if geology_dataset_id is not None:
                fragility_sets_liq = self.fragilitysvc.match_inventory(
                    self.get_input_dataset("dfr3_mapping_set"), epfs, liquefaction_fragility_key)
                if fragility_sets_liq is not None:
                    liquefaction_available = True

    values_payload = []
    values_payload_liq = []
    unmapped_epfs = []
    mapped_epfs = []

    for epf in epfs:
        epf_id = epf["id"]
        if epf_id in fragility_set:
            location = GeoUtil.get_location(epf)
            loc = str(location.y) + "," + str(location.x)
            demands = fragility_set[epf_id].demand_types
            units = fragility_set[epf_id].demand_units
            value = {"demands": demands, "units": units, "loc": loc}
            values_payload.append(value)
            mapped_epfs.append(epf)

            if liquefaction_available and epf["id"] in fragility_sets_liq:
                fragility_set_liq = fragility_sets_liq[epf["id"]]
                demands_liq = fragility_set_liq.demand_types
                units_liq = fragility_set_liq.demand_units
                value_liq = {"demands": demands_liq, "units": units_liq, "loc": loc}
                values_payload_liq.append(value_liq)
        else:
            unmapped_epfs.append(epf)

    if hazard_type == 'earthquake':
        hazard_vals = self.hazardsvc.post_earthquake_hazard_values(hazard_dataset_id, values_payload)
    elif hazard_type == 'tornado':
        hazard_vals = self.hazardsvc.post_tornado_hazard_values(hazard_dataset_id, values_payload)
    elif hazard_type == 'hurricane':
        # TODO: implement hurricane
        raise ValueError('Hurricane hazard has not yet been implemented!')
    elif hazard_type == 'tsunami':
        hazard_vals = self.hazardsvc.post_tsunami_hazard_values(hazard_dataset_id, values_payload)
    else:
        raise ValueError("Missing hazard type.")

    liquefaction_resp = None
    if liquefaction_available:
        liquefaction_resp = self.hazardsvc.post_liquefaction_values(
            hazard_dataset_id, geology_dataset_id, values_payload_liq)

    ds_results = []
    damage_results = []

    i = 0
    for epf in mapped_epfs:
        ds_result = dict()
        damage_result = dict()
        selected_fragility_set = fragility_set[epf["id"]]

        if isinstance(selected_fragility_set.fragility_curves[0], DFR3Curve):
            hazard_val = AnalysisUtil.update_precision_of_lists(hazard_vals[i]["hazardValues"])
            input_demand_types = hazard_vals[i]["demands"]
            input_demand_units = hazard_vals[i]["units"]

            hval_dict = dict()
            j = 0
            for d in selected_fragility_set.demand_types:
                hval_dict[d] = hazard_val[j]
                j += 1

            epf_args = selected_fragility_set.construct_expression_args_from_inventory(epf)
            limit_states = selected_fragility_set.calculate_limit_state(
                hval_dict, inventory_type='electric_facility', **epf_args)

            if liquefaction_resp is not None:
                fragility_set_liq = fragility_sets_liq[epf["id"]]

                if isinstance(fragility_set_liq.fragility_curves[0], DFR3Curve):
                    liq_hazard_vals = AnalysisUtil.update_precision_of_lists(
                        liquefaction_resp[i]["pgdValues"])
                    liq_demand_types = liquefaction_resp[i]["demands"]
                    liq_demand_units = liquefaction_resp[i]["units"]
                    liquefaction_prob = liquefaction_resp[i]['liqProbability']

                    hval_dict_liq = dict()
                    for j, d in enumerate(fragility_set_liq.demand_types):
                        hval_dict_liq[d] = liq_hazard_vals[j]

                    facility_liq_args = fragility_set_liq.construct_expression_args_from_inventory(epf)
                    pgd_limit_states = fragility_set_liq.calculate_limit_state(
                        hval_dict_liq, inventory_type="electric_facility", **facility_liq_args)
                else:
                    raise ValueError("One of the fragilities is in a deprecated format. "
                                     "This should not happen. If you are seeing this, "
                                     "please report the issue.")

                limit_states = AnalysisUtil.adjust_limit_states_for_pgd(limit_states, pgd_limit_states)

            dmg_interval = selected_fragility_set.calculate_damage_interval(
                limit_states, hazard_type=hazard_type, inventory_type='electric_facility')
        else:
            raise ValueError("One of the fragilities is in a deprecated format. "
                             "This should not happen. If you are seeing this, please report the issue.")

        ds_result["guid"] = epf["properties"]["guid"]
        ds_result.update(limit_states)
        ds_result.update(dmg_interval)
        ds_result['haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(hazard_val, hazard_type)

        damage_result['guid'] = epf['properties']['guid']
        damage_result['fragility_id'] = selected_fragility_set.id
        damage_result["demandtypes"] = input_demand_types
        damage_result["demandunits"] = input_demand_units
        damage_result["hazardtype"] = hazard_type
        damage_result["hazardvals"] = hazard_val

        if hazard_type == "earthquake" and use_liquefaction is True:
            if liquefaction_available:
                damage_result['liq_fragility_id'] = fragility_sets_liq[epf["id"]].id
                damage_result['liqdemandtypes'] = liq_demand_types
                damage_result['liqdemandunits'] = liq_demand_units
                damage_result['liqhazval'] = liq_hazard_vals
                damage_result['liqprobability'] = liquefaction_prob
            else:
                damage_result['liq_fragility_id'] = None
                damage_result['liqdemandtypes'] = None
                damage_result['liqdemandunits'] = None
                damage_result['liqhazval'] = None
                damage_result['liqprobability'] = None

        ds_results.append(ds_result)
        damage_results.append(damage_result)
        i += 1

    # Unmapped epfs
    for epf in unmapped_epfs:
        ds_result = dict()
        damage_result = dict()

        ds_result['guid'] = epf['properties']['guid']
        damage_result['guid'] = epf['properties']['guid']
        damage_result['fragility_id'] = None
        damage_result["demandtypes"] = None
        damage_result['demandunits'] = None
        damage_result["hazardtype"] = None
        damage_result['hazardval'] = None
        if hazard_type == "earthquake" and use_liquefaction is True:
            damage_result['liq_fragility_id'] = None
            damage_result['liqdemandtypes'] = None
            damage_result['liqdemandunits'] = None
            damage_result['liqhazval'] = None
            damage_result['liqprobability'] = None

        ds_results.append(ds_result)
        damage_results.append(damage_result)

    return ds_results, damage_results
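
# Shape of a single entry in the values_payload lists assembled above: the
# location is a "lat,lon" string and the demands/units come from the mapped
# fragility set. The values shown here are illustrative only.
example_payload_entry = {
    "demands": ["PGA"],
    "units": ["g"],
    "loc": "35.07899,-90.0178",
}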
def building_damage_analysis_bulk_input(self, buildings):
    """Run analysis for multiple buildings.

    Args:
        buildings (list): Multiple buildings from input inventory set.

    Returns:
        dict: An ordered dictionary with building damage values.
        dict: An ordered dictionary with building data/metadata.

    """
    # Read static parameters from self
    hazard_type = self.get_parameter("hazard_type")
    hazard_dataset_id = self.get_parameter("hazard_id")
    liq_geology_dataset_id = self.get_parameter("liq_geology_dataset_id")
    use_liquefaction = self.get_parameter("use_liquefaction")
    use_hazard_uncertainty = self.get_parameter("use_hazard_uncertainty")

    building_results = []
    damage_results = []
    fragility_sets_as = self.fragilitysvc.match_inventory(
        self.get_input_dataset("dfr3_mapping_set"), buildings, self.get_parameter("fragility_key_as"))
    fragility_sets_ds = self.fragilitysvc.match_inventory(
        self.get_input_dataset("dfr3_mapping_set"), buildings, self.get_parameter("fragility_key_ds"))

    values_payload_as = []
    values_payload_ds = []
    values_payload_liq = []
    mapped_buildings = []
    unmapped_buildings = []

    for building in buildings:
        if building["id"] in fragility_sets_as and building["id"] in fragility_sets_ds:
            fragility_set_as = fragility_sets_as[building["id"]]
            fragility_set_ds = fragility_sets_ds[building["id"]]
            location = GeoUtil.get_location(building)
            loc = str(location.y) + "," + str(location.x)

            # Acceleration-sensitive
            demands_as = AnalysisUtil.get_hazard_demand_types(building, fragility_set_as, hazard_type)
            units_as = fragility_set_as.demand_units
            value_as = {"demands": demands_as, "units": units_as, "loc": loc}
            values_payload_as.append(value_as)

            # Drift-sensitive
            demands_ds = AnalysisUtil.get_hazard_demand_types(building, fragility_set_ds, hazard_type)
            units_ds = fragility_set_ds.demand_units
            value_ds = {"demands": demands_ds, "units": units_ds, "loc": loc}
            values_payload_ds.append(value_ds)

            # Liquefaction
            if use_liquefaction:
                value_liq = {
                    "demands": ["pgd"],  # implied
                    "units": ["in"],
                    "loc": loc
                }
                values_payload_liq.append(value_liq)

            mapped_buildings.append(building)
        else:
            unmapped_buildings.append(building)

    del buildings

    # Get hazard values and liquefaction
    if hazard_type == 'earthquake':
        hazard_resp_as = self.hazardsvc.post_earthquake_hazard_values(hazard_dataset_id, values_payload_as)
        hazard_resp_ds = self.hazardsvc.post_earthquake_hazard_values(hazard_dataset_id, values_payload_ds)

        # Adjust damage probability for liquefaction
        if use_liquefaction:
            if liq_geology_dataset_id is not None:
                liquefaction_resp = self.hazardsvc.post_liquefaction_values(
                    hazard_dataset_id, liq_geology_dataset_id, values_payload_liq)
            else:
                raise ValueError('Hazard does not support liquefaction! Check to make sure you defined '
                                 'the liquefaction portion of your scenario earthquake.')
    else:
        raise ValueError("The provided hazard type is not supported yet by this analysis.")

    # Calculate limit states and damage states
    for i, building in enumerate(mapped_buildings):
        dmg_probability_as = {"LS_0": None, "LS_1": None, "LS_2": None}
        dmg_interval_as = {"DS_0": None, "DS_1": None, "DS_2": None, "DS_3": None}
        dmg_probability_ds = {"LS_0": None, "LS_1": None, "LS_2": None}
        dmg_interval_ds = {"DS_0": None, "DS_1": None, "DS_2": None, "DS_3": None}
        fragility_set_as = fragility_sets_as[building["id"]]
        fragility_set_ds = fragility_sets_ds[building["id"]]

        # TODO: this value needs to come from the hazard service
        # Adjust damage probability for hazard uncertainty
        if use_hazard_uncertainty:
            raise ValueError('Uncertainty has not yet been implemented!')

        # Acceleration-sensitive (AS)
        if isinstance(fragility_set_as.fragility_curves[0], DFR3Curve):
            hazard_vals_as = AnalysisUtil.update_precision_of_lists(hazard_resp_as[i]["hazardValues"])
            demand_types_as = hazard_resp_as[i]["demands"]
            demand_units_as = hazard_resp_as[i]["units"]

            hval_dict_as = dict()
            for j, d in enumerate(fragility_set_as.demand_types):
                hval_dict_as[d] = hazard_vals_as[j]

            if not AnalysisUtil.do_hazard_values_have_errors(hazard_resp_as[i]["hazardValues"]):
                building_args = fragility_set_as.construct_expression_args_from_inventory(building)
                dmg_probability_as = fragility_set_as.calculate_limit_state(
                    hval_dict_as, inventory_type="building", **building_args)

                # Adjust damage probability for liquefaction
                if use_liquefaction:
                    if liq_geology_dataset_id is not None:
                        liquefaction_dmg = AnalysisUtil.update_precision_of_lists(
                            liquefaction_resp[i]["groundFailureProb"])
                        dmg_probability_as = AnalysisUtil.update_precision_of_dicts(
                            NonStructBuildingUtil.adjust_damage_for_liquefaction(
                                dmg_probability_as, liquefaction_dmg))

                dmg_interval_as = fragility_set_as.calculate_damage_interval(
                    dmg_probability_as, hazard_type=hazard_type, inventory_type="building")
        else:
            raise ValueError("One of the fragilities is in a deprecated format. "
                             "This should not happen. If you are seeing this, please report the issue.")

        # Drift-sensitive (DS)
        if isinstance(fragility_set_ds.fragility_curves[0], DFR3Curve):
            hazard_vals_ds = AnalysisUtil.update_precision_of_lists(hazard_resp_ds[i]["hazardValues"])
            demand_types_ds = hazard_resp_ds[i]["demands"]
            demand_units_ds = hazard_resp_ds[i]["units"]

            hval_dict_ds = dict()
            for j, d in enumerate(fragility_set_ds.demand_types):
                hval_dict_ds[d] = hazard_vals_ds[j]

            if not AnalysisUtil.do_hazard_values_have_errors(hazard_resp_ds[i]["hazardValues"]):
                building_args = fragility_set_ds.construct_expression_args_from_inventory(building)
                dmg_probability_ds = fragility_set_ds.calculate_limit_state(
                    hval_dict_ds, inventory_type="building", **building_args)

                # Adjust damage probability for liquefaction
                if use_liquefaction:
                    if liq_geology_dataset_id is not None:
                        liquefaction_dmg = AnalysisUtil.update_precision_of_lists(
                            liquefaction_resp[i]["groundFailureProb"])
                        dmg_probability_ds = AnalysisUtil.update_precision_of_dicts(
                            NonStructBuildingUtil.adjust_damage_for_liquefaction(
                                dmg_probability_ds, liquefaction_dmg))

                dmg_interval_ds = fragility_set_ds.calculate_damage_interval(
                    dmg_probability_ds, hazard_type=hazard_type, inventory_type="building")
        else:
            raise ValueError("One of the fragilities is in a deprecated format. "
                             "This should not happen. If you are seeing this, please report the issue.")

        # Put results in a dictionary.
        # AS denotes the acceleration-sensitive fragility assigned to the building;
        # DS denotes the drift-sensitive fragility assigned to the building.
        building_result = dict()
        building_result['guid'] = building['properties']['guid']
        building_result['AS_LS_0'] = dmg_probability_as['LS_0']
        building_result['AS_LS_1'] = dmg_probability_as['LS_1']
        building_result['AS_LS_2'] = dmg_probability_as['LS_2']
        building_result['AS_DS_0'] = dmg_interval_as['DS_0']
        building_result['AS_DS_1'] = dmg_interval_as['DS_1']
        building_result['AS_DS_2'] = dmg_interval_as['DS_2']
        building_result['AS_DS_3'] = dmg_interval_as['DS_3']
        building_result['DS_LS_0'] = dmg_probability_ds['LS_0']
        building_result['DS_LS_1'] = dmg_probability_ds['LS_1']
        building_result['DS_LS_2'] = dmg_probability_ds['LS_2']
        building_result['DS_DS_0'] = dmg_interval_ds['DS_0']
        building_result['DS_DS_1'] = dmg_interval_ds['DS_1']
        building_result['DS_DS_2'] = dmg_interval_ds['DS_2']
        building_result['DS_DS_3'] = dmg_interval_ds['DS_3']
        building_result['hazard_exposure_as'] = AnalysisUtil.get_exposure_from_hazard_values(
            hazard_vals_as, hazard_type)
        building_result['hazard_exposure_ds'] = AnalysisUtil.get_exposure_from_hazard_values(
            hazard_vals_ds, hazard_type)

        # Put damage results in a dictionary
        damage_result = dict()
        damage_result['guid'] = building['properties']['guid']
        damage_result['fragility_id_as'] = fragility_set_as.id
        damage_result['demandtypes_as'] = demand_types_as
        damage_result['demandunits_as'] = demand_units_as
        damage_result['fragility_id_ds'] = fragility_set_ds.id
        damage_result['demandtypes_ds'] = demand_types_ds
        damage_result['demandunits_ds'] = demand_units_ds
        damage_result['hazardtype'] = hazard_type
        damage_result['hazardvals_as'] = hazard_vals_as
        damage_result['hazardvals_ds'] = hazard_vals_ds

        building_results.append(building_result)
        damage_results.append(damage_result)

    for building in unmapped_buildings:
        building_result = dict()
        building_result['guid'] = building['properties']['guid']

        damage_result = dict()
        damage_result['guid'] = building['properties']['guid']
        damage_result['fragility_id_as'] = None
        damage_result['demandtypes_as'] = None
        damage_result['demandunits_as'] = None
        damage_result['fragility_id_ds'] = None
        damage_result['demandtypes_ds'] = None
        damage_result['demandunits_ds'] = None
        damage_result['hazardtype'] = None
        damage_result['hazardvals_as'] = None
        damage_result['hazardvals_ds'] = None

        building_results.append(building_result)
        damage_results.append(damage_result)

    return building_results, damage_results
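
# get_exposure_from_hazard_values summarizes whether the site was exposed to the
# hazard at all. A hedged sketch of one plausible classification rule (the real
# logic lives in AnalysisUtil and may differ in flag names and thresholds):
def exposure_flag_sketch(hazard_values):
    if all(v is None for v in hazard_values):
        return "error"
    return "yes" if any(v is not None and v > 0 for v in hazard_values) else "no"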
def waterfacility_damage_analysis(self, facility, fragility, liq_fragility, hazard_type,
                                  hazard_dataset_id, liq_geology_dataset_id, uncertainty):
    """Computes damage analysis for a single facility.

    Args:
        facility (obj): A JSON mapping of a facility based on mapping attributes.
        fragility (obj): A JSON description of fragility mapped to the facility.
        liq_fragility (obj): A JSON description of liquefaction fragility mapped to the facility.
        hazard_type (str): A string that indicates the hazard type.
        hazard_dataset_id (str): Hazard id from the hazard service.
        liq_geology_dataset_id (str): Geology dataset id from the data service to use for
            liquefaction calculation, if applicable.
        uncertainty (bool): Whether to use hazard standard deviation values for uncertainty.

    Returns:
        OrderedDict: A dictionary with water facility damage values and other data/metadata.

    """
    std_dev = 0
    if uncertainty:
        std_dev = random.random()

    hazard_demand_type = fragility.demand_type
    demand_units = fragility.demand_units
    liq_hazard_type = ""
    liq_hazard_val = 0.0
    liquefaction_prob = 0.0
    location = GeoUtil.get_location(facility)
    point = str(location.y) + "," + str(location.x)

    if hazard_type == "earthquake":
        hazard_val_set = self.hazardsvc.get_earthquake_hazard_values(
            hazard_dataset_id, hazard_demand_type, demand_units, [point])
    elif hazard_type == "tsunami":
        hazard_val_set = self.hazardsvc.get_tsunami_hazard_values(
            hazard_dataset_id, hazard_demand_type, demand_units, [point])
    else:
        raise ValueError("Hazard types other than earthquake and tsunami are not currently supported.")

    hazard_val = hazard_val_set[0]['hazardValue']
    if hazard_val < 0:
        hazard_val = 0

    limit_states = fragility.calculate_limit_state(hazard_val, std_dev)

    if liq_fragility is not None and liq_geology_dataset_id:
        liq_hazard_type = liq_fragility.demand_type
        pgd_demand_units = liq_fragility.demand_units
        point = str(location.y) + "," + str(location.x)

        liquefaction = self.hazardsvc.get_liquefaction_values(
            hazard_dataset_id, liq_geology_dataset_id, pgd_demand_units, [point])
        liq_hazard_val = liquefaction[0][liq_hazard_type]
        liquefaction_prob = liquefaction[0]['liqProbability']
        pgd_limit_states = liq_fragility.calculate_limit_state(liq_hazard_val, std_dev)
        limit_states = AnalysisUtil.adjust_limit_states_for_pgd(limit_states, pgd_limit_states)

    dmg_intervals = AnalysisUtil.calculate_damage_interval(limit_states)

    result = {**limit_states, **dmg_intervals}  # needs Python 3.5+

    metadata = collections.OrderedDict()
    metadata['guid'] = facility['properties']['guid']
    metadata['hazardtype'] = hazard_type
    metadata['demandtype'] = hazard_demand_type
    metadata['hazardval'] = hazard_val
    metadata['liqhaztype'] = liq_hazard_type
    metadata['liqhazval'] = liq_hazard_val
    metadata['liqprobability'] = liquefaction_prob

    result = {**metadata, **result}
    return result
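
# Shape of one entry in the liquefaction response consumed above: the function
# indexes it by the liquefaction fragility's demand type (e.g., "pgd") and by
# 'liqProbability'. The values shown here are illustrative only.
example_liquefaction_entry = {
    "pgd": 2.1,              # permanent ground deformation, in the requested demand units
    "liqProbability": 0.35,  # probability that liquefaction occurs
}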
def building_damage_analysis(self, building, fragility_set_as, fragility_set_ds):
    """Calculates building damage results for a single building.

    Args:
        building (obj): A JSON mapping of a geometric object from the inventory: current building.
        fragility_set_as (obj): A JSON description of acceleration-sensitive (AS) fragility
            assigned to the building.
        fragility_set_ds (obj): A JSON description of drift-sensitive (DS) fragility
            assigned to the building.

    Returns:
        OrderedDict: A dictionary with building damage values and other data/metadata.

    """
    building_results = collections.OrderedDict()
    dmg_probability_as = collections.OrderedDict()
    dmg_probability_ds = collections.OrderedDict()
    hazard_demand_type_as = None
    hazard_demand_type_ds = None
    hazard_val_as = 0.0
    hazard_val_ds = 0.0

    # read static parameters from object self
    hazard_dataset_id = self.get_parameter("hazard_id")
    liq_geology_dataset_id = self.get_parameter("liq_geology_dataset_id")
    use_liquefaction = self.get_parameter("use_liquefaction")
    use_hazard_uncertainty = self.get_parameter("use_hazard_uncertainty")

    # Acceleration-Sensitive Fragility ID Code
    if fragility_set_as is not None:
        hazard_demand_type_as = AnalysisUtil.get_hazard_demand_type(
            building, fragility_set_as, 'earthquake')
        demand_units_as = fragility_set_as.demand_units
        location = GeoUtil.get_location(building)
        point = str(location.y) + "," + str(location.x)

        hazard_val_as = self.hazardsvc.get_earthquake_hazard_values(
            hazard_dataset_id, hazard_demand_type_as,
            demand_units_as, points=[point])[0]['hazardValue']

        dmg_probability_as = fragility_set_as.calculate_limit_state(hazard_val_as)

        # adjust dmg probability for liquefaction
        if use_liquefaction:
            if liq_geology_dataset_id is not None:
                liquefaction_dmg = self.hazardsvc.get_liquefaction_values(
                    hazard_dataset_id, liq_geology_dataset_id,
                    'in', points=[point])[0]['groundFailureProb']
            else:
                raise ValueError(
                    'Hazard does not support liquefaction! Check to make sure you '
                    'defined the liquefaction portion of your scenario earthquake.')
            dmg_probability_as = NonStructBuildingUtil.adjust_damage_for_liquefaction(
                dmg_probability_as, liquefaction_dmg)

        # TODO this value needs to come from the hazard service
        # adjust dmg probability for hazard uncertainty
        if use_hazard_uncertainty:
            raise ValueError('Uncertainty has not yet been implemented!')
    else:
        dmg_probability_as['immocc'] = 0.0
        dmg_probability_as['lifesfty'] = 0.0
        dmg_probability_as['collprev'] = 0.0

    dmg_interval_as = AnalysisUtil.calculate_damage_interval(dmg_probability_as)

    # Drift-Sensitive Fragility ID Code
    if fragility_set_ds is not None:
        hazard_demand_type_ds = AnalysisUtil.get_hazard_demand_type(
            building, fragility_set_ds, 'earthquake')
        demand_units_ds = fragility_set_ds.demand_units
        location = GeoUtil.get_location(building)
        point = str(location.y) + "," + str(location.x)

        hazard_val_ds = self.hazardsvc.get_earthquake_hazard_values(
            hazard_dataset_id, hazard_demand_type_ds,
            demand_units_ds, points=[point])[0]['hazardValue']

        dmg_probability_ds = fragility_set_ds.calculate_limit_state(hazard_val_ds)

        # adjust dmg probability for liquefaction
        if use_liquefaction:
            if liq_geology_dataset_id is not None:
                liquefaction_dmg = self.hazardsvc.get_liquefaction_values(
                    hazard_dataset_id, liq_geology_dataset_id,
                    'in', points=[point])[0]['groundFailureProb']
            else:
                raise ValueError(
                    'Hazard does not support liquefaction! Check to make sure you '
                    'defined the liquefaction portion of your scenario earthquake.')
            dmg_probability_ds = NonStructBuildingUtil.adjust_damage_for_liquefaction(
                dmg_probability_ds, liquefaction_dmg)

        # TODO this value needs to come from the hazard service
        # adjust dmg probability for hazard uncertainty
        if use_hazard_uncertainty:
            raise ValueError('Uncertainty has not yet been implemented!')
    else:
        dmg_probability_ds['immocc'] = 0.0
        dmg_probability_ds['lifesfty'] = 0.0
        dmg_probability_ds['collprev'] = 0.0

    dmg_interval_ds = AnalysisUtil.calculate_damage_interval(dmg_probability_ds)

    # put results in dictionary
    building_results['guid'] = building['properties']['guid']
    building_results['immocc_as'] = dmg_probability_as['immocc']
    building_results['lifsfty_as'] = dmg_probability_as['lifesfty']
    building_results['collpre_as'] = dmg_probability_as['collprev']
    building_results['insig_as'] = dmg_interval_as['insignific']
    building_results['mod_as'] = dmg_interval_as['moderate']
    building_results['heavy_as'] = dmg_interval_as['heavy']
    building_results['comp_as'] = dmg_interval_as['complete']
    building_results['immocc_ds'] = dmg_probability_ds['immocc']
    building_results['lifsfty_ds'] = dmg_probability_ds['lifesfty']
    building_results['collpre_ds'] = dmg_probability_ds['collprev']
    building_results['insig_ds'] = dmg_interval_ds['insignific']
    building_results['mod_ds'] = dmg_interval_ds['moderate']
    building_results['heavy_ds'] = dmg_interval_ds['heavy']
    building_results['comp_ds'] = dmg_interval_ds['complete']
    building_results["hzrdtyp_as"] = hazard_demand_type_as
    building_results["hzrdval_as"] = hazard_val_as
    building_results["hzrdtyp_ds"] = hazard_demand_type_ds
    building_results["hzrdval_ds"] = hazard_val_ds

    return building_results
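# --- Hedged sketch (illustrative, not a pyincore API) ---
# adjust_damage_for_liquefaction above folds a ground-failure probability into
# each shaking limit state. A common way to combine two independent failure
# probabilities is P(A or B) = P(A) + P(B) - P(A) * P(B); this sketch assumes
# that form, which may differ from NonStructBuildingUtil's exact rule.
def _combine_with_ground_failure(limit_states, ground_failure_prob):
    return {state: p + ground_failure_prob - p * ground_failure_prob
            for state, p in limit_states.items()}

# e.g. _combine_with_ground_failure({'immocc': 0.4}, 0.1) -> {'immocc': 0.46}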
def pipeline_damage_analysis_bulk_input(self, pipelines, hazard_type,
                                        hazard_dataset_id):
    """Run pipeline damage analysis for multiple pipelines.

    Args:
        pipelines (list): Multiple pipelines from pipeline dataset.
        hazard_type (str): Hazard type (earthquake or tsunami).
        hazard_dataset_id (str): An id of the hazard exposure.

    Returns:
        list: A list of ordered dictionaries with pipeline damage values.
        list: A list of ordered dictionaries with other pipeline data/metadata.

    """
    # Get Fragility key
    fragility_key = self.get_parameter("fragility_key")
    if fragility_key is None:
        fragility_key = "Non-Retrofit inundationDepth Fragility ID Code" \
            if hazard_type == 'tsunami' else "pgv"
        self.set_parameter("fragility_key", fragility_key)

    # get fragility set
    fragility_sets = self.fragilitysvc.match_inventory(
        self.get_input_dataset("dfr3_mapping_set"), pipelines, fragility_key)

    values_payload = []
    unmapped_pipelines = []
    mapped_pipelines = []
    for pipeline in pipelines:
        # if we find a matching fragility for that pipeline
        if pipeline["id"] in fragility_sets.keys():
            fragility_set = fragility_sets[pipeline["id"]]
            location = GeoUtil.get_location(pipeline)
            loc = str(location.y) + "," + str(location.x)
            demands = AnalysisUtil.get_hazard_demand_types(
                pipeline, fragility_set, hazard_type)
            units = fragility_sets[pipeline["id"]].demand_units
            value = {"demands": demands, "units": units, "loc": loc}
            values_payload.append(value)
            mapped_pipelines.append(pipeline)
        else:
            unmapped_pipelines.append(pipeline)

    # not needed anymore as they are already split into mapped and unmapped
    del pipelines

    if hazard_type == 'earthquake':
        hazard_vals = self.hazardsvc.post_earthquake_hazard_values(
            hazard_dataset_id, values_payload)
    elif hazard_type == 'tsunami':
        hazard_vals = self.hazardsvc.post_tsunami_hazard_values(
            hazard_dataset_id, values_payload)
    else:
        # tornado, hurricane, flood, and anything else
        raise ValueError(
            "The provided hazard type is not supported yet by this analysis")

    pipeline_results = []
    damage_results = []
    for i, pipeline in enumerate(mapped_pipelines):
        limit_states = dict()
        dmg_intervals = dict()
        pipeline_result = dict()
        fragility_set = fragility_sets[pipeline["id"]]

        # TODO: Once all fragilities are migrated to new format, we can remove this condition
        if isinstance(fragility_set.fragility_curves[0], DFR3Curve):
            # Supports multiple demand types in same fragility
            haz_vals = AnalysisUtil.update_precision_of_lists(
                hazard_vals[i]["hazardValues"])
            demand_types = hazard_vals[i]["demands"]
            demand_units = hazard_vals[i]["units"]

            # construct hazard_value dictionary {"demand_type": "hazard_value", ...}
            hval_dict = dict()
            for j, d in enumerate(fragility_set.demand_types):
                hval_dict[d] = haz_vals[j]

            if not AnalysisUtil.do_hazard_values_have_errors(
                    hazard_vals[i]["hazardValues"]):
                pipeline_args = fragility_set.construct_expression_args_from_inventory(
                    pipeline)
                limit_states = fragility_set.calculate_limit_state(
                    hval_dict, inventory_type="pipeline", **pipeline_args)
                dmg_intervals = fragility_set.calculate_damage_interval(
                    limit_states, hazard_type=hazard_type,
                    inventory_type="pipeline")
        else:
            raise ValueError(
                "One of the fragilities is in deprecated format. This should not happen. "
                "If you are seeing this please report the issue.")

        pipeline_result['guid'] = pipeline['properties']['guid']
        pipeline_result.update(limit_states)
        pipeline_result.update(dmg_intervals)
        pipeline_result['haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(
            haz_vals, hazard_type)

        damage_result = dict()
        damage_result['guid'] = pipeline['properties']['guid']
        damage_result['fragility_id'] = fragility_set.id
        damage_result['demandtypes'] = demand_types
        damage_result['demandunits'] = demand_units
        damage_result['hazardtype'] = hazard_type
        damage_result['hazardval'] = haz_vals

        pipeline_results.append(pipeline_result)
        damage_results.append(damage_result)

    # for pipelines that do not have matching fragility curves, default to None
    for pipeline in unmapped_pipelines:
        pipeline_result = dict()
        damage_result = dict()
        pipeline_result['guid'] = pipeline['properties']['guid']
        damage_result['guid'] = pipeline['properties']['guid']
        damage_result['fragility_id'] = None
        damage_result['demandtypes'] = None
        damage_result['demandunits'] = None
        damage_result['hazardtype'] = None
        damage_result['hazardval'] = None

        pipeline_results.append(pipeline_result)
        damage_results.append(damage_result)

    return pipeline_results, damage_results
def epf_damage_analysis_bulk_input(self, epfs, hazard_type,
                                   hazard_dataset_id,
                                   use_hazard_uncertainty,
                                   use_liquefaction,
                                   liq_geology_dataset_id):
    """Run analysis for multiple epfs.

    Args:
        epfs (list): Multiple epfs from input inventory set.
        hazard_type (str): A type of hazard exposure (earthquake, tsunami, tornado, or hurricane).
        hazard_dataset_id (str): An id of the hazard exposure.
        use_hazard_uncertainty (bool): Hazard uncertainty. True for using uncertainty when
            computing damage, False otherwise.
        use_liquefaction (bool): Liquefaction. True for using liquefaction information to
            modify the damage, False otherwise.
        liq_geology_dataset_id (str): A dataset id for the geology dataset for liquefaction.

    Returns:
        list: A list of ordered dictionaries with epf damage values and other data/metadata.

    """
    fragility_key = self.get_parameter("fragility_key")

    fragility_set = self.fragilitysvc.match_inventory(
        self.get_input_dataset("dfr3_mapping_set"), epfs, fragility_key)

    epf_results = []

    # Converting list of epfs into a dictionary for ease of reference
    list_epfs = epfs
    epfs = dict()
    for epf in list_epfs:
        epfs[epf["id"]] = epf
    del list_epfs  # Clear as it's not needed anymore

    processed_epf = []
    grouped_epfs = AnalysisUtil.group_by_demand_type(epfs, fragility_set)

    for demand, grouped_epf_items in grouped_epfs.items():
        input_demand_type = demand[0]
        input_demand_units = demand[1]

        # For every group of unique demand and demand unit, call the end-point once
        epf_chunks = list(AnalysisUtil.chunks(grouped_epf_items, 50))
        for epf_chunk in epf_chunks:
            points = []
            for epf_id in epf_chunk:
                location = GeoUtil.get_location(epfs[epf_id])
                points.append(str(location.y) + "," + str(location.x))

            if hazard_type == 'earthquake':
                hazard_vals = self.hazardsvc.get_earthquake_hazard_values(
                    hazard_dataset_id, input_demand_type,
                    input_demand_units, points)
            elif hazard_type == 'tornado':
                hazard_vals = self.hazardsvc.get_tornado_hazard_values(
                    hazard_dataset_id, input_demand_units, points)
            elif hazard_type == 'hurricane':
                # TODO: implement hurricane
                raise ValueError('Hurricane hazard has not yet been implemented!')
            elif hazard_type == 'tsunami':
                hazard_vals = self.hazardsvc.get_tsunami_hazard_values(
                    hazard_dataset_id, input_demand_type,
                    input_demand_units, points)
            else:
                raise ValueError("Missing hazard type.")

            # Parse the batch hazard value results and map them back to the epfs and fragility.
            # This is a potential pitfall as we are relying on the order of the returned results
            i = 0
            for epf_id in epf_chunk:
                epf_result = collections.OrderedDict()
                epf = epfs[epf_id]
                hazard_val = hazard_vals[i]['hazardValue']

                # Sometimes the geotiffs give large negative values for out of bounds instead of 0
                if hazard_val <= 0.0:
                    hazard_val = 0.0

                std_dev = 0.0
                if use_hazard_uncertainty:
                    raise ValueError("Uncertainty Not Implemented!")

                selected_fragility_set = fragility_set[epf_id]
                limit_states = selected_fragility_set.calculate_limit_state(
                    hazard_val, std_dev=std_dev)
                dmg_interval = AnalysisUtil.calculate_damage_interval(limit_states)

                epf_result['guid'] = epf['properties']['guid']
                epf_result.update(limit_states)
                epf_result.update(dmg_interval)
                epf_result['demandtype'] = input_demand_type
                epf_result['demandunits'] = input_demand_units
                epf_result['hazardtype'] = hazard_type
                epf_result['hazardval'] = hazard_val

                epf_results.append(epf_result)
                processed_epf.append(epf_id)
                i = i + 1

    # when there is liquefaction, limit states need to be modified
    if hazard_type == 'earthquake' and use_liquefaction and liq_geology_dataset_id is not None:
        liq_fragility_key = self.get_parameter("liquefaction_fragility_key")
        if liq_fragility_key is None:
            liq_fragility_key = self.DEFAULT_LIQ_FRAGILITY_KEY

        liq_fragility_set = self.fragilitysvc.match_inventory(
            self.get_input_dataset("dfr3_mapping_set"), epfs, liq_fragility_key)
        grouped_liq_epfs = AnalysisUtil.group_by_demand_type(
            epfs, liq_fragility_set)

        for liq_demand, grouped_liq_epf_items in grouped_liq_epfs.items():
            liq_input_demand_type = liq_demand[0]
            liq_input_demand_units = liq_demand[1]

            # For every group of unique demand and demand unit, call the end-point once
            liq_epf_chunks = list(AnalysisUtil.chunks(grouped_liq_epf_items, 50))
            for liq_epf_chunk in liq_epf_chunks:
                points = []
                for liq_epf_id in liq_epf_chunk:
                    location = GeoUtil.get_location(epfs[liq_epf_id])
                    points.append(str(location.y) + "," + str(location.x))
                liquefaction_vals = self.hazardsvc.get_liquefaction_values(
                    hazard_dataset_id, liq_geology_dataset_id,
                    liq_input_demand_units, points)

                # Parse the batch hazard value results and map them back to the epfs and fragility.
                # This is a potential pitfall as we are relying on the order of the returned results
                i = 0
                for liq_epf_id in liq_epf_chunk:
                    liq_hazard_val = liquefaction_vals[i][liq_input_demand_type]

                    std_dev = 0.0
                    if use_hazard_uncertainty:
                        raise ValueError("Uncertainty Not Implemented!")

                    liquefaction_prob = liquefaction_vals[i]['liqProbability']

                    selected_liq_fragility = liq_fragility_set[liq_epf_id]
                    pgd_limit_states = selected_liq_fragility.calculate_limit_state(
                        liq_hazard_val, std_dev=std_dev)

                    # match id and add liqhaztype, liqhazval, and liqprobability fields
                    # as well as rewrite limit states and dmg_interval
                    for epf_result in epf_results:
                        if epf_result['guid'] == epfs[liq_epf_id]['guid']:
                            limit_states = {
                                "ls-slight": epf_result['ls-slight'],
                                "ls-moderat": epf_result['ls-moderat'],
                                "ls-extensi": epf_result['ls-extensi'],
                                "ls-complet": epf_result['ls-complet']
                            }
                            liq_limit_states = AnalysisUtil.adjust_limit_states_for_pgd(
                                limit_states, pgd_limit_states)
                            liq_dmg_interval = AnalysisUtil.calculate_damage_interval(
                                liq_limit_states)
                            epf_result.update(liq_limit_states)
                            epf_result.update(liq_dmg_interval)
                            epf_result['liqhaztype'] = liq_input_demand_type
                            epf_result['liqhazval'] = liq_hazard_val
                            epf_result['liqprobability'] = liquefaction_prob
                    i = i + 1

    unmapped_limit_states = {
        "ls-slight": 0.0,
        "ls-moderat": 0.0,
        "ls-extensi": 0.0,
        "ls-complet": 0.0
    }
    unmapped_dmg_intervals = AnalysisUtil.calculate_damage_interval(unmapped_limit_states)
    for epf_id, epf in epfs.items():
        if epf_id not in processed_epf:
            unmapped_epf_result = collections.OrderedDict()
            unmapped_epf_result['guid'] = epf['properties']['guid']
            unmapped_epf_result.update(unmapped_limit_states)
            unmapped_epf_result.update(unmapped_dmg_intervals)
            unmapped_epf_result["demandtype"] = "None"
            unmapped_epf_result['demandunits'] = "None"
            unmapped_epf_result["hazardtype"] = "None"
            unmapped_epf_result['hazardval'] = 0.0
            unmapped_epf_result['liqhaztype'] = "NA"
            unmapped_epf_result['liqhazval'] = "NA"
            unmapped_epf_result['liqprobability'] = "NA"
            epf_results.append(unmapped_epf_result)

    return epf_results
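# --- Hedged sketch (illustrative, not the pyincore implementation) ---
# The bulk analyses batch their service calls with AnalysisUtil.chunks(items, 50).
# A minimal chunking generator in the same spirit:
def _chunks(items, size):
    """Yield successive `size`-length slices of `items`."""
    for start in range(0, len(items), size):
        yield items[start:start + size]

# list(_chunks(list(range(7)), 3)) -> [[0, 1, 2], [3, 4, 5], [6]]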
def road_damage_analysis_bulk_input(self, roads, hazard_type, hazard_dataset_id,
                                    use_hazard_uncertainty, geology_dataset_id,
                                    fragility_key, use_liquefaction):
    """Run analysis for multiple roads.

    Args:
        roads (list): Multiple roads from input inventory set.
        hazard_type (str): A hazard type of the hazard exposure (earthquake or tsunami).
        hazard_dataset_id (str): An id of the hazard exposure.
        use_hazard_uncertainty (bool): Flag to indicate whether to use hazard uncertainty.
        geology_dataset_id (str): An id of the geology for use in liquefaction.
        fragility_key (str): Fragility key describing the type of fragility.
        use_liquefaction (bool): Liquefaction. True for using liquefaction information to
            modify the damage, False otherwise.

    Returns:
        list: A list of ordered dictionaries with road damage values and other data/metadata.
        list: A list of ordered dictionaries with other road data/metadata.

    """
    fragility_sets = self.fragilitysvc.match_inventory(
        self.get_input_dataset("dfr3_mapping_set"), roads, fragility_key)

    values_payload = []
    mapped_roads = []
    unmapped_roads = []
    pgd_flag = True  # for liquefaction
    liquefaction_resp = None

    for road in roads:
        if road["id"] in fragility_sets.keys():
            fragility_set = fragility_sets[road["id"]]
            location = GeoUtil.get_location(road)
            loc = str(location.y) + "," + str(location.x)
            demands = fragility_set.demand_types

            # liquefaction PGD values are only requested when every demand is 'pgd'
            if any(demand.lower() != 'pgd' for demand in demands):
                pgd_flag = False

            units = fragility_set.demand_units
            value = {
                "demands": demands,
                "units": units,
                "loc": loc
            }
            values_payload.append(value)
            mapped_roads.append(road)
        else:
            unmapped_roads.append(road)

    del roads

    # get hazard and liquefaction values
    if hazard_type == 'earthquake':
        hazard_resp = self.hazardsvc.post_earthquake_hazard_values(
            hazard_dataset_id, values_payload)

        if pgd_flag and use_liquefaction and geology_dataset_id is not None:
            liquefaction_resp = self.hazardsvc.post_liquefaction_values(
                hazard_dataset_id, geology_dataset_id, values_payload)
    elif hazard_type == 'tsunami':
        hazard_resp = self.hazardsvc.post_tsunami_hazard_values(
            hazard_dataset_id, values_payload)
    elif hazard_type == 'hurricane':
        hazard_resp = self.hazardsvc.post_hurricane_hazard_values(
            hazard_dataset_id, values_payload)
    else:
        raise ValueError("The provided hazard type is not supported yet by this analysis")

    # calculate LS and DS
    ds_results = []
    damage_results = []

    for i, road in enumerate(mapped_roads):
        dmg_probability = dict()
        dmg_interval = dict()
        demand_types_liq = None
        demand_units_liq = None
        liq_hazard_vals = None
        liquefaction_prob = None
        selected_fragility_set = fragility_sets[road["id"]]

        hazard_std_dev = 0.0
        if use_hazard_uncertainty:
            raise ValueError("Uncertainty Not Implemented Yet.")

        if isinstance(selected_fragility_set.fragility_curves[0], DFR3Curve):
            hazard_vals = AnalysisUtil.update_precision_of_lists(
                hazard_resp[i]["hazardValues"])
            demand_types = hazard_resp[i]["demands"]
            demand_units = hazard_resp[i]["units"]

            hval_dict = dict()
            for j, d in enumerate(selected_fragility_set.demand_types):
                hval_dict[d] = hazard_vals[j]

            if not AnalysisUtil.do_hazard_values_have_errors(hazard_resp[i]["hazardValues"]):
                road_args = selected_fragility_set.construct_expression_args_from_inventory(road)
                dmg_probability = selected_fragility_set.calculate_limit_state(
                    hval_dict, inventory_type='road', **road_args)

                # if there is liquefaction, overwrite the hazard value with the
                # liquefaction value and recalculate dmg_probability and dmg_interval
                if liquefaction_resp is not None and len(liquefaction_resp) > 0:
                    liq_hazard_vals = AnalysisUtil.update_precision_of_lists(
                        liquefaction_resp[i]["pgdValues"])
                    demand_types_liq = liquefaction_resp[i]['demands']
                    demand_units_liq = liquefaction_resp[i]['units']
                    liquefaction_prob = liquefaction_resp[i]['liqProbability']

                    liq_hval_dict = dict()
                    for j, d in enumerate(liquefaction_resp[i]["demands"]):
                        liq_hval_dict[d] = liq_hazard_vals[j]
                    dmg_probability = selected_fragility_set.calculate_limit_state(
                        liq_hval_dict, inventory_type='road', **road_args)

                dmg_interval = selected_fragility_set.calculate_damage_interval(
                    dmg_probability, hazard_type=hazard_type, inventory_type="road")
        else:
            raise ValueError(
                "One of the fragilities is in deprecated format. This should not happen. "
                "If you are seeing this please report the issue.")

        ds_result = dict()
        ds_result['guid'] = road['properties']['guid']
        ds_result.update(dmg_probability)
        ds_result.update(dmg_interval)
        ds_result['haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(
            hazard_vals, hazard_type)

        damage_result = dict()
        damage_result['guid'] = road['properties']['guid']
        damage_result['fragility_id'] = selected_fragility_set.id
        damage_result['demandtypes'] = demand_types
        damage_result['demandunits'] = demand_units
        damage_result['hazardtype'] = hazard_type
        damage_result['hazardvals'] = hazard_vals
        damage_result['liqdemandtypes'] = demand_types_liq
        damage_result['liqdemandunits'] = demand_units_liq
        damage_result['liqhazvals'] = liq_hazard_vals
        damage_result['liqprobability'] = liquefaction_prob

        ds_results.append(ds_result)
        damage_results.append(damage_result)

    for road in unmapped_roads:
        ds_result = dict()
        damage_result = dict()

        ds_result['guid'] = road['properties']['guid']

        damage_result['guid'] = road['properties']['guid']
        damage_result['fragility_id'] = None
        damage_result['demandtypes'] = None
        damage_result['demandunits'] = None
        damage_result['hazardtype'] = None
        damage_result['hazardvals'] = None
        damage_result['liqdemandtypes'] = None
        damage_result['liqdemandunits'] = None
        damage_result['liqhazvals'] = None
        damage_result['liqprobability'] = None

        ds_results.append(ds_result)
        damage_results.append(damage_result)

    return ds_results, damage_results
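# --- Hedged illustration (not part of the analysis above) ---
# The pgd_flag test above disables the liquefaction request unless *every*
# fragility demand is 'pgd'; `all()` states the same condition directly:
_demands = ['pgd', 'PGD']
assert all(d.lower() == 'pgd' for d in _demands) == \
       (not any(d.lower() != 'pgd' for d in _demands))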
def bridge_damage_analysis_bulk_input(self, bridges, hazard_type, hazard_dataset_id):
    """Run analysis for multiple bridges.

    Args:
        bridges (list): Multiple bridges from input inventory set.
        hazard_type (str): Hazard type, either earthquake, tornado, tsunami, or hurricane.
        hazard_dataset_id (str): An id of the hazard exposure.

    Returns:
        list: A list of ordered dictionaries with bridge damage values and other data/metadata.

    """
    # Get Fragility key
    fragility_key = self.get_parameter("fragility_key")
    if fragility_key is None:
        fragility_key = BridgeUtil.DEFAULT_TSUNAMI_HMAX_FRAGILITY_KEY if hazard_type == 'tsunami' else \
            BridgeUtil.DEFAULT_FRAGILITY_KEY
        self.set_parameter("fragility_key", fragility_key)

    # Hazard Uncertainty
    use_hazard_uncertainty = False
    if hazard_type == "earthquake" and self.get_parameter("use_hazard_uncertainty") is not None:
        use_hazard_uncertainty = self.get_parameter("use_hazard_uncertainty")

    # Liquefaction
    use_liquefaction = False
    if hazard_type == "earthquake" and self.get_parameter("use_liquefaction") is not None:
        use_liquefaction = self.get_parameter("use_liquefaction")

    fragility_set = self.fragilitysvc.match_inventory(
        self.get_input_dataset("dfr3_mapping_set"), bridges, fragility_key)

    values_payload = []
    unmapped_bridges = []
    mapped_bridges = []
    for b in bridges:
        bridge_id = b["id"]
        if bridge_id in fragility_set:
            location = GeoUtil.get_location(b)
            loc = str(location.y) + "," + str(location.x)
            demands = fragility_set[bridge_id].demand_types
            units = fragility_set[bridge_id].demand_units
            value = {"demands": demands, "units": units, "loc": loc}
            values_payload.append(value)
            mapped_bridges.append(b)
        else:
            unmapped_bridges.append(b)

    # not needed anymore as they are already split into mapped and unmapped
    del bridges

    if hazard_type == 'earthquake':
        hazard_vals = self.hazardsvc.post_earthquake_hazard_values(
            hazard_dataset_id, values_payload)
    elif hazard_type == 'tornado':
        hazard_vals = self.hazardsvc.post_tornado_hazard_values(
            hazard_dataset_id, values_payload)
    elif hazard_type == 'tsunami':
        hazard_vals = self.hazardsvc.post_tsunami_hazard_values(
            hazard_dataset_id, values_payload)
    elif hazard_type == 'hurricane':
        hazard_vals = self.hazardsvc.post_hurricane_hazard_values(
            hazard_dataset_id, values_payload)
    elif hazard_type == 'flood':
        hazard_vals = self.hazardsvc.post_flood_hazard_values(
            hazard_dataset_id, values_payload)
    else:
        raise ValueError("The provided hazard type is not supported yet by this analysis")

    ds_results = []
    damage_results = []

    for i, bridge in enumerate(mapped_bridges):
        ds_result = dict()
        damage_result = dict()
        dmg_probability = dict()
        dmg_intervals = dict()
        selected_fragility_set = fragility_set[bridge["id"]]

        if isinstance(selected_fragility_set.fragility_curves[0], DFR3Curve):
            # Supports multiple demand types in same fragility
            hazard_val = AnalysisUtil.update_precision_of_lists(
                hazard_vals[i]["hazardValues"])
            input_demand_types = hazard_vals[i]["demands"]
            input_demand_units = hazard_vals[i]["units"]

            hval_dict = dict()
            for j, d in enumerate(selected_fragility_set.demand_types):
                hval_dict[d] = hazard_val[j]

            if not AnalysisUtil.do_hazard_values_have_errors(hazard_vals[i]["hazardValues"]):
                bridge_args = selected_fragility_set.construct_expression_args_from_inventory(bridge)
                dmg_probability = selected_fragility_set.calculate_limit_state(
                    hval_dict, inventory_type="bridge", **bridge_args)
                dmg_intervals = selected_fragility_set.calculate_damage_interval(
                    dmg_probability, hazard_type=hazard_type, inventory_type="bridge")
        else:
            raise ValueError(
                "One of the fragilities is in deprecated format. This should not happen. "
                "If you are seeing this please report the issue.")

        retrofit_cost = BridgeUtil.get_retrofit_cost(fragility_key)
        retrofit_type = BridgeUtil.get_retrofit_type(fragility_key)

        ds_result['guid'] = bridge['properties']['guid']
        ds_result.update(dmg_probability)
        ds_result.update(dmg_intervals)
        ds_result['haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(
            hazard_val, hazard_type)

        damage_result['guid'] = bridge['properties']['guid']
        damage_result['fragility_id'] = selected_fragility_set.id
        damage_result["retrofit"] = retrofit_type
        damage_result["retrocost"] = retrofit_cost
        damage_result["demandtypes"] = input_demand_types
        damage_result["demandunits"] = input_demand_units
        damage_result["hazardtype"] = hazard_type
        damage_result["hazardval"] = hazard_val

        # add spans to bridge output so mean damage calculation can use that info
        if "spans" in bridge["properties"] and bridge["properties"]["spans"] is not None:
            if isinstance(bridge["properties"]["spans"], str) and bridge["properties"]["spans"].isdigit():
                damage_result['spans'] = int(bridge["properties"]["spans"])
            elif isinstance(bridge["properties"]["spans"], int):
                damage_result['spans'] = bridge["properties"]["spans"]
        elif "SPANS" in bridge["properties"] and bridge["properties"]["SPANS"] is not None:
            if isinstance(bridge["properties"]["SPANS"], str) and bridge["properties"]["SPANS"].isdigit():
                damage_result['SPANS'] = int(bridge["properties"]["SPANS"])
            elif isinstance(bridge["properties"]["SPANS"], int):
                damage_result['SPANS'] = bridge["properties"]["SPANS"]
        else:
            damage_result['spans'] = 1

        ds_results.append(ds_result)
        damage_results.append(damage_result)

    for bridge in unmapped_bridges:
        ds_result = dict()
        damage_result = dict()
        ds_result['guid'] = bridge['properties']['guid']
        damage_result['guid'] = bridge['properties']['guid']
        damage_result["retrofit"] = None
        damage_result["retrocost"] = None
        damage_result["demandtypes"] = None
        damage_result['demandunits'] = None
        damage_result["hazardtype"] = None
        damage_result['hazardval'] = None
        damage_result['spans'] = None

        ds_results.append(ds_result)
        damage_results.append(damage_result)

    return ds_results, damage_results
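# --- Hedged sketch (illustrative helper, not a pyincore API) ---
# The span handling above accepts either an int or a numeric string under
# 'spans' or 'SPANS', defaulting to a single span. The same rule as one helper
# (note: unlike the code above, this does not preserve the key's case):
def _parse_spans(properties):
    for key in ('spans', 'SPANS'):
        value = properties.get(key)
        if isinstance(value, int):
            return value
        if isinstance(value, str) and value.isdigit():
            return int(value)
    return 1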
def pipeline_damage_analysis_bulk_input(self, pipelines, hazard_type, hazard_dataset_id):
    """Run pipeline damage analysis for multiple pipelines.

    Args:
        pipelines (list): Multiple pipelines from pipeline dataset.
        hazard_type (str): Hazard type.
        hazard_dataset_id (str): An id of the hazard exposure.

    Returns:
        ds_results (list): A list of ordered dictionaries with pipeline damage values and
            other data/metadata.
        damage_results (list): A list of ordered dictionaries with pipeline damage metadata.

    """
    # Get Fragility key
    fragility_key = self.get_parameter("fragility_key")
    if fragility_key is None:
        fragility_key = PipelineUtil.DEFAULT_TSU_FRAGILITY_KEY if hazard_type == 'tsunami' else \
            PipelineUtil.DEFAULT_EQ_FRAGILITY_KEY
        self.set_parameter("fragility_key", fragility_key)

    # get fragility set
    fragility_sets = self.fragilitysvc.match_inventory(
        self.get_input_dataset("dfr3_mapping_set"), pipelines, fragility_key)

    # Get Liquefaction Fragility Key
    liquefaction_fragility_key = self.get_parameter("liquefaction_fragility_key")
    if hazard_type == "earthquake" and liquefaction_fragility_key is None:
        liquefaction_fragility_key = PipelineUtil.LIQ_FRAGILITY_KEY

    # Liquefaction
    use_liquefaction = False
    if hazard_type == "earthquake" and self.get_parameter("use_liquefaction") is not None:
        use_liquefaction = self.get_parameter("use_liquefaction")

    # Get geology dataset id
    geology_dataset_id = self.get_parameter("liquefaction_geology_dataset_id")

    fragility_sets_liq = None
    if geology_dataset_id is not None:
        fragility_sets_liq = self.fragilitysvc.match_inventory(
            self.get_input_dataset("dfr3_mapping_set"), pipelines,
            liquefaction_fragility_key)

    values_payload = []
    values_payload_liq = []  # for liquefaction, if used
    unmapped_pipelines = []
    mapped_pipelines = []

    for pipeline in pipelines:
        # if we find a matching fragility for that pipeline
        if pipeline["id"] in fragility_sets.keys():
            fragility_set = fragility_sets[pipeline["id"]]
            location = GeoUtil.get_location(pipeline)
            loc = str(location.y) + "," + str(location.x)
            demands = fragility_set.demand_types
            units = fragility_set.demand_units
            value = {"demands": demands, "units": units, "loc": loc}
            values_payload.append(value)
            mapped_pipelines.append(pipeline)

            # Check if liquefaction is applicable
            if use_liquefaction and \
                    geology_dataset_id is not None and \
                    fragility_sets_liq is not None and \
                    pipeline["id"] in fragility_sets_liq:
                fragility_set_liq = fragility_sets_liq[pipeline["id"]]
                demands_liq = fragility_set_liq.demand_types
                units_liq = fragility_set_liq.demand_units
                value_liq = {
                    "demands": demands_liq,
                    "units": units_liq,
                    "loc": loc
                }
                values_payload_liq.append(value_liq)
        else:
            unmapped_pipelines.append(pipeline)

    del pipelines

    if hazard_type == 'earthquake':
        hazard_resp = self.hazardsvc.post_earthquake_hazard_values(
            hazard_dataset_id, values_payload)
    elif hazard_type == 'tsunami':
        hazard_resp = self.hazardsvc.post_tsunami_hazard_values(
            hazard_dataset_id, values_payload)
    else:
        raise ValueError("The provided hazard type is not supported yet by this analysis")

    # Check if liquefaction is applicable
    liquefaction_resp = None
    if use_liquefaction is True and \
            fragility_sets_liq is not None and \
            geology_dataset_id is not None:
        liquefaction_resp = self.hazardsvc.post_liquefaction_values(
            hazard_dataset_id, geology_dataset_id, values_payload_liq)

    # calculate LS and DS
    ds_results = []
    damage_results = []

    for i, pipeline in enumerate(mapped_pipelines):
        # defaults
        pgv_repairs = None
        pgd_repairs = 0.0
        total_repair_rate = None
        break_rate = None
        leak_rate = None
        failure_probability = None
        num_pgv_repairs = None
        num_pgd_repairs = 0.0
        num_repairs = None
        liq_hazard_vals = None
        liq_demand_types = None
        liq_demand_units = None
        liquefaction_prob = None

        ds_result = dict()
        damage_result = dict()
        ds_result['guid'] = pipeline['properties']['guid']
        damage_result['guid'] = pipeline['properties']['guid']

        fragility_set = fragility_sets[pipeline["id"]]
        # TODO assume there is only one curve
        fragility_curve = fragility_set.fragility_curves[0]

        hazard_vals = AnalysisUtil.update_precision_of_lists(
            hazard_resp[i]["hazardValues"])
        demand_types = hazard_resp[i]["demands"]
        demand_units = hazard_resp[i]["units"]

        hval_dict = dict()
        for j, d in enumerate(fragility_set.demand_types):
            hval_dict[d] = hazard_vals[j]

        if not AnalysisUtil.do_hazard_values_have_errors(hazard_resp[i]["hazardValues"]):
            pipeline_args = fragility_set.construct_expression_args_from_inventory(pipeline)
            pgv_repairs = fragility_curve.solve_curve_expression(
                hval_dict, fragility_set.curve_parameters, **pipeline_args)
            # Convert PGV repairs to SI units
            pgv_repairs = PipelineUtil.convert_result_unit(
                fragility_curve.return_type["unit"], pgv_repairs)

            length = PipelineUtil.get_pipe_length(pipeline)

            # Number of PGV repairs
            num_pgv_repairs = pgv_repairs * length

            # Check if liquefaction is applicable
            if use_liquefaction is True \
                    and fragility_sets_liq is not None \
                    and geology_dataset_id is not None \
                    and liquefaction_resp is not None:
                fragility_set_liq = fragility_sets_liq[pipeline["id"]]
                # TODO assume there is only one curve
                liq_fragility_curve = fragility_set_liq.fragility_curves[0]

                liq_hazard_vals = AnalysisUtil.update_precision_of_lists(
                    liquefaction_resp[i]["pgdValues"])
                liq_demand_types = liquefaction_resp[i]["demands"]
                liq_demand_units = liquefaction_resp[i]["units"]
                liquefaction_prob = liquefaction_resp[i]['liqProbability']

                liq_hval_dict = dict()
                for j, d in enumerate(liquefaction_resp[i]["demands"]):
                    liq_hval_dict[d] = liq_hazard_vals[j]

                # !important! removing the liqProbability and passing in the "diameter";
                # no fragility is actually using liqProbability
                pipeline_args = fragility_set_liq.construct_expression_args_from_inventory(pipeline)
                pgd_repairs = liq_fragility_curve.solve_curve_expression(
                    liq_hval_dict, fragility_set_liq.curve_parameters, **pipeline_args)
                # Convert PGD repairs to SI units
                pgd_repairs = PipelineUtil.convert_result_unit(
                    liq_fragility_curve.return_type["unit"], pgd_repairs)

                num_pgd_repairs = pgd_repairs * length

            # record results
            if 'pipetype' in pipeline['properties']:
                damage_result['pipeclass'] = pipeline['properties']['pipetype']
            elif 'pipelinesc' in pipeline['properties']:
                damage_result['pipeclass'] = pipeline['properties']['pipelinesc']
            else:
                damage_result['pipeclass'] = ""

            break_rate = 0.2 * pgv_repairs + 0.8 * pgd_repairs
            leak_rate = 0.8 * pgv_repairs + 0.2 * pgd_repairs
            total_repair_rate = pgd_repairs + pgv_repairs
            failure_probability = 1 - math.exp(-1.0 * break_rate * length)
            num_repairs = num_pgd_repairs + num_pgv_repairs

        ds_result['pgvrepairs'] = pgv_repairs
        ds_result['pgdrepairs'] = pgd_repairs
        ds_result['repairspkm'] = total_repair_rate
        ds_result['breakrate'] = break_rate
        ds_result['leakrate'] = leak_rate
        ds_result['failprob'] = failure_probability
        ds_result['numpgvrpr'] = num_pgv_repairs
        ds_result['numpgdrpr'] = num_pgd_repairs
        ds_result['numrepairs'] = num_repairs
        ds_result['haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(
            hazard_vals, hazard_type)

        damage_result['fragility_id'] = fragility_set.id
        damage_result['demandtypes'] = demand_types
        damage_result['demandunits'] = demand_units
        damage_result['hazardtype'] = hazard_type
        damage_result['hazardval'] = hazard_vals

        # Check if liquefaction is applicable
        if use_liquefaction is True \
                and fragility_sets_liq is not None \
                and geology_dataset_id is not None:
            damage_result['liq_fragility_id'] = fragility_sets_liq[pipeline["id"]].id
            damage_result['liqdemandtypes'] = liq_demand_types
            damage_result['liqdemandunits'] = liq_demand_units
            damage_result['liqhazval'] = liq_hazard_vals
            damage_result['liqprobability'] = liquefaction_prob
        else:
            damage_result['liq_fragility_id'] = None
            damage_result['liqdemandtypes'] = None
            damage_result['liqdemandunits'] = None
            damage_result['liqhazval'] = None
            damage_result['liqprobability'] = None

        ds_results.append(ds_result)
        damage_results.append(damage_result)

    # pipelines that do not have matched mappings
    for pipeline in unmapped_pipelines:
        ds_result = dict()
        ds_result['guid'] = pipeline['properties']['guid']

        damage_result = dict()
        damage_result['guid'] = pipeline['properties']['guid']
        if 'pipetype' in pipeline['properties']:
            damage_result['pipeclass'] = pipeline['properties']['pipetype']
        elif 'pipelinesc' in pipeline['properties']:
            damage_result['pipeclass'] = pipeline['properties']['pipelinesc']
        else:
            damage_result['pipeclass'] = ""

        damage_result['fragility_id'] = None
        damage_result['demandtypes'] = None
        damage_result['demandunits'] = None
        damage_result['hazardtype'] = None
        damage_result['hazardval'] = None
        damage_result['liq_fragility_id'] = None
        damage_result['liqdemandtypes'] = None
        damage_result['liqdemandunits'] = None
        damage_result['liqhazval'] = None
        damage_result['liqprobability'] = None

        ds_results.append(ds_result)
        damage_results.append(damage_result)

    return ds_results, damage_results
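# --- Worked numeric example (illustrative values only) ---
# The repair-rate relations above split PGV vs PGD repairs 20/80 for breaks
# and 80/20 for leaks, and model failure as a Poisson process over the length:
import math

_pgv_repairs = 0.05  # repairs per km from ground shaking
_pgd_repairs = 0.02  # repairs per km from ground deformation
_length = 2.0        # pipe length in km

_break_rate = 0.2 * _pgv_repairs + 0.8 * _pgd_repairs      # 0.026
_leak_rate = 0.8 * _pgv_repairs + 0.2 * _pgd_repairs       # 0.044
_fail_prob = 1 - math.exp(-1.0 * _break_rate * _length)    # ~0.0507
_num_repairs = (_pgv_repairs + _pgd_repairs) * _length     # 0.14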
def road_damage_analysis_bulk_input(self, roads, hazard_type, hazard_dataset_id,
                                    use_hazard_uncertainty, geology_dataset_id,
                                    fragility_key, use_liquefaction):
    """Run analysis for multiple roads.

    Args:
        roads (list): Multiple roads from input inventory set.
        hazard_type (str): A hazard type of the hazard exposure (earthquake or tsunami).
        hazard_dataset_id (str): An id of the hazard exposure.
        use_hazard_uncertainty (bool): Flag to indicate whether to use hazard uncertainty.
        geology_dataset_id (str): An id of the geology for use in liquefaction.
        fragility_key (str): Fragility key describing the type of fragility.
        use_liquefaction (bool): Liquefaction. True for using liquefaction information to
            modify the damage, False otherwise.

    Returns:
        list: A list of ordered dictionaries with road damage values and other data/metadata.

    """
    road_results = []
    fragility_sets = self.fragilitysvc.match_inventory(
        self.get_input_dataset("dfr3_mapping_set"), roads, fragility_key)

    list_roads = roads

    # Converting list of roads into a dictionary for ease of reference
    roads = dict()
    for rd in list_roads:
        roads[rd["id"]] = rd
    del list_roads

    processed_roads = []
    grouped_roads = AnalysisUtil.group_by_demand_type(roads, fragility_sets)
    for demand, grouped_road_items in grouped_roads.items():
        input_demand_type = demand[0]
        input_demand_units = demand[1]

        # For every group of unique demand and demand unit, call the end-point once
        road_chunks = list(AnalysisUtil.chunks(grouped_road_items, 50))
        for road_chunk in road_chunks:
            points = []
            for road_id in road_chunk:
                location = GeoUtil.get_location(roads[road_id])
                points.append(str(location.y) + "," + str(location.x))

            liquefaction = []
            if hazard_type == 'earthquake':
                hazard_vals = self.hazardsvc.get_earthquake_hazard_values(
                    hazard_dataset_id, input_demand_type, input_demand_units, points)

                if input_demand_type.lower() == 'pgd' and use_liquefaction and \
                        geology_dataset_id is not None:
                    liquefaction = self.hazardsvc.get_liquefaction_values(
                        hazard_dataset_id, geology_dataset_id,
                        input_demand_units, points)
            elif hazard_type in ('tornado', 'hurricane'):
                raise ValueError(
                    'Earthquake and tsunamis are the only hazards supported for road damage')
            elif hazard_type == 'tsunami':
                hazard_vals = self.hazardsvc.get_tsunami_hazard_values(
                    hazard_dataset_id, input_demand_type, input_demand_units, points)
            else:
                raise ValueError("Missing hazard type.")

            # Parse the batch hazard value results and map them back to the roads and fragility.
            # This is a potential pitfall as we are relying on the order of the returned results
            i = 0
            for road_id in road_chunk:
                road_result = collections.OrderedDict()
                road = roads[road_id]
                hazard_val = hazard_vals[i]['hazardValue']

                # Sometimes the geotiffs give large negative values for out of bounds instead of 0
                if hazard_val <= 0.0:
                    hazard_val = 0.0

                std_dev = 0.0
                if use_hazard_uncertainty:
                    raise ValueError("Uncertainty Not Implemented Yet.")

                selected_fragility_set = fragility_sets[road_id]
                dmg_probability = selected_fragility_set.calculate_limit_state(
                    hazard_val, std_dev=std_dev)
                dmg_interval = AnalysisUtil.calculate_damage_interval(dmg_probability)

                road_result['guid'] = road['properties']['guid']
                road_result.update(dmg_probability)
                road_result.update(dmg_interval)
                road_result['demandtype'] = input_demand_type
                road_result['demandunits'] = input_demand_units
                road_result['hazardtype'] = hazard_type
                road_result['hazardval'] = hazard_val

                # if there is liquefaction, overwrite the hazardval with the liquefaction
                # value and recalculate dmg_probability and dmg_interval
                if len(liquefaction) > 0:
                    if input_demand_type in liquefaction[i]:
                        liquefaction_val = liquefaction[i][input_demand_type]
                    elif input_demand_type.lower() in liquefaction[i]:
                        liquefaction_val = liquefaction[i][input_demand_type.lower()]
                    elif input_demand_type.upper() in liquefaction[i]:
                        liquefaction_val = liquefaction[i][input_demand_type.upper()]
                    else:
                        liquefaction_val = 0.0
                    dmg_probability = selected_fragility_set.calculate_limit_state(
                        liquefaction_val, std_dev=std_dev)
                    dmg_interval = AnalysisUtil.calculate_damage_interval(dmg_probability)
                    road_result['hazardval'] = liquefaction_val
                    road_result.update(dmg_probability)
                    road_result.update(dmg_interval)

                road_results.append(road_result)
                processed_roads.append(road_id)
                i = i + 1

    unmapped_dmg_probability = {
        "ls-slight": 0.0,
        "ls-moderat": 0.0,
        "ls-extensi": 0.0,
        "ls-complet": 0.0
    }
    unmapped_dmg_intervals = AnalysisUtil.calculate_damage_interval(unmapped_dmg_probability)
    for road_id, rd in roads.items():
        if road_id not in processed_roads:
            unmapped_rd_result = collections.OrderedDict()
            unmapped_rd_result['guid'] = rd['properties']['guid']
            unmapped_rd_result.update(unmapped_dmg_probability)
            unmapped_rd_result.update(unmapped_dmg_intervals)
            unmapped_rd_result['demandtype'] = "None"
            unmapped_rd_result['demandunits'] = "None"
            unmapped_rd_result['hazardtype'] = "None"
            unmapped_rd_result['hazardval'] = 0.0
            road_results.append(unmapped_rd_result)

    return road_results
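# --- Hedged sketch (illustrative helper, not a pyincore API) ---
# The liquefaction lookup above tries the demand type as-is, lower-, and
# upper-cased before falling back to 0.0; the same rule as one helper:
def _get_case_insensitive(mapping, key, default=0.0):
    for candidate in (key, key.lower(), key.upper()):
        if candidate in mapping:
            return mapping[candidate]
    return default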