def sample_damage_interval(self, dmg, damage_interval_keys, num_samples, seed):
    """Dylan Sanderson code to calculate the Monte Carlo simulations of damage state.

    Args:
        dmg (dict): Damage results that contains dmg interval values.
        damage_interval_keys (list): Keys of the damage states.
        num_samples (int): Number of simulation.
        seed (int): Random number generator seed for reproducibility.

    Returns:
        dict: A dictionary of damage states.

    """
    samples = {}
    rng = np.random.RandomState(seed)
    for sample_idx in range(num_samples):
        # one uniform draw per sample from the single seeded generator
        draw = rng.uniform(0, 1)
        cumulative = 0
        matched = False
        # walk the damage-state intervals until the draw falls inside one
        for ds_name in damage_interval_keys:
            interval = AnalysisUtil.float_to_decimal(dmg[ds_name])
            if draw < cumulative + interval:
                samples['sample_{}'.format(sample_idx)] = ds_name
                matched = True
                break
            cumulative += interval
        if not matched:
            # intervals did not sum to cover the draw (probabilities < 1)
            print("cannot determine MC damage state!")
    return samples
def run(self):
    """Executes mc failure probability analysis.

    Returns:
        bool: True once all result datasets have been set.
    """
    # read in file and parameters
    damage = self.get_input_dataset("damage").get_csv_reader()
    damage_result = AnalysisUtil.get_csv_table_rows(damage, ignore_first_row=False)

    # setting number of cpus to use
    user_defined_cpu = 1
    if self.get_parameter("num_cpu") is not None and self.get_parameter(
            "num_cpu") > 0:
        user_defined_cpu = self.get_parameter("num_cpu")

    num_workers = AnalysisUtil.determine_parallelism_locally(
        self, len(damage_result), user_defined_cpu)

    avg_bulk_input_size = int(len(damage_result) / num_workers)
    inventory_args = []
    count = 0
    inventory_list = damage_result

    seed = self.get_parameter("seed")
    seed_list = []
    # Single chunking loop (previously duplicated for the seed / no-seed
    # cases). Each row gets a unique, reproducible seed of seed + row_index.
    # Fix: the range formerly started at count - 1, which handed the first
    # chunk seed - 1 -- an invalid (negative) RandomState seed when seed == 0.
    while count < len(inventory_list):
        inventory_args.append(inventory_list[count:count + avg_bulk_input_size])
        if seed is not None:
            seed_list.append([
                seed + i for i in range(count, count + avg_bulk_input_size)
            ])
        else:
            seed_list.append([
                None for _ in range(count, count + avg_bulk_input_size)
            ])
        count += avg_bulk_input_size

    fs_results, fp_results, samples_results = self.monte_carlo_failure_probability_concurrent_future(
        self.monte_carlo_failure_probability_bulk_input, num_workers,
        inventory_args, seed_list)
    self.set_result_csv_data("sample_failure_state", fs_results,
                             name=self.get_parameter("result_name") + "_failure_state")
    self.set_result_csv_data("failure_probability", fp_results,
                             name=self.get_parameter("result_name") + "_failure_probability")
    self.set_result_csv_data("sample_damage_states", samples_results,
                             name=self.get_parameter("result_name") + "_sample_damage_states")

    return True
def mean_damage(self, dmg, dmg_ratio_tbl, damage_interval_keys, is_bridge):
    """Calculates mean damage based on damage probabilities and damage ratios.

    Args:
        dmg (obj): dmg analysis output for a single entity in the built environment.
        dmg_ratio_tbl (list): dmg ratio table.
        damage_interval_keys (list): damage interval keys.
        is_bridge (bool): a boolean to indicate if the inventory type is bridge.
            Bridge has its own way of calculating mean damage.

    Returns:
        OrderedDict: A dictionary with mean damage, deviation, and other data/metadata.

    """
    output = collections.OrderedDict()
    output.update(dmg)

    if is_bridge:
        # determine the bridge span count; default to 1 when absent/non-numeric
        spans_raw = dmg.get("spans")
        if spans_raw is not None and spans_raw.isdigit():
            bridge_spans = int(spans_raw)
        else:
            bridge_spans = 1

        # cap at 10 spans
        if bridge_spans > 10:
            bridge_spans = 10
            print("A bridge was found with greater than 10 spans: " +
                  dmg['guid'] + ". Default to 10 bridge spans.")

        mean_damage = AnalysisUtil.calculate_mean_damage(
            dmg_ratio_tbl, dmg, damage_interval_keys, is_bridge, bridge_spans)
    else:
        mean_damage = AnalysisUtil.calculate_mean_damage(
            dmg_ratio_tbl, dmg, damage_interval_keys, is_bridge)
    output.update(mean_damage)

    # bridges report an expected damage category instead of a std deviation
    if is_bridge:
        output['expectval'] = AnalysisUtil.get_expected_damage(
            mean_damage['meandamage'], dmg_ratio_tbl)
    else:
        output.update(AnalysisUtil.calculate_mean_damage_std_deviation(
            dmg_ratio_tbl, dmg, mean_damage['meandamage'],
            damage_interval_keys))

    return output
def run(self):
    """Executes Cumulative Building Damage Analysis."""
    eq_reader = self.get_input_dataset("eq_bldg_dmg").get_csv_reader()
    eq_damage_df = pd.DataFrame(list(eq_reader))

    tsunami_reader = self.get_input_dataset("tsunami_bldg_dmg").get_csv_reader()
    tsunami_damage_df = pd.DataFrame(list(tsunami_reader))

    # resolve requested CPU count (default 1)
    user_defined_cpu = 1
    requested_cpu = self.get_parameter("num_cpu")
    if requested_cpu is not None and requested_cpu > 0:
        user_defined_cpu = requested_cpu

    num_workers = AnalysisUtil.determine_parallelism_locally(
        self, len(eq_damage_df), user_defined_cpu)

    # partition the earthquake damage frame into one batch per worker
    batch_size = int(len(eq_damage_df) / num_workers)
    eq_damage_args = []
    start = 0
    while start < len(eq_damage_df):
        eq_damage_args.append(eq_damage_df[start:start + batch_size])
        start += batch_size

    # the full tsunami frame is repeated for every batch
    results = self.cumulative_building_damage_concurrent_future(
        self.cumulative_building_damage_bulk_input, num_workers,
        eq_damage_args, repeat(tsunami_damage_df))

    self.set_result_csv_data("combined-result", results,
                             name=self.get_parameter("result_name"))
    return True
def run(self):
    """Executes bridge damage analysis."""
    # Bridge dataset
    bridge_set = self.get_input_dataset("bridges").get_inventory_reader()

    # Hazard inputs
    hazard_type = self.get_parameter("hazard_type")
    hazard_dataset_id = self.get_parameter("hazard_id")

    # resolve requested CPU count (default 1)
    user_defined_cpu = 1
    requested_cpu = self.get_parameter("num_cpu")
    if requested_cpu is not None and requested_cpu > 0:
        user_defined_cpu = requested_cpu

    num_workers = AnalysisUtil.determine_parallelism_locally(
        self, len(bridge_set), user_defined_cpu)

    # partition the inventory into one batch per worker
    batch_size = int(len(bridge_set) / num_workers)
    inventory_list = list(bridge_set)
    inventory_args = []
    start = 0
    while start < len(inventory_list):
        inventory_args.append(inventory_list[start:start + batch_size])
        start += batch_size

    results = self.bridge_damage_concurrent_future(
        self.bridge_damage_analysis_bulk_input, num_workers,
        inventory_args, repeat(hazard_type), repeat(hazard_dataset_id))

    self.set_result_csv_data("result", results,
                             name=self.get_parameter("result_name"))
    return True
def run(self):
    """Executes building damage analysis."""
    # Building dataset
    building_set = self.get_input_dataset("buildings").get_inventory_reader()

    # fall back to the default AS/DS fragility keys when none were supplied
    if self.get_parameter("fragility_key_as") is None:
        self.set_parameter("fragility_key_as",
                           NonStructBuildingUtil.DEFAULT_FRAGILITY_KEY_AS)
    if self.get_parameter("fragility_key_ds") is None:
        self.set_parameter("fragility_key_ds",
                           NonStructBuildingUtil.DEFAULT_FRAGILITY_KEY_DS)

    # hazard uncertainty and liquefaction default to disabled
    if self.get_parameter("use_hazard_uncertainty") is None:
        self.set_parameter("use_hazard_uncertainty", False)
    if self.get_parameter("use_liquefaction") is None:
        self.set_parameter("use_liquefaction", False)

    # resolve requested CPU count (default 1)
    user_defined_cpu = 1
    requested_cpu = self.get_parameter("num_cpu")
    if requested_cpu is not None and requested_cpu > 0:
        user_defined_cpu = requested_cpu

    num_workers = AnalysisUtil.determine_parallelism_locally(
        self, len(building_set), user_defined_cpu)

    # partition the inventory into one batch per worker
    batch_size = int(len(building_set) / num_workers)
    inventory_list = list(building_set)
    inventory_args = []
    start = 0
    while start < len(inventory_list):
        inventory_args.append(inventory_list[start:start + batch_size])
        start += batch_size

    (ds_results, damage_results) = self.building_damage_concurrent_future(
        self.building_damage_analysis_bulk_input, num_workers,
        inventory_args)

    self.set_result_csv_data("result", ds_results,
                             name=self.get_parameter("result_name"))
    self.set_result_json_data("damage_result", damage_results,
                              name=self.get_parameter("result_name") + "_additional_info")
    return True
def run(self):
    """Executes electric power facility damage analysis."""
    epf_set = self.get_input_dataset("epfs").get_inventory_reader()

    # resolve the fragility key, persisting the default when none was given
    fragility_key = self.get_parameter("fragility_key")
    if fragility_key is None:
        fragility_key = self.DEFAULT_FRAGILITY_KEY
        self.set_parameter("fragility_key", fragility_key)

    # Get hazard input
    hazard_dataset_id = self.get_parameter("hazard_id")
    # Hazard type, note this is here for future use if additional hazards are supported by this analysis
    hazard_type = self.get_parameter("hazard_type")

    # hazard uncertainty: off unless explicitly provided
    use_hazard_uncertainty = False
    if self.get_parameter("use_hazard_uncertainty") is not None:
        use_hazard_uncertainty = self.get_parameter("use_hazard_uncertainty")

    # liquefaction: off unless explicitly provided
    use_liquefaction = False
    if self.get_parameter("use_liquefaction") is not None:
        use_liquefaction = self.get_parameter("use_liquefaction")
    liq_geology_dataset_id = self.get_parameter(
        "liquefaction_geology_dataset_id")

    # resolve requested CPU count (default 1)
    user_defined_cpu = 1
    requested_cpu = self.get_parameter("num_cpu")
    if requested_cpu is not None and requested_cpu > 0:
        user_defined_cpu = requested_cpu

    num_workers = AnalysisUtil.determine_parallelism_locally(
        self, len(epf_set), user_defined_cpu)

    # partition the inventory into one batch per worker
    batch_size = int(len(epf_set) / num_workers)
    inventory_list = list(epf_set)
    inventory_args = []
    start = 0
    while start < len(inventory_list):
        inventory_args.append(inventory_list[start:start + batch_size])
        start += batch_size

    results = self.epf_damage_concurrent_future(
        self.epf_damage_analysis_bulk_input, num_workers, inventory_args,
        repeat(hazard_type), repeat(hazard_dataset_id),
        repeat(use_hazard_uncertainty), repeat(use_liquefaction),
        repeat(liq_geology_dataset_id))

    self.set_result_csv_data("result", results,
                             name=self.get_parameter("result_name"))
    return True
def run(self):
    """Executes building damage analysis."""
    # Building dataset
    bldg_set = self.get_input_dataset("buildings").get_inventory_reader()

    # optional building retrofit strategy
    retrofit_strategy_dataset = self.get_input_dataset("retrofit_strategy")
    if retrofit_strategy_dataset is not None:
        retrofit_strategy = list(retrofit_strategy_dataset.get_csv_reader())
    else:
        retrofit_strategy = None

    # Hazard inputs
    hazard_dataset_id = self.get_parameter("hazard_id")
    hazard_type = self.get_parameter("hazard_type")

    # resolve the fragility key; tsunami gets its own default
    fragility_key = self.get_parameter("fragility_key")
    if fragility_key is None:
        if hazard_type == 'tsunami':
            fragility_key = BuildingUtil.DEFAULT_TSUNAMI_MMAX_FRAGILITY_KEY
        else:
            fragility_key = BuildingUtil.DEFAULT_FRAGILITY_KEY
        self.set_parameter("fragility_key", fragility_key)

    # resolve requested CPU count (default 1)
    user_defined_cpu = 1
    requested_cpu = self.get_parameter("num_cpu")
    if requested_cpu is not None and requested_cpu > 0:
        user_defined_cpu = requested_cpu

    num_workers = AnalysisUtil.determine_parallelism_locally(
        self, len(bldg_set), user_defined_cpu)

    # partition the inventory into one batch per worker
    batch_size = int(len(bldg_set) / num_workers)
    inventory_list = list(bldg_set)
    inventory_args = []
    start = 0
    while start < len(inventory_list):
        inventory_args.append(inventory_list[start:start + batch_size])
        start += batch_size

    (ds_results, damage_results) = self.building_damage_concurrent_future(
        self.building_damage_analysis_bulk_input, num_workers,
        inventory_args, repeat(retrofit_strategy), repeat(hazard_type),
        repeat(hazard_dataset_id))

    self.set_result_csv_data("ds_result", ds_results,
                             name=self.get_parameter("result_name"))
    self.set_result_json_data("damage_result", damage_results,
                              name=self.get_parameter("result_name") + "_additional_info")
    return True
def test_fragility_set_small_overlap():
    """Checks _3ls_to_4ds conversion when limit-state probabilities overlap."""
    fragility_set = get_fragility_set("fragility_curve.json")

    # Test Case 1 - single overlap
    limit_states = collections.OrderedDict([("LS_0", 0.9692754643),
                                            ("LS_1", 0.0001444974),
                                            ("LS_2", 0.0004277083)])
    limit_states = AnalysisUtil.float_dict_to_decimal(limit_states)
    damage_states = fragility_set._3ls_to_4ds(limit_states)
    expected = {"DS_0": 0.0307245357, "DS_1": 0.968847756,
                "DS_2": 0.0, "DS_3": 0.0004277083}
    for ds_name, ds_value in expected.items():
        assert damage_states[ds_name] == AnalysisUtil.float_to_decimal(ds_value)

    # Test Case 2 - double overlap
    limit_states = collections.OrderedDict([("LS_0", 0.12),
                                            ("LS_1", 0.64),
                                            ("LS_2", 0.8)])
    limit_states = AnalysisUtil.float_dict_to_decimal(limit_states)
    damage_states = fragility_set._3ls_to_4ds(limit_states)
    expected = {"DS_0": 0.2, "DS_1": 0.0, "DS_2": 0.0, "DS_3": 0.8}
    for ds_name, ds_value in expected.items():
        assert damage_states[ds_name] == AnalysisUtil.float_to_decimal(ds_value)
def run(self):
    """Performs Water facility damage analysis by using the parameters from
    the spec and creates an output dataset in csv format.

    Returns:
        bool: True if successful, False otherwise
    """
    # Facility dataset
    inventory_set = self.get_input_dataset(
        "water_facilities").get_inventory_reader()

    # Hazard inputs
    hazard_dataset_id = self.get_parameter("hazard_id")
    hazard_type = self.get_parameter("hazard_type")

    # resolve requested CPU count (default 1)
    user_defined_cpu = 1
    requested_cpu = self.get_parameter("num_cpu")
    if requested_cpu is not None and requested_cpu > 0:
        user_defined_cpu = requested_cpu

    num_workers = AnalysisUtil.determine_parallelism_locally(
        self, len(inventory_set), user_defined_cpu)

    # partition the inventory into one batch per worker
    batch_size = int(len(inventory_set) / num_workers)
    inventory_list = list(inventory_set)
    inventory_args = []
    start = 0
    while start < len(inventory_list):
        inventory_args.append(inventory_list[start:start + batch_size])
        start += batch_size

    (ds_results, damage_results) = self.waterfacility_damage_concurrent_futures(
        self.waterfacilityset_damage_analysis_bulk_input, num_workers,
        inventory_args, repeat(hazard_type), repeat(hazard_dataset_id))

    self.set_result_csv_data("result", ds_results,
                             name=self.get_parameter("result_name"))
    self.set_result_json_data("metadata", damage_results,
                              name=self.get_parameter("result_name") + "_additional_info")
    return True
def run(self):
    """Executes mean damage calculation."""
    # read in file and parameters
    damage = self.get_input_dataset("damage").get_csv_reader()
    damage_result = AnalysisUtil.get_csv_table_rows(damage,
                                                    ignore_first_row=False)

    dmg_ratio_csv = self.get_input_dataset("dmg_ratios").get_csv_reader()
    dmg_ratio_tbl = AnalysisUtil.get_csv_table_rows(dmg_ratio_csv)

    # resolve requested CPU count (default 1)
    user_defined_cpu = 1
    requested_cpu = self.get_parameter("num_cpu")
    if requested_cpu is not None and requested_cpu > 0:
        user_defined_cpu = requested_cpu

    num_workers = AnalysisUtil.determine_parallelism_locally(
        self, len(damage_result), user_defined_cpu)

    # partition the damage rows into one batch per worker
    batch_size = int(len(damage_result) / num_workers)
    inventory_list = damage_result
    inventory_args = []
    start = 0
    while start < len(inventory_list):
        inventory_args.append(inventory_list[start:start + batch_size])
        start += batch_size

    # the damage-ratio table is repeated for every batch
    results = self.mean_damage_concurrent_future(
        self.mean_damage_bulk_input, num_workers, inventory_args,
        repeat(dmg_ratio_tbl))

    self.set_result_csv_data("result", results,
                             name=self.get_parameter("result_name"))
    return True
def run(self):
    """Execute road damage analysis."""
    # road dataset
    road_dataset = self.get_input_dataset("roads").get_inventory_reader()

    # distance to shore table data frame
    distance_df = self.get_input_dataset(
        "distance_table").get_dataframe_from_csv()

    # TODO this has to be changed when semantic service lanuched based on it
    # set distance field name in the table
    distance_field_name = "distance"

    # Hazard inputs
    hazard_type = self.get_parameter("hazard_type")
    hazard_dataset_id = self.get_parameter("hazard_id")

    # resolve requested CPU count (default 1)
    user_defined_cpu = 1
    requested_cpu = self.get_parameter("num_cpu")
    if requested_cpu is not None and requested_cpu > 0:
        user_defined_cpu = requested_cpu

    dataset_size = len(road_dataset)
    num_workers = AnalysisUtil.determine_parallelism_locally(
        self, dataset_size, user_defined_cpu)

    # partition the inventory into one batch per worker
    batch_size = int(dataset_size / num_workers)
    inventory_list = list(road_dataset)
    inventory_args = []
    start = 0
    while start < len(inventory_list):
        inventory_args.append(inventory_list[start:start + batch_size])
        start += batch_size

    results = self.road_damage_concurrent_future(
        self.road_damage_analysis_bulk_input, num_workers, inventory_args,
        repeat(distance_df), repeat(distance_field_name),
        repeat(hazard_type), repeat(hazard_dataset_id))

    self.set_result_csv_data("result", results,
                             name=self.get_parameter("result_name"))
    return True
def run(self):
    """Execute pipeline damage analysis."""
    # Pipeline dataset
    pipeline_dataset = self.get_input_dataset(
        "pipeline").get_inventory_reader()

    # Hazard inputs
    hazard_type = self.get_parameter("hazard_type")
    hazard_dataset_id = self.get_parameter("hazard_id")

    # resolve requested CPU count (default 1)
    user_defined_cpu = 1
    requested_cpu = self.get_parameter("num_cpu")
    if requested_cpu is not None and requested_cpu > 0:
        user_defined_cpu = requested_cpu

    dataset_size = len(pipeline_dataset)
    num_workers = AnalysisUtil.determine_parallelism_locally(
        self, dataset_size, user_defined_cpu)

    # partition the inventory into one batch per worker
    batch_size = int(dataset_size / num_workers)
    inventory_list = list(pipeline_dataset)
    inventory_args = []
    start = 0
    while start < len(inventory_list):
        inventory_args.append(inventory_list[start:start + batch_size])
        start += batch_size

    (ds_results, damage_results) = self.pipeline_damage_concurrent_future(
        self.pipeline_damage_analysis_bulk_input, num_workers,
        inventory_args, repeat(hazard_type), repeat(hazard_dataset_id))

    self.set_result_csv_data("result", ds_results,
                             name=self.get_parameter("result_name"))
    self.set_result_json_data("metadata", damage_results,
                              name=self.get_parameter("result_name") + "_additional_info")
    return True
def run(self):
    """Executes pipeline restoration analysis."""
    pipelines_df = self.get_input_dataset(
        "pipeline").get_dataframe_from_shapefile()

    pipeline_dmg = self.get_input_dataset(
        "pipeline_damage").get_csv_reader()
    pipelines_dmg_df = pd.DataFrame(list(pipeline_dmg))

    # join damage rows to pipeline attributes on guid, then flatten to dicts
    merged = pipelines_dmg_df.merge(pipelines_df, on='guid')
    damage_result = merged.to_dict(orient='records')

    # resolve requested CPU count (default 1)
    user_defined_cpu = 1
    requested_cpu = self.get_parameter("num_cpu")
    if requested_cpu is not None and requested_cpu > 0:
        user_defined_cpu = requested_cpu

    num_workers = AnalysisUtil.determine_parallelism_locally(
        self, len(damage_result), user_defined_cpu)

    # partition the merged records into one batch per worker
    batch_size = int(len(damage_result) / num_workers)
    inventory_list = damage_result
    inventory_args = []
    start = 0
    while start < len(inventory_list):
        inventory_args.append(inventory_list[start:start + batch_size])
        start += batch_size

    restoration_results = self.pipeline_restoration_concurrent_future(
        self.pipeline_restoration_bulk_input, num_workers, inventory_args)
    self.set_result_csv_data("pipeline_restoration",
                             restoration_results,
                             name=self.get_parameter("result_name"))
    return True
def show_gdocstr_docs(self):
    """Builds a docstring-style description from this analysis' spec."""
    spec = self.get_spec()
    return AnalysisUtil.create_gdocstr_from_spec(spec)
def pipeline_damage_analysis_bulk_input(self, pipelines, hazard_type,
                                        hazard_dataset_id):
    """Run pipeline damage analysis for multiple pipelines.

    Args:
        pipelines (list): multiple pipelines from pipeline dataset.
        hazard_type (str): Hazard type
        hazard_dataset_id (str): An id of the hazard exposure.

    Returns:
        ds_results (list): A list of ordered dictionaries with pipeline damage values and other data/metadata.
        damage_results (list): A list of ordered dictionaries with pipeline damage metadata.
    """
    # Get Fragility key
    fragility_key = self.get_parameter("fragility_key")
    if fragility_key is None:
        fragility_key = PipelineUtil.DEFAULT_TSU_FRAGILITY_KEY if hazard_type == 'tsunami' else \
            PipelineUtil.DEFAULT_EQ_FRAGILITY_KEY
        self.set_parameter("fragility_key", fragility_key)

    # get fragility set
    fragility_sets = self.fragilitysvc.match_inventory(
        self.get_input_dataset("dfr3_mapping_set"), pipelines, fragility_key)

    # Get Liquefaction Fragility Key
    liquefaction_fragility_key = self.get_parameter(
        "liquefaction_fragility_key")
    if hazard_type == "earthquake" and liquefaction_fragility_key is None:
        liquefaction_fragility_key = PipelineUtil.LIQ_FRAGILITY_KEY

    # Liquefaction
    use_liquefaction = False
    if hazard_type == "earthquake" and self.get_parameter(
            "use_liquefaction") is not None:
        use_liquefaction = self.get_parameter("use_liquefaction")

    # Get geology dataset id
    geology_dataset_id = self.get_parameter(
        "liquefaction_geology_dataset_id")

    fragility_sets_liq = None
    if geology_dataset_id is not None:
        fragility_sets_liq = self.fragilitysvc.match_inventory(
            self.get_input_dataset("dfr3_mapping_set"), pipelines,
            liquefaction_fragility_key)

    values_payload = []
    values_payload_liq = []  # for liquefaction if used
    unmapped_pipelines = []
    mapped_pipelines = []
    for pipeline in pipelines:
        # if find a match fragility for that pipeline
        if pipeline["id"] in fragility_sets.keys():
            fragility_set = fragility_sets[pipeline["id"]]
            location = GeoUtil.get_location(pipeline)
            loc = str(location.y) + "," + str(location.x)
            demands = fragility_set.demand_types
            units = fragility_set.demand_units
            value = {"demands": demands, "units": units, "loc": loc}
            values_payload.append(value)
            mapped_pipelines.append(pipeline)

            # Check if liquefaction is applicable
            if use_liquefaction and \
                    geology_dataset_id is not None and \
                    fragility_sets_liq is not None and \
                    pipeline["id"] in fragility_sets_liq:
                fragility_set_liq = fragility_sets_liq[pipeline["id"]]
                demands_liq = fragility_set_liq.demand_types
                units_liq = fragility_set_liq.demand_units
                value_liq = {
                    "demands": demands_liq,
                    "units": units_liq,
                    "loc": loc
                }
                values_payload_liq.append(value_liq)
        else:
            unmapped_pipelines.append(pipeline)

    del pipelines

    if hazard_type == 'earthquake':
        hazard_resp = self.hazardsvc.post_earthquake_hazard_values(
            hazard_dataset_id, values_payload)
    elif hazard_type == 'tsunami':
        hazard_resp = self.hazardsvc.post_tsunami_hazard_values(
            hazard_dataset_id, values_payload)
    else:
        raise ValueError(
            "The provided hazard type is not supported yet by this analysis"
        )

    # Check if liquefaction is applicable.
    # Fix: initialize liquefaction_resp so the per-pipeline check below can
    # never hit an unbound name.
    liquefaction_resp = None
    if use_liquefaction is True and \
            fragility_sets_liq is not None and \
            geology_dataset_id is not None:
        liquefaction_resp = self.hazardsvc.post_liquefaction_values(
            hazard_dataset_id, geology_dataset_id, values_payload_liq)

    # calculate LS and DS
    ds_results = []
    damage_results = []
    for i, pipeline in enumerate(mapped_pipelines):
        # default
        pgv_repairs = None
        pgd_repairs = 0.0
        total_repair_rate = None
        break_rate = None
        leak_rate = None
        failure_probability = None
        num_pgv_repairs = None
        num_pgd_repairs = 0.0
        num_repairs = None
        liq_hazard_vals = None
        liq_demand_types = None
        liq_demand_units = None
        liquefaction_prob = None

        ds_result = dict()
        damage_result = dict()
        ds_result['guid'] = pipeline['properties']['guid']
        damage_result['guid'] = pipeline['properties']['guid']

        fragility_set = fragility_sets[pipeline["id"]]
        # TODO assume there is only one curve
        fragility_curve = fragility_set.fragility_curves[0]

        hazard_vals = AnalysisUtil.update_precision_of_lists(
            hazard_resp[i]["hazardValues"])
        demand_types = hazard_resp[i]["demands"]
        demand_units = hazard_resp[i]["units"]

        hval_dict = dict()
        for j, d in enumerate(fragility_set.demand_types):
            hval_dict[d] = hazard_vals[j]

        if not AnalysisUtil.do_hazard_values_have_errors(
                hazard_resp[i]["hazardValues"]):
            pipeline_args = fragility_set.construct_expression_args_from_inventory(
                pipeline)
            pgv_repairs = \
                fragility_curve.solve_curve_expression(
                    hval_dict, fragility_set.curve_parameters, **pipeline_args)
            # Convert PGV repairs to SI units
            pgv_repairs = PipelineUtil.convert_result_unit(
                fragility_curve.return_type["unit"], pgv_repairs)

            length = PipelineUtil.get_pipe_length(pipeline)

            # Number of PGV repairs
            num_pgv_repairs = pgv_repairs * length

            # Check if liquefaction is applicable
            if use_liquefaction is True \
                    and fragility_sets_liq is not None \
                    and geology_dataset_id is not None \
                    and liquefaction_resp is not None:
                fragility_set_liq = fragility_sets_liq[pipeline["id"]]

                # TODO assume there is only one curve
                liq_fragility_curve = fragility_set_liq.fragility_curves[0]

                liq_hazard_vals = AnalysisUtil.update_precision_of_lists(
                    liquefaction_resp[i]["pgdValues"])
                liq_demand_types = liquefaction_resp[i]["demands"]
                liq_demand_units = liquefaction_resp[i]["units"]
                liquefaction_prob = liquefaction_resp[i]['liqProbability']
                liq_hval_dict = dict()
                for j, d in enumerate(liquefaction_resp[i]["demands"]):
                    liq_hval_dict[d] = liq_hazard_vals[j]

                # !important! removing the liqProbability and passing in the "diameter"
                # no fragility is actually using liqProbability
                pipeline_args = fragility_set_liq.construct_expression_args_from_inventory(
                    pipeline)
                pgd_repairs = \
                    liq_fragility_curve.solve_curve_expression(
                        liq_hval_dict, fragility_set_liq.curve_parameters,
                        **pipeline_args)
                # Convert PGD repairs to SI units
                pgd_repairs = PipelineUtil.convert_result_unit(
                    liq_fragility_curve.return_type["unit"], pgd_repairs)

                num_pgd_repairs = pgd_repairs * length

            # record results
            if 'pipetype' in pipeline['properties']:
                damage_result['pipeclass'] = pipeline['properties'][
                    'pipetype']
            elif 'pipelinesc' in pipeline['properties']:
                damage_result['pipeclass'] = pipeline['properties'][
                    'pipelinesc']
            else:
                damage_result['pipeclass'] = ""

            break_rate = 0.2 * pgv_repairs + 0.8 * pgd_repairs
            leak_rate = 0.8 * pgv_repairs + 0.2 * pgd_repairs
            total_repair_rate = pgd_repairs + pgv_repairs
            failure_probability = 1 - math.exp(-1.0 * break_rate * length)
            num_repairs = num_pgd_repairs + num_pgv_repairs

        ds_result['pgvrepairs'] = pgv_repairs
        ds_result['pgdrepairs'] = pgd_repairs
        ds_result['repairspkm'] = total_repair_rate
        ds_result['breakrate'] = break_rate
        ds_result['leakrate'] = leak_rate
        ds_result['failprob'] = failure_probability
        ds_result['numpgvrpr'] = num_pgv_repairs
        ds_result['numpgdrpr'] = num_pgd_repairs
        ds_result['numrepairs'] = num_repairs
        ds_result[
            'haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(
            hazard_vals, hazard_type)

        damage_result['fragility_id'] = fragility_set.id
        damage_result['demandtypes'] = demand_types
        damage_result['demandunits'] = demand_units
        damage_result['hazardtype'] = hazard_type
        damage_result['hazardval'] = hazard_vals

        # Check if liquefaction is applicable
        if use_liquefaction is True \
                and fragility_sets_liq is not None \
                and geology_dataset_id is not None:
            damage_result['liq_fragility_id'] = fragility_sets_liq[
                pipeline["id"]].id
            damage_result['liqdemandtypes'] = liq_demand_types
            damage_result['liqdemandunits'] = liq_demand_units
            damage_result['liqhazval'] = liq_hazard_vals
            damage_result['liqprobability'] = liquefaction_prob
        else:
            damage_result['liq_fragility_id'] = None
            damage_result['liqdemandtypes'] = None
            damage_result['liqdemandunits'] = None
            damage_result['liqhazval'] = None
            damage_result['liqprobability'] = None

        ds_results.append(ds_result)
        damage_results.append(damage_result)

    # pipelines do not have matched mappings
    for pipeline in unmapped_pipelines:
        ds_result = dict()
        ds_result['guid'] = pipeline['properties']['guid']

        damage_result = dict()
        damage_result['guid'] = pipeline['properties']['guid']
        if 'pipetype' in pipeline['properties']:
            damage_result['pipeclass'] = pipeline['properties']['pipetype']
        elif 'pipelinesc' in pipeline['properties']:
            damage_result['pipeclass'] = pipeline['properties'][
                'pipelinesc']
        else:
            damage_result['pipeclass'] = ""

        damage_result['fragility_id'] = None
        damage_result['demandtypes'] = None
        damage_result['demandunits'] = None
        damage_result['hazardtype'] = None
        damage_result['hazardval'] = None
        damage_result['liq_fragility_id'] = None
        damage_result['liqdemandtypes'] = None
        damage_result['liqdemandunits'] = None
        damage_result['liqhazval'] = None
        # Fix: this line was a duplicated 'liqhazval' assignment, leaving
        # unmapped rows without the 'liqprobability' key that mapped rows have.
        damage_result['liqprobability'] = None

        ds_results.append(ds_result)
        damage_results.append(damage_result)

    return ds_results, damage_results
def waterfacilityset_damage_analysis_bulk_input(self, facilities,
                                                hazard_type,
                                                hazard_dataset_id):
    """Gets applicable fragilities and calculates damage

    Args:
        facilities (list): Multiple water facilities from input inventory set.
        hazard_type (str): A hazard type of the hazard exposure (earthquake,
            tsunami, tornado, or hurricane).
        hazard_dataset_id (str): An id of the hazard exposure.

    Returns:
        list: A list of ordered dictionaries with water facility damage values
        list: A list of ordered dictionaries with other water facility data/metadata
    """
    # Liquefaction related variables
    use_liquefaction = False
    liquefaction_available = False
    fragility_sets_liq = None
    liquefaction_resp = None
    geology_dataset_id = None
    liq_hazard_vals = None
    liq_demand_types = None
    liq_demand_units = None
    liquefaction_prob = None
    loc = None

    # Obtain the fragility key; when unset, pick the hazard-specific default
    # and persist it back as a parameter
    fragility_key = self.get_parameter("fragility_key")

    if fragility_key is None:
        if hazard_type == 'tsunami':
            fragility_key = self.DEFAULT_TSU_FRAGILITY_KEY
        elif hazard_type == 'earthquake':
            fragility_key = self.DEFAULT_EQ_FRAGILITY_KEY
        else:
            raise ValueError(
                "Hazard type other than Earthquake and Tsunami are not currently supported.")

        self.set_parameter("fragility_key", fragility_key)

    # Obtain the fragility set
    fragility_sets = self.fragilitysvc.match_inventory(
        self.get_input_dataset("dfr3_mapping_set"), facilities,
        fragility_key)

    # Obtain the liquefaction fragility Key
    liquefaction_fragility_key = self.get_parameter(
        "liquefaction_fragility_key")

    # Liquefaction is only considered for earthquakes when explicitly enabled
    if hazard_type == "earthquake":
        if self.get_parameter("use_liquefaction") is True:
            if liquefaction_fragility_key is None:
                liquefaction_fragility_key = self.DEFAULT_LIQ_FRAGILITY_KEY

            use_liquefaction = self.get_parameter("use_liquefaction")

            # Obtain the geology dataset
            geology_dataset_id = self.get_parameter(
                "liquefaction_geology_dataset_id")

            if geology_dataset_id is not None:
                fragility_sets_liq = self.fragilitysvc.match_inventory(
                    self.get_input_dataset("dfr3_mapping_set"), facilities,
                    liquefaction_fragility_key)

                if fragility_sets_liq is not None:
                    liquefaction_available = True

    # Determine whether to use hazard uncertainty
    uncertainty = self.get_parameter("use_hazard_uncertainty")

    # Setup fragility translation structures
    values_payload = []
    values_payload_liq = []
    unmapped_waterfacilities = []
    mapped_waterfacilities = []

    for facility in facilities:
        if facility["id"] in fragility_sets.keys():
            # Fill in generic details
            fragility_set = fragility_sets[facility["id"]]
            location = GeoUtil.get_location(facility)
            loc = str(location.y) + "," + str(location.x)
            demands = fragility_set.demand_types
            units = fragility_set.demand_units
            value = {"demands": demands, "units": units, "loc": loc}
            values_payload.append(value)
            mapped_waterfacilities.append(facility)

            # Fill in liquefaction parameters
            if liquefaction_available and facility[
                    "id"] in fragility_sets_liq:
                fragility_set_liq = fragility_sets_liq[facility["id"]]
                demands_liq = fragility_set_liq.demand_types
                units_liq = fragility_set_liq.demand_units
                value_liq = {
                    "demands": demands_liq,
                    "units": units_liq,
                    "loc": loc
                }
                values_payload_liq.append(value_liq)
        else:
            unmapped_waterfacilities.append(facility)

    del facilities

    # Fetch hazard values for all mapped facilities in one bulk request
    if hazard_type == 'earthquake':
        hazard_resp = self.hazardsvc.post_earthquake_hazard_values(
            hazard_dataset_id, values_payload)
    elif hazard_type == 'tsunami':
        hazard_resp = self.hazardsvc.post_tsunami_hazard_values(
            hazard_dataset_id, values_payload)
    else:
        raise ValueError(
            "The provided hazard type is not supported yet by this analysis"
        )

    # Check if liquefaction is applicable
    if liquefaction_available:
        liquefaction_resp = self.hazardsvc.post_liquefaction_values(
            hazard_dataset_id, geology_dataset_id, values_payload_liq)

    # Calculate LS and DS
    facility_results = []
    damage_results = []

    for i, facility in enumerate(mapped_waterfacilities):
        fragility_set = fragility_sets[facility["id"]]
        limit_states = dict()
        dmg_intervals = dict()

        # Setup conditions for the analysis
        hazard_std_dev = 0
        if uncertainty:
            # NOTE(review): std dev drawn from random.random() -- relies on
            # global random state; confirm seeding expectations with callers
            hazard_std_dev = random.random()

        if isinstance(fragility_set.fragility_curves[0], DFR3Curve):
            hazard_vals = AnalysisUtil.update_precision_of_lists(
                hazard_resp[i]["hazardValues"])
            demand_types = hazard_resp[i]["demands"]
            demand_units = hazard_resp[i]["units"]

            hval_dict = dict()
            for j, d in enumerate(fragility_set.demand_types):
                hval_dict[d] = hazard_vals[j]

            if not AnalysisUtil.do_hazard_values_have_errors(
                    hazard_resp[i]["hazardValues"]):
                facility_args = fragility_set.construct_expression_args_from_inventory(
                    facility)
                limit_states = \
                    fragility_set.calculate_limit_state(
                        hval_dict,
                        std_dev=hazard_std_dev,
                        inventory_type='water_facility',
                        **facility_args)
                # Evaluate liquefaction: if it is not none, then liquefaction is available
                if liquefaction_resp is not None:
                    fragility_set_liq = fragility_sets_liq[facility["id"]]

                    if isinstance(fragility_set_liq.fragility_curves[0],
                                  DFR3Curve):
                        liq_hazard_vals = AnalysisUtil.update_precision_of_lists(
                            liquefaction_resp[i]["pgdValues"])
                        liq_demand_types = liquefaction_resp[i]["demands"]
                        liq_demand_units = liquefaction_resp[i]["units"]
                        liquefaction_prob = liquefaction_resp[i][
                            'liqProbability']

                        hval_dict_liq = dict()
                        for j, d in enumerate(
                                fragility_set_liq.demand_types):
                            hval_dict_liq[d] = liq_hazard_vals[j]

                        facility_liq_args = fragility_set_liq.construct_expression_args_from_inventory(
                            facility)
                        pgd_limit_states = \
                            fragility_set_liq.calculate_limit_state(
                                hval_dict_liq,
                                std_dev=hazard_std_dev,
                                inventory_type="water_facility",
                                **facility_liq_args)
                    else:
                        raise ValueError(
                            "One of the fragilities is in deprecated format. "
                            "This should not happen If you are seeing this please report the issue."
                        )

                    # Combine the shaking limit states with the PGD ones
                    limit_states = AnalysisUtil.adjust_limit_states_for_pgd(
                        limit_states, pgd_limit_states)

                dmg_intervals = fragility_set.calculate_damage_interval(
                    limit_states, hazard_type=hazard_type,
                    inventory_type='water_facility')
        else:
            raise ValueError(
                "One of the fragilities is in deprecated format. This should not happen. If you are "
                "seeing this please report the issue.")

        # TODO: ideally, this goes into a single variable declaration section
        facility_result = {
            'guid': facility['properties']['guid'],
            **limit_states,
            **dmg_intervals
        }
        facility_result[
            'haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(
            hazard_vals, hazard_type)
        damage_result = dict()
        damage_result['guid'] = facility['properties']['guid']
        damage_result['fragility_id'] = fragility_set.id
        damage_result['demandtypes'] = demand_types
        damage_result['demandunits'] = demand_units
        damage_result['hazardtype'] = hazard_type
        damage_result['hazardvals'] = hazard_vals

        # liquefaction metadata only when it was actually requested and mapped
        if use_liquefaction and fragility_sets_liq and geology_dataset_id:
            damage_result['liq_fragility_id'] = fragility_sets_liq[
                facility["id"]].id
            damage_result['liqdemandtypes'] = liq_demand_types
            damage_result['liqdemandunits'] = liq_demand_units
            damage_result['liqhazval'] = liq_hazard_vals
            damage_result['liqprobability'] = liquefaction_prob
        else:
            damage_result['liq_fragility_id'] = None
            damage_result['liqdemandtypes'] = None
            damage_result['liqdemandunits'] = None
            damage_result['liqhazval'] = None
            damage_result['liqprobability'] = None

        facility_results.append(facility_result)
        damage_results.append(damage_result)

    # Facilities with no matching fragility mapping get placeholder rows
    for facility in unmapped_waterfacilities:
        facility_result = dict()
        damage_result = dict()

        facility_result['guid'] = facility['properties']['guid']
        damage_result['guid'] = facility['properties']['guid']
        damage_result['fragility_id'] = None
        damage_result['demandtypes'] = None
        damage_result['demandunits'] = None
        damage_result['hazardtype'] = None
        damage_result['hazardvals'] = None
        damage_result['liq_fragility_id'] = None
        damage_result['liqdemandtypes'] = None
        damage_result['liqdemandunits'] = None
        damage_result['liqhazval'] = None
        damage_result['liqprobability'] = None

        facility_results.append(facility_result)
        damage_results.append(damage_result)

    return facility_results, damage_results
def road_damage_analysis_bulk_input(self, roads, hazard_type, hazard_dataset_id, use_hazard_uncertainty,
                                    geology_dataset_id, fragility_key, use_liquefaction):
    """Run analysis for multiple roads.

    Args:
        roads (list): Multiple roads from input inventory set.
        hazard_type (str): A hazard type of the hazard exposure (earthquake or tsunami).
        hazard_dataset_id (str): An id of the hazard exposure.
        use_hazard_uncertainty(bool): Flag to indicate use uncertainty or not
        geology_dataset_id (str): An id of the geology for use in liquefaction.
        fragility_key (str): Fragility key describing the type of fragility.
        use_liquefaction (bool): Liquefaction. True for using liquefaction information to modify the damage,
            False otherwise.

    Returns:
        list: A list of ordered dictionaries with road damage values and other data/metadata.
        list: A list of ordered dictionaries with other road data/metadata.

    """
    # Map each road to its fragility set via the DFR3 mapping service.
    fragility_sets = self.fragilitysvc.match_inventory(self.get_input_dataset("dfr3_mapping_set"), roads,
                                                       fragility_key)

    values_payload = []
    mapped_roads = []
    unmapped_roads = []
    pgd_flag = True  # for liquefaction
    liquefaction_resp = None
    for road in roads:
        if road["id"] in fragility_sets.keys():
            fragility_set = fragility_sets[road["id"]]
            location = GeoUtil.get_location(road)
            loc = str(location.y) + "," + str(location.x)
            demands = fragility_set.demand_types
            # for liquefaction
            # Liquefaction is only requested below when every mapped road's
            # fragility demands are pgd; one non-pgd demand disables it globally.
            if any(demand.lower() != 'pgd' for demand in demands):
                pgd_flag = False
            units = fragility_set.demand_units
            value = {
                "demands": demands,
                "units": units,
                "loc": loc
            }
            values_payload.append(value)
            mapped_roads.append(road)
        else:
            # No fragility mapped for this road; reported with null metadata below.
            unmapped_roads.append(road)

    del roads

    # get hazard and liquefaction values
    if hazard_type == 'earthquake':
        hazard_resp = self.hazardsvc.post_earthquake_hazard_values(hazard_dataset_id, values_payload)
        if pgd_flag and use_liquefaction and geology_dataset_id is not None:
            liquefaction_resp = self.hazardsvc.post_liquefaction_values(hazard_dataset_id, geology_dataset_id,
                                                                        values_payload)
    elif hazard_type == 'tsunami':
        hazard_resp = self.hazardsvc.post_tsunami_hazard_values(hazard_dataset_id, values_payload)
    elif hazard_type == 'hurricane':
        hazard_resp = self.hazardsvc.post_hurricane_hazard_values(hazard_dataset_id, values_payload)
    else:
        raise ValueError("The provided hazard type is not supported yet by this analysis")

    # calculate LS and DS
    # NOTE: results are matched to roads by index i — the hazard service is
    # assumed to return values in the same order as values_payload.
    ds_results = []
    damage_results = []
    for i, road in enumerate(mapped_roads):
        dmg_probability = dict()
        dmg_interval = dict()
        demand_types_liq = None
        demand_units_liq = None
        liq_hazard_vals = None
        liquefaction_prob = None

        selected_fragility_set = fragility_sets[road["id"]]

        hazard_std_dev = 0.0
        if use_hazard_uncertainty:
            raise ValueError("Uncertainty Not Implemented Yet.")

        if isinstance(selected_fragility_set.fragility_curves[0], DFR3Curve):
            hazard_vals = AnalysisUtil.update_precision_of_lists(hazard_resp[i]["hazardValues"])
            demand_types = hazard_resp[i]["demands"]
            demand_units = hazard_resp[i]["units"]
            hval_dict = dict()
            # Hazard values come back positionally aligned with the fragility's demand types.
            for j, d in enumerate(selected_fragility_set.demand_types):
                hval_dict[d] = hazard_vals[j]
            if not AnalysisUtil.do_hazard_values_have_errors(hazard_resp[i]["hazardValues"]):
                road_args = selected_fragility_set.construct_expression_args_from_inventory(road)
                dmg_probability = selected_fragility_set.calculate_limit_state(
                    hval_dict, inventory_type='road', **road_args)

                # if there is liquefaction, overwrite the hazardval with liquefaction value
                # recalculate dmg_probability and dmg_interval
                if liquefaction_resp is not None and len(liquefaction_resp) > 0:
                    liq_hazard_vals = AnalysisUtil.update_precision_of_lists(liquefaction_resp[i]["pgdValues"])
                    demand_types_liq = liquefaction_resp[i]['demands']
                    demand_units_liq = liquefaction_resp[i]['units']
                    liquefaction_prob = liquefaction_resp[i]['liqProbability']
                    liq_hval_dict = dict()
                    for j, d in enumerate(liquefaction_resp[i]["demands"]):
                        liq_hval_dict[d] = liq_hazard_vals[j]
                    dmg_probability = selected_fragility_set.calculate_limit_state(
                        liq_hval_dict, inventory_type='road', **road_args)

                dmg_interval = selected_fragility_set.calculate_damage_interval(dmg_probability,
                                                                                hazard_type=hazard_type,
                                                                                inventory_type="road")
        else:
            raise ValueError("One of the fragilities is in deprecated format. This should not happen. If you are "
                             "seeing this please report the issue.")

        ds_result = dict()
        ds_result['guid'] = road['properties']['guid']
        ds_result.update(dmg_probability)
        ds_result.update(dmg_interval)
        ds_result['haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(hazard_vals, hazard_type)

        damage_result = dict()
        damage_result['guid'] = road['properties']['guid']
        damage_result['fragility_id'] = selected_fragility_set.id
        damage_result['demandtypes'] = demand_types
        damage_result['demandunits'] = demand_units
        damage_result['hazardtype'] = hazard_type
        damage_result['hazardvals'] = hazard_vals
        damage_result['liqdemandtypes'] = demand_types_liq
        damage_result['liqdemandunits'] = demand_units_liq
        damage_result['liqhazvals'] = liq_hazard_vals
        damage_result['liqprobability'] = liquefaction_prob

        ds_results.append(ds_result)
        damage_results.append(damage_result)

    # Roads without a mapped fragility still get a row, with null metadata.
    for road in unmapped_roads:
        ds_result = dict()
        damage_result = dict()
        ds_result['guid'] = road['properties']['guid']
        damage_result['guid'] = road['properties']['guid']
        damage_result['fragility_id'] = None
        damage_result['demandtypes'] = None
        damage_result['demandunits'] = None
        damage_result['hazardtype'] = None
        damage_result['hazardvals'] = None
        damage_result['liqdemandtypes'] = None
        damage_result['liqdemandunits'] = None
        damage_result['liqhazvals'] = None
        damage_result['liqprobability'] = None
        ds_results.append(ds_result)
        damage_results.append(damage_result)

    return ds_results, damage_results
def run(self):
    """Executes road damage analysis."""
    # Load the road inventory.
    road_set = self.get_input_dataset("roads").get_inventory_reader()

    # Resolve the fragility key, falling back to the class default.
    fragility_key = self.get_parameter("fragility_key")
    if fragility_key is None:
        fragility_key = self.DEFAULT_FRAGILITY_KEY

    # Hazard identifiers.
    hazard_dataset_id = self.get_parameter("hazard_id")
    hazard_type = self.get_parameter("hazard_type")

    # Optional liquefaction settings (default: disabled / no geology dataset).
    liq_param = self.get_parameter("use_liquefaction")
    use_liquefaction = False if liq_param is None else liq_param

    geology_param = self.get_parameter("liquefaction_geology_dataset_id")
    geology_dataset_id = None if geology_param is None else geology_param

    # Optional hazard uncertainty flag (default: disabled).
    uncertainty_param = self.get_parameter("use_hazard_uncertainty")
    use_hazard_uncertainty = False if uncertainty_param is None else uncertainty_param

    # Determine the worker count from the user-requested CPU budget.
    cpu_param = self.get_parameter("num_cpu")
    user_defined_cpu = cpu_param if (cpu_param is not None and cpu_param > 0) else 1
    num_workers = AnalysisUtil.determine_parallelism_locally(self, len(road_set), user_defined_cpu)

    # Split the inventory into roughly equal chunks, one per worker.
    avg_bulk_input_size = int(len(road_set) / num_workers)
    inventory_list = list(road_set)
    inventory_args = []
    offset = 0
    while offset < len(inventory_list):
        inventory_args.append(inventory_list[offset:offset + avg_bulk_input_size])
        offset += avg_bulk_input_size

    # Fan the chunks out to the concurrent executor; per-chunk parameters are repeated.
    (ds_results, damage_results) = self.road_damage_concurrent_future(
        self.road_damage_analysis_bulk_input, num_workers, inventory_args,
        repeat(hazard_type), repeat(hazard_dataset_id),
        repeat(use_hazard_uncertainty), repeat(geology_dataset_id),
        repeat(fragility_key), repeat(use_liquefaction))

    self.set_result_csv_data("result", ds_results,
                             name=self.get_parameter("result_name"))
    self.set_result_json_data("metadata", damage_results,
                              name=self.get_parameter("result_name") + "_additional_info")
    return True
def epf_damage_analysis_bulk_input(self, epfs, hazard_type, hazard_dataset_id,
                                   use_hazard_uncertainty, use_liquefaction,
                                   liq_geology_dataset_id):
    """Run analysis for multiple epfs.

    Args:
        epfs (list): Multiple epfs from input inventory set.
        hazard_type (str): A type of hazard exposure (earthquake, tsunami, tornado, or hurricane).
        hazard_dataset_id (str): An id of the hazard exposure.
        use_hazard_uncertainty (bool): Hazard uncertainty. True for using uncertainty when computing damage,
            False otherwise.
        use_liquefaction (bool): Liquefaction. True for using liquefaction information to modify the damage,
            False otherwise.
        liq_geology_dataset_id (str): A dataset id for geology dataset for liquefaction.

    Returns:
        list: A list of ordered dictionaries with epf damage values and other data/metadata.

    """
    # NOTE(review): `result` is never used below — dead assignment.
    result = []
    fragility_key = self.get_parameter("fragility_key")
    # NOTE(review): this empty dict is immediately overwritten by match_inventory.
    fragility_set = dict()
    fragility_set = self.fragilitysvc.match_inventory(
        self.get_input_dataset("dfr3_mapping_set"), epfs, fragility_key)
    epf_results = []

    # Converting list of epfs into a dictionary for ease of reference
    list_epfs = epfs
    epfs = dict()
    for epf in list_epfs:
        epfs[epf["id"]] = epf
    del list_epfs  # Clear as it's not needed anymore

    processed_epf = []
    grouped_epfs = AnalysisUtil.group_by_demand_type(epfs, fragility_set)
    for demand, grouped_epf_items in grouped_epfs.items():
        input_demand_type = demand[0]
        input_demand_units = demand[1]

        # For every group of unique demand and demand unit, call the end-point once
        epf_chunks = list(AnalysisUtil.chunks(grouped_epf_items, 50))
        for epf_chunk in epf_chunks:
            points = []
            for epf_id in epf_chunk:
                location = GeoUtil.get_location(epfs[epf_id])
                points.append(str(location.y) + "," + str(location.x))

            if hazard_type == 'earthquake':
                hazard_vals = self.hazardsvc.get_earthquake_hazard_values(
                    hazard_dataset_id, input_demand_type,
                    input_demand_units, points)
            elif hazard_type == 'tornado':
                # Tornado endpoint takes no demand type, only units.
                hazard_vals = self.hazardsvc.get_tornado_hazard_values(
                    hazard_dataset_id, input_demand_units, points)
            elif hazard_type == 'hurricane':
                # TODO: implement hurricane
                raise ValueError(
                    'Hurricane hazard has not yet been implemented!')
            elif hazard_type == 'tsunami':
                hazard_vals = self.hazardsvc.get_tsunami_hazard_values(
                    hazard_dataset_id, input_demand_type,
                    input_demand_units, points)
            else:
                raise ValueError("Missing hazard type.")

            # Parse the batch hazard value results and map them back to the building and fragility.
            # This is a potential pitfall as we are relying on the order of the returned results
            i = 0
            for epf_id in epf_chunk:
                epf_result = collections.OrderedDict()
                epf = epfs[epf_id]
                hazard_val = hazard_vals[i]['hazardValue']

                # Sometimes the geotiffs give large negative values for out of bounds instead of 0
                if hazard_val <= 0.0:
                    hazard_val = 0.0

                std_dev = 0.0
                if use_hazard_uncertainty:
                    raise ValueError("Uncertainty Not Implemented!")

                selected_fragility_set = fragility_set[epf_id]
                limit_states = selected_fragility_set.calculate_limit_state(
                    hazard_val, std_dev=std_dev)
                dmg_interval = AnalysisUtil.calculate_damage_interval(
                    limit_states)

                epf_result['guid'] = epf['properties']['guid']
                epf_result.update(limit_states)
                epf_result.update(dmg_interval)
                epf_result['demandtype'] = input_demand_type
                epf_result['demandunits'] = input_demand_units
                epf_result['hazardtype'] = hazard_type
                epf_result['hazardval'] = hazard_val

                epf_results.append(epf_result)
                processed_epf.append(epf_id)
                i = i + 1

    # when there is liquefaction, limit state need to be modified
    if hazard_type == 'earthquake' and use_liquefaction and liq_geology_dataset_id is not None:
        liq_fragility_key = self.get_parameter(
            "liquefaction_fragility_key")
        if liq_fragility_key is None:
            liq_fragility_key = self.DEFAULT_LIQ_FRAGILITY_KEY
        liq_fragility_set = self.fragilitysvc.match_inventory(
            self.get_input_dataset("dfr3_mapping_set"), epfs,
            liq_fragility_key)
        grouped_liq_epfs = AnalysisUtil.group_by_demand_type(
            epfs, liq_fragility_set)

        for liq_demand, grouped_liq_epf_items in grouped_liq_epfs.items():
            liq_input_demand_type = liq_demand[0]
            liq_input_demand_units = liq_demand[1]

            # For every group of unique demand and demand unit, call the end-point once
            liq_epf_chunks = list(
                AnalysisUtil.chunks(grouped_liq_epf_items, 50))
            for liq_epf_chunk in liq_epf_chunks:
                points = []
                for liq_epf_id in liq_epf_chunk:
                    location = GeoUtil.get_location(epfs[liq_epf_id])
                    points.append(str(location.y) + "," + str(location.x))
                liquefaction_vals = self.hazardsvc.get_liquefaction_values(
                    hazard_dataset_id, liq_geology_dataset_id,
                    liq_input_demand_units, points)

                # Parse the batch hazard value results and map them back to the building and fragility.
                # This is a potential pitfall as we are relying on the order of the returned results
                i = 0
                for liq_epf_id in liq_epf_chunk:
                    liq_hazard_val = liquefaction_vals[i][
                        liq_input_demand_type]

                    std_dev = 0.0
                    if use_hazard_uncertainty:
                        raise ValueError("Uncertainty Not Implemented!")

                    liquefaction_prob = liquefaction_vals[i][
                        'liqProbability']

                    selected_liq_fragility = liq_fragility_set[liq_epf_id]
                    pgd_limit_states = selected_liq_fragility.calculate_limit_state(
                        liq_hazard_val, std_dev=std_dev)

                    # match id and add liqhaztype, liqhazval, liqprobability field as well as rewrite limit
                    # states and dmg_interval
                    for epf_result in epf_results:
                        if epf_result['guid'] == epfs[liq_epf_id]['guid']:
                            limit_states = {
                                "ls-slight": epf_result['ls-slight'],
                                "ls-moderat": epf_result['ls-moderat'],
                                "ls-extensi": epf_result['ls-extensi'],
                                "ls-complet": epf_result['ls-complet']
                            }
                            liq_limit_states = AnalysisUtil.adjust_limit_states_for_pgd(
                                limit_states, pgd_limit_states)
                            liq_dmg_interval = AnalysisUtil.calculate_damage_interval(
                                liq_limit_states)
                            epf_result.update(liq_limit_states)
                            epf_result.update(liq_dmg_interval)
                            epf_result[
                                'liqhaztype'] = liq_input_demand_type
                            epf_result['liqhazval'] = liq_hazard_val
                            epf_result[
                                'liqprobability'] = liquefaction_prob
                    i = i + 1

    # EPFs that never got a fragility mapping are reported with zeroed limit states.
    unmapped_limit_states = {
        "ls-slight": 0.0,
        "ls-moderat": 0.0,
        "ls-extensi": 0.0,
        "ls-complet": 0.0
    }
    unmapped_dmg_intervals = AnalysisUtil.calculate_damage_interval(
        unmapped_limit_states)
    for epf_id, epf in epfs.items():
        if epf_id not in processed_epf:
            unmapped_epf_result = collections.OrderedDict()
            unmapped_epf_result['guid'] = epf['properties']['guid']
            unmapped_epf_result.update(unmapped_limit_states)
            unmapped_epf_result.update(unmapped_dmg_intervals)
            unmapped_epf_result["demandtype"] = "None"
            unmapped_epf_result['demandunits'] = "None"
            unmapped_epf_result["hazardtype"] = "None"
            unmapped_epf_result['hazardval'] = 0.0
            unmapped_epf_result['liqhaztype'] = "NA"
            unmapped_epf_result['liqhazval'] = "NA"
            unmapped_epf_result['liqprobability'] = "NA"
            epf_results.append(unmapped_epf_result)

    return epf_results
def waterfacility_damage_analysis(self, facility, fragility, liq_fragility,
                                  hazard_type, hazard_dataset_id,
                                  liq_geology_dataset_id, uncertainty):
    """Computes damage analysis for a single facility.

    Args:
        facility (obj): A JSON mapping of a facility based on mapping attributes.
        fragility (obj): A JSON description of fragility mapped to the building.
        liq_fragility (obj): A JSON description of liquefaction fragility mapped to the building.
        hazard_type (str): A string that indicates the hazard type.
        hazard_dataset_id (str): Hazard id from the hazard service.
        liq_geology_dataset_id (str): Geology dataset id from data service to use for
            liquefaction calculation, if applicable.
        uncertainty (bool): Whether to use hazard standard deviation values for uncertainty.

    Returns:
        OrderedDict: A dictionary with water facility damage values and other data/metadata.

    """
    std_dev = 0
    if uncertainty:
        # NOTE(review): unseeded uniform sample — runs with uncertainty are not
        # reproducible; confirm whether a seeded generator is intended.
        std_dev = random.random()

    hazard_demand_type = fragility.demand_type
    demand_units = fragility.demand_units
    liq_hazard_type = ""
    liq_hazard_val = 0.0
    liquefaction_prob = 0.0
    location = GeoUtil.get_location(facility)
    point = str(location.y) + "," + str(location.x)

    if hazard_type == "earthquake":
        hazard_val_set = self.hazardsvc.get_earthquake_hazard_values(
            hazard_dataset_id, hazard_demand_type, demand_units, [point])
    elif hazard_type == "tsunami":
        hazard_val_set = self.hazardsvc.get_tsunami_hazard_values(
            hazard_dataset_id, hazard_demand_type, demand_units, [point])
    else:
        raise ValueError(
            "Hazard type other than Earthquake and Tsunami are not currently supported.")

    hazard_val = hazard_val_set[0]['hazardValue']
    # Geotiffs can return negative values for out-of-bounds points; clamp to 0.
    if hazard_val < 0:
        hazard_val = 0

    limit_states = fragility.calculate_limit_state(hazard_val, std_dev)

    if liq_fragility is not None and liq_geology_dataset_id:
        liq_hazard_type = liq_fragility.demand_type
        pgd_demand_units = liq_fragility.demand_units
        # `point` is reused; it was already computed above from the same location.
        liquefaction = self.hazardsvc.get_liquefaction_values(
            hazard_dataset_id, liq_geology_dataset_id,
            pgd_demand_units, [point])
        liq_hazard_val = liquefaction[0][liq_hazard_type]
        liquefaction_prob = liquefaction[0]['liqProbability']
        pgd_limit_states = liq_fragility.calculate_limit_state(liq_hazard_val, std_dev)
        limit_states = AnalysisUtil.adjust_limit_states_for_pgd(
            limit_states, pgd_limit_states)

    dmg_intervals = AnalysisUtil.calculate_damage_interval(limit_states)

    metadata = collections.OrderedDict()
    metadata['guid'] = facility['properties']['guid']
    metadata['hazardtype'] = hazard_type
    metadata['demandtype'] = hazard_demand_type
    metadata['hazardval'] = hazard_val
    metadata['liqhaztype'] = liq_hazard_type
    metadata['liqhazval'] = liq_hazard_val
    metadata['liqprobability'] = liquefaction_prob

    # Single merge: metadata keys first, then limit states, then damage intervals
    # (same key order as the original two-step merge). Needs py 3.5+.
    result = {**metadata, **limit_states, **dmg_intervals}
    return result
def building_damage_analysis_bulk_input(self, buildings):
    """Run analysis for multiple buildings.

    Args:
        buildings (list): Multiple buildings from input inventory set.

    Returns:
        dict: An ordered dictionary with building damage values.
        dict: An ordered dictionary with building data/metadata.

    """
    # read static parameters from object self
    hazard_type = self.get_parameter("hazard_type")
    hazard_dataset_id = self.get_parameter("hazard_id")
    liq_geology_dataset_id = self.get_parameter("liq_geology_dataset_id")
    use_liquefaction = self.get_parameter("use_liquefaction")
    use_hazard_uncertainty = self.get_parameter("use_hazard_uncertainty")

    building_results = []
    damage_results = []
    fragility_sets_as = self.fragilitysvc.match_inventory(
        self.get_input_dataset("dfr3_mapping_set"), buildings,
        self.get_parameter("fragility_key_as"))
    fragility_sets_ds = self.fragilitysvc.match_inventory(
        self.get_input_dataset("dfr3_mapping_set"), buildings,
        self.get_parameter("fragility_key_ds"))
    values_payload_as = []
    values_payload_ds = []
    values_payload_liq = []
    mapped_buildings = []
    unmapped_buildings = []
    for building in buildings:
        # A building is processed only if BOTH an acceleration-sensitive (AS)
        # and a drift-sensitive (DS) fragility were mapped to it.
        if building["id"] in fragility_sets_as and building[
                "id"] in fragility_sets_ds:
            fragility_set_as = fragility_sets_as[building["id"]]
            fragility_set_ds = fragility_sets_ds[building["id"]]
            location = GeoUtil.get_location(building)
            loc = str(location.y) + "," + str(location.x)

            # Acceleration-Sensitive
            demands_as = AnalysisUtil.get_hazard_demand_types(
                building, fragility_set_as, hazard_type)
            units_as = fragility_set_as.demand_units
            value_as = {
                "demands": demands_as,
                "units": units_as,
                "loc": loc
            }
            values_payload_as.append(value_as)

            # Drift-Sensitive
            demands_ds = AnalysisUtil.get_hazard_demand_types(
                building, fragility_set_ds, hazard_type)
            units_ds = fragility_set_ds.demand_units
            value_ds = {
                "demands": demands_ds,
                "units": units_ds,
                "loc": loc
            }
            values_payload_ds.append(value_ds)

            # liquefaction
            if use_liquefaction:
                value_liq = {
                    "demands": ["pgd"],  # implied...
                    "units": ["in"],
                    "loc": loc
                }
                values_payload_liq.append(value_liq)
            mapped_buildings.append(building)
        else:
            unmapped_buildings.append(building)

    del buildings

    # get hazard values and liquefaction
    if hazard_type == 'earthquake':
        hazard_resp_as = self.hazardsvc.post_earthquake_hazard_values(
            hazard_dataset_id, values_payload_as)
        hazard_resp_ds = self.hazardsvc.post_earthquake_hazard_values(
            hazard_dataset_id, values_payload_ds)

        # adjust dmg probability for liquefaction
        if use_liquefaction:
            if liq_geology_dataset_id is not None:
                liquefaction_resp = self.hazardsvc.post_liquefaction_values(
                    hazard_dataset_id, liq_geology_dataset_id,
                    values_payload_liq)
            else:
                raise ValueError(
                    'Hazard does not support liquefaction! Check to make sure you defined the '
                    'liquefaction portion of your scenario earthquake.')
    else:
        raise ValueError(
            "The provided hazard type is not supported yet by this analysis"
        )

    # calculate LS and DS
    # NOTE: responses are matched to buildings by index i — we rely on the
    # hazard service preserving the order of the submitted payloads.
    for i, building in enumerate(mapped_buildings):
        dmg_probability_as = {"LS_0": None, "LS_1": None, "LS_2": None}
        dmg_interval_as = {
            "DS_0": None,
            "DS_1": None,
            "DS_2": None,
            "DS_3": None
        }
        dmg_probability_ds = {"LS_0": None, "LS_1": None, "LS_2": None}
        dmg_interval_ds = {
            "DS_0": None,
            "DS_1": None,
            "DS_2": None,
            "DS_3": None
        }
        fragility_set_as = fragility_sets_as[building["id"]]
        fragility_set_ds = fragility_sets_ds[building["id"]]

        # TODO this value needs to come from the hazard service
        # adjust dmg probability for hazard uncertainty
        if use_hazard_uncertainty:
            raise ValueError('Uncertainty has not yet been implemented!')

        ###############
        # AS
        if isinstance(fragility_set_as.fragility_curves[0], DFR3Curve):
            hazard_vals_as = AnalysisUtil.update_precision_of_lists(
                hazard_resp_as[i]["hazardValues"])
            demand_types_as = hazard_resp_as[i]["demands"]
            demand_units_as = hazard_resp_as[i]["units"]
            hval_dict_as = dict()
            for j, d in enumerate(fragility_set_as.demand_types):
                hval_dict_as[d] = hazard_vals_as[j]
            if not AnalysisUtil.do_hazard_values_have_errors(
                    hazard_resp_as[i]["hazardValues"]):
                building_args = fragility_set_as.construct_expression_args_from_inventory(
                    building)
                dmg_probability_as = fragility_set_as. \
                    calculate_limit_state(hval_dict_as,
                                          inventory_type="building",
                                          **building_args)
                # adjust dmg probability for liquefaction
                if use_liquefaction:
                    if liq_geology_dataset_id is not None:
                        liquefaction_dmg = AnalysisUtil.update_precision_of_lists(
                            liquefaction_resp[i]["groundFailureProb"])
                        dmg_probability_as = AnalysisUtil.update_precision_of_dicts(
                            NonStructBuildingUtil.
                            adjust_damage_for_liquefaction(
                                dmg_probability_as, liquefaction_dmg))

                # FIX: the AS damage interval must be derived from the AS
                # fragility set; previously this called fragility_set_ds
                # (copy-paste from the DS section below).
                dmg_interval_as = fragility_set_as.calculate_damage_interval(
                    dmg_probability_as,
                    hazard_type=hazard_type,
                    inventory_type="building")
        else:
            raise ValueError(
                "One of the fragilities is in deprecated format. This should not happen. If you are "
                "seeing this please report the issue.")

        ###############
        # DS
        if isinstance(fragility_set_ds.fragility_curves[0], DFR3Curve):
            hazard_vals_ds = AnalysisUtil.update_precision_of_lists(
                hazard_resp_ds[i]["hazardValues"])
            demand_types_ds = hazard_resp_ds[i]["demands"]
            demand_units_ds = hazard_resp_ds[i]["units"]
            hval_dict_ds = dict()
            for j, d in enumerate(fragility_set_ds.demand_types):
                hval_dict_ds[d] = hazard_vals_ds[j]

            if not AnalysisUtil.do_hazard_values_have_errors(
                    hazard_resp_ds[i]["hazardValues"]):
                building_args = fragility_set_ds.construct_expression_args_from_inventory(
                    building)
                dmg_probability_ds = fragility_set_ds. \
                    calculate_limit_state(hval_dict_ds,
                                          inventory_type="building",
                                          **building_args)
                # adjust dmg probability for liquefaction
                if use_liquefaction:
                    if liq_geology_dataset_id is not None:
                        liquefaction_dmg = AnalysisUtil.update_precision_of_lists(
                            liquefaction_resp[i]["groundFailureProb"])
                        dmg_probability_ds = AnalysisUtil.update_precision_of_dicts(
                            NonStructBuildingUtil.
                            adjust_damage_for_liquefaction(
                                dmg_probability_ds, liquefaction_dmg))

                dmg_interval_ds = fragility_set_ds.calculate_damage_interval(
                    dmg_probability_ds,
                    hazard_type=hazard_type,
                    inventory_type="building")
        else:
            raise ValueError(
                "One of the fragilities is in deprecated format. This should not happen. If you are "
                "seeing this please report the issue.")

    # put results in dictionary
        # AS denotes acceleration-sensitive fragility assigned to the building.
        # DS denotes drift-sensitive fragility assigned to the building.
        building_result = dict()
        building_result['guid'] = building['properties']['guid']
        building_result['AS_LS_0'] = dmg_probability_as['LS_0']
        building_result['AS_LS_1'] = dmg_probability_as['LS_1']
        building_result['AS_LS_2'] = dmg_probability_as['LS_2']
        building_result['AS_DS_0'] = dmg_interval_as['DS_0']
        building_result['AS_DS_1'] = dmg_interval_as['DS_1']
        building_result['AS_DS_2'] = dmg_interval_as['DS_2']
        building_result['AS_DS_3'] = dmg_interval_as['DS_3']
        building_result['DS_LS_0'] = dmg_probability_ds['LS_0']
        building_result['DS_LS_1'] = dmg_probability_ds['LS_1']
        building_result['DS_LS_2'] = dmg_probability_ds['LS_2']
        building_result['DS_DS_0'] = dmg_interval_ds['DS_0']
        building_result['DS_DS_1'] = dmg_interval_ds['DS_1']
        building_result['DS_DS_2'] = dmg_interval_ds['DS_2']
        building_result['DS_DS_3'] = dmg_interval_ds['DS_3']
        building_result[
            'hazard_exposure_as'] = AnalysisUtil.get_exposure_from_hazard_values(
                hazard_vals_as, hazard_type)
        building_result[
            'hazard_exposure_ds'] = AnalysisUtil.get_exposure_from_hazard_values(
                hazard_vals_ds, hazard_type)

        # put damage results in dictionary
        damage_result = dict()
        damage_result['guid'] = building['properties']['guid']
        damage_result['fragility_id_as'] = fragility_set_as.id
        damage_result['demandtypes_as'] = demand_types_as
        damage_result['demandunits_as'] = demand_units_as
        damage_result['fragility_id_ds'] = fragility_set_ds.id
        damage_result['demandtypes_ds'] = demand_types_ds
        damage_result['demandunits_ds'] = demand_units_ds
        damage_result['hazardtype'] = hazard_type
        damage_result['hazardvals_as'] = hazard_vals_as
        damage_result['hazardvals_ds'] = hazard_vals_ds

        building_results.append(building_result)
        damage_results.append(damage_result)

    # Buildings without both fragility mappings get null metadata rows.
    for building in unmapped_buildings:
        building_result = dict()
        building_result['guid'] = building['properties']['guid']
        damage_result = dict()
        damage_result['guid'] = building['properties']['guid']
        damage_result['fragility_id_as'] = None
        damage_result['demandtypes_as'] = None
        damage_result['demandunits_as'] = None
        damage_result['fragility_id_ds'] = None
        damage_result['demandtypes_ds'] = None
        damage_result['demandunits_ds'] = None
        damage_result['hazardtype'] = None
        damage_result['hazardvals_as'] = None
        damage_result['hazardvals_ds'] = None
        building_results.append(building_result)
        damage_results.append(damage_result)

    return building_results, damage_results
def bridge_damage_analysis_bulk_input(self, bridges, hazard_type,
                                      hazard_dataset_id):
    """Run analysis for multiple bridges.

    Args:
        bridges (list): Multiple bridges from input inventory set.
        hazard_type (str): Hazard type, either earthquake, tornado, tsunami, or hurricane.
        hazard_dataset_id (str): An id of the hazard exposure.

    Returns:
        list: A list of ordered dictionaries with bridge damage values and other data/metadata.

    """
    # Get Fragility key
    fragility_key = self.get_parameter("fragility_key")
    if fragility_key is None:
        # Tsunami bridges default to the hmax-based fragility key.
        fragility_key = BridgeUtil.DEFAULT_TSUNAMI_HMAX_FRAGILITY_KEY if hazard_type == 'tsunami' else \
            BridgeUtil.DEFAULT_FRAGILITY_KEY
        self.set_parameter("fragility_key", fragility_key)

    # Hazard Uncertainty (earthquake only)
    use_hazard_uncertainty = False
    if hazard_type == "earthquake" and self.get_parameter(
            "use_hazard_uncertainty") is not None:
        use_hazard_uncertainty = self.get_parameter(
            "use_hazard_uncertainty")

    # Liquefaction (earthquake only)
    use_liquefaction = False
    if hazard_type == "earthquake" and self.get_parameter(
            "use_liquefaction") is not None:
        use_liquefaction = self.get_parameter("use_liquefaction")

    # NOTE(review): this empty dict is immediately overwritten by match_inventory.
    fragility_set = dict()
    fragility_set = self.fragilitysvc.match_inventory(self.get_input_dataset("dfr3_mapping_set"), bridges,
                                                      fragility_key)

    bridge_results = []
    list_bridges = bridges

    # Converting list of bridges into a dictionary for ease of reference
    bridges = dict()
    for br in list_bridges:
        bridges[br["id"]] = br
    list_bridges = None  # Clear as it's not needed anymore

    processed_bridges = []
    grouped_bridges = AnalysisUtil.group_by_demand_type(bridges, fragility_set)

    for demand, grouped_brs in grouped_bridges.items():

        input_demand_type = demand[0]
        input_demand_units = demand[1]

        # For every group of unique demand and demand unit, call the end-point once
        br_chunks = list(AnalysisUtil.chunks(grouped_brs, 50))  # TODO: Move to globals?
        for brs in br_chunks:
            points = []
            for br_id in brs:
                location = GeoUtil.get_location(bridges[br_id])
                points.append(str(location.y) + "," + str(location.x))

            if hazard_type == "earthquake":
                hazard_vals = \
                    self.hazardsvc.get_earthquake_hazard_values(
                        hazard_dataset_id, input_demand_type,
                        input_demand_units, points)
            elif hazard_type == "tsunami":
                hazard_vals = self.hazardsvc.get_tsunami_hazard_values(
                    hazard_dataset_id, input_demand_type,
                    input_demand_units, points)
            elif hazard_type == "tornado":
                # Tornado endpoint takes no demand type, only units.
                hazard_vals = self.hazardsvc.get_tornado_hazard_values(
                    hazard_dataset_id, input_demand_units, points)
            elif hazard_type == "hurricane":
                hazard_vals = self.hazardsvc.get_hurricanewf_values(
                    hazard_dataset_id, input_demand_type,
                    input_demand_units, points)
            else:
                raise ValueError("We only support Earthquake, Tornado, Tsunami, and Hurricane at the moment!")

            # Parse the batch hazard value results and map them back to the building and fragility.
            # This is a potential pitfall as we are relying on the order of the returned results
            i = 0
            for br_id in brs:
                bridge_result = collections.OrderedDict()
                bridge = bridges[br_id]
                selected_fragility_set = fragility_set[br_id]
                hazard_val = hazard_vals[i]['hazardValue']

                hazard_std_dev = 0.0
                if use_hazard_uncertainty:
                    # TODO Get this from API once implemented
                    raise ValueError("Uncertainty Not Implemented!")

                # Deep-copy so the liquefaction adjustment does not mutate the
                # shared fragility set used by other bridges.
                adjusted_fragility_set = copy.deepcopy(selected_fragility_set)
                if use_liquefaction and 'liq' in bridge['properties']:
                    for fragility in adjusted_fragility_set.fragility_curves:
                        fragility.adjust_fragility_for_liquefaction(bridge['properties']['liq'])

                dmg_probability = adjusted_fragility_set.calculate_limit_state(hazard_val,
                                                                               std_dev=hazard_std_dev)
                retrofit_cost = BridgeUtil.get_retrofit_cost(fragility_key)
                retrofit_type = BridgeUtil.get_retrofit_type(fragility_key)
                dmg_intervals = AnalysisUtil.calculate_damage_interval(dmg_probability)

                bridge_result['guid'] = bridge['properties']['guid']
                bridge_result.update(dmg_probability)
                bridge_result.update(dmg_intervals)
                bridge_result["retrofit"] = retrofit_type
                bridge_result["retrocost"] = retrofit_cost
                bridge_result["demandtype"] = input_demand_type
                bridge_result["demandunits"] = input_demand_units
                bridge_result["hazardtype"] = hazard_type
                bridge_result["hazardval"] = hazard_val

                # add spans to bridge output so mean damage calculation can use that info
                if "spans" in bridge["properties"] and bridge["properties"]["spans"] \
                        is not None and bridge["properties"]["spans"].isdigit():
                    bridge_result['spans'] = int(bridge["properties"]["spans"])
                elif "SPANS" in bridge["properties"] and bridge["properties"]["SPANS"] \
                        is not None and bridge["properties"]["SPANS"].isdigit():
                    bridge_result['spans'] = int(bridge["properties"]["SPANS"])
                else:
                    bridge_result['spans'] = 1

                bridge_results.append(bridge_result)
                processed_bridges.append(br_id)  # remove processed bridges
                i = i + 1

    # Bridges that never got a fragility mapping are reported with zeroed damage.
    unmapped_dmg_probability = {"ls-slight": 0.0, "ls-moderat": 0.0,
                                "ls-extensi": 0.0, "ls-complet": 0.0}
    unmapped_dmg_intervals = AnalysisUtil.calculate_damage_interval(unmapped_dmg_probability)
    for br_id, br in bridges.items():
        if br_id not in processed_bridges:
            unmapped_br_result = collections.OrderedDict()
            unmapped_br_result['guid'] = br['properties']['guid']
            unmapped_br_result.update(unmapped_dmg_probability)
            unmapped_br_result.update(unmapped_dmg_intervals)
            unmapped_br_result["retrofit"] = "Non-Retrofit"
            unmapped_br_result["retrocost"] = 0.0
            unmapped_br_result["demandtype"] = "None"
            unmapped_br_result['demandunits'] = "None"
            unmapped_br_result["hazardtype"] = "None"
            unmapped_br_result['hazardval'] = 0.0
            bridge_results.append(unmapped_br_result)

    return bridge_results
def epf_damage_analysis_bulk_input(self, epfs, hazard_type,
                                   hazard_dataset_id):
    """Run analysis for multiple epfs.

    Args:
        epfs (list): Multiple epfs from input inventory set.
        hazard_type (str): A type of hazard exposure (earthquake, tsunami,
            tornado, or hurricane).
        hazard_dataset_id (str): An id of the hazard exposure.

    Returns:
        list: A list of dictionaries with epf damage-state values.
        list: A list of dictionaries with other epf data/metadata
            (fragility id, demand types/units, hazard values, liquefaction).
    """
    use_liquefaction = False
    liquefaction_available = False
    fragility_key = self.get_parameter("fragility_key")
    fragility_set = self.fragilitysvc.match_inventory(
        self.get_input_dataset("dfr3_mapping_set"), epfs, fragility_key)

    # Liquefaction only applies to earthquakes, and only when the user asked
    # for it AND a geology dataset is configured AND a liquefaction fragility
    # mapping exists.
    if hazard_type == "earthquake":
        liquefaction_fragility_key = self.get_parameter(
            "liquefaction_fragility_key")
        if self.get_parameter("use_liquefaction") is True:
            if liquefaction_fragility_key is None:
                liquefaction_fragility_key = self.DEFAULT_LIQ_FRAGILITY_KEY
            use_liquefaction = self.get_parameter("use_liquefaction")

            # Obtain the geology dataset
            geology_dataset_id = self.get_parameter(
                "liquefaction_geology_dataset_id")
            if geology_dataset_id is not None:
                fragility_sets_liq = self.fragilitysvc.match_inventory(
                    self.get_input_dataset("dfr3_mapping_set"), epfs,
                    liquefaction_fragility_key)
                if fragility_sets_liq is not None:
                    liquefaction_available = True

    # Split the inventory into mapped (has a fragility) and unmapped epfs,
    # building the hazard-service payload for the mapped ones.
    values_payload = []
    values_payload_liq = []
    unmapped_epfs = []
    mapped_epfs = []
    for epf in epfs:
        epf_id = epf["id"]
        if epf_id in fragility_set:
            location = GeoUtil.get_location(epf)
            loc = str(location.y) + "," + str(location.x)
            demands = fragility_set[epf_id].demand_types
            units = fragility_set[epf_id].demand_units
            value = {"demands": demands, "units": units, "loc": loc}
            values_payload.append(value)
            mapped_epfs.append(epf)

            if liquefaction_available and epf["id"] in fragility_sets_liq:
                fragility_set_liq = fragility_sets_liq[epf["id"]]
                demands_liq = fragility_set_liq.demand_types
                units_liq = fragility_set_liq.demand_units
                value_liq = {
                    "demands": demands_liq,
                    "units": units_liq,
                    "loc": loc
                }
                values_payload_liq.append(value_liq)
        else:
            unmapped_epfs.append(epf)

    if hazard_type == 'earthquake':
        hazard_vals = self.hazardsvc.post_earthquake_hazard_values(
            hazard_dataset_id, values_payload)
    elif hazard_type == 'tornado':
        hazard_vals = self.hazardsvc.post_tornado_hazard_values(
            hazard_dataset_id, values_payload)
    elif hazard_type == 'hurricane':
        # TODO: implement hurricane
        raise ValueError('Hurricane hazard has not yet been implemented!')
    elif hazard_type == 'tsunami':
        hazard_vals = self.hazardsvc.post_tsunami_hazard_values(
            hazard_dataset_id, values_payload)
    else:
        raise ValueError("Missing hazard type.")

    liquefaction_resp = None
    if liquefaction_available:
        liquefaction_resp = self.hazardsvc.post_liquefaction_values(
            hazard_dataset_id, geology_dataset_id, values_payload_liq)

    ds_results = []
    damage_results = []

    # NOTE: results rely on hazard_vals / liquefaction_resp being returned in
    # the same order as the request payload; i indexes both in lockstep.
    i = 0
    for epf in mapped_epfs:
        ds_result = dict()
        damage_result = dict()
        selected_fragility_set = fragility_set[epf["id"]]

        if isinstance(selected_fragility_set.fragility_curves[0], DFR3Curve):
            hazard_val = AnalysisUtil.update_precision_of_lists(
                hazard_vals[i]["hazardValues"])
            input_demand_types = hazard_vals[i]["demands"]
            input_demand_units = hazard_vals[i]["units"]

            # Key hazard values by the fragility's own demand-type names so
            # the curve expressions can look them up.
            hval_dict = dict()
            j = 0
            for d in selected_fragility_set.demand_types:
                hval_dict[d] = hazard_val[j]
                j += 1

            epf_args = selected_fragility_set.construct_expression_args_from_inventory(
                epf)
            limit_states = selected_fragility_set.calculate_limit_state(
                hval_dict, inventory_type='electric_facility', **epf_args)

            if liquefaction_resp is not None:
                fragility_set_liq = fragility_sets_liq[epf["id"]]

                if isinstance(fragility_set_liq.fragility_curves[0], DFR3Curve):
                    liq_hazard_vals = AnalysisUtil.update_precision_of_lists(
                        liquefaction_resp[i]["pgdValues"])
                    liq_demand_types = liquefaction_resp[i]["demands"]
                    liq_demand_units = liquefaction_resp[i]["units"]
                    liquefaction_prob = liquefaction_resp[i]['liqProbability']

                    hval_dict_liq = dict()
                    for j, d in enumerate(fragility_set_liq.demand_types):
                        hval_dict_liq[d] = liq_hazard_vals[j]

                    facility_liq_args = fragility_set_liq.construct_expression_args_from_inventory(
                        epf)
                    pgd_limit_states = \
                        fragility_set_liq.calculate_limit_state(
                            hval_dict_liq,
                            inventory_type="electric_facility",
                            **facility_liq_args)
                else:
                    raise ValueError(
                        "One of the fragilities is in deprecated format. "
                        "This should not happen If you are seeing this please report the issue."
                    )

                # Combine ground-shaking and ground-deformation limit states.
                limit_states = AnalysisUtil.adjust_limit_states_for_pgd(
                    limit_states, pgd_limit_states)

            dmg_interval = selected_fragility_set.calculate_damage_interval(
                limit_states, hazard_type=hazard_type,
                inventory_type='electric_facility')
        else:
            raise ValueError(
                "One of the fragilities is in deprecated format. This should not happen. If you are "
                "seeing this please report the issue.")

        ds_result["guid"] = epf["properties"]["guid"]
        ds_result.update(limit_states)
        ds_result.update(dmg_interval)
        ds_result['haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(
            hazard_val, hazard_type)

        damage_result['guid'] = epf['properties']['guid']
        damage_result['fragility_id'] = selected_fragility_set.id
        damage_result["demandtypes"] = input_demand_types
        damage_result["demandunits"] = input_demand_units
        damage_result["hazardtype"] = hazard_type
        damage_result["hazardvals"] = hazard_val

        if hazard_type == "earthquake" and use_liquefaction is True:
            if liquefaction_available:
                damage_result['liq_fragility_id'] = fragility_sets_liq[
                    epf["id"]].id
                damage_result['liqdemandtypes'] = liq_demand_types
                damage_result['liqdemandunits'] = liq_demand_units
                damage_result['liqhazval'] = liq_hazard_vals
                damage_result['liqprobability'] = liquefaction_prob
            else:
                damage_result['liq_fragility_id'] = None
                damage_result['liqdemandtypes'] = None
                damage_result['liqdemandunits'] = None
                damage_result['liqhazval'] = None
                damage_result['liqprobability'] = None

        ds_results.append(ds_result)
        damage_results.append(damage_result)
        i += 1

    #############################################################
    # unmapped epfs get placeholder rows so every input appears in the output
    for epf in unmapped_epfs:
        ds_result = dict()
        damage_result = dict()
        ds_result['guid'] = epf['properties']['guid']
        damage_result['guid'] = epf['properties']['guid']
        damage_result['fragility_id'] = None
        damage_result["demandtypes"] = None
        damage_result['demandunits'] = None
        damage_result["hazardtype"] = None
        # Fixed: was 'hazardval', which did not match the 'hazardvals' column
        # written for mapped epfs and produced inconsistent result columns.
        damage_result['hazardvals'] = None
        if hazard_type == "earthquake" and use_liquefaction is True:
            damage_result['liq_fragility_id'] = None
            damage_result['liqdemandtypes'] = None
            damage_result['liqdemandunits'] = None
            damage_result['liqhazval'] = None
            damage_result['liqprobability'] = None

        ds_results.append(ds_result)
        damage_results.append(damage_result)

    return ds_results, damage_results
def building_damage_analysis_bulk_input(self, buildings, retrofit_strategy, hazard_type, hazard_dataset_id):
    """Run analysis for multiple buildings.

    Args:
        buildings (list): Multiple buildings from input inventory set.
        retrofit_strategy (list): building guid and its retrofit level 0, 1, 2, etc. This is Optional
        hazard_type (str): Hazard type, either earthquake, tornado, or tsunami.
        hazard_dataset_id (str): An id of the hazard exposure.

    Returns:
        list: A list of ordered dictionaries with building damage values (limit states,
            damage intervals, hazard exposure flag).
        list: A list of ordered dictionaries with other building data/metadata
            (fragility id, demand types/units, hazard values).
    """
    fragility_key = self.get_parameter("fragility_key")

    fragility_sets = self.fragilitysvc.match_inventory(self.get_input_dataset("dfr3_mapping_set"), buildings,
                                                       fragility_key, retrofit_strategy)
    values_payload = []
    unmapped_buildings = []
    mapped_buildings = []
    # Split inventory into mapped (has a fragility) and unmapped buildings while
    # assembling one hazard-service request entry per mapped building.
    for b in buildings:
        bldg_id = b["id"]
        if bldg_id in fragility_sets:
            location = GeoUtil.get_location(b)
            loc = str(location.y) + "," + str(location.x)
            demands = AnalysisUtil.get_hazard_demand_types(b, fragility_sets[bldg_id], hazard_type)
            units = fragility_sets[bldg_id].demand_units
            value = {
                "demands": demands,
                "units": units,
                "loc": loc
            }
            values_payload.append(value)
            mapped_buildings.append(b)
        else:
            unmapped_buildings.append(b)

    # not needed anymore as they are already split into mapped and unmapped
    del buildings

    if hazard_type == 'earthquake':
        hazard_vals = self.hazardsvc.post_earthquake_hazard_values(hazard_dataset_id, values_payload)
    elif hazard_type == 'tornado':
        # tornado additionally takes a seed for reproducible sampling
        hazard_vals = self.hazardsvc.post_tornado_hazard_values(hazard_dataset_id, values_payload,
                                                                self.get_parameter('seed'))
    elif hazard_type == 'tsunami':
        hazard_vals = self.hazardsvc.post_tsunami_hazard_values(hazard_dataset_id, values_payload)
    elif hazard_type == 'hurricane':
        hazard_vals = self.hazardsvc.post_hurricane_hazard_values(hazard_dataset_id, values_payload)
    elif hazard_type == 'flood':
        hazard_vals = self.hazardsvc.post_flood_hazard_values(hazard_dataset_id, values_payload)
    else:
        raise ValueError("The provided hazard type is not supported yet by this analysis")

    ds_results = []
    damage_results = []

    # NOTE: hazard_vals is assumed to come back in the same order as
    # values_payload; i indexes mapped_buildings and hazard_vals in lockstep.
    i = 0
    for b in mapped_buildings:
        ds_result = dict()
        damage_result = dict()
        dmg_probability = dict()
        dmg_interval = dict()
        b_id = b["id"]
        selected_fragility_set = fragility_sets[b_id]

        # TODO: Once all fragilities are migrated to new format, we can remove this condition
        if isinstance(selected_fragility_set.fragility_curves[0], DFR3Curve):
            # Supports multiple demand types in same fragility
            b_haz_vals = AnalysisUtil.update_precision_of_lists(hazard_vals[i]["hazardValues"])
            b_demands = hazard_vals[i]["demands"]
            b_units = hazard_vals[i]["units"]

            hval_dict = dict()
            j = 0
            # To calculate damage, use demand type name from fragility that will be used in the expression, instead
            # of using what the hazard service returns. There could be a difference "SA" in DFR3 vs "1.07 SA"
            # from hazard
            for d in selected_fragility_set.demand_types:
                hval_dict[d] = b_haz_vals[j]
                j += 1

            # Skip limit-state calculation entirely if the service flagged any
            # of this building's hazard values as errors; the empty dicts
            # initialized above then flow through to the result row.
            if not AnalysisUtil.do_hazard_values_have_errors(hazard_vals[i]["hazardValues"]):
                building_args = selected_fragility_set.construct_expression_args_from_inventory(b)
                building_period = selected_fragility_set.fragility_curves[0].get_building_period(
                    selected_fragility_set.curve_parameters, **building_args)
                dmg_probability = selected_fragility_set.calculate_limit_state(
                    hval_dict, **building_args, period=building_period)
                dmg_interval = selected_fragility_set.calculate_damage_interval(
                    dmg_probability, hazard_type=hazard_type, inventory_type="building")
        else:
            raise ValueError("One of the fragilities is in deprecated format. This should not happen. If you are "
                             "seeing this please report the issue.")

        ds_result['guid'] = b['properties']['guid']
        damage_result['guid'] = b['properties']['guid']

        ds_result.update(dmg_probability)
        ds_result.update(dmg_interval)
        ds_result['haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(b_haz_vals, hazard_type)

        damage_result['fragility_id'] = selected_fragility_set.id
        damage_result['demandtype'] = b_demands
        damage_result['demandunits'] = b_units
        damage_result['hazardval'] = b_haz_vals

        ds_results.append(ds_result)
        damage_results.append(damage_result)
        i += 1

    # Placeholder rows for buildings with no matching fragility so every input
    # appears in the output.
    for b in unmapped_buildings:
        ds_result = dict()
        damage_result = dict()

        ds_result['guid'] = b['properties']['guid']

        damage_result['guid'] = b['properties']['guid']
        damage_result['fragility_id'] = None
        damage_result['demandtype'] = None
        damage_result['demandunits'] = None
        damage_result['hazardval'] = None

        ds_results.append(ds_result)
        damage_results.append(damage_result)

    return ds_results, damage_results
def building_damage_analysis(self, building, fragility_set_as, fragility_set_ds):
    """Calculates building damage results for a single building.

    Computes damage twice — once with the acceleration-sensitive (AS) fragility
    and once with the drift-sensitive (DS) fragility — and merges both into a
    single result row.

    Args:
        building (obj): A JSON-mapping of a geometric object from the inventory: current building.
        fragility_set_as (obj): A JSON description of acceleration-sensitive (AS) fragility
            assigned to the building.
        fragility_set_ds (obj): A JSON description of drift-sensitive (DS) fragility
            assigned to the building.

    Returns:
        OrderedDict: A dictionary with building damage values and other data/metadata.
    """
    building_results = collections.OrderedDict()
    dmg_probability_as = collections.OrderedDict()
    dmg_probability_ds = collections.OrderedDict()
    hazard_demand_type_as = None
    hazard_demand_type_ds = None
    hazard_val_as = 0.0
    hazard_val_ds = 0.0

    # read static parameters from object self
    hazard_dataset_id = self.get_parameter("hazard_id")
    liq_geology_dataset_id = self.get_parameter("liq_geology_dataset_id")
    use_liquefaction = self.get_parameter("use_liquefaction")
    use_hazard_uncertainty = self.get_parameter("use_hazard_uncertainty")

    # Acceleration-Sensitive Fragility ID Code
    if fragility_set_as is not None:
        hazard_demand_type_as = AnalysisUtil.get_hazard_demand_type(building, fragility_set_as, 'earthquake')
        demand_units_as = fragility_set_as.demand_units
        location = GeoUtil.get_location(building)
        point = str(location.y) + "," + str(location.x)

        # one hazard value per building, fetched point-by-point
        hazard_val_as = self.hazardsvc.get_earthquake_hazard_values(
            hazard_dataset_id, hazard_demand_type_as,
            demand_units_as,
            points=[point])[0]['hazardValue']

        dmg_probability_as = fragility_set_as.calculate_limit_state(hazard_val_as)

        # adjust dmg probability for liquefaction
        if use_liquefaction:
            if liq_geology_dataset_id is not None:
                liqufaction_dmg = self.hazardsvc.get_liquefaction_values(
                    hazard_dataset_id, liq_geology_dataset_id,
                    'in', points=[point])[0]['groundFailureProb']
            else:
                # NOTE(review): the backslash continuations embed the source
                # indentation whitespace in the message text.
                raise ValueError('Hazard does not support liquefaction! \
                Check to make sure you defined the liquefaction\
                portion of your scenario earthquake.')
            dmg_probability_as = NonStructBuildingUtil.adjust_damage_for_liquefaction(dmg_probability_as,
                                                                                     liqufaction_dmg)

        # TODO this value needs to come from the hazard service
        # adjust dmg probability for hazard uncertainty
        if use_hazard_uncertainty:
            raise ValueError('Uncertainty has not yet been implemented!')
    else:
        # no AS fragility mapped: report zero probability for all limit states
        dmg_probability_as['immocc'] = 0.0
        dmg_probability_as['lifesfty'] = 0.0
        dmg_probability_as['collprev'] = 0.0

    dmg_interval_as = AnalysisUtil.calculate_damage_interval(dmg_probability_as)

    # Drift-Sensitive Fragility ID Code (mirrors the AS branch above)
    if fragility_set_ds is not None:
        hazard_demand_type_ds = AnalysisUtil.get_hazard_demand_type(building, fragility_set_ds, 'earthquake')
        demand_units_ds = fragility_set_ds.demand_units
        location = GeoUtil.get_location(building)
        point = str(location.y) + "," + str(location.x)

        hazard_val_ds = self.hazardsvc.get_earthquake_hazard_values(
            hazard_dataset_id, hazard_demand_type_ds,
            demand_units_ds,
            points=[point])[0]['hazardValue']

        dmg_probability_ds = fragility_set_ds.calculate_limit_state(hazard_val_ds)

        # adjust hazard value for liquefaction
        if use_liquefaction:
            if liq_geology_dataset_id is not None:
                liqufaction_dmg = self.hazardsvc.get_liquefaction_values(
                    hazard_dataset_id, liq_geology_dataset_id,
                    'in', points=[point])[0]['groundFailureProb']
            else:
                raise ValueError('Hazard does not support liquefaction! \
                Check to make sure you defined the liquefaction\
                portion of your scenario earthquake.')
            dmg_probability_ds = NonStructBuildingUtil.adjust_damage_for_liquefaction(dmg_probability_ds,
                                                                                     liqufaction_dmg)

        # TODO this value needs to come from the hazard service
        # adjust dmg probability for hazard uncertainty
        if use_hazard_uncertainty:
            raise ValueError('Uncertainty has not yet been implemented!')
    else:
        # no DS fragility mapped: report zero probability for all limit states
        dmg_probability_ds['immocc'] = 0.0
        dmg_probability_ds['lifesfty'] = 0.0
        dmg_probability_ds['collprev'] = 0.0

    dmg_interval_ds = AnalysisUtil.calculate_damage_interval(dmg_probability_ds)

    # put results in dictionary
    building_results['guid'] = building['properties']['guid']
    building_results['immocc_as'] = dmg_probability_as['immocc']
    building_results['lifsfty_as'] = dmg_probability_as['lifesfty']
    building_results['collpre_as'] = dmg_probability_as['collprev']
    building_results['insig_as'] = dmg_interval_as['insignific']
    building_results['mod_as'] = dmg_interval_as['moderate']
    building_results['heavy_as'] = dmg_interval_as['heavy']
    building_results['comp_as'] = dmg_interval_as['complete']
    building_results['immocc_ds'] = dmg_probability_ds['immocc']
    building_results['lifsfty_ds'] = dmg_probability_ds['lifesfty']
    building_results['collpre_ds'] = dmg_probability_ds['collprev']
    building_results['insig_ds'] = dmg_interval_ds['insignific']
    building_results['mod_ds'] = dmg_interval_ds['moderate']
    building_results['heavy_ds'] = dmg_interval_ds['heavy']
    building_results['comp_ds'] = dmg_interval_ds['complete']
    building_results["hzrdtyp_as"] = hazard_demand_type_as
    building_results["hzrdval_as"] = hazard_val_as
    building_results["hzrdtyp_ds"] = hazard_demand_type_ds
    building_results["hzrdval_ds"] = hazard_val_ds

    return building_results
def cumulative_building_damage(self, eq_building_damage, tsunami_building_damage):
    """Run analysis for building damage results.

    Combines earthquake and tsunami limit-state probabilities for one building
    into cumulative limit states, then derives discrete damage intervals.

    Args:
        eq_building_damage (obj): A JSON description of an earthquake building damage.
        tsunami_building_damage (obj): Set of all tsunami building damage results.

    Returns:
        OrderedDict: A dictionary with building damage values and other data/metadata.
    """
    guid = eq_building_damage['guid']

    # select the tsunami damage row(s) for this same building
    tsunami_building = tsunami_building_damage.loc[
        tsunami_building_damage['guid'] == guid]

    # NOTE(review): the return inside this loop means only the FIRST matching
    # tsunami row is used; if no row matches, the method implicitly returns
    # None — confirm callers guarantee exactly one match per guid.
    for idy, tsunami_building in tsunami_building.iterrows():
        eq_limit_states = collections.OrderedDict()
        eq_limit_states['immocc'] = float(eq_building_damage["immocc"])
        eq_limit_states['lifesfty'] = float(eq_building_damage["lifesfty"])
        eq_limit_states['collprev'] = float(eq_building_damage["collprev"])

        tsunami_limit_states = collections.OrderedDict()
        tsunami_limit_states['immocc'] = float(tsunami_building["immocc"])
        tsunami_limit_states['lifesfty'] = float(
            tsunami_building["lifesfty"])
        tsunami_limit_states['collprev'] = float(
            tsunami_building["collprev"])

        limit_states = collections.OrderedDict()

        # Union of the two exceedance probabilities: P(A)+P(B)-P(A)P(B),
        # with a correction term for the higher states built from the
        # probability mass between adjacent limit states.
        limit_states["immocc"] = \
            eq_limit_states["immocc"] + tsunami_limit_states["immocc"] - \
            eq_limit_states["immocc"] * tsunami_limit_states["immocc"]

        limit_states["lifesfty"] = \
            eq_limit_states["lifesfty"] + tsunami_limit_states[
                "lifesfty"] - \
            eq_limit_states["lifesfty"] * tsunami_limit_states[
                "lifesfty"] + \
            ((eq_limit_states["immocc"] - eq_limit_states["lifesfty"]) *
             (tsunami_limit_states["immocc"] - tsunami_limit_states[
                 "lifesfty"]))

        limit_states["collprev"] = \
            eq_limit_states["collprev"] + tsunami_limit_states[
                "collprev"] - \
            eq_limit_states["collprev"] * tsunami_limit_states[
                "collprev"] + \
            ((eq_limit_states["lifesfty"] - eq_limit_states["collprev"]) *
             (tsunami_limit_states["lifesfty"] - tsunami_limit_states[
                 "collprev"]))

        damage_state = AnalysisUtil.calculate_damage_interval(limit_states)

        bldg_results = collections.OrderedDict()

        bldg_results["guid"] = guid
        bldg_results.update(limit_states)
        bldg_results.update(damage_state)
        bldg_results["hazard"] = "Earthquake+Tsunami"

        return bldg_results
def bridge_damage_analysis_bulk_input(self, bridges, hazard_type,
                                      hazard_dataset_id):
    """Run analysis for multiple bridges.

    Args:
        bridges (list): Multiple bridges from input inventory set.
        hazard_type (str): Hazard type, either earthquake, tornado, tsunami, or hurricane.
        hazard_dataset_id (str): An id of the hazard exposure.

    Returns:
        list: A list of dictionaries with bridge damage-state values.
        list: A list of dictionaries with other bridge data/metadata
            (fragility id, retrofit info, demand types/units, hazard values, spans).
    """
    # Get Fragility key
    fragility_key = self.get_parameter("fragility_key")
    if fragility_key is None:
        fragility_key = BridgeUtil.DEFAULT_TSUNAMI_HMAX_FRAGILITY_KEY if hazard_type == 'tsunami' else \
            BridgeUtil.DEFAULT_FRAGILITY_KEY
        self.set_parameter("fragility_key", fragility_key)

    # Hazard Uncertainty (earthquake only)
    use_hazard_uncertainty = False
    if hazard_type == "earthquake" and self.get_parameter(
            "use_hazard_uncertainty") is not None:
        use_hazard_uncertainty = self.get_parameter("use_hazard_uncertainty")

    # Liquefaction (earthquake only)
    use_liquefaction = False
    if hazard_type == "earthquake" and self.get_parameter(
            "use_liquefaction") is not None:
        use_liquefaction = self.get_parameter("use_liquefaction")

    fragility_set = self.fragilitysvc.match_inventory(
        self.get_input_dataset("dfr3_mapping_set"), bridges, fragility_key)

    # Split inventory into mapped/unmapped bridges while building the
    # hazard-service payload for the mapped ones.
    values_payload = []
    unmapped_bridges = []
    mapped_bridges = []
    for b in bridges:
        bridge_id = b["id"]
        if bridge_id in fragility_set:
            location = GeoUtil.get_location(b)
            loc = str(location.y) + "," + str(location.x)
            demands = fragility_set[bridge_id].demand_types
            units = fragility_set[bridge_id].demand_units
            value = {"demands": demands, "units": units, "loc": loc}
            values_payload.append(value)
            mapped_bridges.append(b)
        else:
            unmapped_bridges.append(b)

    # not needed anymore as they are already split into mapped and unmapped
    del bridges

    if hazard_type == 'earthquake':
        hazard_vals = self.hazardsvc.post_earthquake_hazard_values(
            hazard_dataset_id, values_payload)
    elif hazard_type == 'tornado':
        hazard_vals = self.hazardsvc.post_tornado_hazard_values(
            hazard_dataset_id, values_payload)
    elif hazard_type == 'tsunami':
        hazard_vals = self.hazardsvc.post_tsunami_hazard_values(
            hazard_dataset_id, values_payload)
    elif hazard_type == 'hurricane':
        hazard_vals = self.hazardsvc.post_hurricane_hazard_values(
            hazard_dataset_id, values_payload)
    elif hazard_type == 'flood':
        hazard_vals = self.hazardsvc.post_flood_hazard_values(
            hazard_dataset_id, values_payload)
    else:
        raise ValueError(
            "The provided hazard type is not supported yet by this analysis"
        )

    ds_results = []
    damage_results = []

    # NOTE: hazard_vals is assumed to come back in the same order as
    # values_payload; i indexes mapped_bridges and hazard_vals in lockstep.
    i = 0
    for bridge in mapped_bridges:
        ds_result = dict()
        damage_result = dict()
        dmg_probability = dict()
        dmg_intervals = dict()
        selected_fragility_set = fragility_set[bridge["id"]]

        if isinstance(selected_fragility_set.fragility_curves[0], DFR3Curve):
            # Supports multiple demand types in same fragility
            hazard_val = AnalysisUtil.update_precision_of_lists(
                hazard_vals[i]["hazardValues"])
            input_demand_types = hazard_vals[i]["demands"]
            input_demand_units = hazard_vals[i]["units"]

            # Key hazard values by the fragility's own demand-type names.
            hval_dict = dict()
            j = 0
            for d in selected_fragility_set.demand_types:
                hval_dict[d] = hazard_val[j]
                j += 1

            # If the service flagged an error value, leave the (empty)
            # probability/interval dicts in place rather than computing.
            if not AnalysisUtil.do_hazard_values_have_errors(
                    hazard_vals[i]["hazardValues"]):
                bridge_args = selected_fragility_set.construct_expression_args_from_inventory(
                    bridge)
                dmg_probability = \
                    selected_fragility_set.calculate_limit_state(
                        hval_dict, inventory_type="bridge", **bridge_args)
                dmg_intervals = selected_fragility_set.calculate_damage_interval(
                    dmg_probability, hazard_type=hazard_type,
                    inventory_type="bridge")
        else:
            raise ValueError(
                "One of the fragilities is in deprecated format. This should not happen. If you are "
                "seeing this please report the issue.")

        retrofit_cost = BridgeUtil.get_retrofit_cost(fragility_key)
        retrofit_type = BridgeUtil.get_retrofit_type(fragility_key)

        ds_result['guid'] = bridge['properties']['guid']
        ds_result.update(dmg_probability)
        ds_result.update(dmg_intervals)
        ds_result['haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(
            hazard_val, hazard_type)

        damage_result['guid'] = bridge['properties']['guid']
        damage_result['fragility_id'] = selected_fragility_set.id
        damage_result["retrofit"] = retrofit_type
        damage_result["retrocost"] = retrofit_cost
        damage_result["demandtypes"] = input_demand_types
        damage_result["demandunits"] = input_demand_units
        damage_result["hazardtype"] = hazard_type
        damage_result["hazardval"] = hazard_val

        # add spans to bridge output so mean damage calculation can use that info.
        # Accept either the lower- or upper-case attribute; integers are used
        # as-is and digit-only strings are converted. Fixed: the original wrote
        # a separate 'SPANS' column for upper-case inputs and emitted no spans
        # value at all for non-numeric strings — now every row gets 'spans',
        # defaulting to 1 when no usable value is present.
        props = bridge["properties"]
        spans = None
        for key in ("spans", "SPANS"):
            raw = props.get(key)
            if isinstance(raw, int):
                spans = raw
                break
            if isinstance(raw, str) and raw.isdigit():
                spans = int(raw)
                break
        damage_result['spans'] = spans if spans is not None else 1

        ds_results.append(ds_result)
        damage_results.append(damage_result)
        i += 1

    # Placeholder rows for bridges with no matching fragility.
    for bridge in unmapped_bridges:
        ds_result = dict()
        damage_result = dict()
        ds_result['guid'] = bridge['properties']['guid']
        damage_result['guid'] = bridge['properties']['guid']
        damage_result["retrofit"] = None
        damage_result["retrocost"] = None
        damage_result["demandtypes"] = None
        damage_result['demandunits'] = None
        damage_result["hazardtype"] = None
        damage_result['hazardval'] = None
        damage_result['spans'] = None

        ds_results.append(ds_result)
        damage_results.append(damage_result)

    return ds_results, damage_results
def building_damage_analysis_bulk_input(self, buildings, hazard_type,
                                        hazard_dataset_id):
    """Run analysis for multiple buildings.

    Args:
        buildings (list): Multiple buildings from input inventory set.
        hazard_type (str): Hazard type, either earthquake, tornado, or tsunami.
        hazard_dataset_id (str): An id of the hazard exposure.

    Returns:
        list: A list of ordered dictionaries with building damage values and other data/metadata.
    """
    fragility_key = self.get_parameter("fragility_key")

    fragility_sets = self.fragilitysvc.match_inventory(
        self.get_input_dataset("dfr3_mapping_set"), buildings, fragility_key)

    bldg_results = []

    list_buildings = buildings
    buildings = dict()
    # Converting list of buildings into a dictionary for ease of reference
    for b in list_buildings:
        buildings[b["id"]] = b

    list_buildings = None  # Clear as it's not needed anymore

    grouped_buildings = AnalysisUtil.group_by_demand_type(buildings,
                                                          fragility_sets,
                                                          hazard_type,
                                                          is_building=True)

    for demand, grouped_bldgs in grouped_buildings.items():

        input_demand_type = demand[0]
        input_demand_units = demand[1]

        # For every group of unique demand and demand unit, call the end-point once
        bldg_chunks = list(AnalysisUtil.chunks(
            grouped_bldgs, 50))  # TODO: Move to globals?
        for bldgs in bldg_chunks:
            points = []
            for bldg_id in bldgs:
                location = GeoUtil.get_location(buildings[bldg_id])
                points.append(str(location.y) + "," + str(location.x))

            if hazard_type == 'earthquake':
                hazard_vals = self.hazardsvc.get_earthquake_hazard_values(
                    hazard_dataset_id, input_demand_type,
                    input_demand_units,
                    points)
            elif hazard_type == 'tornado':
                hazard_vals = self.hazardsvc.get_tornado_hazard_values(
                    hazard_dataset_id, input_demand_units, points)
            elif hazard_type == 'tsunami':
                hazard_vals = self.hazardsvc.get_tsunami_hazard_values(
                    hazard_dataset_id, input_demand_type,
                    input_demand_units, points)
            elif hazard_type == 'hurricane':
                # Fixed: this branch previously only printed a message and fell
                # through to use `hazard_vals`, which is undefined on the first
                # chunk (NameError) or stale from a previous chunk. Fail fast
                # until hurricane support is implemented.
                raise ValueError("Hurricane hazard has not yet been implemented!")
            else:
                # Fixed: an unrecognized hazard type previously fell through
                # with `hazard_vals` undefined as well.
                raise ValueError(
                    "The provided hazard type is not supported yet by this analysis")

            # Parse the batch hazard value results and map them back to the building and fragility.
            # This is a potential pitfall as we are relying on the order of the returned results
            i = 0
            for bldg_id in bldgs:
                bldg_result = collections.OrderedDict()
                building = buildings[bldg_id]
                hazard_val = hazard_vals[i]['hazardValue']
                output_demand_type = hazard_vals[i]['demand']
                if hazard_type == 'earthquake':
                    # prepend the period (e.g. "0.2 SA") when one applies
                    period = float(hazard_vals[i]['period'])
                    if period > 0:
                        output_demand_type = str(
                            hazard_vals[i]['period']) + " " + output_demand_type

                num_stories = building['properties']['no_stories']
                selected_fragility_set = fragility_sets[bldg_id]
                building_period = selected_fragility_set.fragility_curves[
                    0].get_building_period(num_stories)
                dmg_probability = selected_fragility_set.calculate_limit_state(
                    hazard_val, building_period)
                dmg_interval = AnalysisUtil.calculate_damage_interval(
                    dmg_probability)

                bldg_result['guid'] = building['properties']['guid']
                bldg_result.update(dmg_probability)
                bldg_result.update(dmg_interval)
                bldg_result['demandtype'] = output_demand_type
                bldg_result['demandunits'] = input_demand_units
                bldg_result['hazardval'] = hazard_val

                bldg_results.append(bldg_result)
                # remove processed buildings; whatever remains afterwards is unmapped
                del buildings[bldg_id]
                i = i + 1

    # Placeholder rows for buildings that were never grouped/processed above.
    unmapped_hazard_val = 0.0
    unmapped_output_demand_type = "None"
    unmapped_output_demand_unit = "None"
    for unmapped_bldg_id, unmapped_bldg in buildings.items():
        unmapped_bldg_result = collections.OrderedDict()
        unmapped_bldg_result['guid'] = unmapped_bldg['properties']['guid']
        unmapped_bldg_result['demandtype'] = unmapped_output_demand_type
        unmapped_bldg_result['demandunits'] = unmapped_output_demand_unit
        unmapped_bldg_result['hazardval'] = unmapped_hazard_val
        bldg_results.append(unmapped_bldg_result)

    return bldg_results
def pipeline_damage_analysis_bulk_input(self, pipelines, hazard_type,
                                        hazard_dataset_id):
    """Run pipeline damage analysis for multiple pipelines.

    Args:
        pipelines (list): Multiple pipelines from pipeline dataset.
        hazard_type (str): Hazard type (earthquake or tsunami).
        hazard_dataset_id (str): An id of the hazard exposure.

    Returns:
        dict: An ordered dictionaries with pipeline damage values.
        dict: An ordered dictionaries with other pipeline data/metadata.
    """
    # Get Fragility key
    fragility_key = self.get_parameter("fragility_key")
    if fragility_key is None:
        fragility_key = "Non-Retrofit inundationDepth Fragility ID Code" if hazard_type == 'tsunami' else "pgv"
        self.set_parameter("fragility_key", fragility_key)

    # get fragility set
    fragility_sets = self.fragilitysvc.match_inventory(
        self.get_input_dataset("dfr3_mapping_set"), pipelines, fragility_key)

    # Split inventory into mapped/unmapped pipelines while building the
    # hazard-service payload for the mapped ones.
    values_payload = []
    unmapped_pipelines = []
    mapped_pipelines = []
    for pipeline in pipelines:
        # if find a match fragility for that pipeline
        if pipeline["id"] in fragility_sets.keys():
            fragility_set = fragility_sets[pipeline["id"]]
            location = GeoUtil.get_location(pipeline)
            loc = str(location.y) + "," + str(location.x)
            demands = AnalysisUtil.get_hazard_demand_types(
                pipeline, fragility_set, hazard_type)
            units = fragility_sets[pipeline["id"]].demand_units
            value = {"demands": demands, "units": units, "loc": loc}
            values_payload.append(value)
            mapped_pipelines.append(pipeline)
        else:
            unmapped_pipelines.append(pipeline)

    # not needed anymore as they are already split into mapped and unmapped
    del pipelines

    # Only earthquake and tsunami are supported; the original spelled out
    # separate tornado/hurricane/flood branches that all raised this same
    # error, so they collapse into the else.
    if hazard_type == 'earthquake':
        hazard_vals = self.hazardsvc.post_earthquake_hazard_values(
            hazard_dataset_id, values_payload)
    elif hazard_type == 'tsunami':
        hazard_vals = self.hazardsvc.post_tsunami_hazard_values(
            hazard_dataset_id, values_payload)
    else:
        raise ValueError(
            "The provided hazard type is not supported yet by this analysis"
        )

    pipeline_results = []
    damage_results = []
    # NOTE: hazard_vals is assumed to be in the same order as values_payload;
    # i indexes mapped_pipelines and hazard_vals in lockstep.
    for i, pipeline in enumerate(mapped_pipelines):
        limit_states = dict()
        dmg_intervals = dict()
        pipeline_result = dict()
        fragility_set = fragility_sets[pipeline["id"]]

        # TODO: Once all fragilities are migrated to new format, we can remove this condition
        if isinstance(fragility_set.fragility_curves[0], DFR3Curve):
            # Supports multiple demand types in same fragility
            haz_vals = AnalysisUtil.update_precision_of_lists(
                hazard_vals[i]["hazardValues"])
            demand_types = hazard_vals[i]["demands"]
            demand_units = hazard_vals[i]["units"]

            # construct hazard_value dictionary {"demand_type":"hazard_value", ...}
            hval_dict = dict()
            for j, d in enumerate(fragility_set.demand_types):
                hval_dict[d] = haz_vals[j]

            # If the service flagged an error value, leave the (empty) limit
            # state / interval dicts in place rather than computing.
            if not AnalysisUtil.do_hazard_values_have_errors(
                    hazard_vals[i]["hazardValues"]):
                pipeline_args = fragility_set.construct_expression_args_from_inventory(
                    pipeline)
                limit_states = fragility_set.calculate_limit_state(
                    hval_dict, inventory_type="pipeline", **pipeline_args)
                dmg_intervals = fragility_set.calculate_damage_interval(
                    limit_states, hazard_type=hazard_type,
                    inventory_type="pipeline")
        else:
            raise ValueError(
                "One of the fragilities is in deprecated format. This should not happen. If you are "
                "seeing this please report the issue.")

        pipeline_result['guid'] = pipeline['properties']['guid']
        pipeline_result.update(limit_states)
        pipeline_result.update(dmg_intervals)
        pipeline_result['haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(
            haz_vals, hazard_type)

        damage_result = dict()
        damage_result['guid'] = pipeline['properties']['guid']
        damage_result['fragility_id'] = fragility_set.id
        damage_result['demandtypes'] = demand_types
        damage_result['demandunits'] = demand_units
        damage_result['hazardtype'] = hazard_type
        damage_result['hazardval'] = haz_vals

        pipeline_results.append(pipeline_result)
        damage_results.append(damage_result)

    # for pipeline does not have matching fragility curves, default to None
    for pipeline in unmapped_pipelines:
        pipeline_result = dict()
        damage_result = dict()

        pipeline_result['guid'] = pipeline['properties']['guid']
        damage_result['guid'] = pipeline['properties']['guid']
        damage_result['fragility_id'] = None
        damage_result['demandtypes'] = None
        damage_result['demandunits'] = None
        damage_result['hazardtype'] = None
        # Fixed: was 'hazardvals', which did not match the 'hazardval' column
        # written for mapped pipelines and produced inconsistent result columns.
        damage_result['hazardval'] = None

        pipeline_results.append(pipeline_result)
        damage_results.append(damage_result)

    return pipeline_results, damage_results