# Module-level imports assumed by the classes below. The standard-library imports are
# exactly what the code uses; the pyincore import paths are assumptions based on the
# usual pyincore package layout and may need adjusting to the actual module locations.
import collections
import concurrent.futures
import copy
import math
from itertools import repeat

from pyincore import BaseAnalysis, HazardService, FragilityService, AnalysisUtil, GeoUtil
from pyincore.analyses.bridgedamage.bridgeutil import BridgeUtil
from pyincore.analyses.pipelinedamagerepairrate.pipelineutil import PipelineUtil


class BridgeDamage(BaseAnalysis):
    """Computes bridge structural damage for earthquake, tsunami, tornado, and hurricane hazards.

    Args:
        incore_client (IncoreClient): Service authentication.

    """

    def __init__(self, incore_client):
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(BridgeDamage, self).__init__(incore_client)

    def run(self):
        """Executes bridge damage analysis."""
        # Bridge dataset
        bridge_set = self.get_input_dataset("bridges").get_inventory_reader()

        # Get hazard input
        hazard_type = self.get_parameter("hazard_type")
        hazard_dataset_id = self.get_parameter("hazard_id")
        user_defined_cpu = 1

        if self.get_parameter("num_cpu") is not None and self.get_parameter("num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(
            self, len(bridge_set), user_defined_cpu)

        # Split the inventory into roughly equal-sized chunks, one per worker
        avg_bulk_input_size = int(len(bridge_set) / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(bridge_set)
        while count < len(inventory_list):
            inventory_args.append(inventory_list[count:count + avg_bulk_input_size])
            count += avg_bulk_input_size

        results = self.bridge_damage_concurrent_future(
            self.bridge_damage_analysis_bulk_input, num_workers,
            inventory_args, repeat(hazard_type), repeat(hazard_dataset_id))

        self.set_result_csv_data("result", results,
                                 name=self.get_parameter("result_name"))

        return True

    def bridge_damage_concurrent_future(self, function_name, num_workers, *args):
        """Utilizes the concurrent.futures module.

        Args:
            function_name (function): The function to be parallelized.
            num_workers (int): Maximum number of workers in parallelization.
            *args: All the arguments in order to pass into parameter function_name.

        Returns:
            list: A list of ordered dictionaries with bridge damage values and other data/metadata.

        """
        output = []
        with concurrent.futures.ProcessPoolExecutor(max_workers=num_workers) as executor:
            for ret in executor.map(function_name, *args):
                output.extend(ret)

        return output

    def bridge_damage_analysis_bulk_input(self, bridges, hazard_type, hazard_dataset_id):
        """Run analysis for multiple bridges.

        Args:
            bridges (list): Multiple bridges from the input inventory set.
            hazard_type (str): Hazard type, either earthquake, tornado, tsunami, or hurricane.
            hazard_dataset_id (str): An id of the hazard exposure.

        Returns:
            list: A list of ordered dictionaries with bridge damage values and other data/metadata.
""" # Get Fragility key fragility_key = self.get_parameter("fragility_key") if fragility_key is None: fragility_key = BridgeUtil.DEFAULT_TSUNAMI_HMAX_FRAGILITY_KEY if hazard_type == 'tsunami' else \ BridgeUtil.DEFAULT_FRAGILITY_KEY self.set_parameter("fragility_key", fragility_key) # Hazard Uncertainty use_hazard_uncertainty = False if hazard_type == "earthquake" and self.get_parameter( "use_hazard_uncertainty") is not None: use_hazard_uncertainty = self.get_parameter( "use_hazard_uncertainty") # Liquefaction use_liquefaction = False if hazard_type == "earthquake" and self.get_parameter( "use_liquefaction") is not None: use_liquefaction = self.get_parameter("use_liquefaction") fragility_set = dict() fragility_set = self.fragilitysvc.match_inventory(self.get_input_dataset("dfr3_mapping_set"), bridges, fragility_key) bridge_results = [] list_bridges = bridges # Converting list of bridges into a dictionary for ease of reference bridges = dict() for br in list_bridges: bridges[br["id"]] = br list_bridges = None # Clear as it's not needed anymore processed_bridges = [] grouped_bridges = AnalysisUtil.group_by_demand_type(bridges, fragility_set) for demand, grouped_brs in grouped_bridges.items(): input_demand_type = demand[0] input_demand_units = demand[1] # For every group of unique demand and demand unit, call the end-point once br_chunks = list(AnalysisUtil.chunks(grouped_brs, 50)) # TODO: Move to globals? for brs in br_chunks: points = [] for br_id in brs: location = GeoUtil.get_location(bridges[br_id]) points.append(str(location.y) + "," + str(location.x)) if hazard_type == "earthquake": hazard_vals = \ self.hazardsvc.get_earthquake_hazard_values( hazard_dataset_id, input_demand_type, input_demand_units, points) elif hazard_type == "tsunami": hazard_vals = self.hazardsvc.get_tsunami_hazard_values( hazard_dataset_id, input_demand_type, input_demand_units, points) elif hazard_type == "tornado": hazard_vals = self.hazardsvc.get_tornado_hazard_values( hazard_dataset_id, input_demand_units, points) elif hazard_type == "hurricane": hazard_vals = self.hazardsvc.get_hurricanewf_values( hazard_dataset_id, input_demand_type, input_demand_units, points) else: raise ValueError("We only support Earthquake, Tornado, Tsunami, and Hurricane at the moment!") # Parse the batch hazard value results and map them back to the building and fragility. 
                # This is a potential pitfall as we are relying on the order of the returned results
                i = 0
                for br_id in brs:
                    bridge_result = collections.OrderedDict()
                    bridge = bridges[br_id]
                    selected_fragility_set = fragility_set[br_id]
                    hazard_val = hazard_vals[i]['hazardValue']

                    hazard_std_dev = 0.0
                    if use_hazard_uncertainty:
                        # TODO Get this from API once implemented
                        raise ValueError("Uncertainty Not Implemented!")

                    adjusted_fragility_set = copy.deepcopy(selected_fragility_set)
                    if use_liquefaction and 'liq' in bridge['properties']:
                        for fragility in adjusted_fragility_set.fragility_curves:
                            fragility.adjust_fragility_for_liquefaction(bridge['properties']['liq'])

                    dmg_probability = adjusted_fragility_set.calculate_limit_state(
                        hazard_val, std_dev=hazard_std_dev)

                    retrofit_cost = BridgeUtil.get_retrofit_cost(fragility_key)
                    retrofit_type = BridgeUtil.get_retrofit_type(fragility_key)

                    dmg_intervals = AnalysisUtil.calculate_damage_interval(dmg_probability)

                    bridge_result['guid'] = bridge['properties']['guid']
                    bridge_result.update(dmg_probability)
                    bridge_result.update(dmg_intervals)
                    bridge_result["retrofit"] = retrofit_type
                    bridge_result["retrocost"] = retrofit_cost
                    bridge_result["demandtype"] = input_demand_type
                    bridge_result["demandunits"] = input_demand_units
                    bridge_result["hazardtype"] = hazard_type
                    bridge_result["hazardval"] = hazard_val

                    # add spans to bridge output so mean damage calculation can use that info
                    if "spans" in bridge["properties"] and bridge["properties"]["spans"] is not None \
                            and bridge["properties"]["spans"].isdigit():
                        bridge_result['spans'] = int(bridge["properties"]["spans"])
                    elif "SPANS" in bridge["properties"] and bridge["properties"]["SPANS"] is not None \
                            and bridge["properties"]["SPANS"].isdigit():
                        bridge_result['spans'] = int(bridge["properties"]["SPANS"])
                    else:
                        bridge_result['spans'] = 1

                    bridge_results.append(bridge_result)
                    # record processed bridges so they are excluded from the unmapped loop below
                    processed_bridges.append(br_id)
                    i += 1

        # Bridges that were not mapped to a fragility get zero damage placeholders
        unmapped_dmg_probability = {"ls-slight": 0.0, "ls-moderat": 0.0,
                                    "ls-extensi": 0.0, "ls-complet": 0.0}
        unmapped_dmg_intervals = AnalysisUtil.calculate_damage_interval(unmapped_dmg_probability)
        for br_id, br in bridges.items():
            if br_id not in processed_bridges:
                unmapped_br_result = collections.OrderedDict()
                unmapped_br_result['guid'] = br['properties']['guid']
                unmapped_br_result.update(unmapped_dmg_probability)
                unmapped_br_result.update(unmapped_dmg_intervals)
                unmapped_br_result["retrofit"] = "Non-Retrofit"
                unmapped_br_result["retrocost"] = 0.0
                unmapped_br_result["demandtype"] = "None"
                unmapped_br_result['demandunits'] = "None"
                unmapped_br_result["hazardtype"] = "None"
                unmapped_br_result['hazardval'] = 0.0
                bridge_results.append(unmapped_br_result)

        return bridge_results

    def get_spec(self):
        """Get specifications of the bridge damage analysis.

        Returns:
            obj: A JSON object of specifications of the bridge damage analysis.

        """
        return {
            'name': 'bridge-damage',
            'description': 'bridge damage analysis',
            'input_parameters': [
                {
                    'id': 'result_name',
                    'required': True,
                    'description': 'result dataset name',
                    'type': str
                },
                {
                    'id': 'hazard_type',
                    'required': True,
                    'description': 'Hazard Type (e.g. earthquake)',
                    'type': str
                },
                {
                    'id': 'hazard_id',
                    'required': True,
                    'description': 'Hazard ID',
                    'type': str
                },
                {
                    'id': 'fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in mapping dataset',
                    'type': str
                },
                {
                    'id': 'use_liquefaction',
                    'required': False,
                    'description': 'Use liquefaction',
                    'type': bool
                },
                {
                    'id': 'use_hazard_uncertainty',
                    'required': False,
                    'description': 'Use hazard uncertainty',
                    'type': bool
                },
                {
                    'id': 'num_cpu',
                    'required': False,
                    'description': 'If using parallel execution, the number of cpus to request',
                    'type': int
                },
            ],
            'input_datasets': [
                {
                    'id': 'bridges',
                    'required': True,
                    'description': 'Bridge Inventory',
                    'type': ['ergo:bridges'],
                },
                {
                    'id': 'dfr3_mapping_set',
                    'required': True,
                    'description': 'DFR3 Mapping Set Object',
                    'type': ['incore:dfr3MappingSet'],
                }
            ],
            'output_datasets': [
                {
                    'id': 'result',
                    'parent_type': 'bridges',
                    'description': 'CSV file of bridge structural damage',
                    'type': 'ergo:bridgeDamage'
                }
            ]
        }
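
# ---------------------------------------------------------------------------
# Usage sketch for BridgeDamage (illustrative only, kept as comments so this
# listing stays importable). IncoreClient, MappingSet, get_mapping(),
# load_remote_input_dataset(), set_input_dataset() and run_analysis() are
# assumed pyincore client/base-analysis helpers, and every ID below is a
# placeholder rather than a value taken from this module.
#
#   from pyincore import IncoreClient, FragilityService, MappingSet
#
#   client = IncoreClient()
#   bridge_dmg = BridgeDamage(client)
#
#   # Bridge inventory and DFR3 fragility mapping (placeholder IDs)
#   bridge_dmg.load_remote_input_dataset("bridges", "<bridge-inventory-dataset-id>")
#   mapping_set = MappingSet(FragilityService(client).get_mapping("<dfr3-mapping-id>"))
#   bridge_dmg.set_input_dataset("dfr3_mapping_set", mapping_set)
#
#   # Parameters listed in get_spec() above
#   bridge_dmg.set_parameter("result_name", "bridge_damage_result")
#   bridge_dmg.set_parameter("hazard_type", "earthquake")
#   bridge_dmg.set_parameter("hazard_id", "<hazard-dataset-id>")
#   bridge_dmg.set_parameter("num_cpu", 4)
#
#   bridge_dmg.run_analysis()
# ---------------------------------------------------------------------------
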
class PipelineDamageRepairRate(BaseAnalysis):
    """Computes pipeline damage for a hazard.

    Args:
        incore_client: Service client with authentication info.

    """

    def __init__(self, incore_client):
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(PipelineDamageRepairRate, self).__init__(incore_client)

    def run(self):
        """Execute pipeline damage analysis."""
        # Pipeline dataset
        pipeline_dataset = self.get_input_dataset("pipeline").get_inventory_reader()

        # Get hazard type
        hazard_type = self.get_parameter("hazard_type")

        # Get hazard input
        hazard_dataset_id = self.get_parameter("hazard_id")
        user_defined_cpu = 1

        if self.get_parameter("num_cpu") is not None and self.get_parameter("num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        dataset_size = len(pipeline_dataset)
        num_workers = AnalysisUtil.determine_parallelism_locally(
            self, dataset_size, user_defined_cpu)

        # Split the inventory into roughly equal-sized chunks, one per worker
        avg_bulk_input_size = int(dataset_size / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(pipeline_dataset)
        while count < len(inventory_list):
            inventory_args.append(inventory_list[count:count + avg_bulk_input_size])
            count += avg_bulk_input_size

        results = self.pipeline_damage_concurrent_future(
            self.pipeline_damage_analysis_bulk_input, num_workers,
            inventory_args, repeat(hazard_type), repeat(hazard_dataset_id))

        self.set_result_csv_data("result", results,
                                 name=self.get_parameter("result_name"))

        return True

    def pipeline_damage_concurrent_future(self, function_name, num_workers, *args):
        """Utilizes the concurrent.futures module.

        Args:
            function_name (function): The function to be parallelized.
            num_workers (int): Maximum number of workers in parallelization.
            *args: All the arguments in order to pass into parameter function_name.

        Returns:
            list: A list of ordered dictionaries with pipeline damage values and other data/metadata.

        """
        output = []
        with concurrent.futures.ProcessPoolExecutor(max_workers=num_workers) as executor:
            for ret in executor.map(function_name, *args):
                output.extend(ret)

        return output

    def pipeline_damage_analysis_bulk_input(self, pipelines, hazard_type, hazard_dataset_id):
        """Run pipeline damage analysis for multiple pipelines.

        Args:
            pipelines (list): Multiple pipelines from the pipeline dataset.
            hazard_type (str): Hazard type.
            hazard_dataset_id (str): An id of the hazard exposure.

        Returns:
            list: A list of ordered dictionaries with pipeline damage values and other data/metadata.
""" result = [] # Get Fragility key fragility_key = self.get_parameter("fragility_key") if fragility_key is None: fragility_key = PipelineUtil.DEFAULT_TSU_FRAGILITY_KEY if hazard_type == 'tsunami' else \ PipelineUtil.DEFAULT_EQ_FRAGILITY_KEY self.set_parameter("fragility_key", fragility_key) # get fragility set fragility_sets = self.fragilitysvc.match_inventory( self.get_input_dataset("dfr3_mapping_set"), pipelines, fragility_key) # Get Liquefaction Fragility Key liquefaction_fragility_key = self.get_parameter( "liquefaction_fragility_key") if hazard_type == "earthquake" and liquefaction_fragility_key is None: liquefaction_fragility_key = PipelineUtil.LIQ_FRAGILITY_KEY # Liquefaction use_liquefaction = False if hazard_type == "earthquake" and self.get_parameter( "use_liquefaction") is not None: use_liquefaction = self.get_parameter("use_liquefaction") # Get geology dataset id geology_dataset_id = self.get_parameter( "liquefaction_geology_dataset_id") if geology_dataset_id is not None: fragility_sets_liq = self.fragilitysvc.match_inventory( self.get_input_dataset("dfr3_mapping_set"), pipelines, liquefaction_fragility_key) for pipeline in pipelines: if pipeline["id"] in fragility_sets.keys(): liq_fragility_set = None # Check if mapping contains liquefaction fragility if geology_dataset_id is not None and \ fragility_sets_liq is not None and \ pipeline["id"] in fragility_sets_liq: liq_fragility_set = fragility_sets_liq[pipeline["id"]] result.append( self.pipeline_damage_analysis( pipeline, hazard_type, fragility_sets[pipeline["id"]], liq_fragility_set, hazard_dataset_id, geology_dataset_id, use_liquefaction)) return result def pipeline_damage_analysis(self, pipeline, hazard_type, fragility_set, fragility_set_liq, hazard_dataset_id, geology_dataset_id, use_liquefaction): """Run pipeline damage for a single pipeline. Args: pipeline (obj): a single pipeline. hazard_type (str): hazard type. fragility_set (obj): A JSON description of fragility assigned to the building. fragility_set_liq (obj): A JSON description of fragility assigned to the building with liqufaction. hazard_dataset_id (str): A hazard dataset to use. geology_dataset_id (str): A dataset id for geology dataset for liqufaction. use_liquefaction (bool): Liquefaction. True for using liquefaction information to modify the damage, False otherwise. Returns: OrderedDict: A dictionary with pipeline damage values and other data/metadata. 
""" pipeline_results = collections.OrderedDict() pgv_repairs = 0.0 pgd_repairs = 0.0 liq_hazard_type = "" liq_hazard_val = 0.0 liquefaction_prob = 0.0 if fragility_set is not None: demand_type = fragility_set.demand_type.lower() demand_units = fragility_set.demand_units location = GeoUtil.get_location(pipeline) point = str(location.y) + "," + str(location.x) if hazard_type == 'earthquake': hazard_resp = self.hazardsvc.get_earthquake_hazard_values( hazard_dataset_id, demand_type, demand_units, [point]) elif hazard_type == 'tsunami': hazard_resp = self.hazardsvc.get_tsunami_hazard_values( hazard_dataset_id, demand_type, demand_units, [point]) elif hazard_type == 'tornado': hazard_resp = self.hazardsvc.get_tornado_hazard_values( hazard_dataset_id, demand_units, [point]) elif hazard_type == 'hurricane': hazard_resp = self.hazardsvc.get_hurricanewf_values( hazard_dataset_id, demand_type, demand_units, [point]) else: raise ValueError("Hazard type are not currently supported.") hazard_val = hazard_resp[0]['hazardValue'] if hazard_val <= 0.0: hazard_val = 0.0 diameter = PipelineUtil.get_pipe_diameter(pipeline) fragility_vars = {'x': hazard_val, 'y': diameter} fragility_curve = fragility_set.fragility_curves[0] # TODO: here assume that custom fragility set only has one limit state pgv_repairs = fragility_set.calculate_custom_limit_state( fragility_vars)['failure'] # Convert PGV repairs to SI units pgv_repairs = PipelineUtil.convert_result_unit( fragility_curve.description, pgv_repairs) if use_liquefaction is True and fragility_set_liq is not None and geology_dataset_id is not None: liq_fragility_curve = fragility_set_liq.fragility_curves[0] liq_hazard_type = fragility_set_liq.demand_type pgd_demand_units = fragility_set_liq.demand_units # Get PGD hazard value from hazard service location_str = str(location.y) + "," + str(location.x) liquefaction = self.hazardsvc.get_liquefaction_values( hazard_dataset_id, geology_dataset_id, pgd_demand_units, [location_str]) liq_hazard_val = liquefaction[0]['pgd'] liquefaction_prob = liquefaction[0]['liqProbability'] liq_fragility_vars = { 'x': liq_hazard_val, 'y': liquefaction_prob } pgd_repairs = liq_fragility_curve.compute_custom_limit_state_probability( liq_fragility_vars) # Convert PGD repairs to SI units pgd_repairs = PipelineUtil.convert_result_unit( liq_fragility_curve.description, pgd_repairs) total_repair_rate = pgd_repairs + pgv_repairs break_rate = 0.2 * pgv_repairs + 0.8 * pgd_repairs leak_rate = 0.8 * pgv_repairs + 0.2 * pgd_repairs length = PipelineUtil.get_pipe_length(pipeline) failure_probability = 1 - math.exp(-1.0 * break_rate * length) num_pgd_repairs = pgd_repairs * length num_pgv_repairs = pgv_repairs * length num_repairs = num_pgd_repairs + num_pgv_repairs pipeline_results['guid'] = pipeline['properties']['guid'] if 'pipetype' in pipeline['properties']: pipeline_results['pipeclass'] = pipeline['properties'][ 'pipetype'] elif 'pipelinesc' in pipeline['properties']: pipeline_results['pipeclass'] = pipeline['properties'][ 'pipelinesc'] else: pipeline_results['pipeclass'] = "" pipeline_results['pgvrepairs'] = pgv_repairs pipeline_results['pgdrepairs'] = pgd_repairs pipeline_results['repairspkm'] = total_repair_rate pipeline_results['breakrate'] = break_rate pipeline_results['leakrate'] = leak_rate pipeline_results['failprob'] = failure_probability pipeline_results['demandtype'] = demand_type pipeline_results['hazardtype'] = hazard_type pipeline_results['hazardval'] = hazard_val pipeline_results['liqhaztype'] = liq_hazard_type 
            pipeline_results['liqhazval'] = liq_hazard_val
            pipeline_results['liqprobability'] = liquefaction_prob
            pipeline_results['numpgvrpr'] = num_pgv_repairs
            pipeline_results['numpgdrpr'] = num_pgd_repairs
            pipeline_results['numrepairs'] = num_repairs

        return pipeline_results

    def get_spec(self):
        """Get specifications of the pipeline damage analysis.

        Returns:
            obj: A JSON object of specifications of the pipeline damage analysis.

        """
        return {
            'name': 'pipeline-damage',
            'description': 'buried pipeline damage analysis',
            'input_parameters': [
                {
                    'id': 'result_name',
                    'required': True,
                    'description': 'result dataset name',
                    'type': str
                },
                {
                    'id': 'hazard_type',
                    'required': True,
                    'description': 'Hazard Type (e.g. earthquake)',
                    'type': str
                },
                {
                    'id': 'hazard_id',
                    'required': True,
                    'description': 'Hazard ID',
                    'type': str
                },
                {
                    'id': 'fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in mapping dataset',
                    'type': str
                },
                {
                    'id': 'use_liquefaction',
                    'required': False,
                    'description': 'Use liquefaction',
                    'type': bool
                },
                {
                    'id': 'liquefaction_fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in liquefaction mapping dataset',
                    'type': str
                },
                {
                    'id': 'num_cpu',
                    'required': False,
                    'description': 'If using parallel execution, the number of cpus to request',
                    'type': int
                },
                {
                    'id': 'liquefaction_geology_dataset_id',
                    'required': False,
                    'description': 'Geology dataset id',
                    'type': str,
                }
            ],
            'input_datasets': [
                {
                    'id': 'pipeline',
                    'required': True,
                    'description': 'Pipeline Inventory',
                    'type': ['ergo:buriedPipelineTopology', 'ergo:pipeline'],
                },
                {
                    'id': 'dfr3_mapping_set',
                    'required': True,
                    'description': 'DFR3 Mapping Set Object',
                    'type': ['incore:dfr3MappingSet'],
                }
            ],
            'output_datasets': [
                {
                    'id': 'result',
                    'parent_type': 'pipeline',
                    'type': 'ergo:pipelineDamage'
                }
            ]
        }
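
# ---------------------------------------------------------------------------
# Worked example of the repair-rate bookkeeping in pipeline_damage_analysis()
# (illustrative numbers, not outputs of this module). With pgv_repairs = 0.05
# and pgd_repairs = 0.10 repairs/km on a 2 km pipe segment:
#
#   break_rate  = 0.2 * 0.05 + 0.8 * 0.10  = 0.09
#   leak_rate   = 0.8 * 0.05 + 0.2 * 0.10  = 0.06
#   failprob    = 1 - exp(-0.09 * 2)       ~= 0.165
#   numrepairs  = (0.05 + 0.10) * 2        = 0.30
# ---------------------------------------------------------------------------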
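
# ---------------------------------------------------------------------------
# Usage sketch for PipelineDamageRepairRate (illustrative only, kept as
# comments). As with the BridgeDamage sketch above, IncoreClient, MappingSet,
# get_mapping(), load_remote_input_dataset(), set_input_dataset() and
# run_analysis() are assumed pyincore helpers and all IDs are placeholders.
#
#   from pyincore import IncoreClient, FragilityService, MappingSet
#
#   client = IncoreClient()
#   pipeline_dmg = PipelineDamageRepairRate(client)
#
#   pipeline_dmg.load_remote_input_dataset("pipeline", "<pipeline-inventory-dataset-id>")
#   mapping_set = MappingSet(FragilityService(client).get_mapping("<dfr3-mapping-id>"))
#   pipeline_dmg.set_input_dataset("dfr3_mapping_set", mapping_set)
#
#   pipeline_dmg.set_parameter("result_name", "pipeline_damage_result")
#   pipeline_dmg.set_parameter("hazard_type", "earthquake")
#   pipeline_dmg.set_parameter("hazard_id", "<hazard-dataset-id>")
#   pipeline_dmg.set_parameter("use_liquefaction", True)
#   pipeline_dmg.set_parameter("liquefaction_geology_dataset_id", "<geology-dataset-id>")
#   pipeline_dmg.set_parameter("num_cpu", 4)
#
#   pipeline_dmg.run_analysis()
# ---------------------------------------------------------------------------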