class PipelineDamageRepairRate(BaseAnalysis):
    """Computes pipeline damage (expected repair rates per unit length) for a hazard.

    Supports earthquake (optionally with liquefaction-induced PGD) and tsunami.

    Args:
        incore_client: Service client with authentication info

    """

    def __init__(self, incore_client):
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(PipelineDamageRepairRate, self).__init__(incore_client)

    def run(self):
        """Execute pipeline damage analysis.

        Returns:
            bool: True once result datasets have been set.

        """
        # Pipeline dataset
        pipeline_dataset = self.get_input_dataset(
            "pipeline").get_inventory_reader()

        # Get hazard type
        hazard_type = self.get_parameter("hazard_type")

        # Get hazard input
        hazard_dataset_id = self.get_parameter("hazard_id")

        # Number of worker processes; default to 1 unless the user asked for more
        user_defined_cpu = 1
        if self.get_parameter("num_cpu") is not None and self.get_parameter(
                "num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        dataset_size = len(pipeline_dataset)
        num_workers = AnalysisUtil.determine_parallelism_locally(
            self, dataset_size, user_defined_cpu)

        # Split the inventory into roughly equal chunks, one per worker
        avg_bulk_input_size = int(dataset_size / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(pipeline_dataset)
        while count < len(inventory_list):
            inventory_args.append(
                inventory_list[count:count + avg_bulk_input_size])
            count += avg_bulk_input_size

        (ds_results, damage_results) = self.pipeline_damage_concurrent_future(
            self.pipeline_damage_analysis_bulk_input, num_workers,
            inventory_args, repeat(hazard_type), repeat(hazard_dataset_id))

        self.set_result_csv_data("result", ds_results,
                                 name=self.get_parameter("result_name"))
        self.set_result_json_data("metadata", damage_results,
                                  name=self.get_parameter(
                                      "result_name") + "_additional_info")
        return True

    def pipeline_damage_concurrent_future(self, function_name, num_workers,
                                          *args):
        """Utilizes concurrent.future module to run a function over chunked input.

        Args:
            function_name (function): The function to be parallelized.
            num_workers (int): Maximum number workers in parallelization.
            *args: All the arguments in order to pass into parameter function_name.

        Returns:
            list: A list of ordered dictionaries with pipeline damage values.
            list: A list of ordered dictionaries with pipeline damage metadata.

        """
        output_ds = []
        output_dmg = []
        with concurrent.futures.ProcessPoolExecutor(
                max_workers=num_workers) as executor:
            # Each worker returns a (damage rows, metadata rows) pair;
            # flatten both in submission order.
            for ret1, ret2 in executor.map(function_name, *args):
                output_ds.extend(ret1)
                output_dmg.extend(ret2)
        return output_ds, output_dmg

    def pipeline_damage_analysis_bulk_input(self, pipelines, hazard_type,
                                            hazard_dataset_id):
        """Run pipeline damage analysis for multiple pipelines.

        Args:
            pipelines (list): multiple pipelines from pipeline dataset.
            hazard_type (str): Hazard type
            hazard_dataset_id (str): An id of the hazard exposure.

        Returns:
            ds_results (list): A list of ordered dictionaries with pipeline damage values and other data/metadata.
            damage_results (list): A list of ordered dictionaries with pipeline damage metadata.

        """
        # Get Fragility key; fall back to the hazard-specific default
        fragility_key = self.get_parameter("fragility_key")
        if fragility_key is None:
            fragility_key = PipelineUtil.DEFAULT_TSU_FRAGILITY_KEY if hazard_type == 'tsunami' else \
                PipelineUtil.DEFAULT_EQ_FRAGILITY_KEY
            self.set_parameter("fragility_key", fragility_key)

        # get fragility set
        fragility_sets = self.fragilitysvc.match_inventory(
            self.get_input_dataset("dfr3_mapping_set"), pipelines,
            fragility_key)

        # Get Liquefaction Fragility Key
        liquefaction_fragility_key = self.get_parameter(
            "liquefaction_fragility_key")
        if hazard_type == "earthquake" and liquefaction_fragility_key is None:
            liquefaction_fragility_key = PipelineUtil.LIQ_FRAGILITY_KEY

        # Liquefaction only applies to earthquake hazards
        use_liquefaction = False
        if hazard_type == "earthquake" and self.get_parameter(
                "use_liquefaction") is not None:
            use_liquefaction = self.get_parameter("use_liquefaction")

        # Get geology dataset id
        geology_dataset_id = self.get_parameter(
            "liquefaction_geology_dataset_id")

        fragility_sets_liq = None
        if geology_dataset_id is not None:
            fragility_sets_liq = self.fragilitysvc.match_inventory(
                self.get_input_dataset("dfr3_mapping_set"), pipelines,
                liquefaction_fragility_key)

        # Build hazard-service payloads, keeping mapped pipelines in the same
        # order as the payload so responses can be joined by index later.
        values_payload = []
        values_payload_liq = []  # for liquefaction if used
        unmapped_pipelines = []
        mapped_pipelines = []
        for pipeline in pipelines:
            # if find a match fragility for that pipeline
            if pipeline["id"] in fragility_sets.keys():
                fragility_set = fragility_sets[pipeline["id"]]
                location = GeoUtil.get_location(pipeline)
                loc = str(location.y) + "," + str(location.x)
                demands = fragility_set.demand_types
                units = fragility_set.demand_units
                value = {"demands": demands, "units": units, "loc": loc}
                values_payload.append(value)
                mapped_pipelines.append(pipeline)

                # Check if liquefaction is applicable
                if use_liquefaction and \
                        geology_dataset_id is not None and \
                        fragility_sets_liq is not None and \
                        pipeline["id"] in fragility_sets_liq:
                    fragility_set_liq = fragility_sets_liq[pipeline["id"]]
                    demands_liq = fragility_set_liq.demand_types
                    units_liq = fragility_set_liq.demand_units
                    value_liq = {
                        "demands": demands_liq,
                        "units": units_liq,
                        "loc": loc
                    }
                    values_payload_liq.append(value_liq)
            else:
                unmapped_pipelines.append(pipeline)

        # Free the full inventory; only mapped/unmapped partitions are needed now
        del pipelines

        if hazard_type == 'earthquake':
            hazard_resp = self.hazardsvc.post_earthquake_hazard_values(
                hazard_dataset_id, values_payload)
        elif hazard_type == 'tsunami':
            hazard_resp = self.hazardsvc.post_tsunami_hazard_values(
                hazard_dataset_id, values_payload)
        else:
            raise ValueError(
                "The provided hazard type is not supported yet by this analysis"
            )

        # Check if liquefaction is applicable.  Initialize explicitly so the
        # guard below never relies on short-circuit evaluation to avoid a
        # NameError when liquefaction is not in play.
        liquefaction_resp = None
        if use_liquefaction is True and \
                fragility_sets_liq is not None and \
                geology_dataset_id is not None:
            liquefaction_resp = self.hazardsvc.post_liquefaction_values(
                hazard_dataset_id, geology_dataset_id, values_payload_liq)

        # calculate LS and DS
        ds_results = []
        damage_results = []

        for i, pipeline in enumerate(mapped_pipelines):
            # defaults; these survive into the output rows when the hazard
            # response contains errors for this pipeline
            pgv_repairs = None
            pgd_repairs = 0.0
            total_repair_rate = None
            break_rate = None
            leak_rate = None
            failure_probability = None
            num_pgv_repairs = None
            num_pgd_repairs = 0.0
            num_repairs = None
            liq_hazard_vals = None
            liq_demand_types = None
            liq_demand_units = None
            liquefaction_prob = None

            ds_result = dict()
            damage_result = dict()
            ds_result['guid'] = pipeline['properties']['guid']
            damage_result['guid'] = pipeline['properties']['guid']

            fragility_set = fragility_sets[pipeline["id"]]
            # TODO assume there is only one curve
            fragility_curve = fragility_set.fragility_curves[0]

            hazard_vals = AnalysisUtil.update_precision_of_lists(
                hazard_resp[i]["hazardValues"])
            demand_types = hazard_resp[i]["demands"]
            demand_units = hazard_resp[i]["units"]

            # Map each demand type to its hazard value (same index order)
            hval_dict = dict()
            for j, d in enumerate(fragility_set.demand_types):
                hval_dict[d] = hazard_vals[j]

            if not AnalysisUtil.do_hazard_values_have_errors(
                    hazard_resp[i]["hazardValues"]):
                pipeline_args = fragility_set.construct_expression_args_from_inventory(
                    pipeline)
                pgv_repairs = \
                    fragility_curve.solve_curve_expression(
                        hval_dict, fragility_set.curve_parameters,
                        **pipeline_args)
                # Convert PGV repairs to SI units
                pgv_repairs = PipelineUtil.convert_result_unit(
                    fragility_curve.return_type["unit"], pgv_repairs)

                length = PipelineUtil.get_pipe_length(pipeline)

                # Number of PGV repairs
                num_pgv_repairs = pgv_repairs * length

                # Check if liquefaction is applicable
                if use_liquefaction is True \
                        and fragility_sets_liq is not None \
                        and geology_dataset_id is not None \
                        and liquefaction_resp is not None:
                    fragility_set_liq = fragility_sets_liq[pipeline["id"]]
                    # TODO assume there is only one curve
                    liq_fragility_curve = fragility_set_liq.fragility_curves[0]

                    liq_hazard_vals = AnalysisUtil.update_precision_of_lists(
                        liquefaction_resp[i]["pgdValues"])
                    liq_demand_types = liquefaction_resp[i]["demands"]
                    liq_demand_units = liquefaction_resp[i]["units"]
                    liquefaction_prob = liquefaction_resp[i]['liqProbability']

                    liq_hval_dict = dict()
                    for j, d in enumerate(liquefaction_resp[i]["demands"]):
                        liq_hval_dict[d] = liq_hazard_vals[j]

                    # !important! removing the liqProbability and passing in the "diameter"
                    # no fragility is actually using liqProbability
                    pipeline_args = fragility_set_liq.construct_expression_args_from_inventory(
                        pipeline)
                    pgd_repairs = \
                        liq_fragility_curve.solve_curve_expression(
                            liq_hval_dict, fragility_set_liq.curve_parameters,
                            **pipeline_args)
                    # Convert PGD repairs to SI units
                    pgd_repairs = PipelineUtil.convert_result_unit(
                        liq_fragility_curve.return_type["unit"], pgd_repairs)

                    num_pgd_repairs = pgd_repairs * length

                # record results
                if 'pipetype' in pipeline['properties']:
                    damage_result['pipeclass'] = pipeline['properties'][
                        'pipetype']
                elif 'pipelinesc' in pipeline['properties']:
                    damage_result['pipeclass'] = pipeline['properties'][
                        'pipelinesc']
                else:
                    damage_result['pipeclass'] = ""

                # 20/80 break vs. leak split per ALA (2001) repair-rate guidance
                break_rate = 0.2 * pgv_repairs + 0.8 * pgd_repairs
                leak_rate = 0.8 * pgv_repairs + 0.2 * pgd_repairs
                total_repair_rate = pgd_repairs + pgv_repairs
                # Poisson probability of at least one break along the pipe
                failure_probability = 1 - math.exp(-1.0 * break_rate * length)
                num_repairs = num_pgd_repairs + num_pgv_repairs

            ds_result['pgvrepairs'] = pgv_repairs
            ds_result['pgdrepairs'] = pgd_repairs
            ds_result['repairspkm'] = total_repair_rate
            ds_result['breakrate'] = break_rate
            ds_result['leakrate'] = leak_rate
            ds_result['failprob'] = failure_probability
            ds_result['numpgvrpr'] = num_pgv_repairs
            ds_result['numpgdrpr'] = num_pgd_repairs
            ds_result['numrepairs'] = num_repairs
            ds_result[
                'haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(
                    hazard_vals, hazard_type)

            damage_result['fragility_id'] = fragility_set.id
            damage_result['demandtypes'] = demand_types
            damage_result['demandunits'] = demand_units
            damage_result['hazardtype'] = hazard_type
            damage_result['hazardval'] = hazard_vals

            # Check if liquefaction is applicable
            if use_liquefaction is True \
                    and fragility_sets_liq is not None \
                    and geology_dataset_id is not None:
                damage_result['liq_fragility_id'] = fragility_sets_liq[
                    pipeline["id"]].id
                damage_result['liqdemandtypes'] = liq_demand_types
                damage_result['liqdemandunits'] = liq_demand_units
                damage_result['liqhazval'] = liq_hazard_vals
                damage_result['liqprobability'] = liquefaction_prob
            else:
                damage_result['liq_fragility_id'] = None
                damage_result['liqdemandtypes'] = None
                damage_result['liqdemandunits'] = None
                damage_result['liqhazval'] = None
                damage_result['liqprobability'] = None

            ds_results.append(ds_result)
            damage_results.append(damage_result)

        # pipelines do not have matched mappings; emit rows with the same
        # schema as mapped rows so the CSV/JSON columns stay consistent
        for pipeline in unmapped_pipelines:
            ds_result = dict()
            ds_result['guid'] = pipeline['properties']['guid']

            damage_result = dict()
            damage_result['guid'] = pipeline['properties']['guid']
            if 'pipetype' in pipeline['properties']:
                damage_result['pipeclass'] = pipeline['properties']['pipetype']
            elif 'pipelinesc' in pipeline['properties']:
                damage_result['pipeclass'] = pipeline['properties'][
                    'pipelinesc']
            else:
                damage_result['pipeclass'] = ""
            damage_result['fragility_id'] = None
            damage_result['demandtypes'] = None
            damage_result['demandunits'] = None
            damage_result['hazardtype'] = None
            damage_result['hazardval'] = None
            damage_result['liq_fragility_id'] = None
            damage_result['liqdemandtypes'] = None
            damage_result['liqdemandunits'] = None
            damage_result['liqhazval'] = None
            # BUGFIX: was a duplicate 'liqhazval' assignment; 'liqprobability'
            # was missing from unmapped rows, unlike mapped rows above
            damage_result['liqprobability'] = None

            ds_results.append(ds_result)
            damage_results.append(damage_result)

        return ds_results, damage_results

    def get_spec(self):
        """Get specifications of the pipeline damage analysis.

        Returns:
            obj: A JSON object of specifications of the pipeline damage analysis.

        """
        return {
            'name': 'pipeline-damage',
            'description': 'buried pipeline damage analysis',
            'input_parameters': [
                {
                    'id': 'result_name',
                    'required': True,
                    'description': 'result dataset name',
                    'type': str
                },
                {
                    'id': 'hazard_type',
                    'required': True,
                    'description': 'Hazard Type (e.g. earthquake)',
                    'type': str
                },
                {
                    'id': 'hazard_id',
                    'required': True,
                    'description': 'Hazard ID',
                    'type': str
                },
                {
                    'id': 'fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in mapping dataset',
                    'type': str
                },
                {
                    'id': 'use_liquefaction',
                    'required': False,
                    'description': 'Use liquefaction',
                    'type': bool
                },
                {
                    'id': 'liquefaction_fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in liquefaction mapping dataset',
                    'type': str
                },
                {
                    'id': 'num_cpu',
                    'required': False,
                    'description': 'If using parallel execution, the number of cpus to request',
                    'type': int
                },
                {
                    'id': 'liquefaction_geology_dataset_id',
                    'required': False,
                    'description': 'Geology dataset id',
                    'type': str,
                }
            ],
            'input_datasets': [
                {
                    'id': 'pipeline',
                    'required': True,
                    'description': 'Pipeline Inventory',
                    'type': ['ergo:buriedPipelineTopology', 'ergo:pipeline'],
                },
                {
                    'id': 'dfr3_mapping_set',
                    'required': True,
                    'description': 'DFR3 Mapping Set Object',
                    'type': ['incore:dfr3MappingSet'],
                }
            ],
            'output_datasets': [
                {
                    'id': 'result',
                    'parent_type': 'pipeline',
                    'type': 'ergo:pipelineDamageVer3'
                },
                {
                    'id': 'metadata',
                    'parent_type': 'pipeline',
                    'description': 'additional metadata in json file about applied hazard value and '
                                   'fragility',
                    'type': 'incore:pipelineDamageSupplement'
                }
            ]
        }
class RoadDamage(BaseAnalysis):
    """Road Damage Analysis calculates the probability of road damage based on an earthquake or tsunami hazard.

    Args:
        incore_client (IncoreClient): Service authentication.

    """

    # Fragility mapping key used when the caller does not supply one
    DEFAULT_FRAGILITY_KEY = "Non-Retrofit Fragility ID Code"

    def __init__(self, incore_client):
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(RoadDamage, self).__init__(incore_client)

    def run(self):
        """Executes road damage analysis."""
        # Road dataset
        road_set = self.get_input_dataset("roads").get_inventory_reader()

        # Get Fragility key
        fragility_key = self.get_parameter("fragility_key")
        if fragility_key is None:
            fragility_key = self.DEFAULT_FRAGILITY_KEY

        # Get hazard input
        hazard_dataset_id = self.get_parameter("hazard_id")

        # Get hazard type
        hazard_type = self.get_parameter("hazard_type")

        # Liquefaction (optional; defaults to off)
        use_liquefaction = False
        if self.get_parameter("use_liquefaction") is not None:
            use_liquefaction = self.get_parameter("use_liquefaction")

        # Get geology dataset for liquefaction
        geology_dataset_id = None
        if self.get_parameter("liquefaction_geology_dataset_id") is not None:
            geology_dataset_id = self.get_parameter("liquefaction_geology_dataset_id")

        # Hazard Uncertainty (not implemented downstream; see bulk-input method)
        use_hazard_uncertainty = False
        if self.get_parameter("use_hazard_uncertainty") is not None:
            use_hazard_uncertainty = self.get_parameter("use_hazard_uncertainty")

        user_defined_cpu = 1

        if self.get_parameter("num_cpu") is not None and self.get_parameter("num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(self, len(road_set), user_defined_cpu)

        # Chunk the inventory so each worker gets roughly the same number of roads
        avg_bulk_input_size = int(len(road_set) / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(road_set)
        while count < len(inventory_list):
            inventory_args.append(inventory_list[count:count + avg_bulk_input_size])
            count += avg_bulk_input_size

        (ds_results, damage_results) = self.road_damage_concurrent_future(self.road_damage_analysis_bulk_input,
                                                                          num_workers,
                                                                          inventory_args,
                                                                          repeat(hazard_type),
                                                                          repeat(hazard_dataset_id),
                                                                          repeat(use_hazard_uncertainty),
                                                                          repeat(geology_dataset_id),
                                                                          repeat(fragility_key),
                                                                          repeat(use_liquefaction))

        self.set_result_csv_data("result", ds_results, name=self.get_parameter("result_name"))
        self.set_result_json_data("metadata",
                                  damage_results,
                                  name=self.get_parameter("result_name") + "_additional_info")
        return True

    def road_damage_concurrent_future(self, function_name, num_workers, *args):
        """Utilizes concurrent.future module.

        Args:
            function_name (function): The function to be parallelized.
            num_workers (int): Number of workers in parallelization.
            *args: All the arguments in order to pass into parameter function_name.

        Returns:
            output_ds: A list of ordered dictionaries with road damage values
            output_dmg: A list of ordered dictionaries with other road data/metadata.

        """
        output_ds = []
        output_dmg = []
        with concurrent.futures.ProcessPoolExecutor(max_workers=num_workers) as executor:
            # Each worker returns (damage rows, metadata rows); flatten in order
            for ret1, ret2 in executor.map(function_name, *args):
                output_ds.extend(ret1)
                output_dmg.extend(ret2)

        return output_ds, output_dmg

    def road_damage_analysis_bulk_input(self, roads, hazard_type, hazard_dataset_id, use_hazard_uncertainty,
                                        geology_dataset_id, fragility_key, use_liquefaction):
        """Run analysis for multiple roads.

        Args:
            roads (list): Multiple roads from input inventory set.
            hazard_type (str): A hazard type of the hazard exposure (earthquake or tsunami).
            hazard_dataset_id (str): An id of the hazard exposure.
            use_hazard_uncertainty(bool): Flag to indicate use uncertainty or not
            geology_dataset_id (str): An id of the geology for use in liquefaction.
            fragility_key (str): Fragility key describing the type of fragility.
            use_liquefaction (bool): Liquefaction. True for using liquefaction information to modify the damage,
                False otherwise.

        Returns:
            list: A list of ordered dictionaries with road damage values and other data/metadata.
            list: A list of ordered dictionaries with other road data/metadata.

        """
        fragility_sets = self.fragilitysvc.match_inventory(self.get_input_dataset("dfr3_mapping_set"), roads,
                                                           fragility_key)

        # Payload order must match mapped_roads order: hazard responses are
        # joined back to roads by list index below.
        values_payload = []
        mapped_roads = []
        unmapped_roads = []
        pgd_flag = True  # for liquefaction
        liquefaction_resp = None
        for road in roads:
            if road["id"] in fragility_sets.keys():
                fragility_set = fragility_sets[road["id"]]
                location = GeoUtil.get_location(road)
                loc = str(location.y) + "," + str(location.x)
                demands = fragility_set.demand_types
                # for liquefaction: liquefaction is only applied when every
                # mapped fragility demands PGD exclusively
                if any(demand.lower() != 'pgd' for demand in demands):
                    pgd_flag = False
                units = fragility_set.demand_units
                value = {
                    "demands": demands,
                    "units": units,
                    "loc": loc
                }
                values_payload.append(value)
                mapped_roads.append(road)
            else:
                unmapped_roads.append(road)

        # Free the full inventory; only the partitions are needed from here on
        del roads

        # get hazard and liquefaction values
        if hazard_type == 'earthquake':
            hazard_resp = self.hazardsvc.post_earthquake_hazard_values(hazard_dataset_id, values_payload)

            if pgd_flag and use_liquefaction and geology_dataset_id is not None:
                liquefaction_resp = self.hazardsvc.post_liquefaction_values(hazard_dataset_id, geology_dataset_id,
                                                                            values_payload)

        elif hazard_type == 'tsunami':
            hazard_resp = self.hazardsvc.post_tsunami_hazard_values(hazard_dataset_id, values_payload)
        elif hazard_type == 'hurricane':
            hazard_resp = self.hazardsvc.post_hurricane_hazard_values(hazard_dataset_id, values_payload)
        else:
            raise ValueError("The provided hazard type is not supported yet by this analysis")

        # calculate LS and DS
        ds_results = []
        damage_results = []
        for i, road in enumerate(mapped_roads):
            dmg_probability = dict()
            dmg_interval = dict()
            demand_types_liq = None
            demand_units_liq = None
            liq_hazard_vals = None
            liquefaction_prob = None
            selected_fragility_set = fragility_sets[road["id"]]
            hazard_std_dev = 0.0  # placeholder; uncertainty is not supported yet
            if use_hazard_uncertainty:
                raise ValueError("Uncertainty Not Implemented Yet.")

            if isinstance(selected_fragility_set.fragility_curves[0], DFR3Curve):
                hazard_vals = AnalysisUtil.update_precision_of_lists(hazard_resp[i]["hazardValues"])
                demand_types = hazard_resp[i]["demands"]
                demand_units = hazard_resp[i]["units"]
                # Map each fragility demand type to its hazard value by index
                hval_dict = dict()
                for j, d in enumerate(selected_fragility_set.demand_types):
                    hval_dict[d] = hazard_vals[j]
                if not AnalysisUtil.do_hazard_values_have_errors(hazard_resp[i]["hazardValues"]):
                    road_args = selected_fragility_set.construct_expression_args_from_inventory(road)
                    dmg_probability = selected_fragility_set.calculate_limit_state(
                        hval_dict, inventory_type='road', **road_args)

                    # if there is liquefaction, overwrite the hazardval with liquefaction value
                    # recalculate dmg_probability and dmg_interval
                    if liquefaction_resp is not None and len(liquefaction_resp) > 0:
                        liq_hazard_vals = AnalysisUtil.update_precision_of_lists(liquefaction_resp[i]["pgdValues"])
                        demand_types_liq = liquefaction_resp[i]['demands']
                        demand_units_liq = liquefaction_resp[i]['units']
                        liquefaction_prob = liquefaction_resp[i]['liqProbability']
                        liq_hval_dict = dict()
                        for j, d in enumerate(liquefaction_resp[i]["demands"]):
                            liq_hval_dict[d] = liq_hazard_vals[j]
                        dmg_probability = selected_fragility_set.calculate_limit_state(
                            liq_hval_dict, inventory_type='road', **road_args)

                    dmg_interval = selected_fragility_set.calculate_damage_interval(dmg_probability,
                                                                                    hazard_type=hazard_type,
                                                                                    inventory_type="road")
            else:
                raise ValueError("One of the fragilities is in deprecated format. This should not happen. If you are "
                                 "seeing this please report the issue.")

            ds_result = dict()
            ds_result['guid'] = road['properties']['guid']
            ds_result.update(dmg_probability)
            ds_result.update(dmg_interval)
            ds_result['haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(hazard_vals, hazard_type)

            damage_result = dict()
            damage_result['guid'] = road['properties']['guid']
            damage_result['fragility_id'] = selected_fragility_set.id
            damage_result['demandtypes'] = demand_types
            damage_result['demandunits'] = demand_units
            damage_result['hazardtype'] = hazard_type
            damage_result['hazardvals'] = hazard_vals
            damage_result['liqdemandtypes'] = demand_types_liq
            damage_result['liqdemandunits'] = demand_units_liq
            damage_result['liqhazvals'] = liq_hazard_vals
            damage_result['liqprobability'] = liquefaction_prob

            ds_results.append(ds_result)
            damage_results.append(damage_result)

        # Roads with no matching fragility mapping: emit rows with the same
        # columns (all None) so outputs stay schema-consistent
        for road in unmapped_roads:
            ds_result = dict()
            damage_result = dict()

            ds_result['guid'] = road['properties']['guid']

            damage_result['guid'] = road['properties']['guid']
            damage_result['fragility_id'] = None
            damage_result['demandtypes'] = None
            damage_result['demandunits'] = None
            damage_result['hazardtype'] = None
            damage_result['hazardvals'] = None
            damage_result['liqdemandtypes'] = None
            damage_result['liqdemandunits'] = None
            damage_result['liqhazvals'] = None
            damage_result['liqprobability'] = None

            ds_results.append(ds_result)
            damage_results.append(damage_result)

        return ds_results, damage_results

    def get_spec(self):
        """Get specifications of the road damage analysis.

        Returns:
            obj: A JSON object of specifications of the road damage analysis.

        """
        return {
            'name': 'road-damage',
            'description': 'road damage analysis',
            'input_parameters': [
                {
                    'id': 'result_name',
                    'required': True,
                    'description': 'result dataset name',
                    'type': str
                },
                {
                    'id': 'hazard_type',
                    'required': True,
                    'description': 'Hazard Type (e.g. earthquake)',
                    'type': str
                },
                {
                    'id': 'hazard_id',
                    'required': True,
                    'description': 'Hazard ID',
                    'type': str
                },
                {
                    'id': 'fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in mapping dataset',
                    'type': str
                },
                {
                    'id': 'use_liquefaction',
                    'required': False,
                    'description': 'Use liquefaction',
                    'type': bool
                },
                {
                    'id': 'liquefaction_geology_dataset_id',
                    'required': False,
                    'description': 'Liquefaction geology/susceptibility dataset id. '
                                   'If not provided, liquefaction will be ignored',
                    'type': str
                },
                {
                    'id': 'use_hazard_uncertainty',
                    'required': False,
                    'description': 'Use hazard uncertainty',
                    'type': bool
                },
                {
                    'id': 'num_cpu',
                    'required': False,
                    'description': 'If using parallel execution, the number of cpus to request',
                    'type': int
                },
            ],
            'input_datasets': [
                {
                    'id': 'roads',
                    'required': True,
                    'description': 'Road Inventory',
                    'type': ['ergo:roadLinkTopo', 'incore:roads', 'ergo:roadLinkTopoVer2']
                },
                {
                    'id': 'dfr3_mapping_set',
                    'required': True,
                    'description': 'DFR3 Mapping Set Object',
                    'type': ['incore:dfr3MappingSet'],
                }
            ],
            'output_datasets': [
                {
                    'id': 'result',
                    'parent_type': 'roads',
                    'description': 'CSV file of road structural damage',
                    'type': 'ergo:roadDamageVer3'
                },
                {
                    'id': 'metadata',
                    'parent_type': 'roads',
                    'description': 'additional metadata in json file about applied hazard value and '
                                   'fragility',
                    'type': 'incore:roadDamageSupplement'
                }
            ]
        }
class WaterFacilityDamage(BaseAnalysis):
    """Computes water facility damage for an earthquake tsunami, tornado, or hurricane exposure.

    """

    # Default fragility-mapping keys per hazard/demand type
    DEFAULT_EQ_FRAGILITY_KEY = "pga"
    DEFAULT_TSU_FRAGILITY_KEY = "Non-Retrofit inundationDepth Fragility ID Code"
    DEFAULT_LIQ_FRAGILITY_KEY = "pgd"

    def __init__(self, incore_client):
        # Create Hazard and Fragility service
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(WaterFacilityDamage, self).__init__(incore_client)

    def run(self):
        """Performs Water facility damage analysis by using the parameters from the spec
        and creates an output dataset in csv format

        Returns:
            bool: True if successful, False otherwise

        """
        # Facility dataset
        inventory_set = self.get_input_dataset(
            "water_facilities").get_inventory_reader()

        # Get hazard input
        hazard_dataset_id = self.get_parameter("hazard_id")

        # Hazard type of the exposure
        hazard_type = self.get_parameter("hazard_type")

        user_defined_cpu = 1

        if not self.get_parameter("num_cpu") is None and self.get_parameter(
                "num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(
            self, len(inventory_set), user_defined_cpu)

        # Chunk the inventory so each worker gets a near-equal share
        avg_bulk_input_size = int(len(inventory_set) / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(inventory_set)
        while count < len(inventory_list):
            inventory_args.append(
                inventory_list[count:count + avg_bulk_input_size])
            count += avg_bulk_input_size

        # NOTE(review): the spec marks 'result_name' as not required, but it is
        # concatenated with a string below and in set_result_csv_data — a missing
        # result_name would raise a TypeError here. Confirm whether the spec
        # should require it like the sibling analyses do.
        (ds_results, damage_results) = self.waterfacility_damage_concurrent_futures(
            self.waterfacilityset_damage_analysis_bulk_input, num_workers,
            inventory_args, repeat(hazard_type), repeat(hazard_dataset_id))

        self.set_result_csv_data("result", ds_results,
                                 name=self.get_parameter("result_name"))
        self.set_result_json_data("metadata", damage_results,
                                  name=self.get_parameter(
                                      "result_name") + "_additional_info")
        return True

    def waterfacility_damage_concurrent_futures(self, function_name,
                                                parallel_processes, *args):
        """Utilizes concurrent.future module.

        Args:
            function_name (function): The function to be parallelized.
            parallel_processes (int): Number of workers in parallelization.
            *args: All the arguments in order to pass into parameter function_name.

        Returns:
            list: A list of ordered dictionaries with water facility damage values
            list: A list of ordered dictionaries with other water facility data/metadata

        """
        output_ds = []
        output_dmg = []
        with concurrent.futures.ProcessPoolExecutor(
                max_workers=parallel_processes) as executor:
            # Each worker yields (facility rows, metadata rows); flatten in order
            for ret1, ret2 in executor.map(function_name, *args):
                output_ds.extend(ret1)
                output_dmg.extend(ret2)

        return output_ds, output_dmg

    def waterfacilityset_damage_analysis_bulk_input(self, facilities,
                                                    hazard_type,
                                                    hazard_dataset_id):
        """Gets applicable fragilities and calculates damage

        Args:
            facilities (list): Multiple water facilities from input inventory set.
            hazard_type (str): A hazard type of the hazard exposure (earthquake, tsunami,
                tornado, or hurricane).
            hazard_dataset_id (str): An id of the hazard exposure.

        Returns:
            list: A list of ordered dictionaries with water facility damage values
            list: A list of ordered dictionaries with other water facility data/metadata

        """
        # Liquefaction related variables
        use_liquefaction = False
        liquefaction_available = False
        fragility_sets_liq = None
        liquefaction_resp = None
        geology_dataset_id = None
        liq_hazard_vals = None
        liq_demand_types = None
        liq_demand_units = None
        liquefaction_prob = None
        loc = None

        # Obtain the fragility key; default depends on the hazard type
        fragility_key = self.get_parameter("fragility_key")

        if fragility_key is None:
            if hazard_type == 'tsunami':
                fragility_key = self.DEFAULT_TSU_FRAGILITY_KEY
            elif hazard_type == 'earthquake':
                fragility_key = self.DEFAULT_EQ_FRAGILITY_KEY
            else:
                raise ValueError(
                    "Hazard type other than Earthquake and Tsunami are not currently supported."
                )

            self.set_parameter("fragility_key", fragility_key)

        # Obtain the fragility set
        fragility_sets = self.fragilitysvc.match_inventory(
            self.get_input_dataset("dfr3_mapping_set"), facilities,
            fragility_key)

        # Obtain the liquefaction fragility Key
        liquefaction_fragility_key = self.get_parameter(
            "liquefaction_fragility_key")

        if hazard_type == "earthquake":
            if self.get_parameter("use_liquefaction") is True:
                if liquefaction_fragility_key is None:
                    liquefaction_fragility_key = self.DEFAULT_LIQ_FRAGILITY_KEY

                use_liquefaction = self.get_parameter("use_liquefaction")

                # Obtain the geology dataset
                geology_dataset_id = self.get_parameter(
                    "liquefaction_geology_dataset_id")

                if geology_dataset_id is not None:
                    fragility_sets_liq = self.fragilitysvc.match_inventory(
                        self.get_input_dataset("dfr3_mapping_set"), facilities,
                        liquefaction_fragility_key)

                    if fragility_sets_liq is not None:
                        liquefaction_available = True

        # Determine whether to use hazard uncertainty
        uncertainty = self.get_parameter("use_hazard_uncertainty")

        # Setup fragility translation structures. Payload order must match
        # mapped_waterfacilities order: responses are joined by index below.
        values_payload = []
        values_payload_liq = []
        unmapped_waterfacilities = []
        mapped_waterfacilities = []

        for facility in facilities:
            if facility["id"] in fragility_sets.keys():
                # Fill in generic details
                fragility_set = fragility_sets[facility["id"]]
                location = GeoUtil.get_location(facility)
                loc = str(location.y) + "," + str(location.x)
                demands = fragility_set.demand_types
                units = fragility_set.demand_units
                value = {"demands": demands, "units": units, "loc": loc}
                values_payload.append(value)
                mapped_waterfacilities.append(facility)

                # Fill in liquefaction parameters
                if liquefaction_available and facility[
                        "id"] in fragility_sets_liq:
                    fragility_set_liq = fragility_sets_liq[facility["id"]]
                    demands_liq = fragility_set_liq.demand_types
                    units_liq = fragility_set_liq.demand_units
                    value_liq = {
                        "demands": demands_liq,
                        "units": units_liq,
                        "loc": loc
                    }
                    values_payload_liq.append(value_liq)
            else:
                unmapped_waterfacilities.append(facility)

        # Free the full inventory; only the partitions are needed from here on
        del facilities

        if hazard_type == 'earthquake':
            hazard_resp = self.hazardsvc.post_earthquake_hazard_values(
                hazard_dataset_id, values_payload)
        elif hazard_type == 'tsunami':
            hazard_resp = self.hazardsvc.post_tsunami_hazard_values(
                hazard_dataset_id, values_payload)
        else:
            raise ValueError(
                "The provided hazard type is not supported yet by this analysis"
            )

        # Check if liquefaction is applicable
        if liquefaction_available:
            liquefaction_resp = self.hazardsvc.post_liquefaction_values(
                hazard_dataset_id, geology_dataset_id, values_payload_liq)

        # Calculate LS and DS
        facility_results = []
        damage_results = []

        for i, facility in enumerate(mapped_waterfacilities):
            fragility_set = fragility_sets[facility["id"]]
            limit_states = dict()
            dmg_intervals = dict()

            # Setup conditions for the analysis
            hazard_std_dev = 0

            if uncertainty:
                # NOTE(review): a uniform random std-dev looks like a placeholder
                # for real hazard-uncertainty support — confirm intent
                hazard_std_dev = random.random()

            if isinstance(fragility_set.fragility_curves[0], DFR3Curve):
                hazard_vals = AnalysisUtil.update_precision_of_lists(
                    hazard_resp[i]["hazardValues"])
                demand_types = hazard_resp[i]["demands"]
                demand_units = hazard_resp[i]["units"]

                # Map each fragility demand type to its hazard value by index
                hval_dict = dict()

                for j, d in enumerate(fragility_set.demand_types):
                    hval_dict[d] = hazard_vals[j]

                if not AnalysisUtil.do_hazard_values_have_errors(
                        hazard_resp[i]["hazardValues"]):
                    facility_args = fragility_set.construct_expression_args_from_inventory(
                        facility)
                    limit_states = \
                        fragility_set.calculate_limit_state(
                            hval_dict,
                            std_dev=hazard_std_dev,
                            inventory_type='water_facility',
                            **facility_args)

                    # Evaluate liquefaction: if it is not none, then liquefaction is available
                    if liquefaction_resp is not None:
                        fragility_set_liq = fragility_sets_liq[facility["id"]]

                        if isinstance(fragility_set_liq.fragility_curves[0],
                                      DFR3Curve):
                            liq_hazard_vals = AnalysisUtil.update_precision_of_lists(
                                liquefaction_resp[i]["pgdValues"])
                            liq_demand_types = liquefaction_resp[i]["demands"]
                            liq_demand_units = liquefaction_resp[i]["units"]
                            liquefaction_prob = liquefaction_resp[i][
                                'liqProbability']

                            hval_dict_liq = dict()

                            for j, d in enumerate(
                                    fragility_set_liq.demand_types):
                                hval_dict_liq[d] = liq_hazard_vals[j]

                            facility_liq_args = fragility_set_liq.construct_expression_args_from_inventory(
                                facility)

                            pgd_limit_states = \
                                fragility_set_liq.calculate_limit_state(
                                    hval_dict_liq,
                                    std_dev=hazard_std_dev,
                                    inventory_type="water_facility",
                                    **facility_liq_args)
                        else:
                            raise ValueError(
                                "One of the fragilities is in deprecated format. "
                                "This should not happen If you are seeing this please report the issue."
                            )

                        # Combine shaking-based and PGD-based limit states
                        limit_states = AnalysisUtil.adjust_limit_states_for_pgd(
                            limit_states, pgd_limit_states)

                    dmg_intervals = fragility_set.calculate_damage_interval(
                        limit_states,
                        hazard_type=hazard_type,
                        inventory_type='water_facility')
            else:
                raise ValueError(
                    "One of the fragilities is in deprecated format. This should not happen. If you are "
                    "seeing this please report the issue.")

            # TODO: ideally, this goes into a single variable declaration section
            facility_result = {
                'guid': facility['properties']['guid'],
                **limit_states,
                **dmg_intervals
            }
            facility_result[
                'haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(
                    hazard_vals, hazard_type)
            damage_result = dict()
            damage_result['guid'] = facility['properties']['guid']
            damage_result['fragility_id'] = fragility_set.id
            damage_result['demandtypes'] = demand_types
            damage_result['demandunits'] = demand_units
            damage_result['hazardtype'] = hazard_type
            damage_result['hazardvals'] = hazard_vals

            if use_liquefaction and fragility_sets_liq and geology_dataset_id:
                damage_result['liq_fragility_id'] = fragility_sets_liq[
                    facility["id"]].id
                damage_result['liqdemandtypes'] = liq_demand_types
                damage_result['liqdemandunits'] = liq_demand_units
                damage_result['liqhazval'] = liq_hazard_vals
                damage_result['liqprobability'] = liquefaction_prob
            else:
                damage_result['liq_fragility_id'] = None
                damage_result['liqdemandtypes'] = None
                damage_result['liqdemandunits'] = None
                damage_result['liqhazval'] = None
                damage_result['liqprobability'] = None

            facility_results.append(facility_result)
            damage_results.append(damage_result)

        # Facilities with no matching fragility mapping: emit rows with the
        # same columns (all None) so outputs stay schema-consistent
        for facility in unmapped_waterfacilities:
            facility_result = dict()
            damage_result = dict()

            facility_result['guid'] = facility['properties']['guid']
            damage_result['guid'] = facility['properties']['guid']
            damage_result['fragility_id'] = None
            damage_result['demandtypes'] = None
            damage_result['demandunits'] = None
            damage_result['hazardtype'] = None
            damage_result['hazardvals'] = None
            damage_result['liq_fragility_id'] = None
            damage_result['liqdemandtypes'] = None
            damage_result['liqdemandunits'] = None
            damage_result['liqhazval'] = None
            damage_result['liqprobability'] = None

            facility_results.append(facility_result)
            damage_results.append(damage_result)

        return facility_results, damage_results

    def get_spec(self):
        """Get specifications of the water facility damage analysis.

        Returns:
            obj: A JSON object of specifications of the water facility damage analysis.

        """
        return {
            'name': 'water-facility-damage',
            'description': 'water facility damage analysis',
            'input_parameters': [
                {
                    'id': 'result_name',
                    'required': False,
                    'description': 'result dataset name',
                    'type': str
                },
                {
                    'id': 'hazard_type',
                    'required': True,
                    'description': 'Hazard Type (e.g. earthquake)',
                    'type': str
                },
                {
                    'id': 'hazard_id',
                    'required': True,
                    'description': 'Hazard ID',
                    'type': str
                },
                {
                    'id': 'fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in mapping dataset',
                    'type': str
                },
                {
                    'id': 'use_liquefaction',
                    'required': False,
                    'description': 'Use liquefaction',
                    'type': bool
                },
                {
                    'id': 'liquefaction_geology_dataset_id',
                    'required': False,
                    'description': 'Liquefaction geology/susceptibility dataset id. '
                                   'If not provided, liquefaction will be ignored',
                    'type': str
                },
                {
                    'id': 'liquefaction_fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in liquefaction mapping dataset',
                    'type': str
                },
                {
                    'id': 'use_hazard_uncertainty',
                    'required': False,
                    'description': 'Use hazard uncertainty',
                    'type': bool
                },
                {
                    'id': 'num_cpu',
                    'required': False,
                    'description': 'If using parallel execution, the number of cpus to request',
                    'type': int
                },
            ],
            'input_datasets': [
                {
                    'id': 'water_facilities',
                    'required': True,
                    'description': 'Water Facility Inventory',
                    'type': ['ergo:waterFacilityTopo'],
                },
                {
                    'id': 'dfr3_mapping_set',
                    'required': True,
                    'description': 'DFR3 Mapping Set Object',
                    'type': ['incore:dfr3MappingSet'],
                }
            ],
            'output_datasets': [
                {
                    'id': 'result',
                    'parent_type': 'water_facilities',
                    'description': 'A csv file with limit state probabilities and damage states '
                                   'for each water facility',
                    'type': 'ergo:waterFacilityDamageVer6'
                },
                {
                    'id': 'metadata',
                    'parent_type': 'water_facilities',
                    'description': 'additional metadata in json file about applied hazard value and '
                                   'fragility',
                    'type': 'incore:waterFacilityDamageSupplement'
                }
            ]
        }
class BridgeDamage(BaseAnalysis):
    """Computes bridge structural damage for earthquake, tsunami, tornado, and hurricane hazards.

    Args:
        incore_client (IncoreClient): Service authentication.

    """

    def __init__(self, incore_client):
        # Clients used to fetch hazard values and fragility (DFR3) mappings.
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(BridgeDamage, self).__init__(incore_client)

    def run(self):
        """Executes bridge damage analysis."""
        # Bridge dataset
        bridge_set = self.get_input_dataset("bridges").get_inventory_reader()

        # Get hazard input
        hazard_type = self.get_parameter("hazard_type")
        hazard_dataset_id = self.get_parameter("hazard_id")

        # Default to a single CPU unless a positive num_cpu was supplied.
        user_defined_cpu = 1

        if not self.get_parameter("num_cpu") is None and self.get_parameter(
                "num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(self, len(
            bridge_set), user_defined_cpu)

        # Partition the inventory into near-equal chunks, one batch per worker.
        avg_bulk_input_size = int(len(bridge_set) / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(bridge_set)
        while count < len(inventory_list):
            inventory_args.append(
                inventory_list[count:count + avg_bulk_input_size])
            count += avg_bulk_input_size

        results = self.bridge_damage_concurrent_future(
            self.bridge_damage_analysis_bulk_input, num_workers,
            inventory_args, repeat(hazard_type), repeat(hazard_dataset_id))

        self.set_result_csv_data("result", results,
                                 name=self.get_parameter("result_name"))

        return True

    def bridge_damage_concurrent_future(self, function_name, num_workers,
                                        *args):
        """Utilizes concurrent.future module.

        Args:
            function_name (function): The function to be parallelized.
            num_workers (int): Maximum number workers in parallelization.
            *args: All the arguments in order to pass into parameter function_name.

        Returns:
            list: A list of ordered dictionaries with bridge damage values and other data/metadata.

        """
        output = []
        with concurrent.futures.ProcessPoolExecutor(
                max_workers=num_workers) as executor:
            for ret in executor.map(function_name, *args):
                output.extend(ret)

        return output

    def bridge_damage_analysis_bulk_input(self, bridges, hazard_type,
                                          hazard_dataset_id):
        """Run analysis for multiple bridges.

        Args:
            bridges (list): Multiple bridges from input inventory set.
            hazard_type (str): Hazard type, either earthquake, tornado, tsunami, or hurricane.
            hazard_dataset_id (str): An id of the hazard exposure.

        Returns:
            list: A list of ordered dictionaries with bridge damage values and other data/metadata.

        """
        # Get Fragility key; the default depends on the hazard type.
        fragility_key = self.get_parameter("fragility_key")
        if fragility_key is None:
            fragility_key = BridgeUtil.DEFAULT_TSUNAMI_HMAX_FRAGILITY_KEY if hazard_type == 'tsunami' else \
                BridgeUtil.DEFAULT_FRAGILITY_KEY
            self.set_parameter("fragility_key", fragility_key)

        # Hazard Uncertainty: only considered for earthquake (not implemented yet).
        use_hazard_uncertainty = False
        if hazard_type == "earthquake" and self.get_parameter(
                "use_hazard_uncertainty") is not None:
            use_hazard_uncertainty = self.get_parameter(
                "use_hazard_uncertainty")

        # Liquefaction: only considered for earthquake.
        use_liquefaction = False
        if hazard_type == "earthquake" and self.get_parameter(
                "use_liquefaction") is not None:
            use_liquefaction = self.get_parameter("use_liquefaction")

        # NOTE(review): the first assignment is dead code; it is immediately
        # overwritten by the match_inventory() result.
        fragility_set = dict()
        fragility_set = self.fragilitysvc.match_inventory(
            self.get_input_dataset("dfr3_mapping_set"), bridges,
            fragility_key)

        bridge_results = []
        list_bridges = bridges

        # Converting list of bridges into a dictionary for ease of reference
        bridges = dict()
        for br in list_bridges:
            bridges[br["id"]] = br

        list_bridges = None  # Clear as it's not needed anymore

        processed_bridges = []
        grouped_bridges = AnalysisUtil.group_by_demand_type(bridges,
                                                            fragility_set)

        for demand, grouped_brs in grouped_bridges.items():

            input_demand_type = demand[0]
            input_demand_units = demand[1]

            # For every group of unique demand and demand unit, call the end-point once
            br_chunks = list(AnalysisUtil.chunks(grouped_brs,
                                                 50))  # TODO: Move to globals?
            for brs in br_chunks:
                points = []
                for br_id in brs:
                    location = GeoUtil.get_location(bridges[br_id])
                    points.append(str(location.y) + "," + str(location.x))

                if hazard_type == "earthquake":
                    hazard_vals = \
                        self.hazardsvc.get_earthquake_hazard_values(
                            hazard_dataset_id, input_demand_type,
                            input_demand_units, points)
                elif hazard_type == "tsunami":
                    hazard_vals = self.hazardsvc.get_tsunami_hazard_values(
                        hazard_dataset_id, input_demand_type,
                        input_demand_units, points)
                elif hazard_type == "tornado":
                    # The tornado endpoint takes no demand type argument.
                    hazard_vals = self.hazardsvc.get_tornado_hazard_values(
                        hazard_dataset_id, input_demand_units, points)
                elif hazard_type == "hurricane":
                    hazard_vals = self.hazardsvc.get_hurricanewf_values(
                        hazard_dataset_id, input_demand_type,
                        input_demand_units, points)
                else:
                    raise ValueError("We only support Earthquake, Tornado, Tsunami, and Hurricane at the moment!")

                # Parse the batch hazard value results and map them back to the building and fragility.
                # This is a potential pitfall as we are relying on the order of the returned results
                i = 0
                for br_id in brs:
                    bridge_result = collections.OrderedDict()
                    bridge = bridges[br_id]
                    selected_fragility_set = fragility_set[br_id]
                    hazard_val = hazard_vals[i]['hazardValue']

                    hazard_std_dev = 0.0
                    if use_hazard_uncertainty:
                        # TODO Get this from API once implemented
                        raise ValueError("Uncertainty Not Implemented!")

                    # Deep copy so liquefaction adjustments do not mutate the
                    # fragility set shared across bridges.
                    adjusted_fragility_set = copy.deepcopy(selected_fragility_set)
                    if use_liquefaction and 'liq' in bridge['properties']:
                        for fragility in adjusted_fragility_set.fragility_curves:
                            fragility.adjust_fragility_for_liquefaction(bridge['properties']['liq'])

                    dmg_probability = adjusted_fragility_set.calculate_limit_state(hazard_val,
                                                                                   std_dev=hazard_std_dev)

                    # NOTE(review): these depend only on fragility_key, so the
                    # calls are loop-invariant and could be hoisted.
                    retrofit_cost = BridgeUtil.get_retrofit_cost(fragility_key)
                    retrofit_type = BridgeUtil.get_retrofit_type(fragility_key)

                    dmg_intervals = AnalysisUtil.calculate_damage_interval(dmg_probability)

                    bridge_result['guid'] = bridge['properties']['guid']
                    bridge_result.update(dmg_probability)
                    bridge_result.update(dmg_intervals)
                    bridge_result["retrofit"] = retrofit_type
                    bridge_result["retrocost"] = retrofit_cost
                    bridge_result["demandtype"] = input_demand_type
                    bridge_result["demandunits"] = input_demand_units
                    bridge_result["hazardtype"] = hazard_type
                    bridge_result["hazardval"] = hazard_val

                    # add spans to bridge output so mean damage calculation can use that info
                    if "spans" in bridge["properties"] and bridge["properties"]["spans"] \
                            is not None and bridge["properties"]["spans"].isdigit():
                        bridge_result['spans'] = int(bridge["properties"]["spans"])
                    elif "SPANS" in bridge["properties"] and bridge["properties"]["SPANS"] \
                            is not None and bridge["properties"]["SPANS"].isdigit():
                        bridge_result['spans'] = int(bridge["properties"]["SPANS"])
                    else:
                        bridge_result['spans'] = 1

                    bridge_results.append(bridge_result)
                    processed_bridges.append(br_id)  # remove processed bridges
                    i = i + 1

        # Bridges with no matched fragility get zeroed placeholder rows so the
        # output covers the full inventory.
        unmapped_dmg_probability = {"ls-slight": 0.0, "ls-moderat": 0.0,
                                    "ls-extensi": 0.0, "ls-complet": 0.0}
        unmapped_dmg_intervals = AnalysisUtil.calculate_damage_interval(unmapped_dmg_probability)
        for br_id, br in bridges.items():
            if br_id not in processed_bridges:
                unmapped_br_result = collections.OrderedDict()
                unmapped_br_result['guid'] = br['properties']['guid']
                unmapped_br_result.update(unmapped_dmg_probability)
                unmapped_br_result.update(unmapped_dmg_intervals)
                unmapped_br_result["retrofit"] = "Non-Retrofit"
                unmapped_br_result["retrocost"] = 0.0
                unmapped_br_result["demandtype"] = "None"
                unmapped_br_result['demandunits'] = "None"
                unmapped_br_result["hazardtype"] = "None"
                unmapped_br_result['hazardval'] = 0.0
                bridge_results.append(unmapped_br_result)

        return bridge_results

    def get_spec(self):
        """Get specifications of the bridge damage analysis.

        Returns:
            obj: A JSON object of specifications of the bridge damage analysis.

        """
        return {
            'name': 'bridge-damage',
            'description': 'bridge damage analysis',
            'input_parameters': [
                {
                    'id': 'result_name',
                    'required': True,
                    'description': 'result dataset name',
                    'type': str
                },
                {
                    'id': 'hazard_type',
                    'required': True,
                    'description': 'Hazard Type (e.g. earthquake)',
                    'type': str
                },
                {
                    'id': 'hazard_id',
                    'required': True,
                    'description': 'Hazard ID',
                    'type': str
                },
                {
                    'id': 'fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in mapping dataset',
                    'type': str
                },
                {
                    'id': 'use_liquefaction',
                    'required': False,
                    'description': 'Use liquefaction',
                    'type': bool
                },
                {
                    'id': 'use_hazard_uncertainty',
                    'required': False,
                    'description': 'Use hazard uncertainty',
                    'type': bool
                },
                {
                    'id': 'num_cpu',
                    'required': False,
                    'description': 'If using parallel execution, the number of cpus to request',
                    'type': int
                },
            ],
            'input_datasets': [
                {
                    'id': 'bridges',
                    'required': True,
                    'description': 'Bridge Inventory',
                    'type': ['ergo:bridges'],
                },
                {
                    'id': 'dfr3_mapping_set',
                    'required': True,
                    'description': 'DFR3 Mapping Set Object',
                    'type': ['incore:dfr3MappingSet'],
                }
            ],
            'output_datasets': [
                {
                    'id': 'result',
                    'parent_type': 'bridges',
                    'description': 'CSV file of bridge structural damage',
                    'type': 'ergo:bridgeDamage'
                }
            ]
        }
class EpfDamage(BaseAnalysis):
    """Computes electric power facility structural damage for an earthquake, tsunami, tornado, and hurricane hazards.

    Args:
        incore_client (IncoreClient): Service authentication.

    """

    # Fragility key defaults; the liquefaction key is only used when the
    # caller does not supply liquefaction_fragility_key.
    DEFAULT_LIQ_FRAGILITY_KEY = "pgd"
    DEFAULT_FRAGILITY_KEY = "pga"

    def __init__(self, incore_client):
        # Clients used to fetch hazard values and fragility (DFR3) mappings.
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(EpfDamage, self).__init__(incore_client)

    def run(self):
        """Executes electric power facility damage analysis."""
        epf_set = self.get_input_dataset("epfs").get_inventory_reader()

        # Get Fragility key
        fragility_key = self.get_parameter("fragility_key")
        if fragility_key is None:
            fragility_key = self.DEFAULT_FRAGILITY_KEY
            self.set_parameter("fragility_key", fragility_key)

        # Get hazard input
        hazard_dataset_id = self.get_parameter("hazard_id")

        # Hazard type, note this is here for future use if additional hazards are supported by this analysis
        hazard_type = self.get_parameter("hazard_type")

        # Hazard Uncertainty
        use_hazard_uncertainty = False
        if self.get_parameter("use_hazard_uncertainty") is not None:
            use_hazard_uncertainty = self.get_parameter(
                "use_hazard_uncertainty")

        # Liquefaction
        use_liquefaction = False
        if self.get_parameter("use_liquefaction") is not None:
            use_liquefaction = self.get_parameter("use_liquefaction")

        liq_geology_dataset_id = self.get_parameter(
            "liquefaction_geology_dataset_id")

        # Default to a single CPU unless a positive num_cpu was supplied.
        user_defined_cpu = 1
        if not self.get_parameter("num_cpu") is None and self.get_parameter(
                "num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(
            self, len(epf_set), user_defined_cpu)

        # Partition the inventory into near-equal chunks, one batch per worker.
        avg_bulk_input_size = int(len(epf_set) / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(epf_set)
        while count < len(inventory_list):
            inventory_args.append(inventory_list[count:count + avg_bulk_input_size])
            count += avg_bulk_input_size

        results = self.epf_damage_concurrent_future(
            self.epf_damage_analysis_bulk_input, num_workers,
            inventory_args,
            repeat(hazard_type), repeat(hazard_dataset_id),
            repeat(use_hazard_uncertainty),
            repeat(use_liquefaction), repeat(liq_geology_dataset_id))

        self.set_result_csv_data("result", results,
                                 name=self.get_parameter("result_name"))

        return True

    def epf_damage_concurrent_future(self, function_name, num_workers, *args):
        """Utilizes concurrent.future module.

        Args:
            function_name (function): The function to be parallelized.
            num_workers (int): Maximum number workers in parallelization.
            *args: All the arguments in order to pass into parameter function_name.

        Returns:
            list: A list of ordered dictionaries with epf damage values and other data/metadata.

        """
        output = []
        with concurrent.futures.ProcessPoolExecutor(
                max_workers=num_workers) as executor:
            for ret in executor.map(function_name, *args):
                output.extend(ret)

        return output

    def epf_damage_analysis_bulk_input(self, epfs, hazard_type,
                                       hazard_dataset_id,
                                       use_hazard_uncertainty,
                                       use_liquefaction,
                                       liq_geology_dataset_id):
        """Run analysis for multiple epfs.

        Args:
            epfs (list): Multiple epfs from input inventory set.
            hazard_type (str): A type of hazard exposure (earthquake, tsunami, tornado, or hurricane).
            hazard_dataset_id (str): An id of the hazard exposure.
            use_hazard_uncertainty (bool): Hazard uncertainty. True for using uncertainty when computing damage,
                False otherwise.
            use_liquefaction (bool): Liquefaction. True for using liquefaction information to modify the damage,
                False otherwise.
            liq_geology_dataset_id (str): geology_dataset_id (str): A dataset id for geology dataset for liquefaction.

        Returns:
            list: A list of ordered dictionaries with epf damage values and other data/metadata.

        """
        # NOTE(review): `result` is never used in this method.
        result = []

        fragility_key = self.get_parameter("fragility_key")

        # NOTE(review): the first assignment is dead code; it is immediately
        # overwritten by the match_inventory() result.
        fragility_set = dict()
        fragility_set = self.fragilitysvc.match_inventory(
            self.get_input_dataset("dfr3_mapping_set"), epfs, fragility_key)

        epf_results = []

        # Converting list of epfs into a dictionary for ease of reference
        list_epfs = epfs
        epfs = dict()
        for epf in list_epfs:
            epfs[epf["id"]] = epf
        del list_epfs  # Clear as it's not needed anymore

        processed_epf = []
        grouped_epfs = AnalysisUtil.group_by_demand_type(epfs, fragility_set)
        for demand, grouped_epf_items in grouped_epfs.items():
            input_demand_type = demand[0]
            input_demand_units = demand[1]

            # For every group of unique demand and demand unit, call the end-point once
            epf_chunks = list(AnalysisUtil.chunks(grouped_epf_items, 50))
            for epf_chunk in epf_chunks:
                points = []
                for epf_id in epf_chunk:
                    location = GeoUtil.get_location(epfs[epf_id])
                    points.append(str(location.y) + "," + str(location.x))

                if hazard_type == 'earthquake':
                    hazard_vals = self.hazardsvc.get_earthquake_hazard_values(
                        hazard_dataset_id, input_demand_type,
                        input_demand_units, points)
                elif hazard_type == 'tornado':
                    # The tornado endpoint takes no demand type argument.
                    hazard_vals = self.hazardsvc.get_tornado_hazard_values(
                        hazard_dataset_id, input_demand_units, points)
                elif hazard_type == 'hurricane':
                    # TODO: implement hurricane
                    raise ValueError(
                        'Hurricane hazard has not yet been implemented!')
                elif hazard_type == 'tsunami':
                    hazard_vals = self.hazardsvc.get_tsunami_hazard_values(
                        hazard_dataset_id, input_demand_type,
                        input_demand_units, points)
                else:
                    raise ValueError("Missing hazard type.")

                # Parse the batch hazard value results and map them back to the building and fragility.
                # This is a potential pitfall as we are relying on the order of the returned results
                i = 0
                for epf_id in epf_chunk:
                    epf_result = collections.OrderedDict()
                    epf = epfs[epf_id]
                    hazard_val = hazard_vals[i]['hazardValue']

                    # Sometimes the geotiffs give large negative values for out of bounds instead of 0
                    if hazard_val <= 0.0:
                        hazard_val = 0.0

                    std_dev = 0.0
                    if use_hazard_uncertainty:
                        raise ValueError("Uncertainty Not Implemented!")

                    selected_fragility_set = fragility_set[epf_id]
                    limit_states = selected_fragility_set.calculate_limit_state(
                        hazard_val, std_dev=std_dev)
                    dmg_interval = AnalysisUtil.calculate_damage_interval(
                        limit_states)

                    epf_result['guid'] = epf['properties']['guid']
                    epf_result.update(limit_states)
                    epf_result.update(dmg_interval)
                    epf_result['demandtype'] = input_demand_type
                    epf_result['demandunits'] = input_demand_units
                    epf_result['hazardtype'] = hazard_type
                    epf_result['hazardval'] = hazard_val

                    epf_results.append(epf_result)
                    processed_epf.append(epf_id)
                    i = i + 1

        # when there is liquefaction, limit state need to be modified
        if hazard_type == 'earthquake' and use_liquefaction and liq_geology_dataset_id is not None:
            liq_fragility_key = self.get_parameter(
                "liquefaction_fragility_key")
            if liq_fragility_key is None:
                liq_fragility_key = self.DEFAULT_LIQ_FRAGILITY_KEY
            liq_fragility_set = self.fragilitysvc.match_inventory(
                self.get_input_dataset("dfr3_mapping_set"), epfs,
                liq_fragility_key)
            grouped_liq_epfs = AnalysisUtil.group_by_demand_type(
                epfs, liq_fragility_set)

            for liq_demand, grouped_liq_epf_items in grouped_liq_epfs.items():
                liq_input_demand_type = liq_demand[0]
                liq_input_demand_units = liq_demand[1]

                # For every group of unique demand and demand unit, call the end-point once
                liq_epf_chunks = list(
                    AnalysisUtil.chunks(grouped_liq_epf_items, 50))
                for liq_epf_chunk in liq_epf_chunks:
                    points = []
                    for liq_epf_id in liq_epf_chunk:
                        location = GeoUtil.get_location(epfs[liq_epf_id])
                        points.append(str(location.y) + "," + str(location.x))
                    liquefaction_vals = self.hazardsvc.get_liquefaction_values(
                        hazard_dataset_id, liq_geology_dataset_id,
                        liq_input_demand_units, points)

                    # Parse the batch hazard value results and map them back to the building and fragility.
                    # This is a potential pitfall as we are relying on the order of the returned results
                    i = 0
                    for liq_epf_id in liq_epf_chunk:
                        liq_hazard_val = liquefaction_vals[i][
                            liq_input_demand_type]

                        std_dev = 0.0
                        if use_hazard_uncertainty:
                            raise ValueError("Uncertainty Not Implemented!")

                        liquefaction_prob = liquefaction_vals[i][
                            'liqProbability']

                        selected_liq_fragility = liq_fragility_set[liq_epf_id]
                        pgd_limit_states = selected_liq_fragility.calculate_limit_state(
                            liq_hazard_val, std_dev=std_dev)

                        # match id and add liqhaztype, liqhazval, liqprobability field as well as rewrite limit
                        # states and dmg_interval
                        for epf_result in epf_results:
                            if epf_result['guid'] == epfs[liq_epf_id]['guid']:
                                limit_states = {
                                    "ls-slight": epf_result['ls-slight'],
                                    "ls-moderat": epf_result['ls-moderat'],
                                    "ls-extensi": epf_result['ls-extensi'],
                                    "ls-complet": epf_result['ls-complet']
                                }
                                liq_limit_states = AnalysisUtil.adjust_limit_states_for_pgd(
                                    limit_states, pgd_limit_states)
                                liq_dmg_interval = AnalysisUtil.calculate_damage_interval(
                                    liq_limit_states)
                                epf_result.update(liq_limit_states)
                                epf_result.update(liq_dmg_interval)
                                epf_result[
                                    'liqhaztype'] = liq_input_demand_type
                                epf_result['liqhazval'] = liq_hazard_val
                                epf_result[
                                    'liqprobability'] = liquefaction_prob
                        i = i + 1

        # Facilities with no matched fragility get zeroed placeholder rows so
        # the output covers the full inventory.
        unmapped_limit_states = {
            "ls-slight": 0.0,
            "ls-moderat": 0.0,
            "ls-extensi": 0.0,
            "ls-complet": 0.0
        }
        unmapped_dmg_intervals = AnalysisUtil.calculate_damage_interval(
            unmapped_limit_states)
        for epf_id, epf in epfs.items():
            if epf_id not in processed_epf:
                unmapped_epf_result = collections.OrderedDict()
                unmapped_epf_result['guid'] = epf['properties']['guid']
                unmapped_epf_result.update(unmapped_limit_states)
                unmapped_epf_result.update(unmapped_dmg_intervals)
                unmapped_epf_result["demandtype"] = "None"
                unmapped_epf_result['demandunits'] = "None"
                unmapped_epf_result["hazardtype"] = "None"
                unmapped_epf_result['hazardval'] = 0.0
                unmapped_epf_result['liqhaztype'] = "NA"
                unmapped_epf_result['liqhazval'] = "NA"
                unmapped_epf_result['liqprobability'] = "NA"
                epf_results.append(unmapped_epf_result)

        return epf_results

    def get_spec(self):
        """Get specifications of the epf damage analysis.

        Returns:
            obj: A JSON object of specifications of the epf damage analysis.

        """
        return {
            'name': 'epf-damage',
            'description': 'Electric Power Facility damage analysis.',
            'input_parameters': [
                {
                    'id': 'result_name',
                    'required': True,
                    'description': 'A name of the resulting dataset',
                    'type': str
                },
                {
                    'id': 'hazard_type',
                    'required': True,
                    'description': 'Hazard type (e.g. earthquake).',
                    'type': str
                },
                {
                    'id': 'hazard_id',
                    'required': True,
                    'description': 'Hazard ID which defines the particular hazard (e.g. New madrid earthquake '
                                   'using Atkinson Boore 1995).',
                    'type': str
                },
                {
                    'id': 'fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in mapping dataset ()',
                    'type': str
                },
                {
                    'id': 'use_liquefaction',
                    'required': False,
                    'description': 'Use a ground liquifacition to modify damage interval.',
                    'type': bool
                },
                {
                    'id': 'liquefaction_geology_dataset_id',
                    'required': False,
                    'description': 'Liquefaction geology/susceptibility dataset id. '
                                   'If not provided, liquefaction will be ignored',
                    'type': str
                },
                {
                    'id': 'use_hazard_uncertainty',
                    'required': False,
                    'description': 'Use hazard uncertainty',
                    'type': bool
                },
                {
                    'id': 'num_cpu',
                    'required': False,
                    'description': 'If using parallel execution, the number of cpus to request.',
                    'type': int
                },
            ],
            'input_datasets': [
                {
                    'id': 'epfs',
                    'required': True,
                    'description': 'Electric Power Facility Inventory',
                    'type': ['incore:epf', 'ergo:epf'],
                },
                {
                    'id': 'dfr3_mapping_set',
                    'required': True,
                    'description': 'DFR3 Mapping Set Object',
                    'type': ['incore:dfr3MappingSet'],
                }
            ],
            'output_datasets': [
                {
                    'id': 'result',
                    'parent_type': 'epfs',
                    'type': 'incore:epfDamage'
                }
            ]
        }
class RoadFailure(BaseAnalysis):
    """Computes road damage by hurricane inundation.

    Args:
        incore_client: Service client with authentication info

    """

    # Default fragility key used when none is supplied as a parameter.
    DEFAULT_HURRICANE_FRAGILITY_KEY = "inundationDuration"

    def __init__(self, incore_client):
        # Clients used to fetch hazard values and fragility (DFR3) mappings.
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(RoadFailure, self).__init__(incore_client)

    def run(self):
        """Execute road damage analysis """
        # road dataset
        road_dataset = self.get_input_dataset("roads").get_inventory_reader()

        # distance to shore table data frame
        distance_df = self.get_input_dataset(
            "distance_table").get_dataframe_from_csv()

        # TODO this has to be changed when semantic service launched based on it
        # set distance field name in the table
        distance_field_name = "distance"

        # Get hazard type
        hazard_type = self.get_parameter("hazard_type")

        # Get hazard input
        hazard_dataset_id = self.get_parameter("hazard_id")

        # Default to a single CPU unless a positive num_cpu was supplied.
        user_defined_cpu = 1
        if not self.get_parameter("num_cpu") is None and self.get_parameter(
                "num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        dataset_size = len(road_dataset)
        num_workers = AnalysisUtil.determine_parallelism_locally(
            self, dataset_size, user_defined_cpu)

        # Partition the inventory into near-equal chunks, one batch per worker.
        avg_bulk_input_size = int(dataset_size / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(road_dataset)
        while count < len(inventory_list):
            inventory_args.append(inventory_list[count:count + avg_bulk_input_size])
            count += avg_bulk_input_size

        results = self.road_damage_concurrent_future(
            self.road_damage_analysis_bulk_input, num_workers,
            inventory_args, repeat(distance_df),
            repeat(distance_field_name), repeat(hazard_type),
            repeat(hazard_dataset_id))

        self.set_result_csv_data("result", results,
                                 name=self.get_parameter("result_name"))

        return True

    def road_damage_concurrent_future(self, function_name, num_workers,
                                      *args):
        """Utilizes concurrent.future module.

        Args:
            function_name (function): The function to be parallelized.
            num_workers (int): Maximum number workers in parallelization.
            *args: All the arguments in order to pass into parameter function_name.

        Returns:
            list: A list of ordered dictionaries with road damage values and other data/metadata.

        """
        output = []
        with concurrent.futures.ProcessPoolExecutor(
                max_workers=num_workers) as executor:
            for ret in executor.map(function_name, *args):
                output.extend(ret)

        return output

    def road_damage_analysis_bulk_input(self, roads, distance_df,
                                        distance_field_name, hazard_type,
                                        hazard_dataset_id):
        """Run road damage analysis by hurricane inundation.

        Args:
            roads (list): multiple roads from road dataset.
            distance_df (object): data frame for distance to shore table
            distance_field_name (str): field name representing the distance to shore
            hazard_type (str): Hazard type
            hazard_dataset_id (str): An id of the hazard exposure.

        Returns:
            list: A list of ordered dictionaries with failure probability of road and other data/metadata.

        """
        result = []

        # Get Fragility key
        fragility_key = self.get_parameter("fragility_key")
        if fragility_key is None:
            fragility_key = self.DEFAULT_HURRICANE_FRAGILITY_KEY
            self.set_parameter("fragility_key", fragility_key)

        # get fragility set
        fragility_sets = self.fragilitysvc.match_inventory(
            self.get_input_dataset("dfr3_mapping_set"), roads, fragility_key)

        # Roads without a matched fragility are skipped (no output row).
        for road in roads:
            if road["id"] in fragility_sets.keys():
                # find out distance value by matching the road guid against the table
                distance = float(
                    distance_df.loc[distance_df['guid'] == road['properties']
                                    ["guid"]][distance_field_name])

                result.append(
                    self.road_damage_analysis(road, distance, hazard_type,
                                              fragility_sets[road["id"]],
                                              hazard_dataset_id))

        return result

    def road_damage_analysis(self, road, distance, hazard_type, fragility_set,
                             hazard_dataset_id):
        """Run road damage for a single road segment.

        Args:
            road (obj): a single road feature.
            distance (float): distance to shore from the road
            hazard_type (str): hazard type.
            fragility_set (obj): A JSON description of fragility assigned to the road.
            hazard_dataset_id (str): A hazard dataset to use.

        Returns:
            OrderedDict: A dictionary with probability of failure values and other data/metadata.

        """
        road_results = collections.OrderedDict()

        if fragility_set is not None:
            demand_type = fragility_set.demand_type.lower()
            demand_units = fragility_set.demand_units
            location = GeoUtil.get_location(road)
            point = str(location.y) + "," + str(location.x)

            if hazard_type == 'hurricane':
                hazard_resp = self.hazardsvc.get_hurricane_values(
                    hazard_dataset_id, "inundationDuration", demand_units,
                    [point])
            else:
                raise ValueError("Hazard type are not currently supported.")

            # Clamp negative out-of-bounds raster values to zero.
            dur_q = hazard_resp[0]['hazardValue']

            if dur_q <= 0.0:
                dur_q = 0.0

            # Custom fragility expression takes inundation duration (x) and
            # distance to shore (y).
            fragility_vars = {'x': dur_q, 'y': distance}
            pf = fragility_set.calculate_custom_limit_state(
                fragility_vars)['failure']

            road_results['guid'] = road['properties']['guid']
            road_results['failprob'] = pf
            road_results['demandtype'] = demand_type
            road_results['demandunits'] = demand_units
            road_results['hazardtype'] = hazard_type
            road_results['hazardval'] = dur_q

        return road_results

    def get_spec(self):
        """Get specifications of the road damage analysis.

        Returns:
            obj: A JSON object of specifications of the road damage analysis.

        """
        return {
            'name': 'road-damage',
            'description': 'road damage analysis',
            'input_parameters': [
                {
                    'id': 'result_name',
                    'required': True,
                    'description': 'result dataset name',
                    'type': str
                },
                {
                    'id': 'hazard_type',
                    'required': True,
                    'description': 'Hazard Type (e.g. earthquake)',
                    'type': str
                },
                {
                    'id': 'hazard_id',
                    'required': True,
                    'description': 'Hazard ID',
                    'type': str
                },
                {
                    'id': 'fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in mapping dataset',
                    'type': str
                },
                {
                    'id': 'num_cpu',
                    'required': False,
                    'description': 'If using parallel execution, the number of cpus to request',
                    'type': int
                }
            ],
            'input_datasets': [
                {
                    'id': 'roads',
                    'required': True,
                    'description': 'Road Inventory',
                    'type': ['ergo:roadLinkTopo', 'ergo:roads'],
                },
                {
                    'id': 'distance_table',
                    'required': True,
                    'description': 'Distance to Shore Table',
                    'type': ['incore:distanceToShore'],
                },
                {
                    'id': 'dfr3_mapping_set',
                    'required': True,
                    'description': 'DFR3 Mapping Set Object',
                    'type': ['incore:dfr3MappingSet'],
                }
            ],
            'output_datasets': [
                {
                    'id': 'result',
                    'parent_type': 'roads',
                    'type': 'incore:roadFailure'
                }
            ]
        }
class BuildingDamage(BaseAnalysis):
    """Building Damage Analysis calculates the probability of building damage based on
    different hazard type such as earthquake, tsunami, and tornado.

    Args:
        incore_client (IncoreClient): Service authentication.

    """

    def __init__(self, incore_client):
        # Clients used to fetch hazard values and fragility (DFR3) mappings.
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(BuildingDamage, self).__init__(incore_client)

    def run(self):
        """Executes building damage analysis."""
        # Building dataset
        bldg_set = self.get_input_dataset("buildings").get_inventory_reader()

        # Get hazard input
        hazard_dataset_id = self.get_parameter("hazard_id")

        # Hazard type of the exposure
        hazard_type = self.get_parameter("hazard_type")

        # Get Fragility key; the default depends on the hazard type.
        fragility_key = self.get_parameter("fragility_key")
        if fragility_key is None:
            fragility_key = BuildingUtil.DEFAULT_TSUNAMI_MMAX_FRAGILITY_KEY if hazard_type == 'tsunami' else \
                BuildingUtil.DEFAULT_FRAGILITY_KEY
            self.set_parameter("fragility_key", fragility_key)

        # Default to a single CPU unless a positive num_cpu was supplied.
        user_defined_cpu = 1
        if self.get_parameter("num_cpu") is not None and self.get_parameter(
                "num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(
            self, len(bldg_set), user_defined_cpu)

        # Partition the inventory into near-equal chunks, one batch per worker.
        avg_bulk_input_size = int(len(bldg_set) / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(bldg_set)
        while count < len(inventory_list):
            inventory_args.append(
                inventory_list[count:count + avg_bulk_input_size])
            count += avg_bulk_input_size

        results = self.building_damage_concurrent_future(
            self.building_damage_analysis_bulk_input, num_workers,
            inventory_args, repeat(hazard_type), repeat(hazard_dataset_id))

        self.set_result_csv_data("result", results,
                                 name=self.get_parameter("result_name"))
        return True

    def building_damage_concurrent_future(self, function_name, parallelism,
                                          *args):
        """Utilizes concurrent.future module.

        Args:
            function_name (function): The function to be parallelized.
            parallelism (int): Number of workers in parallelization.
            *args: All the arguments in order to pass into parameter function_name.

        Returns:
            list: A list of ordered dictionaries with building damage values and other data/metadata.

        """
        output = []
        with concurrent.futures.ProcessPoolExecutor(
                max_workers=parallelism) as executor:
            for ret in executor.map(function_name, *args):
                output.extend(ret)

        return output

    def building_damage_analysis_bulk_input(self, buildings, hazard_type,
                                            hazard_dataset_id):
        """Run analysis for multiple buildings.

        Args:
            buildings (list): Multiple buildings from input inventory set.
            hazard_type (str): Hazard type, either earthquake, tornado, or tsunami.
            hazard_dataset_id (str): An id of the hazard exposure.

        Returns:
            list: A list of ordered dictionaries with building damage values and other data/metadata.

        Raises:
            ValueError: If the hazard type is hurricane (not yet implemented)
                or otherwise unsupported.

        """
        fragility_key = self.get_parameter("fragility_key")

        fragility_sets = self.fragilitysvc.match_inventory(
            self.get_input_dataset("dfr3_mapping_set"), buildings,
            fragility_key)

        bldg_results = []

        # Converting list of buildings into a dictionary for ease of reference
        list_buildings = buildings
        buildings = dict()
        for b in list_buildings:
            buildings[b["id"]] = b
        list_buildings = None  # Clear as it's not needed anymore

        grouped_buildings = AnalysisUtil.group_by_demand_type(
            buildings, fragility_sets, hazard_type, is_building=True)

        for demand, grouped_bldgs in grouped_buildings.items():

            input_demand_type = demand[0]
            input_demand_units = demand[1]

            # For every group of unique demand and demand unit, call the end-point once
            bldg_chunks = list(AnalysisUtil.chunks(
                grouped_bldgs, 50))  # TODO: Move to globals?
            for bldgs in bldg_chunks:
                points = []
                for bldg_id in bldgs:
                    location = GeoUtil.get_location(buildings[bldg_id])
                    points.append(str(location.y) + "," + str(location.x))

                if hazard_type == 'earthquake':
                    hazard_vals = self.hazardsvc.get_earthquake_hazard_values(
                        hazard_dataset_id, input_demand_type,
                        input_demand_units, points)
                elif hazard_type == 'tornado':
                    # The tornado endpoint takes no demand type argument.
                    hazard_vals = self.hazardsvc.get_tornado_hazard_values(
                        hazard_dataset_id, input_demand_units, points)
                elif hazard_type == 'tsunami':
                    hazard_vals = self.hazardsvc.get_tsunami_hazard_values(
                        hazard_dataset_id, input_demand_type,
                        input_demand_units, points)
                elif hazard_type == 'hurricane':
                    # TODO implement hurricane
                    # Fail fast here: the previous code only printed a message
                    # and then crashed with a NameError on undefined
                    # hazard_vals below.
                    raise ValueError(
                        'Hurricane hazard has not yet been implemented!')
                else:
                    raise ValueError("Missing hazard type.")

                # Parse the batch hazard value results and map them back to the building and fragility.
                # This is a potential pitfall as we are relying on the order of the returned results
                i = 0
                for bldg_id in bldgs:
                    bldg_result = collections.OrderedDict()
                    building = buildings[bldg_id]
                    hazard_val = hazard_vals[i]['hazardValue']
                    output_demand_type = hazard_vals[i]['demand']
                    if hazard_type == 'earthquake':
                        # Prefix the period on period-dependent demands
                        # (e.g. spectral acceleration).
                        period = float(hazard_vals[i]['period'])
                        if period > 0:
                            output_demand_type = str(
                                hazard_vals[i]
                                ['period']) + " " + output_demand_type

                    num_stories = building['properties']['no_stories']
                    selected_fragility_set = fragility_sets[bldg_id]
                    building_period = selected_fragility_set.fragility_curves[
                        0].get_building_period(num_stories)
                    dmg_probability = selected_fragility_set.calculate_limit_state(
                        hazard_val, building_period)
                    dmg_interval = AnalysisUtil.calculate_damage_interval(
                        dmg_probability)

                    bldg_result['guid'] = building['properties']['guid']
                    bldg_result.update(dmg_probability)
                    bldg_result.update(dmg_interval)
                    bldg_result['demandtype'] = output_demand_type
                    bldg_result['demandunits'] = input_demand_units
                    bldg_result['hazardval'] = hazard_val

                    bldg_results.append(bldg_result)
                    # Remove processed buildings so that only unmapped ones
                    # remain in the dict for the placeholder pass below.
                    del buildings[bldg_id]
                    i = i + 1

        # Buildings with no matched fragility get placeholder rows so the
        # output covers the full inventory.
        unmapped_hazard_val = 0.0
        unmapped_output_demand_type = "None"
        unmapped_output_demand_unit = "None"
        for unmapped_bldg_id, unmapped_bldg in buildings.items():
            unmapped_bldg_result = collections.OrderedDict()
            unmapped_bldg_result['guid'] = unmapped_bldg['properties']['guid']
            unmapped_bldg_result['demandtype'] = unmapped_output_demand_type
            unmapped_bldg_result['demandunits'] = unmapped_output_demand_unit
            unmapped_bldg_result['hazardval'] = unmapped_hazard_val
            bldg_results.append(unmapped_bldg_result)

        return bldg_results

    def get_spec(self):
        """Get specifications of the building damage analysis.

        Returns:
            obj: A JSON object of specifications of the building damage analysis.

        """
        return {
            'name': 'building-damage',
            'description': 'building damage analysis',
            'input_parameters': [
                {
                    'id': 'result_name',
                    'required': True,
                    'description': 'result dataset name',
                    'type': str
                },
                {
                    'id': 'hazard_type',
                    'required': True,
                    'description': 'Hazard Type (e.g. earthquake)',
                    'type': str
                },
                {
                    'id': 'hazard_id',
                    'required': True,
                    'description': 'Hazard ID',
                    'type': str
                },
                {
                    'id': 'fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in mapping dataset',
                    'type': str
                },
                {
                    'id': 'use_liquefaction',
                    'required': False,
                    'description': 'Use liquefaction',
                    'type': bool
                },
                {
                    'id': 'use_hazard_uncertainty',
                    'required': False,
                    'description': 'Use hazard uncertainty',
                    'type': bool
                },
                {
                    'id': 'num_cpu',
                    'required': False,
                    'description': 'If using parallel execution, the number of cpus to request',
                    'type': int
                },
            ],
            'input_datasets': [
                {
                    'id': 'buildings',
                    'required': True,
                    'description': 'Building Inventory',
                    'type': [
                        'ergo:buildingInventoryVer4',
                        'ergo:buildingInventoryVer5',
                        'ergo:buildingInventoryVer6'
                    ],
                },
                {
                    'id': 'dfr3_mapping_set',
                    'required': True,
                    'description': 'DFR3 Mapping Set Object',
                    'type': ['incore:dfr3MappingSet'],
                }
            ],
            'output_datasets': [
                {
                    'id': 'result',
                    'parent_type': 'buildings',
                    'description': 'CSV file of building structural damage',
                    'type': 'ergo:buildingDamageVer4'
                }
            ]
        }
class BuildingDamage(BaseAnalysis):
    """Building Damage Analysis calculates the probability of building damage based on
    different hazard types such as earthquake, tsunami, tornado, hurricane and flood.

    Args:
        incore_client (IncoreClient): Service authentication.

    """

    def __init__(self, incore_client):
        # Service clients used to fetch hazard values and fragility mappings.
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(BuildingDamage, self).__init__(incore_client)

    def run(self):
        """Executes building damage analysis.

        Splits the building inventory into roughly equal chunks and fans the
        per-chunk damage computation out over a process pool, then stores the
        combined CSV and JSON results.

        Returns:
            bool: True on completion.

        """
        # Building dataset
        bldg_set = self.get_input_dataset("buildings").get_inventory_reader()

        # Optional building retrofit strategy (guid + retrofit level rows).
        retrofit_strategy_dataset = self.get_input_dataset("retrofit_strategy")
        if retrofit_strategy_dataset is not None:
            retrofit_strategy = list(retrofit_strategy_dataset.get_csv_reader())
        else:
            retrofit_strategy = None

        # Get hazard input
        hazard_dataset_id = self.get_parameter("hazard_id")

        # Hazard type of the exposure
        hazard_type = self.get_parameter("hazard_type")

        # Get Fragility key; tsunami uses a different default key than the other hazards.
        fragility_key = self.get_parameter("fragility_key")
        if fragility_key is None:
            fragility_key = BuildingUtil.DEFAULT_TSUNAMI_MMAX_FRAGILITY_KEY if hazard_type == 'tsunami' else \
                BuildingUtil.DEFAULT_FRAGILITY_KEY
            self.set_parameter("fragility_key", fragility_key)

        user_defined_cpu = 1

        # FIX: was "not ... is None" — PEP 8 prefers "is not None"; behavior unchanged.
        if self.get_parameter("num_cpu") is not None and self.get_parameter("num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(self, len(bldg_set), user_defined_cpu)

        # Chunk the inventory so each worker gets an approximately equal share.
        avg_bulk_input_size = int(len(bldg_set) / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(bldg_set)
        while count < len(inventory_list):
            inventory_args.append(inventory_list[count:count + avg_bulk_input_size])
            count += avg_bulk_input_size

        (ds_results, damage_results) = self.building_damage_concurrent_future(self.building_damage_analysis_bulk_input,
                                                                              num_workers,
                                                                              inventory_args,
                                                                              repeat(retrofit_strategy),
                                                                              repeat(hazard_type),
                                                                              repeat(hazard_dataset_id))

        self.set_result_csv_data("ds_result", ds_results, name=self.get_parameter("result_name"))
        self.set_result_json_data("damage_result", damage_results,
                                  name=self.get_parameter("result_name") + "_additional_info")
        return True

    def building_damage_concurrent_future(self, function_name, parallelism, *args):
        """Utilizes concurrent.future module.

        Args:
            function_name (function): The function to be parallelized.
            parallelism (int): Number of workers in parallelization.
            *args: All the arguments in order to pass into parameter function_name.

        Returns:
            list: A list of ordered dictionaries with building damage values and other data/metadata.

        """
        output_ds = []
        output_dmg = []
        with concurrent.futures.ProcessPoolExecutor(max_workers=parallelism) as executor:
            # executor.map preserves input order, so chunk results line up with inputs.
            for ret1, ret2 in executor.map(function_name, *args):
                output_ds.extend(ret1)
                output_dmg.extend(ret2)

        return output_ds, output_dmg

    def building_damage_analysis_bulk_input(self, buildings, retrofit_strategy, hazard_type, hazard_dataset_id):
        """Run analysis for multiple buildings.

        Args:
            buildings (list): Multiple buildings from input inventory set.
            retrofit_strategy (list): building guid and its retrofit level 0, 1, 2, etc. This is Optional
            hazard_type (str): Hazard type, either earthquake, tornado, tsunami, hurricane or flood.
            hazard_dataset_id (str): An id of the hazard exposure.

        Returns:
            list: A list of ordered dictionaries with building damage values and other data/metadata.

        Raises:
            ValueError: If the hazard type is unsupported or a fragility is in deprecated format.

        """
        fragility_key = self.get_parameter("fragility_key")
        fragility_sets = self.fragilitysvc.match_inventory(self.get_input_dataset("dfr3_mapping_set"), buildings,
                                                           fragility_key, retrofit_strategy)
        values_payload = []
        unmapped_buildings = []
        mapped_buildings = []
        for b in buildings:
            bldg_id = b["id"]
            if bldg_id in fragility_sets:
                location = GeoUtil.get_location(b)
                loc = str(location.y) + "," + str(location.x)
                demands = AnalysisUtil.get_hazard_demand_types(b, fragility_sets[bldg_id], hazard_type)
                units = fragility_sets[bldg_id].demand_units
                value = {
                    "demands": demands,
                    "units": units,
                    "loc": loc
                }
                values_payload.append(value)
                mapped_buildings.append(b)
            else:
                unmapped_buildings.append(b)

        # not needed anymore as they are already split into mapped and unmapped
        del buildings

        # One batched hazard-service request per chunk instead of one per building.
        if hazard_type == 'earthquake':
            hazard_vals = self.hazardsvc.post_earthquake_hazard_values(hazard_dataset_id, values_payload)
        elif hazard_type == 'tornado':
            # Tornado sampling is stochastic; an optional seed makes results reproducible.
            hazard_vals = self.hazardsvc.post_tornado_hazard_values(hazard_dataset_id, values_payload,
                                                                    self.get_parameter('seed'))
        elif hazard_type == 'tsunami':
            hazard_vals = self.hazardsvc.post_tsunami_hazard_values(hazard_dataset_id, values_payload)
        elif hazard_type == 'hurricane':
            hazard_vals = self.hazardsvc.post_hurricane_hazard_values(hazard_dataset_id, values_payload)
        elif hazard_type == 'flood':
            hazard_vals = self.hazardsvc.post_flood_hazard_values(hazard_dataset_id, values_payload)
        else:
            raise ValueError("The provided hazard type is not supported yet by this analysis")

        ds_results = []
        damage_results = []

        # NOTE: hazard_vals[i] is assumed to correspond to mapped_buildings[i]
        # because the payload was built in the same order.
        i = 0
        for b in mapped_buildings:
            ds_result = dict()
            damage_result = dict()
            dmg_probability = dict()
            dmg_interval = dict()
            b_id = b["id"]
            selected_fragility_set = fragility_sets[b_id]

            # TODO: Once all fragilities are migrated to new format, we can remove this condition
            if isinstance(selected_fragility_set.fragility_curves[0], DFR3Curve):
                # Supports multiple demand types in same fragility
                b_haz_vals = AnalysisUtil.update_precision_of_lists(hazard_vals[i]["hazardValues"])
                b_demands = hazard_vals[i]["demands"]
                b_units = hazard_vals[i]["units"]

                hval_dict = dict()
                j = 0
                # To calculate damage, use demand type name from fragility that will be used in the expression,
                # instead of using what the hazard service returns. There could be a difference "SA" in DFR3 vs
                # "1.07 SA" from hazard
                for d in selected_fragility_set.demand_types:
                    hval_dict[d] = b_haz_vals[j]
                    j += 1

                # If any hazard value came back as an error, leave dmg_probability/dmg_interval empty.
                if not AnalysisUtil.do_hazard_values_have_errors(hazard_vals[i]["hazardValues"]):
                    building_args = selected_fragility_set.construct_expression_args_from_inventory(b)
                    building_period = selected_fragility_set.fragility_curves[0].get_building_period(
                        selected_fragility_set.curve_parameters, **building_args)
                    dmg_probability = selected_fragility_set.calculate_limit_state(
                        hval_dict, **building_args, period=building_period)
                    dmg_interval = selected_fragility_set.calculate_damage_interval(
                        dmg_probability, hazard_type=hazard_type, inventory_type="building")
            else:
                raise ValueError("One of the fragilities is in deprecated format. This should not happen. If you are "
                                 "seeing this please report the issue.")

            ds_result['guid'] = b['properties']['guid']
            damage_result['guid'] = b['properties']['guid']
            ds_result.update(dmg_probability)
            ds_result.update(dmg_interval)
            ds_result['haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(b_haz_vals, hazard_type)
            damage_result['fragility_id'] = selected_fragility_set.id
            damage_result['demandtype'] = b_demands
            damage_result['demandunits'] = b_units
            damage_result['hazardval'] = b_haz_vals

            ds_results.append(ds_result)
            damage_results.append(damage_result)
            i += 1

        # Buildings with no fragility mapping still appear in the output, with null metadata.
        for b in unmapped_buildings:
            ds_result = dict()
            damage_result = dict()
            ds_result['guid'] = b['properties']['guid']
            damage_result['guid'] = b['properties']['guid']
            damage_result['fragility_id'] = None
            damage_result['demandtype'] = None
            damage_result['demandunits'] = None
            damage_result['hazardval'] = None
            ds_results.append(ds_result)
            damage_results.append(damage_result)

        return ds_results, damage_results

    def get_spec(self):
        """Get specifications of the building damage analysis.

        Returns:
            obj: A JSON object of specifications of the building damage analysis.

        """
        return {
            'name': 'building-damage',
            'description': 'building damage analysis',
            'input_parameters': [
                {
                    'id': 'result_name',
                    'required': True,
                    'description': 'result dataset name',
                    'type': str
                },
                {
                    'id': 'hazard_type',
                    'required': True,
                    'description': 'Hazard Type (e.g. earthquake)',
                    'type': str
                },
                {
                    'id': 'hazard_id',
                    'required': True,
                    'description': 'Hazard ID',
                    'type': str
                },
                {
                    'id': 'fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in mapping dataset',
                    'type': str
                },
                {
                    'id': 'use_liquefaction',
                    'required': False,
                    'description': 'Use liquefaction',
                    'type': bool
                },
                {
                    'id': 'use_hazard_uncertainty',
                    'required': False,
                    'description': 'Use hazard uncertainty',
                    'type': bool
                },
                {
                    'id': 'num_cpu',
                    'required': False,
                    'description': 'If using parallel execution, the number of cpus to request',
                    'type': int
                },
                {
                    'id': 'seed',
                    'required': False,
                    'description': 'Initial seed for the tornado hazard value',
                    'type': int
                }
            ],
            'input_datasets': [
                {
                    'id': 'buildings',
                    'required': True,
                    'description': 'Building Inventory',
                    'type': ['ergo:buildingInventoryVer4', 'ergo:buildingInventoryVer5',
                             'ergo:buildingInventoryVer6', 'ergo:buildingInventoryVer7'],
                },
                {
                    'id': 'dfr3_mapping_set',
                    'required': True,
                    'description': 'DFR3 Mapping Set Object',
                    'type': ['incore:dfr3MappingSet'],
                },
                {
                    'id': 'retrofit_strategy',
                    'required': False,
                    'description': 'Building retrofit strategy that contains guid and retrofit method',
                    'type': ['incore:retrofitStrategy']
                }
            ],
            'output_datasets': [
                {
                    'id': 'ds_result',
                    'parent_type': 'buildings',
                    'description': 'CSV file of damage states for building structural damage',
                    'type': 'ergo:buildingDamageVer6'
                },
                {
                    'id': 'damage_result',
                    'parent_type': 'buildings',
                    'description': 'Json file with information about applied hazard value and fragility',
                    'type': 'incore:buildingDamageSupplement'
                }
            ]
        }
class BridgeDamage(BaseAnalysis):
    """Computes bridge structural damage for earthquake, tsunami, tornado, and hurricane hazards.

    Args:
        incore_client (IncoreClient): Service authentication.

    """

    def __init__(self, incore_client):
        # Service clients used to fetch hazard values and fragility mappings.
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(BridgeDamage, self).__init__(incore_client)

    def run(self):
        """Executes bridge damage analysis.

        Chunks the bridge inventory, computes damage per chunk in a process pool,
        then stores the combined CSV and JSON results.

        Returns:
            bool: True on completion.

        """
        # Bridge dataset
        bridge_set = self.get_input_dataset("bridges").get_inventory_reader()

        # Get hazard input
        hazard_type = self.get_parameter("hazard_type")
        hazard_dataset_id = self.get_parameter("hazard_id")
        user_defined_cpu = 1

        # FIX: was "not ... is None" — PEP 8 prefers "is not None"; behavior unchanged.
        if self.get_parameter("num_cpu") is not None and self.get_parameter(
                "num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(
            self, len(bridge_set), user_defined_cpu)

        # Chunk the inventory so each worker gets an approximately equal share.
        avg_bulk_input_size = int(len(bridge_set) / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(bridge_set)
        while count < len(inventory_list):
            inventory_args.append(
                inventory_list[count:count + avg_bulk_input_size])
            count += avg_bulk_input_size

        (ds_results, damage_results) = self.bridge_damage_concurrent_future(
            self.bridge_damage_analysis_bulk_input, num_workers,
            inventory_args, repeat(hazard_type), repeat(hazard_dataset_id))

        self.set_result_csv_data("result", ds_results,
                                 name=self.get_parameter("result_name"))
        self.set_result_json_data("metadata", damage_results,
                                  name=self.get_parameter("result_name") + "_additional_info")
        return True

    def bridge_damage_concurrent_future(self, function_name, num_workers,
                                        *args):
        """Utilizes concurrent.future module.

        Args:
            function_name (function): The function to be parallelized.
            num_workers (int): Maximum number workers in parallelization.
            *args: All the arguments in order to pass into parameter function_name.

        Returns:
            list: A list of ordered dictionaries with bridge damage values and other data/metadata.

        """
        output_ds = []
        output_dmg = []
        with concurrent.futures.ProcessPoolExecutor(
                max_workers=num_workers) as executor:
            # executor.map preserves input order, so chunk results line up with inputs.
            for ret1, ret2 in executor.map(function_name, *args):
                output_ds.extend(ret1)
                output_dmg.extend(ret2)

        return output_ds, output_dmg

    def bridge_damage_analysis_bulk_input(self, bridges, hazard_type,
                                          hazard_dataset_id):
        """Run analysis for multiple bridges.

        Args:
            bridges (list): Multiple bridges from input inventory set.
            hazard_type (str): Hazard type, either earthquake, tornado, tsunami, or hurricane.
            hazard_dataset_id (str): An id of the hazard exposure.

        Returns:
            list: A list of ordered dictionaries with bridge damage values and other data/metadata.

        Raises:
            ValueError: If the hazard type is unsupported or a fragility is in deprecated format.

        """
        # Get Fragility key; tsunami uses a different default key than the other hazards.
        fragility_key = self.get_parameter("fragility_key")
        if fragility_key is None:
            fragility_key = BridgeUtil.DEFAULT_TSUNAMI_HMAX_FRAGILITY_KEY if hazard_type == 'tsunami' else \
                BridgeUtil.DEFAULT_FRAGILITY_KEY
            self.set_parameter("fragility_key", fragility_key)

        # Hazard Uncertainty
        # NOTE(review): use_hazard_uncertainty and use_liquefaction are read but never
        # used below — kept for interface/behavior parity; confirm before removing.
        use_hazard_uncertainty = False
        if hazard_type == "earthquake" and self.get_parameter(
                "use_hazard_uncertainty") is not None:
            use_hazard_uncertainty = self.get_parameter(
                "use_hazard_uncertainty")

        # Liquefaction
        use_liquefaction = False
        if hazard_type == "earthquake" and self.get_parameter(
                "use_liquefaction") is not None:
            use_liquefaction = self.get_parameter("use_liquefaction")

        fragility_set = self.fragilitysvc.match_inventory(
            self.get_input_dataset("dfr3_mapping_set"), bridges,
            fragility_key)
        values_payload = []
        unmapped_bridges = []
        mapped_bridges = []
        for b in bridges:
            bridge_id = b["id"]
            if bridge_id in fragility_set:
                location = GeoUtil.get_location(b)
                loc = str(location.y) + "," + str(location.x)
                demands = fragility_set[bridge_id].demand_types
                units = fragility_set[bridge_id].demand_units
                value = {"demands": demands, "units": units, "loc": loc}
                values_payload.append(value)
                mapped_bridges.append(b)
            else:
                unmapped_bridges.append(b)

        # not needed anymore as they are already split into mapped and unmapped
        del bridges

        # One batched hazard-service request per chunk instead of one per bridge.
        if hazard_type == 'earthquake':
            hazard_vals = self.hazardsvc.post_earthquake_hazard_values(
                hazard_dataset_id, values_payload)
        elif hazard_type == 'tornado':
            hazard_vals = self.hazardsvc.post_tornado_hazard_values(
                hazard_dataset_id, values_payload)
        elif hazard_type == 'tsunami':
            hazard_vals = self.hazardsvc.post_tsunami_hazard_values(
                hazard_dataset_id, values_payload)
        elif hazard_type == 'hurricane':
            hazard_vals = self.hazardsvc.post_hurricane_hazard_values(
                hazard_dataset_id, values_payload)
        elif hazard_type == 'flood':
            hazard_vals = self.hazardsvc.post_flood_hazard_values(
                hazard_dataset_id, values_payload)
        else:
            raise ValueError(
                "The provided hazard type is not supported yet by this analysis"
            )

        ds_results = []
        damage_results = []

        # NOTE: hazard_vals[i] is assumed to correspond to mapped_bridges[i]
        # because the payload was built in the same order.
        i = 0
        for bridge in mapped_bridges:
            ds_result = dict()
            damage_result = dict()
            dmg_probability = dict()
            dmg_intervals = dict()
            selected_fragility_set = fragility_set[bridge["id"]]

            if isinstance(selected_fragility_set.fragility_curves[0],
                          DFR3Curve):
                # Supports multiple demand types in same fragility
                hazard_val = AnalysisUtil.update_precision_of_lists(
                    hazard_vals[i]["hazardValues"])
                input_demand_types = hazard_vals[i]["demands"]
                input_demand_units = hazard_vals[i]["units"]

                hval_dict = dict()
                j = 0
                for d in selected_fragility_set.demand_types:
                    hval_dict[d] = hazard_val[j]
                    j += 1

                # If any hazard value came back as an error, leave the damage dicts empty.
                if not AnalysisUtil.do_hazard_values_have_errors(
                        hazard_vals[i]["hazardValues"]):
                    bridge_args = selected_fragility_set.construct_expression_args_from_inventory(
                        bridge)
                    dmg_probability = \
                        selected_fragility_set.calculate_limit_state(hval_dict,
                                                                     inventory_type="bridge",
                                                                     **bridge_args)
                    dmg_intervals = selected_fragility_set.calculate_damage_interval(
                        dmg_probability, hazard_type=hazard_type,
                        inventory_type="bridge")
            else:
                raise ValueError(
                    "One of the fragilities is in deprecated format. This should not happen. If you are "
                    "seeing this please report the issue.")

            retrofit_cost = BridgeUtil.get_retrofit_cost(fragility_key)
            retrofit_type = BridgeUtil.get_retrofit_type(fragility_key)

            ds_result['guid'] = bridge['properties']['guid']
            ds_result.update(dmg_probability)
            ds_result.update(dmg_intervals)
            ds_result[
                'haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(
                hazard_val, hazard_type)

            damage_result['guid'] = bridge['properties']['guid']
            damage_result['fragility_id'] = selected_fragility_set.id
            damage_result["retrofit"] = retrofit_type
            damage_result["retrocost"] = retrofit_cost
            damage_result["demandtypes"] = input_demand_types
            damage_result["demandunits"] = input_demand_units
            damage_result["hazardtype"] = hazard_type
            damage_result["hazardval"] = hazard_val

            # add spans to bridge output so mean damage calculation can use that info
            # NOTE(review): the lowercase branch writes key 'spans' while the uppercase
            # one writes 'SPANS'; also a non-digit string leaves the key unset — both
            # preserved here to keep the output schema unchanged; confirm downstream.
            if "spans" in bridge["properties"] and bridge["properties"][
                    "spans"] is not None:
                if isinstance(bridge["properties"]["spans"],
                              str) and bridge["properties"]["spans"].isdigit():
                    damage_result['spans'] = int(bridge["properties"]["spans"])
                elif isinstance(bridge["properties"]["spans"], int):
                    damage_result['spans'] = bridge["properties"]["spans"]
            elif "SPANS" in bridge["properties"] and bridge["properties"][
                    "SPANS"] is not None:
                if isinstance(bridge["properties"]["SPANS"],
                              str) and bridge["properties"]["SPANS"].isdigit():
                    damage_result['SPANS'] = int(bridge["properties"]["SPANS"])
                elif isinstance(bridge["properties"]["SPANS"], int):
                    damage_result['SPANS'] = bridge["properties"]["SPANS"]
            else:
                damage_result['spans'] = 1

            ds_results.append(ds_result)
            damage_results.append(damage_result)
            i += 1

        # Bridges with no fragility mapping still appear in the output, with null metadata.
        for bridge in unmapped_bridges:
            ds_result = dict()
            damage_result = dict()

            ds_result['guid'] = bridge['properties']['guid']

            damage_result['guid'] = bridge['properties']['guid']
            damage_result["retrofit"] = None
            damage_result["retrocost"] = None
            damage_result["demandtypes"] = None
            damage_result['demandunits'] = None
            damage_result["hazardtype"] = None
            damage_result['hazardval'] = None
            damage_result['spans'] = None

            ds_results.append(ds_result)
            damage_results.append(damage_result)

        return ds_results, damage_results

    def get_spec(self):
        """Get specifications of the bridge damage analysis.

        Returns:
            obj: A JSON object of specifications of the bridge damage analysis.

        """
        return {
            'name': 'bridge-damage',
            'description': 'bridge damage analysis',
            'input_parameters': [
                {
                    'id': 'result_name',
                    'required': True,
                    'description': 'result dataset name',
                    'type': str
                },
                {
                    'id': 'hazard_type',
                    'required': True,
                    'description': 'Hazard Type (e.g. earthquake)',
                    'type': str
                },
                {
                    'id': 'hazard_id',
                    'required': True,
                    'description': 'Hazard ID',
                    'type': str
                },
                {
                    'id': 'fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in mapping dataset',
                    'type': str
                },
                {
                    'id': 'use_liquefaction',
                    'required': False,
                    'description': 'Use liquefaction',
                    'type': bool
                },
                {
                    'id': 'use_hazard_uncertainty',
                    'required': False,
                    'description': 'Use hazard uncertainty',
                    'type': bool
                },
                {
                    'id': 'num_cpu',
                    'required': False,
                    'description': 'If using parallel execution, the number of cpus to request',
                    'type': int
                },
            ],
            'input_datasets': [{
                'id': 'bridges',
                'required': True,
                'description': 'Bridge Inventory',
                'type': ['ergo:bridges', 'ergo:bridgesVer2', 'ergo:bridgesVer3'],
            }, {
                'id': 'dfr3_mapping_set',
                'required': True,
                'description': 'DFR3 Mapping Set Object',
                'type': ['incore:dfr3MappingSet'],
            }],
            'output_datasets': [{
                'id': 'result',
                'parent_type': 'bridges',
                'description': 'CSV file of bridge structural damage',
                'type': 'ergo:bridgeDamageVer3'
            }, {
                'id': 'metadata',
                'parent_type': 'bridges',
                'description': 'additional metadata in json file about applied hazard value and '
                               'fragility',
                'type': 'incore:bridgeDamageSupplement'
            }]
        }
class NonStructBuildingDamage(BaseAnalysis):
    """Computes non-structural structural building damage for an earthquake hazard.

    Two fragilities are applied to each building: an acceleration-sensitive (AS)
    one and a drift-sensitive (DS) one; results for both are reported per building.

    Args:
        incore_client (IncoreClient): Service authentication.

    """

    def __init__(self, incore_client):
        # Service clients used to fetch hazard values and fragility mappings.
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(NonStructBuildingDamage, self).__init__(incore_client)

    def run(self):
        """Executes building damage analysis.

        Returns:
            bool: True on completion.

        """
        # Building dataset
        building_set = self.get_input_dataset("buildings").get_inventory_reader()

        # set Default Fragility key
        fragility_key_as = self.get_parameter("fragility_key_as")
        if fragility_key_as is None:
            self.set_parameter("fragility_key_as",
                               NonStructBuildingUtil.DEFAULT_FRAGILITY_KEY_AS)

        fragility_key_ds = self.get_parameter("fragility_key_ds")
        if fragility_key_ds is None:
            self.set_parameter("fragility_key_ds",
                               NonStructBuildingUtil.DEFAULT_FRAGILITY_KEY_DS)

        # Set Default Hazard Uncertainty
        use_hazard_uncertainty = self.get_parameter("use_hazard_uncertainty")
        if use_hazard_uncertainty is None:
            self.set_parameter("use_hazard_uncertainty", False)

        # Set Default Liquefaction
        use_liquefaction = self.get_parameter("use_liquefaction")
        if use_liquefaction is None:
            self.set_parameter("use_liquefaction", False)

        user_defined_cpu = 1
        # FIX: was "not ... is None" — PEP 8 prefers "is not None"; behavior unchanged.
        if self.get_parameter("num_cpu") is not None and self.get_parameter(
                "num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(
            self, len(building_set), user_defined_cpu)

        # Chunk the inventory so each worker gets an approximately equal share.
        avg_bulk_input_size = int(len(building_set) / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(building_set)
        while count < len(inventory_list):
            inventory_args.append(
                inventory_list[count:count + avg_bulk_input_size])
            count += avg_bulk_input_size

        (ds_results, damage_results) = self.building_damage_concurrent_future(
            self.building_damage_analysis_bulk_input, num_workers,
            inventory_args)

        self.set_result_csv_data("result", ds_results,
                                 name=self.get_parameter("result_name"))
        self.set_result_json_data("damage_result", damage_results,
                                  name=self.get_parameter("result_name") + "_additional_info")
        return True

    def building_damage_concurrent_future(self, function_name, num_workers,
                                          *args):
        """Utilizes concurrent.future module.

        Args:
            function_name (function): The function to be parallelized.
            num_workers (int): Maximum number workers in parallelization.
            *args: All the arguments in order to pass into parameter function_name.

        Returns:
            dict: An ordered dictionary with building damage values.
            dict: An ordered dictionary with building data/metadata.

        """
        output_ds = []
        output_dmg = []
        with concurrent.futures.ProcessPoolExecutor(
                max_workers=num_workers) as executor:
            # executor.map preserves input order, so chunk results line up with inputs.
            for ret1, ret2 in executor.map(function_name, *args):
                output_ds.extend(ret1)
                output_dmg.extend(ret2)

        return output_ds, output_dmg

    def building_damage_analysis_bulk_input(self, buildings):
        """Run analysis for multiple buildings.

        Args:
            buildings (list): Multiple buildings from input inventory set.

        Returns:
            dict: An ordered dictionary with building damage values.
            dict: An ordered dictionary with building data/metadata.

        Raises:
            ValueError: If the hazard type is not earthquake, liquefaction is requested
                without a geology dataset, hazard uncertainty is requested, or a
                fragility is in deprecated format.

        """
        # read static parameters from object self
        hazard_type = self.get_parameter("hazard_type")
        hazard_dataset_id = self.get_parameter("hazard_id")
        liq_geology_dataset_id = self.get_parameter("liq_geology_dataset_id")
        use_liquefaction = self.get_parameter("use_liquefaction")
        use_hazard_uncertainty = self.get_parameter("use_hazard_uncertainty")

        building_results = []
        damage_results = []
        fragility_sets_as = self.fragilitysvc.match_inventory(
            self.get_input_dataset("dfr3_mapping_set"), buildings,
            self.get_parameter("fragility_key_as"))
        fragility_sets_ds = self.fragilitysvc.match_inventory(
            self.get_input_dataset("dfr3_mapping_set"), buildings,
            self.get_parameter("fragility_key_ds"))
        values_payload_as = []
        values_payload_ds = []
        values_payload_liq = []
        mapped_buildings = []
        unmapped_buildings = []

        # A building is "mapped" only when BOTH the AS and the DS fragility matched.
        for building in buildings:
            if building["id"] in fragility_sets_as and building[
                    "id"] in fragility_sets_ds:
                fragility_set_as = fragility_sets_as[building["id"]]
                fragility_set_ds = fragility_sets_ds[building["id"]]
                location = GeoUtil.get_location(building)
                loc = str(location.y) + "," + str(location.x)

                # Acceleration-Sensitive
                demands_as = AnalysisUtil.get_hazard_demand_types(
                    building, fragility_set_as, hazard_type)
                units_as = fragility_set_as.demand_units
                value_as = {
                    "demands": demands_as,
                    "units": units_as,
                    "loc": loc
                }
                values_payload_as.append(value_as)

                # Drift-Sensitive
                demands_ds = AnalysisUtil.get_hazard_demand_types(
                    building, fragility_set_ds, hazard_type)
                units_ds = fragility_set_ds.demand_units
                value_ds = {
                    "demands": demands_ds,
                    "units": units_ds,
                    "loc": loc
                }
                values_payload_ds.append(value_ds)

                # liquefaction
                if use_liquefaction:
                    value_liq = {
                        "demands": ["pgd"],  # implied
                        "units": ["in"],
                        "loc": loc
                    }
                    values_payload_liq.append(value_liq)

                mapped_buildings.append(building)
            else:
                unmapped_buildings.append(building)

        del buildings

        # get hazard values and liquefaction; only earthquake is supported.
        if hazard_type == 'earthquake':
            hazard_resp_as = self.hazardsvc.post_earthquake_hazard_values(
                hazard_dataset_id, values_payload_as)
            hazard_resp_ds = self.hazardsvc.post_earthquake_hazard_values(
                hazard_dataset_id, values_payload_ds)

            # adjust dmg probability for liquefaction
            if use_liquefaction:
                if liq_geology_dataset_id is not None:
                    liquefaction_resp = self.hazardsvc.post_liquefaction_values(
                        hazard_dataset_id, liq_geology_dataset_id,
                        values_payload_liq)
                else:
                    raise ValueError(
                        'Hazard does not support liquefaction! Check to make sure you defined the '
                        'liquefaction portion of your scenario earthquake.')
        else:
            raise ValueError(
                "The provided hazard type is not supported yet by this analysis"
            )

        # calculate LS and DS
        for i, building in enumerate(mapped_buildings):
            # Defaults keep the output schema complete even when hazard values had errors.
            dmg_probability_as = {"LS_0": None, "LS_1": None, "LS_2": None}
            dmg_interval_as = {
                "DS_0": None,
                "DS_1": None,
                "DS_2": None,
                "DS_3": None
            }
            dmg_probability_ds = {"LS_0": None, "LS_1": None, "LS_2": None}
            dmg_interval_ds = {
                "DS_0": None,
                "DS_1": None,
                "DS_2": None,
                "DS_3": None
            }
            fragility_set_as = fragility_sets_as[building["id"]]
            fragility_set_ds = fragility_sets_ds[building["id"]]

            # TODO this value needs to come from the hazard service
            # adjust dmg probability for hazard uncertainty
            if use_hazard_uncertainty:
                raise ValueError('Uncertainty has not yet been implemented!')

            ###############
            # AS
            if isinstance(fragility_set_as.fragility_curves[0], DFR3Curve):
                hazard_vals_as = AnalysisUtil.update_precision_of_lists(
                    hazard_resp_as[i]["hazardValues"])
                demand_types_as = hazard_resp_as[i]["demands"]
                demand_units_as = hazard_resp_as[i]["units"]
                hval_dict_as = dict()
                for j, d in enumerate(fragility_set_as.demand_types):
                    hval_dict_as[d] = hazard_vals_as[j]
                if not AnalysisUtil.do_hazard_values_have_errors(
                        hazard_resp_as[i]["hazardValues"]):
                    building_args = fragility_set_as.construct_expression_args_from_inventory(
                        building)
                    dmg_probability_as = fragility_set_as. \
                        calculate_limit_state(hval_dict_as,
                                              inventory_type="building",
                                              **building_args)
                    # adjust dmg probability for liquefaction
                    if use_liquefaction:
                        if liq_geology_dataset_id is not None:
                            liquefaction_dmg = AnalysisUtil.update_precision_of_lists(
                                liquefaction_resp[i]["groundFailureProb"])
                            dmg_probability_as = AnalysisUtil.update_precision_of_dicts(
                                NonStructBuildingUtil.
                                adjust_damage_for_liquefaction(
                                    dmg_probability_as, liquefaction_dmg))
                    # FIX: was fragility_set_ds.calculate_damage_interval(...) — the
                    # drift-sensitive fragility was wrongly used to compute the
                    # acceleration-sensitive damage intervals.
                    dmg_interval_as = fragility_set_as.calculate_damage_interval(
                        dmg_probability_as, hazard_type=hazard_type,
                        inventory_type="building")
            else:
                raise ValueError(
                    "One of the fragilities is in deprecated format. This should not happen. If you are "
                    "seeing this please report the issue.")

            ###############
            # DS
            if isinstance(fragility_set_ds.fragility_curves[0], DFR3Curve):
                hazard_vals_ds = AnalysisUtil.update_precision_of_lists(
                    hazard_resp_ds[i]["hazardValues"])
                demand_types_ds = hazard_resp_ds[i]["demands"]
                demand_units_ds = hazard_resp_ds[i]["units"]
                hval_dict_ds = dict()
                for j, d in enumerate(fragility_set_ds.demand_types):
                    hval_dict_ds[d] = hazard_vals_ds[j]

                if not AnalysisUtil.do_hazard_values_have_errors(
                        hazard_resp_ds[i]["hazardValues"]):
                    building_args = fragility_set_ds.construct_expression_args_from_inventory(
                        building)
                    dmg_probability_ds = fragility_set_ds. \
                        calculate_limit_state(hval_dict_ds,
                                              inventory_type="building",
                                              **building_args)
                    # adjust dmg probability for liquefaction
                    if use_liquefaction:
                        if liq_geology_dataset_id is not None:
                            liquefaction_dmg = AnalysisUtil.update_precision_of_lists(
                                liquefaction_resp[i]["groundFailureProb"])
                            dmg_probability_ds = AnalysisUtil.update_precision_of_dicts(
                                NonStructBuildingUtil.
                                adjust_damage_for_liquefaction(
                                    dmg_probability_ds, liquefaction_dmg))

                    dmg_interval_ds = fragility_set_ds.calculate_damage_interval(
                        dmg_probability_ds, hazard_type=hazard_type,
                        inventory_type="building")
            else:
                raise ValueError(
                    "One of the fragilities is in deprecated format. This should not happen. If you are "
                    "seeing this please report the issue.")

            # put results in dictionary
            # AS denotes acceleration-sensitive fragility assigned to the building.
            # DS denotes drift-sensitive fragility assigned to the building.
            building_result = dict()
            building_result['guid'] = building['properties']['guid']
            building_result['AS_LS_0'] = dmg_probability_as['LS_0']
            building_result['AS_LS_1'] = dmg_probability_as['LS_1']
            building_result['AS_LS_2'] = dmg_probability_as['LS_2']
            building_result['AS_DS_0'] = dmg_interval_as['DS_0']
            building_result['AS_DS_1'] = dmg_interval_as['DS_1']
            building_result['AS_DS_2'] = dmg_interval_as['DS_2']
            building_result['AS_DS_3'] = dmg_interval_as['DS_3']
            building_result['DS_LS_0'] = dmg_probability_ds['LS_0']
            building_result['DS_LS_1'] = dmg_probability_ds['LS_1']
            building_result['DS_LS_2'] = dmg_probability_ds['LS_2']
            building_result['DS_DS_0'] = dmg_interval_ds['DS_0']
            building_result['DS_DS_1'] = dmg_interval_ds['DS_1']
            building_result['DS_DS_2'] = dmg_interval_ds['DS_2']
            building_result['DS_DS_3'] = dmg_interval_ds['DS_3']
            building_result[
                'hazard_exposure_as'] = AnalysisUtil.get_exposure_from_hazard_values(
                hazard_vals_as, hazard_type)
            building_result[
                'hazard_exposure_ds'] = AnalysisUtil.get_exposure_from_hazard_values(
                hazard_vals_ds, hazard_type)

            # put damage results in dictionary
            damage_result = dict()
            damage_result['guid'] = building['properties']['guid']
            damage_result['fragility_id_as'] = fragility_set_as.id
            damage_result['demandtypes_as'] = demand_types_as
            damage_result['demandunits_as'] = demand_units_as
            damage_result['fragility_id_ds'] = fragility_set_ds.id
            damage_result['demandtypes_ds'] = demand_types_ds
            damage_result['demandunits_ds'] = demand_units_ds
            damage_result['hazardtype'] = hazard_type
            damage_result['hazardvals_as'] = hazard_vals_as
            damage_result['hazardvals_ds'] = hazard_vals_ds

            building_results.append(building_result)
            damage_results.append(damage_result)

        # Buildings with no fragility mapping still appear in the output, with null metadata.
        for building in unmapped_buildings:
            building_result = dict()
            building_result['guid'] = building['properties']['guid']

            damage_result = dict()
            damage_result['guid'] = building['properties']['guid']
            damage_result['fragility_id_as'] = None
            damage_result['demandtypes_as'] = None
            damage_result['demandunits_as'] = None
            damage_result['fragility_id_ds'] = None
            damage_result['demandtypes_ds'] = None
            damage_result['demandunits_ds'] = None
            damage_result['hazardtype'] = None
            damage_result['hazardvals_as'] = None
            damage_result['hazardvals_ds'] = None

            building_results.append(building_result)
            damage_results.append(damage_result)

        return building_results, damage_results

    def get_spec(self):
        """Get specifications of the building damage analysis.

        Returns:
            obj: A JSON object of specifications of the building damage analysis.

        """
        return {
            'name': 'building-damage',
            'description': 'building damage analysis',
            'input_parameters': [
                {
                    'id': 'result_name',
                    'required': True,
                    'description': 'result dataset name',
                    'type': str
                },
                {
                    'id': 'hazard_type',
                    'required': True,
                    'description': 'Hazard Type (e.g. earthquake)',
                    'type': str
                },
                {
                    'id': 'hazard_id',
                    'required': True,
                    'description': 'Hazard ID',
                    'type': str
                },
                {
                    'id': 'fragility_key_as',
                    'required': False,
                    'description': 'Acceleration-Sensitive Fragility key to use in mapping dataset',
                    'type': str
                },
                {
                    'id': 'fragility_key_ds',
                    'required': False,
                    'description': 'Drift-Sensitive Fragility key to use in mapping dataset',
                    'type': str
                },
                {
                    'id': 'use_liquefaction',
                    'required': False,
                    'description': 'Use liquefaction',
                    'type': bool
                },
                {
                    'id': 'liq_geology_dataset_id',
                    'required': False,
                    'description': 'liquefaction geology dataset id, '
                                   'if use liquefaction, you have to provide this id',
                    'type': str
                },
                {
                    'id': 'use_hazard_uncertainty',
                    'required': False,
                    'description': 'Use hazard uncertainty',
                    'type': bool
                },
                {
                    'id': 'num_cpu',
                    'required': False,
                    'description': 'If using parallel execution, the number of cpus to request',
                    'type': int
                },
            ],
            'input_datasets': [{
                'id': 'buildings',
                'required': True,
                'description': 'building Inventory',
                'type': ['ergo:buildingInventoryVer4'],
            }, {
                'id': 'dfr3_mapping_set',
                'required': True,
                'description': 'DFR3 Mapping Set Object',
                'type': ['incore:dfr3MappingSet'],
            }],
            'output_datasets': [{
                'id': 'result',
                'parent_type': 'buildings',
                'description': 'CSV file of damage states for building non-structural damage',
                'type': 'ergo:nsBuildingInventoryDamageVer3'
            }, {
                'id': 'damage_result',
                'parent_type': 'buildings',
                'description': 'Json file with information about applied hazard value and fragility',
                'type': 'incore:nsBuildingInventoryDamageSupplement'
            }]
        }
class EpfDamage(BaseAnalysis):
    """Computes electric power facility structural damage for an earthquake, tsunami, tornado,
    and hurricane hazards.

    Damage is computed from DFR3 fragility curves matched to each facility; for earthquakes,
    results can optionally be adjusted for liquefaction-induced ground deformation.

    Args:
        incore_client (IncoreClient): Service authentication.

    """

    # Fragility mapping keys used when the caller does not supply one.
    DEFAULT_LIQ_FRAGILITY_KEY = "pgd"
    DEFAULT_FRAGILITY_KEY = "pga"

    def __init__(self, incore_client):
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(EpfDamage, self).__init__(incore_client)

    def run(self):
        """Executes electric power facility damage analysis.

        Splits the inventory into roughly equal chunks, runs the bulk damage
        computation in a process pool, and stores CSV damage results plus JSON
        metadata as output datasets.

        Returns:
            bool: True on completion.
        """
        epf_set = self.get_input_dataset("epfs").get_inventory_reader()

        # Get Fragility key; fall back to the class default and persist the
        # choice so downstream consumers see the key actually used.
        fragility_key = self.get_parameter("fragility_key")
        if fragility_key is None:
            fragility_key = self.DEFAULT_FRAGILITY_KEY
            self.set_parameter("fragility_key", fragility_key)

        # Get hazard input
        hazard_dataset_id = self.get_parameter("hazard_id")

        # Hazard type, note this is here for future use if additional hazards are supported by this analysis
        hazard_type = self.get_parameter("hazard_type")

        # Hazard Uncertainty: accepted as a parameter but not yet supported.
        use_hazard_uncertainty = False
        if self.get_parameter("use_hazard_uncertainty") is not None:
            use_hazard_uncertainty = self.get_parameter(
                "use_hazard_uncertainty")
        if use_hazard_uncertainty:
            raise ValueError("Uncertainty is not implemented yet.")

        user_defined_cpu = 1
        if not self.get_parameter("num_cpu") is None and self.get_parameter(
                "num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(
            self, len(epf_set), user_defined_cpu)

        # Chunk the inventory so each worker receives one bulk slice.
        avg_bulk_input_size = int(len(epf_set) / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(epf_set)
        while count < len(inventory_list):
            inventory_args.append(
                inventory_list[count:count + avg_bulk_input_size])
            count += avg_bulk_input_size

        (ds_results, damage_results) = self.epf_damage_concurrent_future(
            self.epf_damage_analysis_bulk_input, num_workers, inventory_args,
            repeat(hazard_type), repeat(hazard_dataset_id))

        self.set_result_csv_data("result", ds_results,
                                 name=self.get_parameter("result_name"))
        self.set_result_json_data("metadata", damage_results,
                                  name=self.get_parameter("result_name") +
                                  "_additional_info")
        return True

    def epf_damage_concurrent_future(self, function_name, num_workers, *args):
        """Utilizes concurrent.future module.

        Args:
            function_name (function): The function to be parallelized.
            num_workers (int): Maximum number workers in parallelization.
            *args: All the arguments in order to pass into parameter function_name.

        Returns:
            list: A list of ordered dictionaries with epf damage values and other data/metadata.

        """
        output_ds = []
        output_dmg = []
        with concurrent.futures.ProcessPoolExecutor(
                max_workers=num_workers) as executor:
            # Each worker returns a (damage-state list, metadata list) pair;
            # flatten them in submission order.
            for ret1, ret2 in executor.map(function_name, *args):
                output_ds.extend(ret1)
                output_dmg.extend(ret2)
        return output_ds, output_dmg

    def epf_damage_analysis_bulk_input(self, epfs, hazard_type,
                                       hazard_dataset_id):
        """Run analysis for multiple epfs.

        Args:
            epfs (list): Multiple epfs from input inventory set.
            hazard_type (str): A type of hazard exposure (earthquake, tsunami, tornado, or hurricane).
            hazard_dataset_id (str): An id of the hazard exposure.

        Returns:
            list: A list of ordered dictionaries with epf damage values and other data/metadata.

        """
        use_liquefaction = False
        liquefaction_available = False

        fragility_key = self.get_parameter("fragility_key")
        fragility_set = self.fragilitysvc.match_inventory(
            self.get_input_dataset("dfr3_mapping_set"), epfs, fragility_key)

        # Liquefaction only applies to earthquakes, and requires both an
        # explicit opt-in and a geology dataset.
        if hazard_type == "earthquake":
            liquefaction_fragility_key = self.get_parameter(
                "liquefaction_fragility_key")
            if self.get_parameter("use_liquefaction") is True:
                if liquefaction_fragility_key is None:
                    liquefaction_fragility_key = self.DEFAULT_LIQ_FRAGILITY_KEY
                use_liquefaction = self.get_parameter("use_liquefaction")
                # Obtain the geology dataset
                geology_dataset_id = self.get_parameter(
                    "liquefaction_geology_dataset_id")
                if geology_dataset_id is not None:
                    fragility_sets_liq = self.fragilitysvc.match_inventory(
                        self.get_input_dataset("dfr3_mapping_set"), epfs,
                        liquefaction_fragility_key)
                    if fragility_sets_liq is not None:
                        liquefaction_available = True

        # Build the batched hazard-service payloads, separating facilities
        # with and without a fragility mapping.
        values_payload = []
        values_payload_liq = []
        unmapped_epfs = []
        mapped_epfs = []
        for epf in epfs:
            epf_id = epf["id"]
            if epf_id in fragility_set:
                location = GeoUtil.get_location(epf)
                loc = str(location.y) + "," + str(location.x)
                demands = fragility_set[epf_id].demand_types
                units = fragility_set[epf_id].demand_units
                value = {"demands": demands, "units": units, "loc": loc}
                values_payload.append(value)
                mapped_epfs.append(epf)

                if liquefaction_available and epf["id"] in fragility_sets_liq:
                    fragility_set_liq = fragility_sets_liq[epf["id"]]
                    demands_liq = fragility_set_liq.demand_types
                    units_liq = fragility_set_liq.demand_units
                    value_liq = {
                        "demands": demands_liq,
                        "units": units_liq,
                        "loc": loc
                    }
                    values_payload_liq.append(value_liq)
            else:
                unmapped_epfs.append(epf)

        if hazard_type == 'earthquake':
            hazard_vals = self.hazardsvc.post_earthquake_hazard_values(
                hazard_dataset_id, values_payload)
        elif hazard_type == 'tornado':
            hazard_vals = self.hazardsvc.post_tornado_hazard_values(
                hazard_dataset_id, values_payload)
        elif hazard_type == 'hurricane':
            # TODO: implement hurricane
            raise ValueError('Hurricane hazard has not yet been implemented!')
        elif hazard_type == 'tsunami':
            hazard_vals = self.hazardsvc.post_tsunami_hazard_values(
                hazard_dataset_id, values_payload)
        else:
            raise ValueError("Missing hazard type.")

        liquefaction_resp = None
        if liquefaction_available:
            liquefaction_resp = self.hazardsvc.post_liquefaction_values(
                hazard_dataset_id, geology_dataset_id, values_payload_liq)

        ds_results = []
        damage_results = []

        # hazard_vals / liquefaction_resp are positionally aligned with
        # mapped_epfs, so a single running index covers both.
        i = 0
        for epf in mapped_epfs:
            ds_result = dict()
            damage_result = dict()
            selected_fragility_set = fragility_set[epf["id"]]

            if isinstance(selected_fragility_set.fragility_curves[0],
                          DFR3Curve):
                hazard_val = AnalysisUtil.update_precision_of_lists(
                    hazard_vals[i]["hazardValues"])
                input_demand_types = hazard_vals[i]["demands"]
                input_demand_units = hazard_vals[i]["units"]

                # Map each fragility demand type to its returned hazard value.
                hval_dict = dict()
                j = 0
                for d in selected_fragility_set.demand_types:
                    hval_dict[d] = hazard_val[j]
                    j += 1

                epf_args = selected_fragility_set.construct_expression_args_from_inventory(
                    epf)
                limit_states = selected_fragility_set.calculate_limit_state(
                    hval_dict, inventory_type='electric_facility', **epf_args)

                if liquefaction_resp is not None:
                    fragility_set_liq = fragility_sets_liq[epf["id"]]

                    if isinstance(fragility_set_liq.fragility_curves[0],
                                  DFR3Curve):
                        liq_hazard_vals = AnalysisUtil.update_precision_of_lists(
                            liquefaction_resp[i]["pgdValues"])
                        liq_demand_types = liquefaction_resp[i]["demands"]
                        liq_demand_units = liquefaction_resp[i]["units"]
                        liquefaction_prob = liquefaction_resp[i][
                            'liqProbability']

                        hval_dict_liq = dict()
                        for j, d in enumerate(fragility_set_liq.demand_types):
                            hval_dict_liq[d] = liq_hazard_vals[j]

                        facility_liq_args = fragility_set_liq.construct_expression_args_from_inventory(
                            epf)
                        pgd_limit_states = \
                            fragility_set_liq.calculate_limit_state(
                                hval_dict_liq,
                                inventory_type="electric_facility",
                                **facility_liq_args)
                    else:
                        raise ValueError(
                            "One of the fragilities is in deprecated format. "
                            "This should not happen If you are seeing this please report the issue."
                        )

                    # Fold PGD-based limit states into the shaking-based ones.
                    limit_states = AnalysisUtil.adjust_limit_states_for_pgd(
                        limit_states, pgd_limit_states)

                dmg_interval = selected_fragility_set.calculate_damage_interval(
                    limit_states, hazard_type=hazard_type,
                    inventory_type='electric_facility')
            else:
                raise ValueError(
                    "One of the fragilities is in deprecated format. This should not happen. If you are "
                    "seeing this please report the issue.")

            ds_result["guid"] = epf["properties"]["guid"]
            ds_result.update(limit_states)
            ds_result.update(dmg_interval)
            ds_result[
                'haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(
                    hazard_val, hazard_type)

            damage_result['guid'] = epf['properties']['guid']
            damage_result['fragility_id'] = selected_fragility_set.id
            damage_result["demandtypes"] = input_demand_types
            damage_result["demandunits"] = input_demand_units
            damage_result["hazardtype"] = hazard_type
            damage_result["hazardvals"] = hazard_val

            if hazard_type == "earthquake" and use_liquefaction is True:
                if liquefaction_available:
                    damage_result['liq_fragility_id'] = fragility_sets_liq[
                        epf["id"]].id
                    damage_result['liqdemandtypes'] = liq_demand_types
                    damage_result['liqdemandunits'] = liq_demand_units
                    damage_result['liqhazval'] = liq_hazard_vals
                    damage_result['liqprobability'] = liquefaction_prob
                else:
                    damage_result['liq_fragility_id'] = None
                    damage_result['liqdemandtypes'] = None
                    damage_result['liqdemandunits'] = None
                    damage_result['liqhazval'] = None
                    damage_result['liqprobability'] = None

            ds_results.append(ds_result)
            damage_results.append(damage_result)
            i += 1

        #############################################################
        # unmapped facilities still appear in the outputs, with None metadata
        for epf in unmapped_epfs:
            ds_result = dict()
            damage_result = dict()
            ds_result['guid'] = epf['properties']['guid']
            damage_result['guid'] = epf['properties']['guid']
            damage_result['fragility_id'] = None
            damage_result["demandtypes"] = None
            damage_result['demandunits'] = None
            damage_result["hazardtype"] = None
            # BUG FIX: key was 'hazardval', inconsistent with the mapped
            # loop's "hazardvals", which produced two columns in the
            # aggregated metadata output.
            damage_result['hazardvals'] = None
            if hazard_type == "earthquake" and use_liquefaction is True:
                damage_result['liq_fragility_id'] = None
                damage_result['liqdemandtypes'] = None
                damage_result['liqdemandunits'] = None
                damage_result['liqhazval'] = None
                damage_result['liqprobability'] = None

            ds_results.append(ds_result)
            damage_results.append(damage_result)

        return ds_results, damage_results

    def get_spec(self):
        """Get specifications of the epf damage analysis.

        Returns:
            obj: A JSON object of specifications of the epf damage analysis.

        """
        return {
            'name': 'epf-damage',
            'description': 'Electric Power Facility damage analysis.',
            'input_parameters': [
                {
                    'id': 'result_name',
                    'required': True,
                    'description': 'A name of the resulting dataset',
                    'type': str
                },
                {
                    'id': 'hazard_type',
                    'required': True,
                    'description': 'Hazard type (e.g. earthquake).',
                    'type': str
                },
                {
                    'id': 'hazard_id',
                    'required': True,
                    'description': 'Hazard ID which defines the particular hazard (e.g. New madrid earthquake '
                                   'using Atkinson Boore 1995).',
                    'type': str
                },
                {
                    'id': 'fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in mapping dataset ()',
                    'type': str
                },
                {
                    'id': 'liquefaction_fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in liquefaction mapping dataset',
                    'type': str
                },
                {
                    'id': 'use_liquefaction',
                    'required': False,
                    'description': 'Use a ground liquifacition to modify damage interval.',
                    'type': bool
                },
                {
                    'id': 'liquefaction_geology_dataset_id',
                    'required': False,
                    'description': 'Liquefaction geology/susceptibility dataset id. '
                                   'If not provided, liquefaction will be ignored',
                    'type': str
                },
                {
                    'id': 'use_hazard_uncertainty',
                    'required': False,
                    'description': 'Use hazard uncertainty',
                    'type': bool
                },
                {
                    'id': 'num_cpu',
                    'required': False,
                    'description': 'If using parallel execution, the number of cpus to request.',
                    'type': int
                },
            ],
            'input_datasets': [{
                'id': 'epfs',
                'required': True,
                'description': 'Electric Power Facility Inventory',
                'type': ['incore:epf', 'ergo:epf'],
            }, {
                'id': 'dfr3_mapping_set',
                'required': True,
                'description': 'DFR3 Mapping Set Object',
                'type': ['incore:dfr3MappingSet'],
            }],
            'output_datasets': [{
                'id': 'result',
                'parent_type': 'epfs',
                'type': 'incore:epfDamageVer3'
            }, {
                'id': 'metadata',
                'parent_type': 'epfs',
                'description': 'additional metadata in json file about applied hazard value and '
                               'fragility',
                'type': 'incore:epfDamageSupplement'
            }]
        }
class WaterFacilityDamage(BaseAnalysis):
    """Computes water facility damage for an earthquake or tsunami exposure.

    Hazard values are fetched per facility through the legacy single-point
    hazard-service endpoints; damage comes from the matched DFR3 fragility
    curves, optionally adjusted for liquefaction (earthquake only).

    Args:
        incore_client (IncoreClient): Service authentication.
    """

    # Fragility mapping keys used when the caller does not supply one.
    DEFAULT_EQ_FRAGILITY_KEY = "pga"
    DEFAULT_TSU_FRAGILITY_KEY = "Non-Retrofit inundationDepth Fragility ID Code"
    DEFAULT_LIQ_FRAGILITY_KEY = "pgd"

    def __init__(self, incore_client):
        # Create Hazard and Fragility service
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(WaterFacilityDamage, self).__init__(incore_client)

    def get_spec(self):
        """Get specifications of the water facility damage analysis.

        Returns:
            obj: A JSON object of specifications of the water facility damage analysis.
        """
        return {
            'name': 'water-facility-damage',
            'description': 'water facility damage analysis',
            'input_parameters': [
                {
                    'id': 'result_name',
                    'required': False,
                    'description': 'result dataset name',
                    'type': str
                },
                {
                    'id': 'hazard_type',
                    'required': True,
                    'description': 'Hazard Type (e.g. earthquake)',
                    'type': str
                },
                {
                    'id': 'hazard_id',
                    'required': True,
                    'description': 'Hazard ID',
                    'type': str
                },
                {
                    'id': 'fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in mapping dataset',
                    'type': str
                },
                {
                    'id': 'use_liquefaction',
                    'required': False,
                    'description': 'Use liquefaction',
                    'type': bool
                },
                {
                    'id': 'liquefaction_geology_dataset_id',
                    'required': False,
                    'description': 'Liquefaction geology/susceptibility dataset id. '
                                   'If not provided, liquefaction will be ignored',
                    'type': str
                },
                {
                    'id': 'liquefaction_fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in liquefaction mapping dataset',
                    'type': str
                },
                {
                    'id': 'use_hazard_uncertainty',
                    'required': False,
                    'description': 'Use hazard uncertainty',
                    'type': bool
                },
                {
                    'id': 'num_cpu',
                    'required': False,
                    'description': 'If using parallel execution, the number of cpus to request',
                    'type': int
                },
            ],
            'input_datasets': [
                {
                    'id': 'water_facilities',
                    'required': True,
                    'description': 'Water Facility Inventory',
                    'type': ['ergo:waterFacilityTopo'],
                },
                {
                    'id': 'dfr3_mapping_set',
                    'required': True,
                    'description': 'DFR3 Mapping Set Object',
                    'type': ['incore:dfr3MappingSet'],
                }
            ],
            'output_datasets': [
                {
                    'id': 'result',
                    'parent_type': 'water_facilities',
                    'description': 'A csv file with limit state probabilities and damage states '
                                   'for each water facility',
                    'type': 'ergo:waterFacilityDamageVer4'
                }
            ]
        }

    def run(self):
        """Performs Water facility damage analysis by using the parameters from the spec
        and creates an output dataset in csv format

        Returns:
            bool: True if successful, False otherwise
        """
        # Facility dataset
        inventory_set = self.get_input_dataset(
            "water_facilities").get_inventory_reader()

        # Get hazard input
        hazard_dataset_id = self.get_parameter("hazard_id")

        # Hazard type of the exposure
        hazard_type = self.get_parameter("hazard_type")

        user_defined_cpu = 1

        if not self.get_parameter("num_cpu") is None and self.get_parameter(
                "num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(self, len(
            inventory_set), user_defined_cpu)

        # Chunk the inventory so each worker receives one bulk slice.
        avg_bulk_input_size = int(len(inventory_set) / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(inventory_set)

        while count < len(inventory_list):
            inventory_args.append(
                inventory_list[count:count + avg_bulk_input_size])
            count += avg_bulk_input_size

        results = self.waterfacility_damage_concurrent_execution(
            self.waterfacilityset_damage_analysis, num_workers,
            inventory_args, repeat(hazard_type), repeat(hazard_dataset_id))

        self.set_result_csv_data("result", results,
                                 name=self.get_parameter("result_name"))
        return True

    def waterfacility_damage_concurrent_execution(self, function_name,
                                                  parallel_processes, *args):
        """Utilizes concurrent.future module.

        Args:
            function_name (function): The function to be parallelized.
            parallel_processes (int): Number of workers in parallelization.
            *args: All the arguments in order to pass into parameter function_name.

        Returns:
            list: A list of ordered dictionaries with damage results and other data/metadata.
        """
        output = []
        with concurrent.futures.ProcessPoolExecutor(
                max_workers=parallel_processes) as executor:
            for ret in executor.map(function_name, *args):
                output.extend(ret)
        return output

    def waterfacilityset_damage_analysis(self, facilities, hazard_type,
                                         hazard_dataset_id):
        """Gets applicable fragilities and calculates damage

        Args:
            facilities (list): Multiple water facilities from input inventory set.
            hazard_type (str): A hazard type of the hazard exposure (earthquake or tsunami).
            hazard_dataset_id (str): An id of the hazard exposure.

        Returns:
            list: A list of ordered dictionaries with water facility damage values and metadata.
        """
        result = []
        use_liquefaction = self.get_parameter("use_liquefaction")
        liq_geology_dataset_id = self.get_parameter(
            "liquefaction_geology_dataset_id")
        uncertainty = self.get_parameter("use_hazard_uncertainty")
        fragility_key = self.get_parameter("fragility_key")

        if hazard_type == 'earthquake':
            if fragility_key is None:
                fragility_key = self.DEFAULT_EQ_FRAGILITY_KEY

            pga_fragility_set = self.fragilitysvc.match_inventory(
                self.get_input_dataset("dfr3_mapping_set"), facilities,
                fragility_key)

            liq_fragility_set = []
            if use_liquefaction and liq_geology_dataset_id is not None:
                liq_fragility_key = self.get_parameter(
                    "liquefaction_fragility_key")
                if liq_fragility_key is None:
                    liq_fragility_key = self.DEFAULT_LIQ_FRAGILITY_KEY
                liq_fragility_set = self.fragilitysvc.match_inventory(
                    self.get_input_dataset("dfr3_mapping_set"), facilities,
                    liq_fragility_key)

            for facility in facilities:
                fragility = pga_fragility_set[facility["id"]]
                # BUG FIX: reset per facility. Previously liq_fragility was
                # initialized once before the loop, so a facility without a
                # liquefaction mapping inherited the previous facility's
                # liquefaction fragility.
                liq_fragility = None
                if facility["id"] in liq_fragility_set:
                    liq_fragility = liq_fragility_set[facility["id"]]

                result.append(
                    self.waterfacility_damage_analysis(facility, fragility,
                                                       liq_fragility,
                                                       hazard_type,
                                                       hazard_dataset_id,
                                                       liq_geology_dataset_id,
                                                       uncertainty))
        elif hazard_type == 'tsunami':
            if fragility_key is None:
                fragility_key = self.DEFAULT_TSU_FRAGILITY_KEY

            inundation_fragility_set = self.fragilitysvc.match_inventory(
                self.get_input_dataset("dfr3_mapping_set"), facilities,
                fragility_key)

            for facility in facilities:
                fragility = inundation_fragility_set[facility["id"]]
                # Empty liq_fragility and geology id: liquefaction does not
                # apply to tsunami.
                result.append(
                    self.waterfacility_damage_analysis(facility, fragility,
                                                       [], hazard_type,
                                                       hazard_dataset_id, "",
                                                       False))
        else:
            raise ValueError(
                "Hazard type other than Earthquake and Tsunami are not currently supported.")

        return result

    def waterfacility_damage_analysis(self, facility, fragility, liq_fragility,
                                      hazard_type, hazard_dataset_id,
                                      liq_geology_dataset_id, uncertainty):
        """Computes damage analysis for a single facility

        Args:
            facility (obj): A JSON mapping of a facility based on mapping attributes
            fragility (obj): A JSON description of fragility mapped to the building.
            liq_fragility (obj): A JSON description of liquefaction fragility mapped to the building.
            hazard_type (str): A string that indicates the hazard type
            hazard_dataset_id (str): Hazard id from the hazard service
            liq_geology_dataset_id (str): Geology dataset id from data service to use for
                liquefaction calculation, if applicable
            uncertainty (bool): Whether to use hazard standard deviation values for uncertainty

        Returns:
            OrderedDict: A dictionary with water facility damage values and other data/metadata.
        """
        std_dev = 0
        if uncertainty:
            # NOTE(review): placeholder uncertainty — a uniform random number,
            # not a hazard-service-derived standard deviation (see TODO below).
            std_dev = random.random()

        hazard_demand_type = fragility.demand_type
        demand_units = fragility.demand_units
        liq_hazard_type = ""
        liq_hazard_val = 0.0
        liquefaction_prob = 0.0
        location = GeoUtil.get_location(facility)

        point = str(location.y) + "," + str(location.x)

        if hazard_type == "earthquake":
            hazard_val_set = self.hazardsvc.get_earthquake_hazard_values(
                hazard_dataset_id, hazard_demand_type, demand_units, [point])
        elif hazard_type == "tsunami":
            hazard_val_set = self.hazardsvc.get_tsunami_hazard_values(
                hazard_dataset_id, hazard_demand_type, demand_units, [point])
        else:
            raise ValueError(
                "Hazard type other than Earthquake and Tsunami are not currently supported.")

        # Negative hazard values are clamped to zero before curve evaluation.
        hazard_val = hazard_val_set[0]['hazardValue']
        if hazard_val < 0:
            hazard_val = 0

        limit_states = fragility.calculate_limit_state(hazard_val, std_dev)

        if liq_fragility is not None and liq_geology_dataset_id:
            liq_hazard_type = liq_fragility.demand_type
            pgd_demand_units = liq_fragility.demand_units
            point = str(location.y) + "," + str(location.x)

            liquefaction = self.hazardsvc.get_liquefaction_values(
                hazard_dataset_id, liq_geology_dataset_id,
                pgd_demand_units, [point])
            liq_hazard_val = liquefaction[0][liq_hazard_type]
            liquefaction_prob = liquefaction[0]['liqProbability']
            pgd_limit_states = liq_fragility.calculate_limit_state(
                liq_hazard_val, std_dev)
            # Fold PGD-based limit states into the shaking-based ones.
            limit_states = AnalysisUtil.adjust_limit_states_for_pgd(
                limit_states, pgd_limit_states)

        dmg_intervals = AnalysisUtil.calculate_damage_interval(limit_states)

        result = collections.OrderedDict()
        result = {**limit_states, **dmg_intervals}  # Needs py 3.5+
        metadata = collections.OrderedDict()
        metadata['guid'] = facility['properties']['guid']
        metadata['hazardtype'] = hazard_type
        metadata['demandtype'] = hazard_demand_type
        metadata['hazardval'] = hazard_val
        metadata['liqhaztype'] = liq_hazard_type
        metadata['liqhazval'] = liq_hazard_val
        metadata['liqprobability'] = liquefaction_prob

        result = {**metadata, **result}
        return result
class NonStructBuildingDamage(BaseAnalysis):
    """Computes non-structural structural building damage for an earthquake hazard.

    Acceleration-sensitive (AS) and drift-sensitive (DS) fragilities are matched
    and evaluated independently per building, with optional liquefaction
    adjustment of the AS/DS damage probabilities.

    Args:
        incore_client (IncoreClient): Service authentication.

    """

    def __init__(self, incore_client):
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(NonStructBuildingDamage, self).__init__(incore_client)

    def run(self):
        """Executes building damage analysis.

        Fills in default parameters, chunks the building inventory, runs the
        bulk analysis in a process pool and stores the CSV result dataset.
        """
        # Building dataset
        building_set = self.get_input_dataset("buildings").get_inventory_reader()

        # set Default Fragility key for both the AS and DS mappings
        fragility_key_as = self.get_parameter("fragility_key_as")
        if fragility_key_as is None:
            self.set_parameter("fragility_key_as",
                               NonStructBuildingUtil.DEFAULT_FRAGILITY_KEY_AS)

        fragility_key_ds = self.get_parameter("fragility_key_ds")
        if fragility_key_ds is None:
            self.set_parameter("fragility_key_ds",
                               NonStructBuildingUtil.DEFAULT_FRAGILITY_KEY_DS)

        # Set Default Hazard Uncertainty
        use_hazard_uncertainty = self.get_parameter("use_hazard_uncertainty")
        if use_hazard_uncertainty is None:
            self.set_parameter("use_hazard_uncertainty", False)

        # Set Default Liquefaction
        use_liquefaction = self.get_parameter("use_liquefaction")
        if use_liquefaction is None:
            self.set_parameter("use_liquefaction", False)

        # NOTE(review): this initial value is unused — it is overwritten by the
        # concurrent execution below.
        results = []

        user_defined_cpu = 1

        if not self.get_parameter("num_cpu") is None and self.get_parameter("num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(self,
                                                                 len(building_set),
                                                                 user_defined_cpu)

        # Chunk the inventory so each worker receives one bulk slice.
        avg_bulk_input_size = int(len(building_set) / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(building_set)
        while count < len(inventory_list):
            inventory_args.append(inventory_list[count:count + avg_bulk_input_size])
            count += avg_bulk_input_size

        results = self.building_damage_concurrent_future(self.building_damage_analysis_bulk_input,
                                                         num_workers,
                                                         inventory_args)

        self.set_result_csv_data("result", results, name=self.get_parameter("result_name"))

        return True

    def building_damage_concurrent_future(self, function_name, num_workers, *args):
        """Utilizes concurrent.future module.

        Args:
            function_name (function): The function to be parallelized.
            num_workers (int): Maximum number workers in parallelization.
            *args: All the arguments in order to pass into parameter function_name.

        Returns:
            list: A list of ordered dictionaries with building damage values and other data/metadata.

        """
        output = []
        with concurrent.futures.ProcessPoolExecutor(max_workers=num_workers) as executor:
            for ret in executor.map(function_name, *args):
                output.extend(ret)

        return output

    def building_damage_analysis_bulk_input(self, buildings):
        """Run analysis for multiple buildings.

        Args:
            buildings (list): Multiple buildings from input inventory set.

        Returns:
            list: A list of ordered dictionaries with building damage values and other data/metadata.

        """
        result = []
        fragility_sets_as = self.fragilitysvc.match_inventory(self.get_input_dataset("dfr3_mapping_set"),
                                                              buildings,
                                                              self.get_parameter("fragility_key_as"))
        fragility_sets_ds = self.fragilitysvc.match_inventory(self.get_input_dataset("dfr3_mapping_set"),
                                                              buildings,
                                                              self.get_parameter("fragility_key_ds"))

        for building in buildings:
            fragility_set_as = None
            fragility_set_ds = None

            # Both the AS and DS mappings must exist for the fragilities to be
            # applied; otherwise the building is analyzed with None fragilities.
            if building["id"] in fragility_sets_as \
                    and building["id"] in fragility_sets_ds:
                fragility_set_as = fragility_sets_as[building["id"]]
                fragility_set_ds = fragility_sets_ds[building["id"]]

            result.append(self.building_damage_analysis(building,
                                                        fragility_set_as,
                                                        fragility_set_ds))

        return result

    def building_damage_analysis(self, building, fragility_set_as, fragility_set_ds):
        """Calculates bridge damage results for a single building.

        Args:
            building (obj): A JSON-mapping of a geometric object from the inventory: current building.
            fragility_set_as (obj): A JSON description of acceleration-sensitive (AS) fragility
                assigned to the building.
            fragility_set_ds (obj): A JSON description of drift-sensitive (DS) fragility
                assigned to the building.

        Returns:
            OrderedDict: A dictionary with building damage values and other data/metadata.

        """
        building_results = collections.OrderedDict()
        dmg_probability_as = collections.OrderedDict()
        dmg_probability_ds = collections.OrderedDict()
        hazard_demand_type_as = None
        hazard_demand_type_ds = None
        hazard_val_as = 0.0
        hazard_val_ds = 0.0

        # read static parameters from object self
        hazard_dataset_id = self.get_parameter("hazard_id")
        liq_geology_dataset_id = self.get_parameter("liq_geology_dataset_id")
        use_liquefaction = self.get_parameter("use_liquefaction")
        use_hazard_uncertainty = self.get_parameter("use_hazard_uncertainty")

        # Acceleration-Sensitive Fragility ID Code
        if fragility_set_as is not None:
            hazard_demand_type_as = AnalysisUtil.get_hazard_demand_type(building,
                                                                        fragility_set_as,
                                                                        'earthquake')
            demand_units_as = fragility_set_as.demand_units
            location = GeoUtil.get_location(building)

            point = str(location.y) + "," + str(location.x)

            hazard_val_as = self.hazardsvc.get_earthquake_hazard_values(
                hazard_dataset_id, hazard_demand_type_as,
                demand_units_as,
                points=[point])[0]['hazardValue']

            dmg_probability_as = fragility_set_as.calculate_limit_state(hazard_val_as)

            # adjust dmg probability for liquefaction.
            # NOTE(review): 'in' appears to be the demand unit (inches) for
            # ground deformation — confirm against the hazard service API.
            if use_liquefaction:
                if liq_geology_dataset_id is not None:
                    liqufaction_dmg = self.hazardsvc.get_liquefaction_values(
                        hazard_dataset_id, liq_geology_dataset_id,
                        'in',
                        points=[point])[0][
                        'groundFailureProb']
                else:
                    raise ValueError('Hazard does not support liquefaction! \
Check to make sure you defined the liquefaction\
portion of your scenario earthquake.')
                dmg_probability_as = NonStructBuildingUtil.adjust_damage_for_liquefaction(dmg_probability_as,
                                                                                          liqufaction_dmg)

            # TODO this value needs to come from the hazard service
            # adjust dmg probability for hazard uncertainty
            if use_hazard_uncertainty:
                raise ValueError('Uncertainty has not yet been implemented!')
        else:
            # No AS fragility matched: report zero damage probabilities.
            dmg_probability_as['immocc'] = 0.0
            dmg_probability_as['lifesfty'] = 0.0
            dmg_probability_as['collprev'] = 0.0

        dmg_interval_as = AnalysisUtil.calculate_damage_interval(dmg_probability_as)

        # Drift-Sensitive Fragility ID Code
        if fragility_set_ds is not None:
            hazard_demand_type_ds = AnalysisUtil.get_hazard_demand_type(building,
                                                                        fragility_set_ds,
                                                                        'earthquake')
            demand_units_ds = fragility_set_ds.demand_units
            location = GeoUtil.get_location(building)

            point = str(location.y) + "," + str(location.x)

            hazard_val_ds = self.hazardsvc.get_earthquake_hazard_values(
                hazard_dataset_id, hazard_demand_type_ds,
                demand_units_ds, points=[point])[0]['hazardValue']

            dmg_probability_ds = fragility_set_ds.calculate_limit_state(hazard_val_ds)

            # adjust hazard value for liquefaction
            if use_liquefaction:
                if liq_geology_dataset_id is not None:
                    liqufaction_dmg = self.hazardsvc.get_liquefaction_values(
                        hazard_dataset_id, liq_geology_dataset_id,
                        'in',
                        points=[point])[0][
                        'groundFailureProb']
                else:
                    raise ValueError('Hazard does not support liquefaction! \
Check to make sure you defined the liquefaction\
portion of your scenario earthquake.')
                dmg_probability_ds = NonStructBuildingUtil.adjust_damage_for_liquefaction(dmg_probability_ds,
                                                                                          liqufaction_dmg)

            # TODO this value needs to come from the hazard service
            # adjust dmg probability for hazard uncertainty
            if use_hazard_uncertainty:
                raise ValueError('Uncertainty has not yet been implemented!')
        else:
            # No DS fragility matched: report zero damage probabilities.
            dmg_probability_ds['immocc'] = 0.0
            dmg_probability_ds['lifesfty'] = 0.0
            dmg_probability_ds['collprev'] = 0.0

        dmg_interval_ds = AnalysisUtil.calculate_damage_interval(dmg_probability_ds)

        # put results in dictionary
        building_results['guid'] = building['properties']['guid']
        building_results['immocc_as'] = dmg_probability_as['immocc']
        building_results['lifsfty_as'] = dmg_probability_as['lifesfty']
        building_results['collpre_as'] = dmg_probability_as['collprev']
        building_results['insig_as'] = dmg_interval_as['insignific']
        building_results['mod_as'] = dmg_interval_as['moderate']
        building_results['heavy_as'] = dmg_interval_as['heavy']
        building_results['comp_as'] = dmg_interval_as['complete']
        building_results['immocc_ds'] = dmg_probability_ds['immocc']
        building_results['lifsfty_ds'] = dmg_probability_ds['lifesfty']
        building_results['collpre_ds'] = dmg_probability_ds['collprev']
        building_results['insig_ds'] = dmg_interval_ds['insignific']
        building_results['mod_ds'] = dmg_interval_ds['moderate']
        building_results['heavy_ds'] = dmg_interval_ds['heavy']
        building_results['comp_ds'] = dmg_interval_ds['complete']

        building_results["hzrdtyp_as"] = hazard_demand_type_as
        building_results["hzrdval_as"] = hazard_val_as
        building_results["hzrdtyp_ds"] = hazard_demand_type_ds
        building_results["hzrdval_ds"] = hazard_val_ds

        return building_results

    def get_spec(self):
        """Get specifications of the building damage analysis.

        Returns:
            obj: A JSON object of specifications of the building damage analysis.

        """
        return {
            'name': 'building-damage',
            'description': 'building damage analysis',
            'input_parameters': [
                {
                    'id': 'result_name',
                    'required': True,
                    'description': 'result dataset name',
                    'type': str
                },
                {
                    'id': 'hazard_type',
                    'required': True,
                    'description': 'Hazard Type (e.g. earthquake)',
                    'type': str
                },
                {
                    'id': 'hazard_id',
                    'required': True,
                    'description': 'Hazard ID',
                    'type': str
                },
                {
                    'id': 'fragility_key_as',
                    'required': False,
                    'description': 'AS Fragility key to use in mapping dataset',
                    'type': str
                },
                {
                    'id': 'fragility_key_ds',
                    'required': False,
                    'description': 'DS Fragility key to use in mapping dataset',
                    'type': str
                },
                {
                    'id': 'use_liquefaction',
                    'required': False,
                    'description': 'Use liquefaction',
                    'type': bool
                },
                {
                    'id': 'liq_geology_dataset_id',
                    'required': False,
                    'description': 'liquefaction geology dataset id, \
if use liquefaction, you have to provide this id',
                    'type': str
                },
                {
                    'id': 'use_hazard_uncertainty',
                    'required': False,
                    'description': 'Use hazard uncertainty',
                    'type': bool
                },
                {
                    'id': 'num_cpu',
                    'required': False,
                    'description': 'If using parallel execution, the number of cpus to request',
                    'type': int
                },
            ],
            'input_datasets': [
                {
                    'id': 'buildings',
                    'required': True,
                    'description': 'building Inventory',
                    'type': ['ergo:buildingInventoryVer4'],
                },
                {
                    'id': 'dfr3_mapping_set',
                    'required': True,
                    'description': 'DFR3 Mapping Set Object',
                    'type': ['incore:dfr3MappingSet'],
                }
            ],
            'output_datasets': [
                {
                    'id': 'result',
                    'parent_type': 'buildings',
                    'description': 'CSV file of building non-structural damage',
                    'type': 'ergo:nsBuildingInventoryDamage'
                }
            ]
        }
class PipelineDamage(BaseAnalysis):
    """Computes pipeline damage for an earthquake or a tsunami.

    Args:
        incore_client: Service client with authentication info.

    """

    def __init__(self, incore_client):
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(PipelineDamage, self).__init__(incore_client)

    def run(self):
        """Execute pipeline damage analysis.

        Reads the pipeline inventory and hazard parameters, splits the
        inventory into roughly equal chunks, computes damage in parallel,
        and stores the CSV result plus JSON metadata.

        Returns:
            bool: True once results have been stored.

        """
        pipeline_dataset = self.get_input_dataset(
            "pipeline").get_inventory_reader()

        # Get hazard input
        hazard_type = self.get_parameter("hazard_type")
        hazard_dataset_id = self.get_parameter("hazard_id")
        user_defined_cpu = 1
        if not self.get_parameter("num_cpu") is None and self.get_parameter(
                "num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        dataset_size = len(pipeline_dataset)
        num_workers = AnalysisUtil.determine_parallelism_locally(
            self, dataset_size, user_defined_cpu)

        # Split the inventory into one chunk per worker.
        avg_bulk_input_size = int(dataset_size / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(pipeline_dataset)
        while count < len(inventory_list):
            inventory_args.append(
                inventory_list[count:count + avg_bulk_input_size])
            count += avg_bulk_input_size

        (results, damage_results) = self.pipeline_damage_concurrent_future(
            self.pipeline_damage_analysis_bulk_input, num_workers,
            inventory_args, repeat(hazard_type), repeat(hazard_dataset_id))

        self.set_result_csv_data("result", results,
                                 name=self.get_parameter("result_name"))
        self.set_result_json_data("metadata", damage_results,
                                  name=self.get_parameter(
                                      "result_name") + "_additional_info")
        return True

    def pipeline_damage_concurrent_future(self, function_name, num_workers,
                                          *args):
        """Utilizes concurrent.future module.

        Args:
            function_name (function): The function to be parallelized.
            num_workers (int): Maximum number workers in parallelization.
            *args: All the arguments in order to pass into parameter function_name.

        Returns:
            dict: An ordered dictionaries with pipeline damage values.
            dict: An ordered dictionaries with other pipeline data/metadata.

        """
        output_ds = []
        output_dmg = []
        with concurrent.futures.ProcessPoolExecutor(
                max_workers=num_workers) as executor:
            # Each call returns a (damage-values, metadata) pair per chunk.
            for ret1, ret2 in executor.map(function_name, *args):
                output_ds.extend(ret1)
                output_dmg.extend(ret2)
        return output_ds, output_dmg

    def pipeline_damage_analysis_bulk_input(self, pipelines, hazard_type,
                                            hazard_dataset_id):
        """Run pipeline damage analysis for multiple pipelines.

        Args:
            pipelines (list): Multiple pipelines from pipeline dataset.
            hazard_type (str): Hazard type (earthquake or tsunami).
            hazard_dataset_id (str): An id of the hazard exposure.

        Returns:
            dict: An ordered dictionaries with pipeline damage values.
            dict: An ordered dictionaries with other pipeline data/metadata.

        """
        # Get Fragility key; default depends on the hazard type.
        fragility_key = self.get_parameter("fragility_key")
        if fragility_key is None:
            fragility_key = "Non-Retrofit inundationDepth Fragility ID Code" \
                if hazard_type == 'tsunami' else "pgv"
            self.set_parameter("fragility_key", fragility_key)

        # get fragility set
        fragility_sets = self.fragilitysvc.match_inventory(
            self.get_input_dataset("dfr3_mapping_set"), pipelines,
            fragility_key)

        # Build one hazard-service request entry per mapped pipeline.
        values_payload = []
        unmapped_pipelines = []
        mapped_pipelines = []
        for pipeline in pipelines:
            # if find a match fragility for that pipeline
            if pipeline["id"] in fragility_sets.keys():
                fragility_set = fragility_sets[pipeline["id"]]
                location = GeoUtil.get_location(pipeline)
                loc = str(location.y) + "," + str(location.x)
                demands = AnalysisUtil.get_hazard_demand_types(
                    pipeline, fragility_set, hazard_type)
                units = fragility_sets[pipeline["id"]].demand_units
                value = {"demands": demands, "units": units, "loc": loc}
                values_payload.append(value)
                mapped_pipelines.append(pipeline)
            else:
                unmapped_pipelines.append(pipeline)

        # not needed anymore as they are already split into mapped and unmapped
        del pipelines

        if hazard_type == 'earthquake':
            hazard_vals = self.hazardsvc.post_earthquake_hazard_values(
                hazard_dataset_id, values_payload)
        elif hazard_type == 'tsunami':
            hazard_vals = self.hazardsvc.post_tsunami_hazard_values(
                hazard_dataset_id, values_payload)
        else:
            # tornado, hurricane, flood and anything else are unsupported;
            # the duplicated per-type raise branches collapse to one.
            raise ValueError(
                "The provided hazard type is not supported yet by this analysis")

        pipeline_results = []
        damage_results = []
        for i, pipeline in enumerate(mapped_pipelines):
            limit_states = dict()
            dmg_intervals = dict()
            pipeline_result = dict()
            fragility_set = fragility_sets[pipeline["id"]]

            # TODO: Once all fragilities are migrated to new format, we can remove this condition
            if isinstance(fragility_set.fragility_curves[0], DFR3Curve):
                # Supports multiple demand types in same fragility
                haz_vals = AnalysisUtil.update_precision_of_lists(
                    hazard_vals[i]["hazardValues"])
                demand_types = hazard_vals[i]["demands"]
                demand_units = hazard_vals[i]["units"]

                # construct hazard_value dictionary {"demand_type":"hazard_value", ...}
                hval_dict = dict()
                for j, d in enumerate(fragility_set.demand_types):
                    hval_dict[d] = haz_vals[j]

                if not AnalysisUtil.do_hazard_values_have_errors(
                        hazard_vals[i]["hazardValues"]):
                    pipeline_args = \
                        fragility_set.construct_expression_args_from_inventory(
                            pipeline)
                    limit_states = fragility_set.calculate_limit_state(
                        hval_dict, inventory_type="pipeline", **pipeline_args)
                    dmg_intervals = fragility_set.calculate_damage_interval(
                        limit_states, hazard_type=hazard_type,
                        inventory_type="pipeline")
            else:
                raise ValueError(
                    "One of the fragilities is in deprecated format. This should not happen. If you are "
                    "seeing this please report the issue.")

            pipeline_result['guid'] = pipeline['properties']['guid']
            pipeline_result.update(limit_states)
            pipeline_result.update(dmg_intervals)
            pipeline_result['haz_expose'] = \
                AnalysisUtil.get_exposure_from_hazard_values(
                    haz_vals, hazard_type)
            damage_result = dict()
            damage_result['guid'] = pipeline['properties']['guid']
            damage_result['fragility_id'] = fragility_set.id
            damage_result['demandtypes'] = demand_types
            damage_result['demandunits'] = demand_units
            damage_result['hazardtype'] = hazard_type
            damage_result['hazardval'] = haz_vals

            pipeline_results.append(pipeline_result)
            damage_results.append(damage_result)

        # for pipeline does not have matching fragility curves, default to None
        for pipeline in unmapped_pipelines:
            pipeline_result = dict()
            damage_result = dict()
            pipeline_result['guid'] = pipeline['properties']['guid']
            damage_result['guid'] = pipeline['properties']['guid']
            damage_result['fragility_id'] = None
            damage_result['demandtypes'] = None
            damage_result['demandunits'] = None
            damage_result['hazardtype'] = None
            # BUGFIX: key was 'hazardvals', inconsistent with the
            # 'hazardval' key written for mapped pipelines above; unmapped
            # rows would otherwise carry a different column in the metadata.
            damage_result['hazardval'] = None

            pipeline_results.append(pipeline_result)
            damage_results.append(damage_result)

        return pipeline_results, damage_results

    def get_spec(self):
        """Get specifications of the pipeline damage analysis.

        Returns:
            obj: A JSON object of specifications of the pipeline damage analysis.

        """
        return {
            'name': 'pipeline-damage',
            'description': 'Buried pipeline damage analysis',
            'input_parameters': [
                {
                    'id': 'result_name',
                    'required': True,
                    'description': 'Result dataset name',
                    'type': str
                },
                {
                    'id': 'hazard_type',
                    'required': True,
                    'description': 'Hazard Type',
                    'type': str
                },
                {
                    'id': 'hazard_id',
                    'required': True,
                    'description': 'Hazard ID',
                    'type': str
                },
                {
                    'id': 'fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in mapping dataset',
                    'type': str
                },
                {
                    'id': 'num_cpu',
                    'required': False,
                    'description': 'If using parallel execution, the number of cpus to request',
                    'type': int
                },
                {
                    'id': 'liquefaction_geology_dataset_id',
                    'required': False,
                    'description': 'Geology dataset id',
                    'type': str,
                }
            ],
            'input_datasets': [
                {
                    'id': 'pipeline',
                    'required': True,
                    'description': 'Pipeline Inventory',
                    'type': ['ergo:buriedPipelineTopology', 'ergo:pipeline'],
                },
                {
                    'id': 'dfr3_mapping_set',
                    'required': True,
                    'description': 'DFR3 Mapping Set Object',
                    'type': ['incore:dfr3MappingSet'],
                }
            ],
            'output_datasets': [
                {
                    'id': 'result',
                    'parent_type': 'pipeline',
                    'description': 'CSV file of damage states for pipeline damage',
                    'type': 'incore:pipelineDamageVer3'
                },
                {
                    'id': 'metadata',
                    'parent_type': 'pipeline',
                    'description': 'Json file with information about applied hazard value and fragility',
                    'type': 'incore:pipelineDamageSupplement'
                }
            ]
        }
class PipelineDamageRepairRate(BaseAnalysis):
    """Computes pipeline damage for a hazard.

    Args:
        incore_client: Service client with authentication info

    """

    def __init__(self, incore_client):
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(PipelineDamageRepairRate, self).__init__(incore_client)

    def run(self):
        """Execute pipeline damage analysis.

        Reads the pipeline inventory, chunks it across workers, computes
        repair rates per pipeline, and stores the CSV result.

        Returns:
            bool: True once results have been stored.

        """
        # Pipeline dataset
        pipeline_dataset = self.get_input_dataset(
            "pipeline").get_inventory_reader()

        # Get hazard type
        hazard_type = self.get_parameter("hazard_type")

        # Get hazard input
        hazard_dataset_id = self.get_parameter("hazard_id")
        user_defined_cpu = 1
        if not self.get_parameter("num_cpu") is None and self.get_parameter(
                "num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        dataset_size = len(pipeline_dataset)
        num_workers = AnalysisUtil.determine_parallelism_locally(
            self, dataset_size, user_defined_cpu)

        # Split the inventory into one chunk per worker.
        avg_bulk_input_size = int(dataset_size / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(pipeline_dataset)
        while count < len(inventory_list):
            inventory_args.append(
                inventory_list[count:count + avg_bulk_input_size])
            count += avg_bulk_input_size

        results = self.pipeline_damage_concurrent_future(
            self.pipeline_damage_analysis_bulk_input, num_workers,
            inventory_args, repeat(hazard_type), repeat(hazard_dataset_id))

        self.set_result_csv_data("result", results,
                                 name=self.get_parameter("result_name"))

        return True

    def pipeline_damage_concurrent_future(self, function_name, num_workers,
                                          *args):
        """Utilizes concurrent.future module.

        Args:
            function_name (function): The function to be parallelized.
            num_workers (int): Maximum number workers in parallelization.
            *args: All the arguments in order to pass into parameter function_name.

        Returns:
            list: A list of ordered dictionaries with building damage values and other data/metadata.

        """
        output = []
        with concurrent.futures.ProcessPoolExecutor(
                max_workers=num_workers) as executor:
            for ret in executor.map(function_name, *args):
                output.extend(ret)

        return output

    def pipeline_damage_analysis_bulk_input(self, pipelines, hazard_type,
                                            hazard_dataset_id):
        """Run pipeline damage analysis for multiple pipelines.

        Args:
            pipelines (list): multiple pipelines from pipeline dataset.
            hazard_type (str): Hazard type
            hazard_dataset_id (str): An id of the hazard exposure.

        Returns:
            list: A list of ordered dictionaries with pipeline damage values and other data/metadata.

        """
        result = []

        # Get Fragility key; default depends on the hazard type.
        fragility_key = self.get_parameter("fragility_key")
        if fragility_key is None:
            fragility_key = PipelineUtil.DEFAULT_TSU_FRAGILITY_KEY if hazard_type == 'tsunami' else \
                PipelineUtil.DEFAULT_EQ_FRAGILITY_KEY
            self.set_parameter("fragility_key", fragility_key)

        # get fragility set
        fragility_sets = self.fragilitysvc.match_inventory(
            self.get_input_dataset("dfr3_mapping_set"), pipelines,
            fragility_key)

        # Get Liquefaction Fragility Key
        liquefaction_fragility_key = self.get_parameter(
            "liquefaction_fragility_key")
        if hazard_type == "earthquake" and liquefaction_fragility_key is None:
            liquefaction_fragility_key = PipelineUtil.LIQ_FRAGILITY_KEY

        # Liquefaction
        use_liquefaction = False
        if hazard_type == "earthquake" and self.get_parameter(
                "use_liquefaction") is not None:
            use_liquefaction = self.get_parameter("use_liquefaction")

        # Get geology dataset id
        geology_dataset_id = self.get_parameter(
            "liquefaction_geology_dataset_id")
        # BUGFIX: initialize up front so the name is always bound; the
        # original relied on short-circuit evaluation of the guard below to
        # avoid a NameError when no geology dataset is supplied.
        fragility_sets_liq = None
        if geology_dataset_id is not None:
            fragility_sets_liq = self.fragilitysvc.match_inventory(
                self.get_input_dataset("dfr3_mapping_set"), pipelines,
                liquefaction_fragility_key)

        for pipeline in pipelines:
            if pipeline["id"] in fragility_sets.keys():
                liq_fragility_set = None
                # Check if mapping contains liquefaction fragility
                if geology_dataset_id is not None and \
                        fragility_sets_liq is not None and \
                        pipeline["id"] in fragility_sets_liq:
                    liq_fragility_set = fragility_sets_liq[pipeline["id"]]

                result.append(
                    self.pipeline_damage_analysis(
                        pipeline, hazard_type,
                        fragility_sets[pipeline["id"]], liq_fragility_set,
                        hazard_dataset_id, geology_dataset_id,
                        use_liquefaction))

        return result

    def pipeline_damage_analysis(self, pipeline, hazard_type, fragility_set,
                                 fragility_set_liq, hazard_dataset_id,
                                 geology_dataset_id, use_liquefaction):
        """Run pipeline damage for a single pipeline.

        Args:
            pipeline (obj): a single pipeline.
            hazard_type (str): hazard type.
            fragility_set (obj): A JSON description of fragility assigned to the building.
            fragility_set_liq (obj): A JSON description of fragility assigned to the building with liquefaction.
            hazard_dataset_id (str): A hazard dataset to use.
            geology_dataset_id (str): A dataset id for geology dataset for liquefaction.
            use_liquefaction (bool): Liquefaction. True for using liquefaction information to modify the damage,
                False otherwise.

        Returns:
            OrderedDict: A dictionary with pipeline damage values and other data/metadata.

        """
        pipeline_results = collections.OrderedDict()
        pgv_repairs = 0.0
        pgd_repairs = 0.0
        liq_hazard_type = ""
        liq_hazard_val = 0.0
        liquefaction_prob = 0.0

        if fragility_set is not None:
            demand_type = fragility_set.demand_type.lower()
            demand_units = fragility_set.demand_units
            location = GeoUtil.get_location(pipeline)
            point = str(location.y) + "," + str(location.x)

            if hazard_type == 'earthquake':
                hazard_resp = self.hazardsvc.get_earthquake_hazard_values(
                    hazard_dataset_id, demand_type, demand_units, [point])
            elif hazard_type == 'tsunami':
                hazard_resp = self.hazardsvc.get_tsunami_hazard_values(
                    hazard_dataset_id, demand_type, demand_units, [point])
            elif hazard_type == 'tornado':
                hazard_resp = self.hazardsvc.get_tornado_hazard_values(
                    hazard_dataset_id, demand_units, [point])
            elif hazard_type == 'hurricane':
                hazard_resp = self.hazardsvc.get_hurricanewf_values(
                    hazard_dataset_id, demand_type, demand_units, [point])
            else:
                raise ValueError("Hazard type are not currently supported.")

            hazard_val = hazard_resp[0]['hazardValue']
            # Clamp negative out-of-bounds raster values to zero.
            if hazard_val <= 0.0:
                hazard_val = 0.0

            diameter = PipelineUtil.get_pipe_diameter(pipeline)
            fragility_vars = {'x': hazard_val, 'y': diameter}
            fragility_curve = fragility_set.fragility_curves[0]

            # TODO: here assume that custom fragility set only has one limit state
            pgv_repairs = fragility_set.calculate_custom_limit_state(
                fragility_vars)['failure']
            # Convert PGV repairs to SI units
            pgv_repairs = PipelineUtil.convert_result_unit(
                fragility_curve.description, pgv_repairs)

            if use_liquefaction is True and fragility_set_liq is not None \
                    and geology_dataset_id is not None:
                liq_fragility_curve = fragility_set_liq.fragility_curves[0]
                liq_hazard_type = fragility_set_liq.demand_type
                pgd_demand_units = fragility_set_liq.demand_units

                # Get PGD hazard value from hazard service
                location_str = str(location.y) + "," + str(location.x)
                liquefaction = self.hazardsvc.get_liquefaction_values(
                    hazard_dataset_id, geology_dataset_id,
                    pgd_demand_units, [location_str])
                liq_hazard_val = liquefaction[0]['pgd']
                liquefaction_prob = liquefaction[0]['liqProbability']

                liq_fragility_vars = {
                    'x': liq_hazard_val,
                    'y': liquefaction_prob
                }
                pgd_repairs = \
                    liq_fragility_curve.compute_custom_limit_state_probability(
                        liq_fragility_vars)
                # Convert PGD repairs to SI units
                pgd_repairs = PipelineUtil.convert_result_unit(
                    liq_fragility_curve.description, pgd_repairs)

            # Combine PGV- and PGD-driven repair rates into break/leak rates
            # using the fixed 0.2/0.8 weighting.
            total_repair_rate = pgd_repairs + pgv_repairs
            break_rate = 0.2 * pgv_repairs + 0.8 * pgd_repairs
            leak_rate = 0.8 * pgv_repairs + 0.2 * pgd_repairs
            length = PipelineUtil.get_pipe_length(pipeline)
            failure_probability = 1 - math.exp(-1.0 * break_rate * length)
            num_pgd_repairs = pgd_repairs * length
            num_pgv_repairs = pgv_repairs * length
            num_repairs = num_pgd_repairs + num_pgv_repairs

            pipeline_results['guid'] = pipeline['properties']['guid']
            if 'pipetype' in pipeline['properties']:
                pipeline_results['pipeclass'] = \
                    pipeline['properties']['pipetype']
            elif 'pipelinesc' in pipeline['properties']:
                pipeline_results['pipeclass'] = \
                    pipeline['properties']['pipelinesc']
            else:
                pipeline_results['pipeclass'] = ""

            pipeline_results['pgvrepairs'] = pgv_repairs
            pipeline_results['pgdrepairs'] = pgd_repairs
            pipeline_results['repairspkm'] = total_repair_rate
            pipeline_results['breakrate'] = break_rate
            pipeline_results['leakrate'] = leak_rate
            pipeline_results['failprob'] = failure_probability
            pipeline_results['demandtype'] = demand_type
            pipeline_results['hazardtype'] = hazard_type
            pipeline_results['hazardval'] = hazard_val
            pipeline_results['liqhaztype'] = liq_hazard_type
            pipeline_results['liqhazval'] = liq_hazard_val
            pipeline_results['liqprobability'] = liquefaction_prob
            pipeline_results['numpgvrpr'] = num_pgv_repairs
            pipeline_results['numpgdrpr'] = num_pgd_repairs
            pipeline_results['numrepairs'] = num_repairs

        return pipeline_results

    def get_spec(self):
        """Get specifications of the pipeline damage analysis.

        Returns:
            obj: A JSON object of specifications of the pipeline damage analysis.

        """
        return {
            'name': 'pipeline-damage',
            'description': 'buried pipeline damage analysis',
            'input_parameters': [
                {
                    'id': 'result_name',
                    'required': True,
                    'description': 'result dataset name',
                    'type': str
                },
                {
                    'id': 'hazard_type',
                    'required': True,
                    'description': 'Hazard Type (e.g. earthquake)',
                    'type': str
                },
                {
                    'id': 'hazard_id',
                    'required': True,
                    'description': 'Hazard ID',
                    'type': str
                },
                {
                    'id': 'fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in mapping dataset',
                    'type': str
                },
                {
                    'id': 'use_liquefaction',
                    'required': False,
                    'description': 'Use liquefaction',
                    'type': bool
                },
                {
                    'id': 'liquefaction_fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in liquefaction mapping dataset',
                    'type': str
                },
                {
                    'id': 'num_cpu',
                    'required': False,
                    'description': 'If using parallel execution, the number of cpus to request',
                    'type': int
                },
                {
                    'id': 'liquefaction_geology_dataset_id',
                    'required': False,
                    'description': 'Geology dataset id',
                    'type': str,
                }
            ],
            'input_datasets': [
                {
                    'id': 'pipeline',
                    'required': True,
                    'description': 'Pipeline Inventory',
                    'type': ['ergo:buriedPipelineTopology', 'ergo:pipeline'],
                },
                {
                    'id': 'dfr3_mapping_set',
                    'required': True,
                    'description': 'DFR3 Mapping Set Object',
                    'type': ['incore:dfr3MappingSet'],
                }
            ],
            'output_datasets': [
                {
                    'id': 'result',
                    'parent_type': 'pipeline',
                    'type': 'ergo:pipelineDamage'
                }
            ]
        }
class RoadDamage(BaseAnalysis):
    """Road Damage Analysis calculates the probability of road damage based on an earthquake or tsunami hazard.

    Args:
        incore_client (IncoreClient): Service authentication.

    """

    # Mapping key used when the caller does not supply one.
    DEFAULT_FRAGILITY_KEY = "Non-Retrofit Fragility ID Code"

    def __init__(self, incore_client):
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(RoadDamage, self).__init__(incore_client)

    def run(self):
        """Executes road damage analysis."""
        # Road dataset
        road_set = self.get_input_dataset("roads").get_inventory_reader()

        # Get Fragility key
        fragility_key = self.get_parameter("fragility_key")
        if fragility_key is None:
            fragility_key = self.DEFAULT_FRAGILITY_KEY

        # Get hazard input
        hazard_dataset_id = self.get_parameter("hazard_id")

        # Get hazard type
        hazard_type = self.get_parameter("hazard_type")

        # Liquefaction
        use_liquefaction = False
        if self.get_parameter("use_liquefaction") is not None:
            use_liquefaction = self.get_parameter("use_liquefaction")

        # Get geology dataset for liquefaction
        geology_dataset_id = None
        if self.get_parameter("liquefaction_geology_dataset_id") is not None:
            geology_dataset_id = self.get_parameter(
                "liquefaction_geology_dataset_id")

        # Hazard Uncertainty
        use_hazard_uncertainty = False
        if self.get_parameter("use_hazard_uncertainty") is not None:
            use_hazard_uncertainty = self.get_parameter(
                "use_hazard_uncertainty")

        user_defined_cpu = 1
        if self.get_parameter(
                "num_cpu") is not None and self.get_parameter("num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(
            self, len(road_set), user_defined_cpu)

        # Split the inventory into one chunk per worker.
        avg_bulk_input_size = int(len(road_set) / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(road_set)
        while count < len(inventory_list):
            inventory_args.append(
                inventory_list[count:count + avg_bulk_input_size])
            count += avg_bulk_input_size

        results = self.road_damage_concurrent_future(
            self.road_damage_analysis_bulk_input, num_workers,
            inventory_args, repeat(hazard_type), repeat(hazard_dataset_id),
            repeat(use_hazard_uncertainty), repeat(geology_dataset_id),
            repeat(fragility_key), repeat(use_liquefaction))

        self.set_result_csv_data("result", results,
                                 name=self.get_parameter("result_name"))

        return True

    def road_damage_concurrent_future(self, function_name, parallelism,
                                      *args):
        """Utilizes concurrent.future module.

        Args:
            function_name (function): The function to be parallelized.
            parallelism (int): Number of workers in parallelization.
            *args: All the arguments in order to pass into parameter function_name.

        Returns:
            list: A list of ordered dictionaries with road damage values and other data/metadata.

        """
        output = []
        with concurrent.futures.ProcessPoolExecutor(
                max_workers=parallelism) as executor:
            for ret in executor.map(function_name, *args):
                output.extend(ret)

        return output

    def road_damage_analysis_bulk_input(self, roads, hazard_type,
                                        hazard_dataset_id,
                                        use_hazard_uncertainty,
                                        geology_dataset_id, fragility_key,
                                        use_liquefaction):
        """Run analysis for multiple roads.

        Args:
            roads (list): Multiple roads from input inventory set.
            hazard_type (str): A hazard type of the hazard exposure (earthquake or tsunami).
            hazard_dataset_id (str): An id of the hazard exposure.
            use_hazard_uncertainty (bool): Flag to indicate use uncertainty or not
            geology_dataset_id (str): An id of the geology for use in liquefaction.
            fragility_key (str): Fragility key describing the type of fragility.
            use_liquefaction (bool): Liquefaction. True for using liquefaction information to modify the damage,
                False otherwise.

        Returns:
            list: A list of ordered dictionaries with road damage values and other data/metadata.

        """
        road_results = []
        fragility_sets = self.fragilitysvc.match_inventory(
            self.get_input_dataset("dfr3_mapping_set"), roads, fragility_key)

        list_roads = roads

        # Converting list of roads into a dictionary for ease of reference
        roads = dict()
        for rd in list_roads:
            roads[rd["id"]] = rd
        del list_roads

        processed_roads = []
        grouped_roads = AnalysisUtil.group_by_demand_type(
            roads, fragility_sets)
        for demand, grouped_road_items in grouped_roads.items():
            input_demand_type = demand[0]
            input_demand_units = demand[1]

            # For every group of unique demand and demand unit, call the end-point once
            road_chunks = list(AnalysisUtil.chunks(grouped_road_items, 50))
            for road_chunk in road_chunks:
                points = []
                for road_id in road_chunk:
                    location = GeoUtil.get_location(roads[road_id])
                    points.append(str(location.y) + "," + str(location.x))

                liquefaction = []
                if hazard_type == 'earthquake':
                    hazard_vals = self.hazardsvc.get_earthquake_hazard_values(
                        hazard_dataset_id, input_demand_type,
                        input_demand_units, points)

                    if input_demand_type.lower() == 'pgd' and use_liquefaction \
                            and geology_dataset_id is not None:
                        liquefaction = self.hazardsvc.get_liquefaction_values(
                            hazard_dataset_id, geology_dataset_id,
                            input_demand_units, points)
                elif hazard_type == 'tsunami':
                    hazard_vals = self.hazardsvc.get_tsunami_hazard_values(
                        hazard_dataset_id, input_demand_type,
                        input_demand_units, points)
                elif hazard_type in ('tornado', 'hurricane'):
                    # Merged duplicate branches raising the identical message.
                    raise ValueError(
                        'Earthquake and tsunamis are the only hazards supported for road damage')
                else:
                    raise ValueError("Missing hazard type.")

                # Parse the batch hazard value results and map them back to the building and fragility.
                # This is a potential pitfall as we are relying on the order of the returned results
                i = 0
                for road_id in road_chunk:
                    road_result = collections.OrderedDict()
                    road = roads[road_id]
                    hazard_val = hazard_vals[i]['hazardValue']

                    # Sometimes the geotiffs give large negative values for out of bounds instead of 0
                    if hazard_val <= 0.0:
                        hazard_val = 0.0

                    std_dev = 0.0
                    if use_hazard_uncertainty:
                        raise ValueError("Uncertainty Not Implemented Yet.")

                    selected_fragility_set = fragility_sets[road_id]
                    dmg_probability = \
                        selected_fragility_set.calculate_limit_state(
                            hazard_val, std_dev=std_dev)
                    dmg_interval = AnalysisUtil.calculate_damage_interval(
                        dmg_probability)

                    road_result['guid'] = road['properties']['guid']
                    road_result.update(dmg_probability)
                    road_result.update(dmg_interval)
                    road_result['demandtype'] = input_demand_type
                    road_result['demandunits'] = input_demand_units
                    road_result['hazardtype'] = hazard_type
                    road_result['hazardval'] = hazard_val

                    # if there is liquefaction, overwrite the hazardval with liquefaction value
                    # recalculate dmg_probability and dmg_interval
                    if len(liquefaction) > 0:
                        if input_demand_type in liquefaction[i]:
                            liquefaction_val = \
                                liquefaction[i][input_demand_type]
                        elif input_demand_type.lower() in liquefaction[i]:
                            liquefaction_val = \
                                liquefaction[i][input_demand_type.lower()]
                        elif input_demand_type.upper() in liquefaction[i]:
                            # BUGFIX: the original indexed with the unbound
                            # method object (input_demand_type.upper, missing
                            # parentheses), which raised KeyError whenever
                            # this branch was selected.
                            liquefaction_val = \
                                liquefaction[i][input_demand_type.upper()]
                        else:
                            liquefaction_val = 0.0
                        dmg_probability = \
                            selected_fragility_set.calculate_limit_state(
                                liquefaction_val, std_dev=std_dev)
                        dmg_interval = AnalysisUtil.calculate_damage_interval(
                            dmg_probability)

                        road_result['hazardval'] = liquefaction_val
                        road_result.update(dmg_probability)
                        road_result.update(dmg_interval)

                    road_results.append(road_result)
                    processed_roads.append(road_id)
                    i = i + 1

        # Roads without a fragility mapping get zeroed damage probabilities.
        unmapped_dmg_probability = {
            "ls-slight": 0.0,
            "ls-moderat": 0.0,
            "ls-extensi": 0.0,
            "ls-complet": 0.0
        }
        unmapped_dmg_intervals = AnalysisUtil.calculate_damage_interval(
            unmapped_dmg_probability)
        for road_id, rd in roads.items():
            if road_id not in processed_roads:
                unmapped_rd_result = collections.OrderedDict()
                unmapped_rd_result['guid'] = rd['properties']['guid']
                unmapped_rd_result.update(unmapped_dmg_probability)
                unmapped_rd_result.update(unmapped_dmg_intervals)
                unmapped_rd_result['demandtype'] = "None"
                unmapped_rd_result['demandunits'] = "None"
                unmapped_rd_result['hazardtype'] = "None"
                unmapped_rd_result['hazardval'] = 0.0
                road_results.append(unmapped_rd_result)

        return road_results

    def get_spec(self):
        """Get specifications of the road damage analysis.

        Returns:
            obj: A JSON object of specifications of the road damage analysis.

        """
        return {
            'name': 'road-damage',
            'description': 'road damage analysis',
            'input_parameters': [
                {
                    'id': 'result_name',
                    'required': True,
                    'description': 'result dataset name',
                    'type': str
                },
                {
                    'id': 'hazard_type',
                    'required': True,
                    'description': 'Hazard Type (e.g. earthquake)',
                    'type': str
                },
                {
                    'id': 'hazard_id',
                    'required': True,
                    'description': 'Hazard ID',
                    'type': str
                },
                {
                    'id': 'fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in mapping dataset',
                    'type': str
                },
                {
                    'id': 'use_liquefaction',
                    'required': False,
                    'description': 'Use liquefaction',
                    'type': bool
                },
                {
                    'id': 'liquefaction_geology_dataset_id',
                    'required': False,
                    'description': 'Liquefaction geology/susceptibility dataset id. '
                                   'If not provided, liquefaction will be ignored',
                    'type': str
                },
                {
                    'id': 'use_hazard_uncertainty',
                    'required': False,
                    'description': 'Use hazard uncertainty',
                    'type': bool
                },
                {
                    'id': 'num_cpu',
                    'required': False,
                    'description': 'If using parallel execution, the number of cpus to request',
                    'type': int
                },
            ],
            'input_datasets': [
                {
                    'id': 'roads',
                    'required': True,
                    'description': 'Road Inventory',
                    'type': ['ergo:roadLinkTopo', 'incore:roads']
                },
                {
                    'id': 'dfr3_mapping_set',
                    'required': True,
                    'description': 'DFR3 Mapping Set Object',
                    'type': ['incore:dfr3MappingSet'],
                }
            ],
            'output_datasets': [
                {
                    'id': 'result',
                    'parent_type': 'roads',
                    'description': 'CSV file of road structural damage',
                    'type': 'ergo:roadDamage'
                }
            ]
        }