# Imports assumed for this excerpt (module paths follow the pyincore package
# layout and may differ slightly between versions).
import concurrent.futures
import math
import random
from itertools import repeat

from pyincore import AnalysisUtil, BaseAnalysis, FragilityService, GeoUtil, HazardService
from pyincore.models.dfr3curve import DFR3Curve
from pyincore.analyses.pipelinedamagerepairrate.pipelineutil import PipelineUtil
from pyincore.analyses.nonstructbuildingdamage.nonstructbuildingutil import NonStructBuildingUtil


class PipelineDamageRepairRate(BaseAnalysis):
    """Computes pipeline damage for a hazard.

    Args:
        incore_client: Service client with authentication info.

    """

    def __init__(self, incore_client):
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(PipelineDamageRepairRate, self).__init__(incore_client)

    def run(self):
        """Executes pipeline damage analysis."""
        # Pipeline dataset
        pipeline_dataset = self.get_input_dataset(
            "pipeline").get_inventory_reader()

        # Get hazard type
        hazard_type = self.get_parameter("hazard_type")

        # Get hazard input
        hazard_dataset_id = self.get_parameter("hazard_id")

        user_defined_cpu = 1
        if self.get_parameter("num_cpu") is not None and self.get_parameter(
                "num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        dataset_size = len(pipeline_dataset)
        num_workers = AnalysisUtil.determine_parallelism_locally(
            self, dataset_size, user_defined_cpu)

        avg_bulk_input_size = int(dataset_size / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(pipeline_dataset)
        while count < len(inventory_list):
            inventory_args.append(
                inventory_list[count:count + avg_bulk_input_size])
            count += avg_bulk_input_size

        (ds_results, damage_results) = self.pipeline_damage_concurrent_future(
            self.pipeline_damage_analysis_bulk_input, num_workers,
            inventory_args, repeat(hazard_type), repeat(hazard_dataset_id))

        self.set_result_csv_data("result",
                                 ds_results,
                                 name=self.get_parameter("result_name"))
        self.set_result_json_data("metadata",
                                  damage_results,
                                  name=self.get_parameter("result_name") +
                                  "_additional_info")
        return True

    def pipeline_damage_concurrent_future(self, function_name, num_workers,
                                          *args):
        """Utilizes the concurrent.futures module.

        Args:
            function_name (function): The function to be parallelized.
            num_workers (int): Maximum number of workers in parallelization.
            *args: All the arguments in order to pass into parameter function_name.

        Returns:
            list: A list of ordered dictionaries with pipeline damage values and other data/metadata.

        """
        output_ds = []
        output_dmg = []
        with concurrent.futures.ProcessPoolExecutor(
                max_workers=num_workers) as executor:
            for ret1, ret2 in executor.map(function_name, *args):
                output_ds.extend(ret1)
                output_dmg.extend(ret2)

        return output_ds, output_dmg

    def pipeline_damage_analysis_bulk_input(self, pipelines, hazard_type,
                                            hazard_dataset_id):
        """Run pipeline damage analysis for multiple pipelines.

        Args:
            pipelines (list): Multiple pipelines from the pipeline dataset.
            hazard_type (str): Hazard type.
            hazard_dataset_id (str): An id of the hazard exposure.

        Returns:
            ds_results (list): A list of ordered dictionaries with pipeline damage values and other data/metadata.
            damage_results (list): A list of ordered dictionaries with pipeline damage metadata.

        """
        # Get fragility key
        fragility_key = self.get_parameter("fragility_key")
        if fragility_key is None:
            fragility_key = PipelineUtil.DEFAULT_TSU_FRAGILITY_KEY if hazard_type == 'tsunami' else \
                PipelineUtil.DEFAULT_EQ_FRAGILITY_KEY
            self.set_parameter("fragility_key", fragility_key)

        # Get fragility set
        fragility_sets = self.fragilitysvc.match_inventory(
            self.get_input_dataset("dfr3_mapping_set"), pipelines,
            fragility_key)

        # Get liquefaction fragility key
        liquefaction_fragility_key = self.get_parameter(
            "liquefaction_fragility_key")
        if hazard_type == "earthquake" and liquefaction_fragility_key is None:
            liquefaction_fragility_key = PipelineUtil.LIQ_FRAGILITY_KEY

        # Liquefaction
        use_liquefaction = False
        if hazard_type == "earthquake" and self.get_parameter(
                "use_liquefaction") is not None:
            use_liquefaction = self.get_parameter("use_liquefaction")

        # Get geology dataset id
        geology_dataset_id = self.get_parameter(
            "liquefaction_geology_dataset_id")

        fragility_sets_liq = None
        if geology_dataset_id is not None:
            fragility_sets_liq = self.fragilitysvc.match_inventory(
                self.get_input_dataset("dfr3_mapping_set"), pipelines,
                liquefaction_fragility_key)

        values_payload = []
        values_payload_liq = []  # for liquefaction, if used
        unmapped_pipelines = []
        mapped_pipelines = []
        for pipeline in pipelines:
            # if a matching fragility is found for this pipeline
            if pipeline["id"] in fragility_sets.keys():
                fragility_set = fragility_sets[pipeline["id"]]
                location = GeoUtil.get_location(pipeline)
                loc = str(location.y) + "," + str(location.x)
                demands = fragility_set.demand_types
                units = fragility_set.demand_units
                value = {"demands": demands, "units": units, "loc": loc}
                values_payload.append(value)
                mapped_pipelines.append(pipeline)

                # Check if liquefaction is applicable
                if use_liquefaction and \
                        geology_dataset_id is not None and \
                        fragility_sets_liq is not None and \
                        pipeline["id"] in fragility_sets_liq:
                    fragility_set_liq = fragility_sets_liq[pipeline["id"]]
                    demands_liq = fragility_set_liq.demand_types
                    units_liq = fragility_set_liq.demand_units
                    value_liq = {
                        "demands": demands_liq,
                        "units": units_liq,
                        "loc": loc
                    }
                    values_payload_liq.append(value_liq)
            else:
                unmapped_pipelines.append(pipeline)

        del pipelines

        if hazard_type == 'earthquake':
            hazard_resp = self.hazardsvc.post_earthquake_hazard_values(
                hazard_dataset_id, values_payload)
        elif hazard_type == 'tsunami':
            hazard_resp = self.hazardsvc.post_tsunami_hazard_values(
                hazard_dataset_id, values_payload)
        else:
            raise ValueError(
                "The provided hazard type is not supported yet by this analysis"
            )

        # Check if liquefaction is applicable
        liquefaction_resp = None
        if use_liquefaction is True and \
                fragility_sets_liq is not None and \
                geology_dataset_id is not None:
            liquefaction_resp = self.hazardsvc.post_liquefaction_values(
                hazard_dataset_id, geology_dataset_id, values_payload_liq)

        # calculate LS and DS
        ds_results = []
        damage_results = []
        for i, pipeline in enumerate(mapped_pipelines):
            # defaults
            pgv_repairs = None
            pgd_repairs = 0.0
            total_repair_rate = None
            break_rate = None
            leak_rate = None
            failure_probability = None
            num_pgv_repairs = None
            num_pgd_repairs = 0.0
            num_repairs = None

            liq_hazard_vals = None
            liq_demand_types = None
            liq_demand_units = None
            liquefaction_prob = None

            ds_result = dict()
            damage_result = dict()
            ds_result['guid'] = pipeline['properties']['guid']
            damage_result['guid'] = pipeline['properties']['guid']

            fragility_set = fragility_sets[pipeline["id"]]
            # TODO assume there is only one curve
            fragility_curve = fragility_set.fragility_curves[0]

            hazard_vals = AnalysisUtil.update_precision_of_lists(
                hazard_resp[i]["hazardValues"])
            demand_types = hazard_resp[i]["demands"]
            demand_units = hazard_resp[i]["units"]

            hval_dict = dict()
            for j, d in enumerate(fragility_set.demand_types):
                hval_dict[d] = hazard_vals[j]

            if not AnalysisUtil.do_hazard_values_have_errors(
                    hazard_resp[i]["hazardValues"]):
                pipeline_args = fragility_set.construct_expression_args_from_inventory(
                    pipeline)
                pgv_repairs = \
                    fragility_curve.solve_curve_expression(
                        hval_dict, fragility_set.curve_parameters,
                        **pipeline_args)
                # Convert PGV repairs to SI units
                pgv_repairs = PipelineUtil.convert_result_unit(
                    fragility_curve.return_type["unit"], pgv_repairs)

                length = PipelineUtil.get_pipe_length(pipeline)

                # Number of PGV repairs
                num_pgv_repairs = pgv_repairs * length

                # Check if liquefaction is applicable
                if use_liquefaction is True \
                        and fragility_sets_liq is not None \
                        and geology_dataset_id is not None \
                        and liquefaction_resp is not None:
                    fragility_set_liq = fragility_sets_liq[pipeline["id"]]

                    # TODO assume there is only one curve
                    liq_fragility_curve = fragility_set_liq.fragility_curves[0]

                    liq_hazard_vals = AnalysisUtil.update_precision_of_lists(
                        liquefaction_resp[i]["pgdValues"])
                    liq_demand_types = liquefaction_resp[i]["demands"]
                    liq_demand_units = liquefaction_resp[i]["units"]
                    liquefaction_prob = liquefaction_resp[i]['liqProbability']

                    liq_hval_dict = dict()
                    for j, d in enumerate(liquefaction_resp[i]["demands"]):
                        liq_hval_dict[d] = liq_hazard_vals[j]

                    # !important! removing the liqProbability and passing in the
                    # "diameter"; no fragility is actually using liqProbability
                    pipeline_args = fragility_set_liq.construct_expression_args_from_inventory(
                        pipeline)
                    pgd_repairs = \
                        liq_fragility_curve.solve_curve_expression(
                            liq_hval_dict, fragility_set_liq.curve_parameters,
                            **pipeline_args)
                    # Convert PGD repairs to SI units
                    pgd_repairs = PipelineUtil.convert_result_unit(
                        liq_fragility_curve.return_type["unit"], pgd_repairs)

                    num_pgd_repairs = pgd_repairs * length

                # record results
                if 'pipetype' in pipeline['properties']:
                    damage_result['pipeclass'] = pipeline['properties'][
                        'pipetype']
                elif 'pipelinesc' in pipeline['properties']:
                    damage_result['pipeclass'] = pipeline['properties'][
                        'pipelinesc']
                else:
                    damage_result['pipeclass'] = ""

                break_rate = 0.2 * pgv_repairs + 0.8 * pgd_repairs
                leak_rate = 0.8 * pgv_repairs + 0.2 * pgd_repairs
                total_repair_rate = pgd_repairs + pgv_repairs
                failure_probability = 1 - math.exp(-1.0 * break_rate * length)
                num_repairs = num_pgd_repairs + num_pgv_repairs

            ds_result['pgvrepairs'] = pgv_repairs
            ds_result['pgdrepairs'] = pgd_repairs
            ds_result['repairspkm'] = total_repair_rate
            ds_result['breakrate'] = break_rate
            ds_result['leakrate'] = leak_rate
            ds_result['failprob'] = failure_probability
            ds_result['numpgvrpr'] = num_pgv_repairs
            ds_result['numpgdrpr'] = num_pgd_repairs
            ds_result['numrepairs'] = num_repairs
            ds_result['haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(
                hazard_vals, hazard_type)

            damage_result['fragility_id'] = fragility_set.id
            damage_result['demandtypes'] = demand_types
            damage_result['demandunits'] = demand_units
            damage_result['hazardtype'] = hazard_type
            damage_result['hazardval'] = hazard_vals

            # Check if liquefaction is applicable
            if use_liquefaction is True \
                    and fragility_sets_liq is not None \
                    and geology_dataset_id is not None:
                damage_result['liq_fragility_id'] = fragility_sets_liq[
                    pipeline["id"]].id
                damage_result['liqdemandtypes'] = liq_demand_types
                damage_result['liqdemandunits'] = liq_demand_units
                damage_result['liqhazval'] = liq_hazard_vals
                damage_result['liqprobability'] = liquefaction_prob
            else:
                damage_result['liq_fragility_id'] = None
                damage_result['liqdemandtypes'] = None
                damage_result['liqdemandunits'] = None
                damage_result['liqhazval'] = None
                damage_result['liqprobability'] = None

            ds_results.append(ds_result)
            damage_results.append(damage_result)

        # pipelines that do not have matched mappings
        for pipeline in unmapped_pipelines:
            ds_result = dict()
            ds_result['guid'] = pipeline['properties']['guid']

            damage_result = dict()
            damage_result['guid'] = pipeline['properties']['guid']
            if 'pipetype' in pipeline['properties']:
                damage_result['pipeclass'] = pipeline['properties']['pipetype']
            elif 'pipelinesc' in pipeline['properties']:
                damage_result['pipeclass'] = pipeline['properties'][
                    'pipelinesc']
            else:
                damage_result['pipeclass'] = ""
            damage_result['fragility_id'] = None
            damage_result['demandtypes'] = None
            damage_result['demandunits'] = None
            damage_result['hazardtype'] = None
            damage_result['hazardval'] = None
            damage_result['liq_fragility_id'] = None
            damage_result['liqdemandtypes'] = None
            damage_result['liqdemandunits'] = None
            damage_result['liqhazval'] = None
            damage_result['liqprobability'] = None

            ds_results.append(ds_result)
            damage_results.append(damage_result)

        return ds_results, damage_results

    def get_spec(self):
        """Get specifications of the pipeline damage analysis.

        Returns:
            obj: A JSON object of specifications of the pipeline damage analysis.

        """
        return {
            'name': 'pipeline-damage',
            'description': 'buried pipeline damage analysis',
            'input_parameters': [
                {
                    'id': 'result_name',
                    'required': True,
                    'description': 'result dataset name',
                    'type': str
                },
                {
                    'id': 'hazard_type',
                    'required': True,
                    'description': 'Hazard Type (e.g. earthquake)',
                    'type': str
                },
                {
                    'id': 'hazard_id',
                    'required': True,
                    'description': 'Hazard ID',
                    'type': str
                },
                {
                    'id': 'fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in mapping dataset',
                    'type': str
                },
                {
                    'id': 'use_liquefaction',
                    'required': False,
                    'description': 'Use liquefaction',
                    'type': bool
                },
                {
                    'id': 'liquefaction_fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in liquefaction mapping dataset',
                    'type': str
                },
                {
                    'id': 'num_cpu',
                    'required': False,
                    'description': 'If using parallel execution, the number of cpus to request',
                    'type': int
                },
                {
                    'id': 'liquefaction_geology_dataset_id',
                    'required': False,
                    'description': 'Geology dataset id',
                    'type': str,
                }
            ],
            'input_datasets': [
                {
                    'id': 'pipeline',
                    'required': True,
                    'description': 'Pipeline Inventory',
                    'type': ['ergo:buriedPipelineTopology', 'ergo:pipeline'],
                },
                {
                    'id': 'dfr3_mapping_set',
                    'required': True,
                    'description': 'DFR3 Mapping Set Object',
                    'type': ['incore:dfr3MappingSet'],
                }
            ],
            'output_datasets': [
                {
                    'id': 'result',
                    'parent_type': 'pipeline',
                    'type': 'ergo:pipelineDamageVer3'
                },
                {
                    'id': 'metadata',
                    'parent_type': 'pipeline',
                    'description': 'additional metadata in json file about applied hazard value and '
                                   'fragility',
                    'type': 'incore:pipelineDamageSupplement'
                }
            ]
        }
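# Usage sketch (illustrative only, not part of the analysis above): how this
# class is typically wired up against the IN-CORE services. The dataset,
# mapping, and hazard IDs are hypothetical placeholders; the calls follow the
# standard pyincore BaseAnalysis API (load_remote_input_dataset /
# set_input_dataset / set_parameter / run_analysis).
def _example_pipeline_damage_repair_rate(client):
    """Minimal sketch of running PipelineDamageRepairRate with placeholder IDs."""
    from pyincore import MappingSet

    pipeline_dmg = PipelineDamageRepairRate(client)
    pipeline_dmg.load_remote_input_dataset("pipeline", "<pipeline-dataset-id>")

    # The DFR3 mapping set is fetched from the fragility service
    fragility_service = FragilityService(client)
    mapping_set = MappingSet(fragility_service.get_mapping("<mapping-id>"))
    pipeline_dmg.set_input_dataset("dfr3_mapping_set", mapping_set)

    pipeline_dmg.set_parameter("result_name", "pipeline_repair_rate")
    pipeline_dmg.set_parameter("hazard_type", "earthquake")
    pipeline_dmg.set_parameter("hazard_id", "<hazard-id>")
    pipeline_dmg.set_parameter("num_cpu", 4)

    return pipeline_dmg.run_analysis()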
class WaterFacilityDamage(BaseAnalysis):
    """Computes water facility damage for an earthquake or tsunami exposure."""

    DEFAULT_EQ_FRAGILITY_KEY = "pga"
    DEFAULT_TSU_FRAGILITY_KEY = "Non-Retrofit inundationDepth Fragility ID Code"
    DEFAULT_LIQ_FRAGILITY_KEY = "pgd"

    def __init__(self, incore_client):
        # Create Hazard and Fragility service
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(WaterFacilityDamage, self).__init__(incore_client)

    def run(self):
        """Performs water facility damage analysis by using the parameters from
        the spec and creates an output dataset in csv format.

        Returns:
            bool: True if successful, False otherwise.

        """
        # Facility dataset
        inventory_set = self.get_input_dataset(
            "water_facilities").get_inventory_reader()

        # Get hazard input
        hazard_dataset_id = self.get_parameter("hazard_id")

        # Hazard type of the exposure
        hazard_type = self.get_parameter("hazard_type")

        user_defined_cpu = 1
        if self.get_parameter("num_cpu") is not None and self.get_parameter(
                "num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(
            self, len(inventory_set), user_defined_cpu)

        avg_bulk_input_size = int(len(inventory_set) / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(inventory_set)

        while count < len(inventory_list):
            inventory_args.append(
                inventory_list[count:count + avg_bulk_input_size])
            count += avg_bulk_input_size

        (ds_results,
         damage_results) = self.waterfacility_damage_concurrent_futures(
             self.waterfacilityset_damage_analysis_bulk_input, num_workers,
             inventory_args, repeat(hazard_type), repeat(hazard_dataset_id))

        self.set_result_csv_data("result",
                                 ds_results,
                                 name=self.get_parameter("result_name"))
        self.set_result_json_data("metadata",
                                  damage_results,
                                  name=self.get_parameter("result_name") +
                                  "_additional_info")
        return True

    def waterfacility_damage_concurrent_futures(self, function_name,
                                                parallel_processes, *args):
        """Utilizes the concurrent.futures module.

        Args:
            function_name (function): The function to be parallelized.
            parallel_processes (int): Number of workers in parallelization.
            *args: All the arguments in order to pass into parameter function_name.

        Returns:
            list: A list of ordered dictionaries with water facility damage values.
            list: A list of ordered dictionaries with other water facility data/metadata.

        """
        output_ds = []
        output_dmg = []
        with concurrent.futures.ProcessPoolExecutor(
                max_workers=parallel_processes) as executor:
            for ret1, ret2 in executor.map(function_name, *args):
                output_ds.extend(ret1)
                output_dmg.extend(ret2)

        return output_ds, output_dmg

    def waterfacilityset_damage_analysis_bulk_input(self, facilities,
                                                    hazard_type,
                                                    hazard_dataset_id):
        """Gets applicable fragilities and calculates damage.

        Args:
            facilities (list): Multiple water facilities from the input inventory set.
            hazard_type (str): A hazard type of the hazard exposure (earthquake or tsunami).
            hazard_dataset_id (str): An id of the hazard exposure.

        Returns:
            list: A list of ordered dictionaries with water facility damage values.
            list: A list of ordered dictionaries with other water facility data/metadata.

        """
        # Liquefaction-related variables
        use_liquefaction = False
        liquefaction_available = False
        fragility_sets_liq = None
        liquefaction_resp = None
        geology_dataset_id = None
        liq_hazard_vals = None
        liq_demand_types = None
        liq_demand_units = None
        liquefaction_prob = None
        loc = None

        # Obtain the fragility key
        fragility_key = self.get_parameter("fragility_key")

        if fragility_key is None:
            if hazard_type == 'tsunami':
                fragility_key = self.DEFAULT_TSU_FRAGILITY_KEY
            elif hazard_type == 'earthquake':
                fragility_key = self.DEFAULT_EQ_FRAGILITY_KEY
            else:
                raise ValueError(
                    "Hazard types other than earthquake and tsunami are not currently supported."
                )

            self.set_parameter("fragility_key", fragility_key)

        # Obtain the fragility set
        fragility_sets = self.fragilitysvc.match_inventory(
            self.get_input_dataset("dfr3_mapping_set"), facilities,
            fragility_key)

        # Obtain the liquefaction fragility key
        liquefaction_fragility_key = self.get_parameter(
            "liquefaction_fragility_key")

        if hazard_type == "earthquake":
            if self.get_parameter("use_liquefaction") is True:
                if liquefaction_fragility_key is None:
                    liquefaction_fragility_key = self.DEFAULT_LIQ_FRAGILITY_KEY

                use_liquefaction = self.get_parameter("use_liquefaction")

                # Obtain the geology dataset
                geology_dataset_id = self.get_parameter(
                    "liquefaction_geology_dataset_id")

                if geology_dataset_id is not None:
                    fragility_sets_liq = self.fragilitysvc.match_inventory(
                        self.get_input_dataset("dfr3_mapping_set"), facilities,
                        liquefaction_fragility_key)

                    if fragility_sets_liq is not None:
                        liquefaction_available = True

        # Determine whether to use hazard uncertainty
        uncertainty = self.get_parameter("use_hazard_uncertainty")

        # Set up fragility translation structures
        values_payload = []
        values_payload_liq = []
        unmapped_waterfacilities = []
        mapped_waterfacilities = []

        for facility in facilities:
            if facility["id"] in fragility_sets.keys():
                # Fill in generic details
                fragility_set = fragility_sets[facility["id"]]
                location = GeoUtil.get_location(facility)
                loc = str(location.y) + "," + str(location.x)
                demands = fragility_set.demand_types
                units = fragility_set.demand_units
                value = {"demands": demands, "units": units, "loc": loc}
                values_payload.append(value)
                mapped_waterfacilities.append(facility)

                # Fill in liquefaction parameters
                if liquefaction_available and facility[
                        "id"] in fragility_sets_liq:
                    fragility_set_liq = fragility_sets_liq[facility["id"]]
                    demands_liq = fragility_set_liq.demand_types
                    units_liq = fragility_set_liq.demand_units
                    value_liq = {
                        "demands": demands_liq,
                        "units": units_liq,
                        "loc": loc
                    }
                    values_payload_liq.append(value_liq)
            else:
                unmapped_waterfacilities.append(facility)

        del facilities

        if hazard_type == 'earthquake':
            hazard_resp = self.hazardsvc.post_earthquake_hazard_values(
                hazard_dataset_id, values_payload)
        elif hazard_type == 'tsunami':
            hazard_resp = self.hazardsvc.post_tsunami_hazard_values(
                hazard_dataset_id, values_payload)
        else:
            raise ValueError(
                "The provided hazard type is not supported yet by this analysis"
            )

        # Check if liquefaction is applicable
        if liquefaction_available:
            liquefaction_resp = self.hazardsvc.post_liquefaction_values(
                hazard_dataset_id, geology_dataset_id, values_payload_liq)

        # Calculate LS and DS
        facility_results = []
        damage_results = []

        for i, facility in enumerate(mapped_waterfacilities):
            fragility_set = fragility_sets[facility["id"]]
            limit_states = dict()
            dmg_intervals = dict()

            # Set up conditions for the analysis
            hazard_std_dev = 0
            if uncertainty:
                hazard_std_dev = random.random()

            if isinstance(fragility_set.fragility_curves[0], DFR3Curve):
                hazard_vals = AnalysisUtil.update_precision_of_lists(
                    hazard_resp[i]["hazardValues"])
                demand_types = hazard_resp[i]["demands"]
                demand_units = hazard_resp[i]["units"]

                hval_dict = dict()
                for j, d in enumerate(fragility_set.demand_types):
                    hval_dict[d] = hazard_vals[j]

                if not AnalysisUtil.do_hazard_values_have_errors(
                        hazard_resp[i]["hazardValues"]):
                    facility_args = fragility_set.construct_expression_args_from_inventory(
                        facility)
                    limit_states = \
                        fragility_set.calculate_limit_state(
                            hval_dict,
                            std_dev=hazard_std_dev,
                            inventory_type='water_facility',
                            **facility_args)

                    # Evaluate liquefaction: if it is not None, then
                    # liquefaction is available
                    if liquefaction_resp is not None:
                        fragility_set_liq = fragility_sets_liq[facility["id"]]

                        if isinstance(fragility_set_liq.fragility_curves[0],
                                      DFR3Curve):
                            liq_hazard_vals = AnalysisUtil.update_precision_of_lists(
                                liquefaction_resp[i]["pgdValues"])
                            liq_demand_types = liquefaction_resp[i]["demands"]
                            liq_demand_units = liquefaction_resp[i]["units"]
                            liquefaction_prob = liquefaction_resp[i][
                                'liqProbability']

                            hval_dict_liq = dict()
                            for j, d in enumerate(
                                    fragility_set_liq.demand_types):
                                hval_dict_liq[d] = liq_hazard_vals[j]

                            facility_liq_args = fragility_set_liq.construct_expression_args_from_inventory(
                                facility)
                            pgd_limit_states = \
                                fragility_set_liq.calculate_limit_state(
                                    hval_dict_liq,
                                    std_dev=hazard_std_dev,
                                    inventory_type="water_facility",
                                    **facility_liq_args)
                        else:
                            raise ValueError(
                                "One of the fragilities is in a deprecated format. "
                                "This should not happen. If you are seeing this, please report the issue."
                            )

                        limit_states = AnalysisUtil.adjust_limit_states_for_pgd(
                            limit_states, pgd_limit_states)

                    dmg_intervals = fragility_set.calculate_damage_interval(
                        limit_states,
                        hazard_type=hazard_type,
                        inventory_type='water_facility')
            else:
                raise ValueError(
                    "One of the fragilities is in a deprecated format. "
                    "This should not happen. If you are seeing this, please report the issue."
                )

            # TODO: ideally, this goes into a single variable declaration section
            facility_result = {
                'guid': facility['properties']['guid'],
                **limit_states,
                **dmg_intervals
            }
            facility_result['haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(
                hazard_vals, hazard_type)
            damage_result = dict()
            damage_result['guid'] = facility['properties']['guid']
            damage_result['fragility_id'] = fragility_set.id
            damage_result['demandtypes'] = demand_types
            damage_result['demandunits'] = demand_units
            damage_result['hazardtype'] = hazard_type
            damage_result['hazardvals'] = hazard_vals

            if use_liquefaction and fragility_sets_liq and geology_dataset_id:
                damage_result['liq_fragility_id'] = fragility_sets_liq[
                    facility["id"]].id
                damage_result['liqdemandtypes'] = liq_demand_types
                damage_result['liqdemandunits'] = liq_demand_units
                damage_result['liqhazval'] = liq_hazard_vals
                damage_result['liqprobability'] = liquefaction_prob
            else:
                damage_result['liq_fragility_id'] = None
                damage_result['liqdemandtypes'] = None
                damage_result['liqdemandunits'] = None
                damage_result['liqhazval'] = None
                damage_result['liqprobability'] = None

            facility_results.append(facility_result)
            damage_results.append(damage_result)

        for facility in unmapped_waterfacilities:
            facility_result = dict()
            damage_result = dict()

            facility_result['guid'] = facility['properties']['guid']
            damage_result['guid'] = facility['properties']['guid']
            damage_result['fragility_id'] = None
            damage_result['demandtypes'] = None
            damage_result['demandunits'] = None
            damage_result['hazardtype'] = None
            damage_result['hazardvals'] = None
            damage_result['liq_fragility_id'] = None
            damage_result['liqdemandtypes'] = None
            damage_result['liqdemandunits'] = None
            damage_result['liqhazval'] = None
            damage_result['liqprobability'] = None

            facility_results.append(facility_result)
            damage_results.append(damage_result)

        return facility_results, damage_results

    def get_spec(self):
        return {
            'name': 'water-facility-damage',
            'description': 'water facility damage analysis',
            'input_parameters': [
                {
                    'id': 'result_name',
                    'required': False,
                    'description': 'result dataset name',
                    'type': str
                },
                {
                    'id': 'hazard_type',
                    'required': True,
                    'description': 'Hazard Type (e.g. earthquake)',
                    'type': str
                },
                {
                    'id': 'hazard_id',
                    'required': True,
                    'description': 'Hazard ID',
                    'type': str
                },
                {
                    'id': 'fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in mapping dataset',
                    'type': str
                },
                {
                    'id': 'use_liquefaction',
                    'required': False,
                    'description': 'Use liquefaction',
                    'type': bool
                },
                {
                    'id': 'liquefaction_geology_dataset_id',
                    'required': False,
                    'description': 'Liquefaction geology/susceptibility dataset id. '
                                   'If not provided, liquefaction will be ignored',
                    'type': str
                },
                {
                    'id': 'liquefaction_fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in liquefaction mapping dataset',
                    'type': str
                },
                {
                    'id': 'use_hazard_uncertainty',
                    'required': False,
                    'description': 'Use hazard uncertainty',
                    'type': bool
                },
                {
                    'id': 'num_cpu',
                    'required': False,
                    'description': 'If using parallel execution, the number of cpus to request',
                    'type': int
                },
            ],
            'input_datasets': [
                {
                    'id': 'water_facilities',
                    'required': True,
                    'description': 'Water Facility Inventory',
                    'type': ['ergo:waterFacilityTopo'],
                },
                {
                    'id': 'dfr3_mapping_set',
                    'required': True,
                    'description': 'DFR3 Mapping Set Object',
                    'type': ['incore:dfr3MappingSet'],
                }
            ],
            'output_datasets': [
                {
                    'id': 'result',
                    'parent_type': 'water_facilities',
                    'description': 'A csv file with limit state probabilities and damage states '
                                   'for each water facility',
                    'type': 'ergo:waterFacilityDamageVer6'
                },
                {
                    'id': 'metadata',
                    'parent_type': 'water_facilities',
                    'description': 'additional metadata in json file about applied hazard value and '
                                   'fragility',
                    'type': 'incore:waterFacilityDamageSupplement'
                }
            ]
        }
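# Usage sketch (illustrative): the same wiring pattern for water facilities,
# here with the optional liquefaction inputs enabled. All IDs are hypothetical
# placeholders and assume a reachable IN-CORE service.
def _example_water_facility_damage(client):
    """Minimal sketch of running WaterFacilityDamage with liquefaction enabled."""
    from pyincore import MappingSet

    wf_dmg = WaterFacilityDamage(client)
    wf_dmg.load_remote_input_dataset("water_facilities", "<facility-dataset-id>")

    fragility_service = FragilityService(client)
    mapping_set = MappingSet(fragility_service.get_mapping("<mapping-id>"))
    wf_dmg.set_input_dataset("dfr3_mapping_set", mapping_set)

    wf_dmg.set_parameter("result_name", "water_facility_dmg")
    wf_dmg.set_parameter("hazard_type", "earthquake")
    wf_dmg.set_parameter("hazard_id", "<hazard-id>")
    # Optional: liquefaction requires a geology dataset id
    wf_dmg.set_parameter("use_liquefaction", True)
    wf_dmg.set_parameter("liquefaction_geology_dataset_id", "<geology-id>")
    wf_dmg.set_parameter("num_cpu", 4)

    return wf_dmg.run_analysis()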
class EpfDamage(BaseAnalysis):
    """Computes electric power facility structural damage for earthquake, tsunami,
    tornado, and hurricane hazards.

    Args:
        incore_client (IncoreClient): Service authentication.

    """

    DEFAULT_LIQ_FRAGILITY_KEY = "pgd"
    DEFAULT_FRAGILITY_KEY = "pga"

    def __init__(self, incore_client):
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(EpfDamage, self).__init__(incore_client)

    def run(self):
        """Executes electric power facility damage analysis."""
        epf_set = self.get_input_dataset("epfs").get_inventory_reader()

        # Get Fragility key
        fragility_key = self.get_parameter("fragility_key")
        if fragility_key is None:
            fragility_key = self.DEFAULT_FRAGILITY_KEY
            self.set_parameter("fragility_key", fragility_key)

        # Get hazard input
        hazard_dataset_id = self.get_parameter("hazard_id")

        # Hazard type; note this is here for future use if additional hazards
        # are supported by this analysis
        hazard_type = self.get_parameter("hazard_type")

        # Hazard uncertainty
        use_hazard_uncertainty = False
        if self.get_parameter("use_hazard_uncertainty") is not None:
            use_hazard_uncertainty = self.get_parameter(
                "use_hazard_uncertainty")
        if use_hazard_uncertainty:
            raise ValueError("Uncertainty is not implemented yet.")

        user_defined_cpu = 1
        if self.get_parameter("num_cpu") is not None and self.get_parameter(
                "num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(
            self, len(epf_set), user_defined_cpu)

        avg_bulk_input_size = int(len(epf_set) / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(epf_set)
        while count < len(inventory_list):
            inventory_args.append(
                inventory_list[count:count + avg_bulk_input_size])
            count += avg_bulk_input_size

        (ds_results, damage_results) = self.epf_damage_concurrent_future(
            self.epf_damage_analysis_bulk_input, num_workers, inventory_args,
            repeat(hazard_type), repeat(hazard_dataset_id))

        self.set_result_csv_data("result",
                                 ds_results,
                                 name=self.get_parameter("result_name"))
        self.set_result_json_data("metadata",
                                  damage_results,
                                  name=self.get_parameter("result_name") +
                                  "_additional_info")
        return True

    def epf_damage_concurrent_future(self, function_name, num_workers, *args):
        """Utilizes the concurrent.futures module.

        Args:
            function_name (function): The function to be parallelized.
            num_workers (int): Maximum number of workers in parallelization.
            *args: All the arguments in order to pass into parameter function_name.

        Returns:
            list: A list of ordered dictionaries with epf damage values and other data/metadata.

        """
        output_ds = []
        output_dmg = []
        with concurrent.futures.ProcessPoolExecutor(
                max_workers=num_workers) as executor:
            for ret1, ret2 in executor.map(function_name, *args):
                output_ds.extend(ret1)
                output_dmg.extend(ret2)

        return output_ds, output_dmg

    def epf_damage_analysis_bulk_input(self, epfs, hazard_type,
                                       hazard_dataset_id):
        """Run analysis for multiple epfs.

        Args:
            epfs (list): Multiple epfs from the input inventory set.
            hazard_type (str): A type of hazard exposure (earthquake, tsunami, tornado, or hurricane).
            hazard_dataset_id (str): An id of the hazard exposure.

        Returns:
            list: A list of ordered dictionaries with epf damage values and other data/metadata.

        """
        use_liquefaction = False
        liquefaction_available = False

        fragility_key = self.get_parameter("fragility_key")

        fragility_sets = self.fragilitysvc.match_inventory(
            self.get_input_dataset("dfr3_mapping_set"), epfs, fragility_key)

        if hazard_type == "earthquake":
            liquefaction_fragility_key = self.get_parameter(
                "liquefaction_fragility_key")
            if self.get_parameter("use_liquefaction") is True:
                if liquefaction_fragility_key is None:
                    liquefaction_fragility_key = self.DEFAULT_LIQ_FRAGILITY_KEY

                use_liquefaction = self.get_parameter("use_liquefaction")

                # Obtain the geology dataset
                geology_dataset_id = self.get_parameter(
                    "liquefaction_geology_dataset_id")

                if geology_dataset_id is not None:
                    fragility_sets_liq = self.fragilitysvc.match_inventory(
                        self.get_input_dataset("dfr3_mapping_set"), epfs,
                        liquefaction_fragility_key)

                    if fragility_sets_liq is not None:
                        liquefaction_available = True

        values_payload = []
        values_payload_liq = []
        unmapped_epfs = []
        mapped_epfs = []
        for epf in epfs:
            epf_id = epf["id"]
            if epf_id in fragility_sets:
                location = GeoUtil.get_location(epf)
                loc = str(location.y) + "," + str(location.x)
                demands = fragility_sets[epf_id].demand_types
                units = fragility_sets[epf_id].demand_units
                value = {"demands": demands, "units": units, "loc": loc}
                values_payload.append(value)
                mapped_epfs.append(epf)

                if liquefaction_available and epf["id"] in fragility_sets_liq:
                    fragility_set_liq = fragility_sets_liq[epf["id"]]
                    demands_liq = fragility_set_liq.demand_types
                    units_liq = fragility_set_liq.demand_units
                    value_liq = {
                        "demands": demands_liq,
                        "units": units_liq,
                        "loc": loc
                    }
                    values_payload_liq.append(value_liq)
            else:
                unmapped_epfs.append(epf)

        if hazard_type == 'earthquake':
            hazard_resp = self.hazardsvc.post_earthquake_hazard_values(
                hazard_dataset_id, values_payload)
        elif hazard_type == 'tornado':
            hazard_resp = self.hazardsvc.post_tornado_hazard_values(
                hazard_dataset_id, values_payload)
        elif hazard_type == 'hurricane':
            # TODO: implement hurricane
            raise ValueError('Hurricane hazard has not yet been implemented!')
        elif hazard_type == 'tsunami':
            hazard_resp = self.hazardsvc.post_tsunami_hazard_values(
                hazard_dataset_id, values_payload)
        else:
            raise ValueError("Missing hazard type.")

        liquefaction_resp = None
        if liquefaction_available:
            liquefaction_resp = self.hazardsvc.post_liquefaction_values(
                hazard_dataset_id, geology_dataset_id, values_payload_liq)

        ds_results = []
        damage_results = []

        for i, epf in enumerate(mapped_epfs):
            ds_result = dict()
            damage_result = dict()
            selected_fragility_set = fragility_sets[epf["id"]]

            if isinstance(selected_fragility_set.fragility_curves[0],
                          DFR3Curve):
                hazard_val = AnalysisUtil.update_precision_of_lists(
                    hazard_resp[i]["hazardValues"])
                input_demand_types = hazard_resp[i]["demands"]
                input_demand_units = hazard_resp[i]["units"]

                hval_dict = dict()
                for j, d in enumerate(selected_fragility_set.demand_types):
                    hval_dict[d] = hazard_val[j]

                epf_args = selected_fragility_set.construct_expression_args_from_inventory(
                    epf)
                limit_states = selected_fragility_set.calculate_limit_state(
                    hval_dict, inventory_type='electric_facility', **epf_args)

                if liquefaction_resp is not None:
                    fragility_set_liq = fragility_sets_liq[epf["id"]]

                    if isinstance(fragility_set_liq.fragility_curves[0],
                                  DFR3Curve):
                        liq_hazard_vals = AnalysisUtil.update_precision_of_lists(
                            liquefaction_resp[i]["pgdValues"])
                        liq_demand_types = liquefaction_resp[i]["demands"]
                        liq_demand_units = liquefaction_resp[i]["units"]
                        liquefaction_prob = liquefaction_resp[i][
                            'liqProbability']

                        hval_dict_liq = dict()
                        for j, d in enumerate(fragility_set_liq.demand_types):
                            hval_dict_liq[d] = liq_hazard_vals[j]

                        facility_liq_args = fragility_set_liq.construct_expression_args_from_inventory(
                            epf)
                        pgd_limit_states = \
                            fragility_set_liq.calculate_limit_state(
                                hval_dict_liq,
                                inventory_type="electric_facility",
                                **facility_liq_args)
                    else:
                        raise ValueError(
                            "One of the fragilities is in a deprecated format. "
                            "This should not happen. If you are seeing this, please report the issue."
                        )

                    limit_states = AnalysisUtil.adjust_limit_states_for_pgd(
                        limit_states, pgd_limit_states)

                dmg_interval = selected_fragility_set.calculate_damage_interval(
                    limit_states,
                    hazard_type=hazard_type,
                    inventory_type='electric_facility')
            else:
                raise ValueError(
                    "One of the fragilities is in a deprecated format. "
                    "This should not happen. If you are seeing this, please report the issue."
                )

            ds_result["guid"] = epf["properties"]["guid"]
            ds_result.update(limit_states)
            ds_result.update(dmg_interval)
            ds_result['haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(
                hazard_val, hazard_type)

            damage_result['guid'] = epf['properties']['guid']
            damage_result['fragility_id'] = selected_fragility_set.id
            damage_result["demandtypes"] = input_demand_types
            damage_result["demandunits"] = input_demand_units
            damage_result["hazardtype"] = hazard_type
            damage_result["hazardvals"] = hazard_val

            if hazard_type == "earthquake" and use_liquefaction is True:
                if liquefaction_available:
                    damage_result['liq_fragility_id'] = fragility_sets_liq[
                        epf["id"]].id
                    damage_result['liqdemandtypes'] = liq_demand_types
                    damage_result['liqdemandunits'] = liq_demand_units
                    damage_result['liqhazval'] = liq_hazard_vals
                    damage_result['liqprobability'] = liquefaction_prob
                else:
                    damage_result['liq_fragility_id'] = None
                    damage_result['liqdemandtypes'] = None
                    damage_result['liqdemandunits'] = None
                    damage_result['liqhazval'] = None
                    damage_result['liqprobability'] = None

            ds_results.append(ds_result)
            damage_results.append(damage_result)

        #############################################################
        # unmapped epfs
        for epf in unmapped_epfs:
            ds_result = dict()
            damage_result = dict()

            ds_result['guid'] = epf['properties']['guid']

            damage_result['guid'] = epf['properties']['guid']
            damage_result['fragility_id'] = None
            damage_result["demandtypes"] = None
            damage_result['demandunits'] = None
            damage_result["hazardtype"] = None
            damage_result['hazardvals'] = None
            if hazard_type == "earthquake" and use_liquefaction is True:
                damage_result['liq_fragility_id'] = None
                damage_result['liqdemandtypes'] = None
                damage_result['liqdemandunits'] = None
                damage_result['liqhazval'] = None
                damage_result['liqprobability'] = None

            ds_results.append(ds_result)
            damage_results.append(damage_result)

        return ds_results, damage_results

    def get_spec(self):
        """Get specifications of the epf damage analysis.

        Returns:
            obj: A JSON object of specifications of the epf damage analysis.

        """
        return {
            'name': 'epf-damage',
            'description': 'Electric Power Facility damage analysis.',
            'input_parameters': [
                {
                    'id': 'result_name',
                    'required': True,
                    'description': 'A name of the resulting dataset',
                    'type': str
                },
                {
                    'id': 'hazard_type',
                    'required': True,
                    'description': 'Hazard type (e.g. earthquake).',
                    'type': str
                },
                {
                    'id': 'hazard_id',
                    'required': True,
                    'description': 'Hazard ID which defines the particular hazard (e.g. New Madrid earthquake '
                                   'using Atkinson Boore 1995).',
                    'type': str
                },
                {
                    'id': 'fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in mapping dataset',
                    'type': str
                },
                {
                    'id': 'liquefaction_fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in liquefaction mapping dataset',
                    'type': str
                },
                {
                    'id': 'use_liquefaction',
                    'required': False,
                    'description': 'Use ground liquefaction to modify damage interval.',
                    'type': bool
                },
                {
                    'id': 'liquefaction_geology_dataset_id',
                    'required': False,
                    'description': 'Liquefaction geology/susceptibility dataset id. '
                                   'If not provided, liquefaction will be ignored',
                    'type': str
                },
                {
                    'id': 'use_hazard_uncertainty',
                    'required': False,
                    'description': 'Use hazard uncertainty',
                    'type': bool
                },
                {
                    'id': 'num_cpu',
                    'required': False,
                    'description': 'If using parallel execution, the number of cpus to request.',
                    'type': int
                },
            ],
            'input_datasets': [
                {
                    'id': 'epfs',
                    'required': True,
                    'description': 'Electric Power Facility Inventory',
                    'type': ['incore:epf', 'ergo:epf'],
                },
                {
                    'id': 'dfr3_mapping_set',
                    'required': True,
                    'description': 'DFR3 Mapping Set Object',
                    'type': ['incore:dfr3MappingSet'],
                }
            ],
            'output_datasets': [
                {
                    'id': 'result',
                    'parent_type': 'epfs',
                    'type': 'incore:epfDamageVer3'
                },
                {
                    'id': 'metadata',
                    'parent_type': 'epfs',
                    'description': 'additional metadata in json file about applied hazard value and '
                                   'fragility',
                    'type': 'incore:epfDamageSupplement'
                }
            ]
        }
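# Usage sketch (illustrative): EpfDamage follows the same wiring pattern; a
# tornado hazard is shown since the analysis also accepts tornado and tsunami
# hazard types. All IDs are hypothetical placeholders.
def _example_epf_damage(client):
    """Minimal sketch of running EpfDamage against a tornado hazard."""
    from pyincore import MappingSet

    epf_dmg = EpfDamage(client)
    epf_dmg.load_remote_input_dataset("epfs", "<epf-dataset-id>")

    fragility_service = FragilityService(client)
    mapping_set = MappingSet(fragility_service.get_mapping("<mapping-id>"))
    epf_dmg.set_input_dataset("dfr3_mapping_set", mapping_set)

    epf_dmg.set_parameter("result_name", "epf_dmg_result")
    epf_dmg.set_parameter("hazard_type", "tornado")
    epf_dmg.set_parameter("hazard_id", "<tornado-hazard-id>")
    epf_dmg.set_parameter("num_cpu", 4)

    return epf_dmg.run_analysis()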
class RoadDamage(BaseAnalysis):
    """Road Damage Analysis calculates the probability of road damage based on
    an earthquake or tsunami hazard.

    Args:
        incore_client (IncoreClient): Service authentication.

    """

    DEFAULT_FRAGILITY_KEY = "Non-Retrofit Fragility ID Code"

    def __init__(self, incore_client):
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(RoadDamage, self).__init__(incore_client)

    def run(self):
        """Executes road damage analysis."""
        # Road dataset
        road_set = self.get_input_dataset("roads").get_inventory_reader()

        # Get Fragility key
        fragility_key = self.get_parameter("fragility_key")
        if fragility_key is None:
            fragility_key = self.DEFAULT_FRAGILITY_KEY

        # Get hazard input
        hazard_dataset_id = self.get_parameter("hazard_id")

        # Get hazard type
        hazard_type = self.get_parameter("hazard_type")

        # Liquefaction
        use_liquefaction = False
        if self.get_parameter("use_liquefaction") is not None:
            use_liquefaction = self.get_parameter("use_liquefaction")

        # Get geology dataset for liquefaction
        geology_dataset_id = None
        if self.get_parameter("liquefaction_geology_dataset_id") is not None:
            geology_dataset_id = self.get_parameter("liquefaction_geology_dataset_id")

        # Hazard uncertainty
        use_hazard_uncertainty = False
        if self.get_parameter("use_hazard_uncertainty") is not None:
            use_hazard_uncertainty = self.get_parameter("use_hazard_uncertainty")

        user_defined_cpu = 1
        if self.get_parameter("num_cpu") is not None and self.get_parameter("num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(self, len(road_set), user_defined_cpu)

        avg_bulk_input_size = int(len(road_set) / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(road_set)
        while count < len(inventory_list):
            inventory_args.append(inventory_list[count:count + avg_bulk_input_size])
            count += avg_bulk_input_size

        (ds_results, damage_results) = self.road_damage_concurrent_future(self.road_damage_analysis_bulk_input,
                                                                          num_workers,
                                                                          inventory_args,
                                                                          repeat(hazard_type),
                                                                          repeat(hazard_dataset_id),
                                                                          repeat(use_hazard_uncertainty),
                                                                          repeat(geology_dataset_id),
                                                                          repeat(fragility_key),
                                                                          repeat(use_liquefaction))

        self.set_result_csv_data("result", ds_results, name=self.get_parameter("result_name"))
        self.set_result_json_data("metadata",
                                  damage_results,
                                  name=self.get_parameter("result_name") + "_additional_info")
        return True

    def road_damage_concurrent_future(self, function_name, num_workers, *args):
        """Utilizes the concurrent.futures module.

        Args:
            function_name (function): The function to be parallelized.
            num_workers (int): Number of workers in parallelization.
            *args: All the arguments in order to pass into parameter function_name.

        Returns:
            output_ds: A list of ordered dictionaries with road damage values.
            output_dmg: A list of ordered dictionaries with other road data/metadata.

        """
        output_ds = []
        output_dmg = []
        with concurrent.futures.ProcessPoolExecutor(max_workers=num_workers) as executor:
            for ret1, ret2 in executor.map(function_name, *args):
                output_ds.extend(ret1)
                output_dmg.extend(ret2)

        return output_ds, output_dmg

    def road_damage_analysis_bulk_input(self, roads, hazard_type, hazard_dataset_id, use_hazard_uncertainty,
                                        geology_dataset_id, fragility_key, use_liquefaction):
        """Run analysis for multiple roads.

        Args:
            roads (list): Multiple roads from the input inventory set.
            hazard_type (str): A hazard type of the hazard exposure (earthquake or tsunami).
            hazard_dataset_id (str): An id of the hazard exposure.
            use_hazard_uncertainty (bool): Flag to indicate whether to use uncertainty.
            geology_dataset_id (str): An id of the geology for use in liquefaction.
            fragility_key (str): Fragility key describing the type of fragility.
            use_liquefaction (bool): Liquefaction. True to use liquefaction information to modify the damage,
                False otherwise.

        Returns:
            list: A list of ordered dictionaries with road damage values and other data/metadata.
            list: A list of ordered dictionaries with other road data/metadata.

        """
        fragility_sets = self.fragilitysvc.match_inventory(self.get_input_dataset("dfr3_mapping_set"), roads,
                                                           fragility_key)

        values_payload = []
        mapped_roads = []
        unmapped_roads = []
        pgd_flag = True  # for liquefaction
        liquefaction_resp = None
        for road in roads:
            if road["id"] in fragility_sets.keys():
                fragility_set = fragility_sets[road["id"]]
                location = GeoUtil.get_location(road)
                loc = str(location.y) + "," + str(location.x)
                demands = fragility_set.demand_types

                # for liquefaction
                if any(demand.lower() != 'pgd' for demand in demands):
                    pgd_flag = False
                units = fragility_set.demand_units
                value = {
                    "demands": demands,
                    "units": units,
                    "loc": loc
                }
                values_payload.append(value)
                mapped_roads.append(road)
            else:
                unmapped_roads.append(road)

        del roads

        # get hazard and liquefaction values
        if hazard_type == 'earthquake':
            hazard_resp = self.hazardsvc.post_earthquake_hazard_values(hazard_dataset_id, values_payload)

            if pgd_flag and use_liquefaction and geology_dataset_id is not None:
                liquefaction_resp = self.hazardsvc.post_liquefaction_values(hazard_dataset_id, geology_dataset_id,
                                                                            values_payload)
        elif hazard_type == 'tsunami':
            hazard_resp = self.hazardsvc.post_tsunami_hazard_values(hazard_dataset_id, values_payload)
        elif hazard_type == 'hurricane':
            hazard_resp = self.hazardsvc.post_hurricane_hazard_values(hazard_dataset_id, values_payload)
        else:
            raise ValueError("The provided hazard type is not supported yet by this analysis")

        # calculate LS and DS
        ds_results = []
        damage_results = []
        for i, road in enumerate(mapped_roads):
            dmg_probability = dict()
            dmg_interval = dict()
            demand_types_liq = None
            demand_units_liq = None
            liq_hazard_vals = None
            liquefaction_prob = None
            selected_fragility_set = fragility_sets[road["id"]]
            hazard_std_dev = 0.0
            if use_hazard_uncertainty:
                raise ValueError("Uncertainty is not implemented yet.")

            if isinstance(selected_fragility_set.fragility_curves[0], DFR3Curve):
                hazard_vals = AnalysisUtil.update_precision_of_lists(hazard_resp[i]["hazardValues"])
                demand_types = hazard_resp[i]["demands"]
                demand_units = hazard_resp[i]["units"]

                hval_dict = dict()
                for j, d in enumerate(selected_fragility_set.demand_types):
                    hval_dict[d] = hazard_vals[j]

                if not AnalysisUtil.do_hazard_values_have_errors(hazard_resp[i]["hazardValues"]):
                    road_args = selected_fragility_set.construct_expression_args_from_inventory(road)
                    dmg_probability = selected_fragility_set.calculate_limit_state(
                        hval_dict, inventory_type='road', **road_args)

                    # if there is liquefaction, overwrite the hazard values with liquefaction values
                    # and recalculate dmg_probability and dmg_interval
                    if liquefaction_resp is not None and len(liquefaction_resp) > 0:
                        liq_hazard_vals = AnalysisUtil.update_precision_of_lists(liquefaction_resp[i]["pgdValues"])
                        demand_types_liq = liquefaction_resp[i]['demands']
                        demand_units_liq = liquefaction_resp[i]['units']
                        liquefaction_prob = liquefaction_resp[i]['liqProbability']

                        liq_hval_dict = dict()
                        for j, d in enumerate(liquefaction_resp[i]["demands"]):
                            liq_hval_dict[d] = liq_hazard_vals[j]
                        dmg_probability = selected_fragility_set.calculate_limit_state(
                            liq_hval_dict, inventory_type='road', **road_args)

                    dmg_interval = selected_fragility_set.calculate_damage_interval(dmg_probability,
                                                                                    hazard_type=hazard_type,
                                                                                    inventory_type="road")
            else:
                raise ValueError("One of the fragilities is in a deprecated format. This should not happen. "
                                 "If you are seeing this, please report the issue.")

            ds_result = dict()
            ds_result['guid'] = road['properties']['guid']
            ds_result.update(dmg_probability)
            ds_result.update(dmg_interval)
            ds_result['haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(hazard_vals, hazard_type)

            damage_result = dict()
            damage_result['guid'] = road['properties']['guid']
            damage_result['fragility_id'] = selected_fragility_set.id
            damage_result['demandtypes'] = demand_types
            damage_result['demandunits'] = demand_units
            damage_result['hazardtype'] = hazard_type
            damage_result['hazardvals'] = hazard_vals
            damage_result['liqdemandtypes'] = demand_types_liq
            damage_result['liqdemandunits'] = demand_units_liq
            damage_result['liqhazvals'] = liq_hazard_vals
            damage_result['liqprobability'] = liquefaction_prob

            ds_results.append(ds_result)
            damage_results.append(damage_result)

        for road in unmapped_roads:
            ds_result = dict()
            damage_result = dict()

            ds_result['guid'] = road['properties']['guid']

            damage_result['guid'] = road['properties']['guid']
            damage_result['fragility_id'] = None
            damage_result['demandtypes'] = None
            damage_result['demandunits'] = None
            damage_result['hazardtype'] = None
            damage_result['hazardvals'] = None
            damage_result['liqdemandtypes'] = None
            damage_result['liqdemandunits'] = None
            damage_result['liqhazvals'] = None
            damage_result['liqprobability'] = None

            ds_results.append(ds_result)
            damage_results.append(damage_result)

        return ds_results, damage_results

    def get_spec(self):
        """Get specifications of the road damage analysis.

        Returns:
            obj: A JSON object of specifications of the road damage analysis.

        """
        return {
            'name': 'road-damage',
            'description': 'road damage analysis',
            'input_parameters': [
                {
                    'id': 'result_name',
                    'required': True,
                    'description': 'result dataset name',
                    'type': str
                },
                {
                    'id': 'hazard_type',
                    'required': True,
                    'description': 'Hazard Type (e.g. earthquake)',
                    'type': str
                },
                {
                    'id': 'hazard_id',
                    'required': True,
                    'description': 'Hazard ID',
                    'type': str
                },
                {
                    'id': 'fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in mapping dataset',
                    'type': str
                },
                {
                    'id': 'use_liquefaction',
                    'required': False,
                    'description': 'Use liquefaction',
                    'type': bool
                },
                {
                    'id': 'liquefaction_geology_dataset_id',
                    'required': False,
                    'description': 'Liquefaction geology/susceptibility dataset id. '
                                   'If not provided, liquefaction will be ignored',
                    'type': str
                },
                {
                    'id': 'use_hazard_uncertainty',
                    'required': False,
                    'description': 'Use hazard uncertainty',
                    'type': bool
                },
                {
                    'id': 'num_cpu',
                    'required': False,
                    'description': 'If using parallel execution, the number of cpus to request',
                    'type': int
                },
            ],
            'input_datasets': [
                {
                    'id': 'roads',
                    'required': True,
                    'description': 'Road Inventory',
                    'type': ['ergo:roadLinkTopo', 'incore:roads', 'ergo:roadLinkTopoVer2']
                },
                {
                    'id': 'dfr3_mapping_set',
                    'required': True,
                    'description': 'DFR3 Mapping Set Object',
                    'type': ['incore:dfr3MappingSet'],
                }
            ],
            'output_datasets': [
                {
                    'id': 'result',
                    'parent_type': 'roads',
                    'description': 'CSV file of road structural damage',
                    'type': 'ergo:roadDamageVer3'
                },
                {
                    'id': 'metadata',
                    'parent_type': 'roads',
                    'description': 'additional metadata in json file about applied hazard value and '
                                   'fragility',
                    'type': 'incore:roadDamageSupplement'
                }
            ]
        }
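# Usage sketch (illustrative): RoadDamage wiring for an earthquake with
# liquefaction. Dataset, mapping, and hazard IDs are hypothetical placeholders.
def _example_road_damage(client):
    """Minimal sketch of running RoadDamage for an earthquake with liquefaction."""
    from pyincore import MappingSet

    road_dmg = RoadDamage(client)
    road_dmg.load_remote_input_dataset("roads", "<road-dataset-id>")

    fragility_service = FragilityService(client)
    mapping_set = MappingSet(fragility_service.get_mapping("<mapping-id>"))
    road_dmg.set_input_dataset("dfr3_mapping_set", mapping_set)

    road_dmg.set_parameter("result_name", "road_dmg_result")
    road_dmg.set_parameter("hazard_type", "earthquake")
    road_dmg.set_parameter("hazard_id", "<hazard-id>")
    road_dmg.set_parameter("use_liquefaction", True)
    road_dmg.set_parameter("liquefaction_geology_dataset_id", "<geology-id>")
    road_dmg.set_parameter("num_cpu", 4)

    return road_dmg.run_analysis()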
class NonStructBuildingDamage(BaseAnalysis):
    """Computes non-structural building damage for an earthquake hazard.

    Args:
        incore_client (IncoreClient): Service authentication.

    """

    def __init__(self, incore_client):
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(NonStructBuildingDamage, self).__init__(incore_client)

    def run(self):
        """Executes building damage analysis."""
        # Building dataset
        building_set = self.get_input_dataset(
            "buildings").get_inventory_reader()

        # set default fragility keys
        fragility_key_as = self.get_parameter("fragility_key_as")
        if fragility_key_as is None:
            self.set_parameter("fragility_key_as",
                               NonStructBuildingUtil.DEFAULT_FRAGILITY_KEY_AS)

        fragility_key_ds = self.get_parameter("fragility_key_ds")
        if fragility_key_ds is None:
            self.set_parameter("fragility_key_ds",
                               NonStructBuildingUtil.DEFAULT_FRAGILITY_KEY_DS)

        # set default hazard uncertainty
        use_hazard_uncertainty = self.get_parameter("use_hazard_uncertainty")
        if use_hazard_uncertainty is None:
            self.set_parameter("use_hazard_uncertainty", False)

        # set default liquefaction
        use_liquefaction = self.get_parameter("use_liquefaction")
        if use_liquefaction is None:
            self.set_parameter("use_liquefaction", False)

        user_defined_cpu = 1
        if self.get_parameter("num_cpu") is not None and self.get_parameter(
                "num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(
            self, len(building_set), user_defined_cpu)

        avg_bulk_input_size = int(len(building_set) / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(building_set)
        while count < len(inventory_list):
            inventory_args.append(
                inventory_list[count:count + avg_bulk_input_size])
            count += avg_bulk_input_size

        (ds_results,
         damage_results) = self.building_damage_concurrent_future(
             self.building_damage_analysis_bulk_input, num_workers,
             inventory_args)

        self.set_result_csv_data("result",
                                 ds_results,
                                 name=self.get_parameter("result_name"))
        self.set_result_json_data("damage_result",
                                  damage_results,
                                  name=self.get_parameter("result_name") +
                                  "_additional_info")
        return True

    def building_damage_concurrent_future(self, function_name, num_workers,
                                          *args):
        """Utilizes the concurrent.futures module.

        Args:
            function_name (function): The function to be parallelized.
            num_workers (int): Maximum number of workers in parallelization.
            *args: All the arguments in order to pass into parameter function_name.

        Returns:
            dict: An ordered dictionary with building damage values.
            dict: An ordered dictionary with building data/metadata.

        """
        output_ds = []
        output_dmg = []
        with concurrent.futures.ProcessPoolExecutor(
                max_workers=num_workers) as executor:
            for ret1, ret2 in executor.map(function_name, *args):
                output_ds.extend(ret1)
                output_dmg.extend(ret2)

        return output_ds, output_dmg

    def building_damage_analysis_bulk_input(self, buildings):
        """Run analysis for multiple buildings.

        Args:
            buildings (list): Multiple buildings from the input inventory set.

        Returns:
            dict: An ordered dictionary with building damage values.
            dict: An ordered dictionary with building data/metadata.

        """
        # read static parameters from object self
        hazard_type = self.get_parameter("hazard_type")
        hazard_dataset_id = self.get_parameter("hazard_id")
        liq_geology_dataset_id = self.get_parameter("liq_geology_dataset_id")
        use_liquefaction = self.get_parameter("use_liquefaction")
        use_hazard_uncertainty = self.get_parameter("use_hazard_uncertainty")

        building_results = []
        damage_results = []
        fragility_sets_as = self.fragilitysvc.match_inventory(
            self.get_input_dataset("dfr3_mapping_set"), buildings,
            self.get_parameter("fragility_key_as"))
        fragility_sets_ds = self.fragilitysvc.match_inventory(
            self.get_input_dataset("dfr3_mapping_set"), buildings,
            self.get_parameter("fragility_key_ds"))
        values_payload_as = []
        values_payload_ds = []
        values_payload_liq = []
        mapped_buildings = []
        unmapped_buildings = []

        for building in buildings:
            if building["id"] in fragility_sets_as and building[
                    "id"] in fragility_sets_ds:
                fragility_set_as = fragility_sets_as[building["id"]]
                fragility_set_ds = fragility_sets_ds[building["id"]]
                location = GeoUtil.get_location(building)
                loc = str(location.y) + "," + str(location.x)

                # Acceleration-Sensitive
                demands_as = AnalysisUtil.get_hazard_demand_types(
                    building, fragility_set_as, hazard_type)
                units_as = fragility_set_as.demand_units
                value_as = {
                    "demands": demands_as,
                    "units": units_as,
                    "loc": loc
                }
                values_payload_as.append(value_as)

                # Drift-Sensitive
                demands_ds = AnalysisUtil.get_hazard_demand_types(
                    building, fragility_set_ds, hazard_type)
                units_ds = fragility_set_ds.demand_units
                value_ds = {
                    "demands": demands_ds,
                    "units": units_ds,
                    "loc": loc
                }
                values_payload_ds.append(value_ds)

                # liquefaction
                if use_liquefaction:
                    value_liq = {
                        "demands": ["pgd"],  # implied...
                        "units": ["in"],
                        "loc": loc
                    }
                    values_payload_liq.append(value_liq)

                mapped_buildings.append(building)
            else:
                unmapped_buildings.append(building)

        del buildings

        # get hazard values and liquefaction
        if hazard_type == 'earthquake':
            hazard_resp_as = self.hazardsvc.post_earthquake_hazard_values(
                hazard_dataset_id, values_payload_as)
            hazard_resp_ds = self.hazardsvc.post_earthquake_hazard_values(
                hazard_dataset_id, values_payload_ds)

            # adjust dmg probability for liquefaction
            if use_liquefaction:
                if liq_geology_dataset_id is not None:
                    liquefaction_resp = self.hazardsvc.post_liquefaction_values(
                        hazard_dataset_id, liq_geology_dataset_id,
                        values_payload_liq)
                else:
                    raise ValueError(
                        'Hazard does not support liquefaction! Check to make sure you defined the '
                        'liquefaction portion of your scenario earthquake.')
        else:
            raise ValueError(
                "The provided hazard type is not supported yet by this analysis"
            )

        # calculate LS and DS
        for i, building in enumerate(mapped_buildings):
            dmg_probability_as = {"LS_0": None, "LS_1": None, "LS_2": None}
            dmg_interval_as = {
                "DS_0": None,
                "DS_1": None,
                "DS_2": None,
                "DS_3": None
            }
            dmg_probability_ds = {"LS_0": None, "LS_1": None, "LS_2": None}
            dmg_interval_ds = {
                "DS_0": None,
                "DS_1": None,
                "DS_2": None,
                "DS_3": None
            }
            fragility_set_as = fragility_sets_as[building["id"]]
            fragility_set_ds = fragility_sets_ds[building["id"]]

            # TODO: this value needs to come from the hazard service
            # adjust dmg probability for hazard uncertainty
            if use_hazard_uncertainty:
                raise ValueError('Uncertainty has not yet been implemented!')

            ###############
            # AS
            if isinstance(fragility_set_as.fragility_curves[0], DFR3Curve):
                hazard_vals_as = AnalysisUtil.update_precision_of_lists(
                    hazard_resp_as[i]["hazardValues"])
                demand_types_as = hazard_resp_as[i]["demands"]
                demand_units_as = hazard_resp_as[i]["units"]

                hval_dict_as = dict()
                for j, d in enumerate(fragility_set_as.demand_types):
                    hval_dict_as[d] = hazard_vals_as[j]

                if not AnalysisUtil.do_hazard_values_have_errors(
                        hazard_resp_as[i]["hazardValues"]):
                    building_args = fragility_set_as.construct_expression_args_from_inventory(
                        building)
                    dmg_probability_as = fragility_set_as. \
                        calculate_limit_state(hval_dict_as,
                                              inventory_type="building",
                                              **building_args)
                    # adjust dmg probability for liquefaction
                    if use_liquefaction:
                        if liq_geology_dataset_id is not None:
                            liquefaction_dmg = AnalysisUtil.update_precision_of_lists(
                                liquefaction_resp[i]["groundFailureProb"])
                            dmg_probability_as = AnalysisUtil.update_precision_of_dicts(
                                NonStructBuildingUtil.
                                adjust_damage_for_liquefaction(
                                    dmg_probability_as, liquefaction_dmg))

                    dmg_interval_as = fragility_set_as.calculate_damage_interval(
                        dmg_probability_as,
                        hazard_type=hazard_type,
                        inventory_type="building")
            else:
                raise ValueError(
                    "One of the fragilities is in a deprecated format. "
                    "This should not happen. If you are seeing this, please report the issue."
                )

            ###############
            # DS
            if isinstance(fragility_set_ds.fragility_curves[0], DFR3Curve):
                hazard_vals_ds = AnalysisUtil.update_precision_of_lists(
                    hazard_resp_ds[i]["hazardValues"])
                demand_types_ds = hazard_resp_ds[i]["demands"]
                demand_units_ds = hazard_resp_ds[i]["units"]

                hval_dict_ds = dict()
                for j, d in enumerate(fragility_set_ds.demand_types):
                    hval_dict_ds[d] = hazard_vals_ds[j]

                if not AnalysisUtil.do_hazard_values_have_errors(
                        hazard_resp_ds[i]["hazardValues"]):
                    building_args = fragility_set_ds.construct_expression_args_from_inventory(
                        building)
                    dmg_probability_ds = fragility_set_ds. \
                        calculate_limit_state(hval_dict_ds,
                                              inventory_type="building",
                                              **building_args)
                    # adjust dmg probability for liquefaction
                    if use_liquefaction:
                        if liq_geology_dataset_id is not None:
                            liquefaction_dmg = AnalysisUtil.update_precision_of_lists(
                                liquefaction_resp[i]["groundFailureProb"])
                            dmg_probability_ds = AnalysisUtil.update_precision_of_dicts(
                                NonStructBuildingUtil.
                                adjust_damage_for_liquefaction(
                                    dmg_probability_ds, liquefaction_dmg))

                    dmg_interval_ds = fragility_set_ds.calculate_damage_interval(
                        dmg_probability_ds,
                        hazard_type=hazard_type,
                        inventory_type="building")
            else:
                raise ValueError(
                    "One of the fragilities is in a deprecated format. "
                    "This should not happen. If you are seeing this, please report the issue."
                )

            # put results in dictionary
            # AS denotes acceleration-sensitive fragility assigned to the building.
            # DS denotes drift-sensitive fragility assigned to the building.
            building_result = dict()
            building_result['guid'] = building['properties']['guid']
            building_result['AS_LS_0'] = dmg_probability_as['LS_0']
            building_result['AS_LS_1'] = dmg_probability_as['LS_1']
            building_result['AS_LS_2'] = dmg_probability_as['LS_2']
            building_result['AS_DS_0'] = dmg_interval_as['DS_0']
            building_result['AS_DS_1'] = dmg_interval_as['DS_1']
            building_result['AS_DS_2'] = dmg_interval_as['DS_2']
            building_result['AS_DS_3'] = dmg_interval_as['DS_3']
            building_result['DS_LS_0'] = dmg_probability_ds['LS_0']
            building_result['DS_LS_1'] = dmg_probability_ds['LS_1']
            building_result['DS_LS_2'] = dmg_probability_ds['LS_2']
            building_result['DS_DS_0'] = dmg_interval_ds['DS_0']
            building_result['DS_DS_1'] = dmg_interval_ds['DS_1']
            building_result['DS_DS_2'] = dmg_interval_ds['DS_2']
            building_result['DS_DS_3'] = dmg_interval_ds['DS_3']
            building_result['hazard_exposure_as'] = AnalysisUtil.get_exposure_from_hazard_values(
                hazard_vals_as, hazard_type)
            building_result['hazard_exposure_ds'] = AnalysisUtil.get_exposure_from_hazard_values(
                hazard_vals_ds, hazard_type)

            # put damage results in dictionary
            damage_result = dict()
            damage_result['guid'] = building['properties']['guid']
            damage_result['fragility_id_as'] = fragility_set_as.id
            damage_result['demandtypes_as'] = demand_types_as
            damage_result['demandunits_as'] = demand_units_as
            damage_result['fragility_id_ds'] = fragility_set_ds.id
            damage_result['demandtypes_ds'] = demand_types_ds
            damage_result['demandunits_ds'] = demand_units_ds
            damage_result['hazardtype'] = hazard_type
            damage_result['hazardvals_as'] = hazard_vals_as
            damage_result['hazardvals_ds'] = hazard_vals_ds

            building_results.append(building_result)
            damage_results.append(damage_result)

        for building in unmapped_buildings:
            building_result = dict()
            building_result['guid'] = building['properties']['guid']

            damage_result = dict()
            damage_result['guid'] = building['properties']['guid']
            damage_result['fragility_id_as'] = None
            damage_result['demandtypes_as'] = None
            damage_result['demandunits_as'] = None
            damage_result['fragility_id_ds'] = None
            damage_result['demandtypes_ds'] = None
            damage_result['demandunits_ds'] = None
            damage_result['hazardtype'] = None
            damage_result['hazardvals_as'] = None
            damage_result['hazardvals_ds'] = None

            building_results.append(building_result)
            damage_results.append(damage_result)

        return building_results, damage_results

    def get_spec(self):
        """Get specifications of the building damage analysis.

        Returns:
            obj: A JSON object of specifications of the building damage analysis.

        """
        return {
            'name': 'building-damage',
            'description': 'building damage analysis',
            'input_parameters': [
                {
                    'id': 'result_name',
                    'required': True,
                    'description': 'result dataset name',
                    'type': str
                },
                {
                    'id': 'hazard_type',
                    'required': True,
                    'description': 'Hazard Type (e.g. earthquake)',
                    'type': str
                },
                {
                    'id': 'hazard_id',
                    'required': True,
                    'description': 'Hazard ID',
                    'type': str
                },
                {
                    'id': 'fragility_key_as',
                    'required': False,
                    'description': 'Acceleration-Sensitive Fragility key to use in mapping dataset',
                    'type': str
                },
                {
                    'id': 'fragility_key_ds',
                    'required': False,
                    'description': 'Drift-Sensitive Fragility key to use in mapping dataset',
                    'type': str
                },
                {
                    'id': 'use_liquefaction',
                    'required': False,
                    'description': 'Use liquefaction',
                    'type': bool
                },
                {
                    'id': 'liq_geology_dataset_id',
                    'required': False,
                    'description': 'liquefaction geology dataset id; if use_liquefaction is true, '
                                   'you have to provide this id',
                    'type': str
                },
                {
                    'id': 'use_hazard_uncertainty',
                    'required': False,
                    'description': 'Use hazard uncertainty',
                    'type': bool
                },
                {
                    'id': 'num_cpu',
                    'required': False,
                    'description': 'If using parallel execution, the number of cpus to request',
                    'type': int
                },
            ],
            'input_datasets': [
                {
                    'id': 'buildings',
                    'required': True,
                    'description': 'building Inventory',
                    'type': ['ergo:buildingInventoryVer4'],
                },
                {
                    'id': 'dfr3_mapping_set',
                    'required': True,
                    'description': 'DFR3 Mapping Set Object',
                    'type': ['incore:dfr3MappingSet'],
                }
            ],
            'output_datasets': [
                {
                    'id': 'result',
                    'parent_type': 'buildings',
                    'description': 'CSV file of damage states for building non-structural damage',
                    'type': 'ergo:nsBuildingInventoryDamageVer3'
                },
                {
                    'id': 'damage_result',
                    'parent_type': 'buildings',
                    'description': 'Json file with information about applied hazard value and fragility',
                    'type': 'incore:nsBuildingInventoryDamageSupplement'
                }
            ]
        }
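# Usage sketch (illustrative): NonStructBuildingDamage takes separate
# acceleration-sensitive (AS) and drift-sensitive (DS) fragility keys; the
# NonStructBuildingUtil defaults apply when they are not set. All IDs are
# hypothetical placeholders.
def _example_nonstruct_building_damage(client):
    """Minimal sketch of running NonStructBuildingDamage for an earthquake."""
    from pyincore import MappingSet

    ns_dmg = NonStructBuildingDamage(client)
    ns_dmg.load_remote_input_dataset("buildings", "<building-dataset-id>")

    fragility_service = FragilityService(client)
    mapping_set = MappingSet(fragility_service.get_mapping("<mapping-id>"))
    ns_dmg.set_input_dataset("dfr3_mapping_set", mapping_set)

    ns_dmg.set_parameter("result_name", "non_structural_building_dmg")
    ns_dmg.set_parameter("hazard_type", "earthquake")
    ns_dmg.set_parameter("hazard_id", "<hazard-id>")
    ns_dmg.set_parameter("use_liquefaction", True)
    ns_dmg.set_parameter("liq_geology_dataset_id", "<geology-id>")
    ns_dmg.set_parameter("num_cpu", 4)

    return ns_dmg.run_analysis()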