Example #1
class WaterFacilityDamage(BaseAnalysis):
    """Computes water facility damage for an earthquake tsunami, tornado, or hurricane exposure.

    """

    DEFAULT_EQ_FRAGILITY_KEY = "pga"
    DEFAULT_TSU_FRAGILITY_KEY = "Non-Retrofit inundationDepth Fragility ID Code"
    DEFAULT_LIQ_FRAGILITY_KEY = "pgd"

    def __init__(self, incore_client):
        # Create Hazard and Fragility service
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(WaterFacilityDamage, self).__init__(incore_client)

    def run(self):
        """Performs Water facility damage analysis by using the parameters from the spec
        and creates an output dataset in csv format

        Returns:
            bool: True if successful, False otherwise
        """
        # Facility dataset
        inventory_set = self.get_input_dataset(
            "water_facilities").get_inventory_reader()

        # Get hazard input
        hazard_dataset_id = self.get_parameter("hazard_id")

        # Hazard type of the exposure
        hazard_type = self.get_parameter("hazard_type")

        user_defined_cpu = 1

        if self.get_parameter("num_cpu") is not None and self.get_parameter(
                "num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(
            self, len(inventory_set), user_defined_cpu)

        avg_bulk_input_size = int(len(inventory_set) / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(inventory_set)
        while count < len(inventory_list):
            inventory_args.append(inventory_list[count:count +
                                                 avg_bulk_input_size])
            count += avg_bulk_input_size

        (ds_results,
         damage_results) = self.waterfacility_damage_concurrent_futures(
             self.waterfacilityset_damage_analysis_bulk_input, num_workers,
             inventory_args, repeat(hazard_type), repeat(hazard_dataset_id))

        self.set_result_csv_data("result",
                                 ds_results,
                                 name=self.get_parameter("result_name"))
        self.set_result_json_data("metadata",
                                  damage_results,
                                  name=self.get_parameter("result_name") +
                                  "_additional_info")

        return True

    def waterfacility_damage_concurrent_futures(self, function_name,
                                                parallel_processes, *args):
        """Utilizes concurrent.future module.

            Args:
                function_name (function): The function to be parallelized.
                parallel_processes (int): Number of workers in parallelization.
                *args: All the arguments in order to pass into parameter function_name.

            Returns:
                list: A list of ordered dictionaries with water facility damage values
                list: A list of ordered dictionaries with other water facility data/metadata


        """
        output_ds = []
        output_dmg = []

        with concurrent.futures.ProcessPoolExecutor(
                max_workers=parallel_processes) as executor:
            for ret1, ret2 in executor.map(function_name, *args):
                output_ds.extend(ret1)
                output_dmg.extend(ret2)

        return output_ds, output_dmg

    def waterfacilityset_damage_analysis_bulk_input(self, facilities,
                                                    hazard_type,
                                                    hazard_dataset_id):
        """Gets applicable fragilities and calculates damage

        Args:
            facilities (list): Multiple water facilities from input inventory set.
            hazard_type (str): A hazard type of the hazard exposure (earthquake or tsunami).
            hazard_dataset_id (str): An id of the hazard exposure.

        Returns:
            list: A list of ordered dictionaries with water facility damage values
            list: A list of ordered dictionaries with other water facility data/metadata
        """

        # Liquefaction related variables
        use_liquefaction = False
        liquefaction_available = False
        fragility_sets_liq = None
        liquefaction_resp = None
        geology_dataset_id = None
        liq_hazard_vals = None
        liq_demand_types = None
        liq_demand_units = None
        liquefaction_prob = None
        loc = None

        # Obtain the fragility key
        fragility_key = self.get_parameter("fragility_key")

        if fragility_key is None:
            if hazard_type == 'tsunami':
                fragility_key = self.DEFAULT_TSU_FRAGILITY_KEY
            elif hazard_type == 'earthquake':
                fragility_key = self.DEFAULT_EQ_FRAGILITY_KEY
            else:
                raise ValueError(
                    "Hazard types other than earthquake and tsunami are not currently supported."
                )

            self.set_parameter("fragility_key", fragility_key)

        # Obtain the fragility set
        fragility_sets = self.fragilitysvc.match_inventory(
            self.get_input_dataset("dfr3_mapping_set"), facilities,
            fragility_key)

        # Obtain the liquefaction fragility Key
        liquefaction_fragility_key = self.get_parameter(
            "liquefaction_fragility_key")

        if hazard_type == "earthquake":
            if self.get_parameter("use_liquefaction") is True:
                if liquefaction_fragility_key is None:
                    liquefaction_fragility_key = self.DEFAULT_LIQ_FRAGILITY_KEY

                use_liquefaction = self.get_parameter("use_liquefaction")

                # Obtain the geology dataset
                geology_dataset_id = self.get_parameter(
                    "liquefaction_geology_dataset_id")

                if geology_dataset_id is not None:
                    fragility_sets_liq = self.fragilitysvc.match_inventory(
                        self.get_input_dataset("dfr3_mapping_set"), facilities,
                        liquefaction_fragility_key)

                    if fragility_sets_liq is not None:
                        liquefaction_available = True

        # Determine whether to use hazard uncertainty
        uncertainty = self.get_parameter("use_hazard_uncertainty")

        # Setup fragility translation structures
        values_payload = []
        values_payload_liq = []
        unmapped_waterfacilities = []
        mapped_waterfacilities = []

        for facility in facilities:
            if facility["id"] in fragility_sets.keys():
                # Fill in generic details
                fragility_set = fragility_sets[facility["id"]]
                location = GeoUtil.get_location(facility)
                loc = str(location.y) + "," + str(location.x)
                demands = fragility_set.demand_types
                units = fragility_set.demand_units
                value = {"demands": demands, "units": units, "loc": loc}
                values_payload.append(value)
                mapped_waterfacilities.append(facility)

                # Fill in liquefaction parameters
                if liquefaction_available and facility[
                        "id"] in fragility_sets_liq:
                    fragility_set_liq = fragility_sets_liq[facility["id"]]
                    demands_liq = fragility_set_liq.demand_types
                    units_liq = fragility_set_liq.demand_units
                    value_liq = {
                        "demands": demands_liq,
                        "units": units_liq,
                        "loc": loc
                    }
                    values_payload_liq.append(value_liq)
            else:
                unmapped_waterfacilities.append(facility)

        del facilities

        if hazard_type == 'earthquake':
            hazard_resp = self.hazardsvc.post_earthquake_hazard_values(
                hazard_dataset_id, values_payload)
        elif hazard_type == 'tsunami':
            hazard_resp = self.hazardsvc.post_tsunami_hazard_values(
                hazard_dataset_id, values_payload)
        else:
            raise ValueError(
                "The provided hazard type is not supported yet by this analysis"
            )

        # Check if liquefaction is applicable
        if liquefaction_available:
            liquefaction_resp = self.hazardsvc.post_liquefaction_values(
                hazard_dataset_id, geology_dataset_id, values_payload_liq)

        # Calculate LS and DS
        facility_results = []
        damage_results = []

        for i, facility in enumerate(mapped_waterfacilities):
            fragility_set = fragility_sets[facility["id"]]
            limit_states = dict()
            dmg_intervals = dict()

            # Setup conditions for the analysis
            hazard_std_dev = 0

            if uncertainty:
                hazard_std_dev = random.random()

            if isinstance(fragility_set.fragility_curves[0], DFR3Curve):
                hazard_vals = AnalysisUtil.update_precision_of_lists(
                    hazard_resp[i]["hazardValues"])
                demand_types = hazard_resp[i]["demands"]
                demand_units = hazard_resp[i]["units"]

                hval_dict = dict()

                for j, d in enumerate(fragility_set.demand_types):
                    hval_dict[d] = hazard_vals[j]

                if not AnalysisUtil.do_hazard_values_have_errors(
                        hazard_resp[i]["hazardValues"]):
                    facility_args = fragility_set.construct_expression_args_from_inventory(
                        facility)
                    limit_states = \
                        fragility_set.calculate_limit_state(hval_dict,
                                                            std_dev=hazard_std_dev,
                                                            inventory_type='water_facility',
                                                            **facility_args)
                    # Evaluate liquefaction: if the response is not None, liquefaction data is available
                    if liquefaction_resp is not None:
                        fragility_set_liq = fragility_sets_liq[facility["id"]]

                        if isinstance(fragility_set_liq.fragility_curves[0],
                                      DFR3Curve):
                            liq_hazard_vals = AnalysisUtil.update_precision_of_lists(
                                liquefaction_resp[i]["pgdValues"])
                            liq_demand_types = liquefaction_resp[i]["demands"]
                            liq_demand_units = liquefaction_resp[i]["units"]
                            liquefaction_prob = liquefaction_resp[i][
                                'liqProbability']

                            hval_dict_liq = dict()

                            for j, d in enumerate(
                                    fragility_set_liq.demand_types):
                                hval_dict_liq[d] = liq_hazard_vals[j]

                            facility_liq_args = fragility_set_liq.construct_expression_args_from_inventory(
                                facility)
                            pgd_limit_states = \
                                fragility_set_liq.calculate_limit_state(
                                    hval_dict_liq, std_dev=hazard_std_dev, inventory_type="water_facility",
                                    **facility_liq_args)
                        else:
                            raise ValueError(
                                "One of the fragilities is in deprecated format. "
                                "This should not happen. If you are seeing this, please report the issue."
                            )

                        limit_states = AnalysisUtil.adjust_limit_states_for_pgd(
                            limit_states, pgd_limit_states)

                    dmg_intervals = fragility_set.calculate_damage_interval(
                        limit_states,
                        hazard_type=hazard_type,
                        inventory_type='water_facility')
            else:
                raise ValueError(
                    "One of the fragilities is in deprecated format. This should not happen. "
                    "If you are seeing this, please report the issue.")

            # TODO: ideally, this goes into a single variable declaration section

            facility_result = {
                'guid': facility['properties']['guid'],
                **limit_states,
                **dmg_intervals
            }
            facility_result[
                'haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(
                    hazard_vals, hazard_type)
            damage_result = dict()
            damage_result['guid'] = facility['properties']['guid']
            damage_result['fragility_id'] = fragility_set.id
            damage_result['demandtypes'] = demand_types
            damage_result['demandunits'] = demand_units
            damage_result['hazardtype'] = hazard_type
            damage_result['hazardvals'] = hazard_vals

            if use_liquefaction and fragility_sets_liq and geology_dataset_id:
                damage_result['liq_fragility_id'] = fragility_sets_liq[
                    facility["id"]].id
                damage_result['liqdemandtypes'] = liq_demand_types
                damage_result['liqdemandunits'] = liq_demand_units
                damage_result['liqhazval'] = liq_hazard_vals
                damage_result['liqprobability'] = liquefaction_prob
            else:
                damage_result['liq_fragility_id'] = None
                damage_result['liqdemandtypes'] = None
                damage_result['liqdemandunits'] = None
                damage_result['liqhazval'] = None
                damage_result['liqprobability'] = None

            facility_results.append(facility_result)
            damage_results.append(damage_result)

        for facility in unmapped_waterfacilities:
            facility_result = dict()
            damage_result = dict()
            facility_result['guid'] = facility['properties']['guid']
            damage_result['guid'] = facility['properties']['guid']
            damage_result['fragility_id'] = None
            damage_result['demandtypes'] = None
            damage_result['demandunits'] = None
            damage_result['hazardtype'] = None
            damage_result['hazardvals'] = None
            damage_result['liq_fragility_id'] = None
            damage_result['liqdemandtypes'] = None
            damage_result['liqdemandunits'] = None
            damage_result['liqhazval'] = None
            damage_result['liqprobability'] = None

            facility_results.append(facility_result)
            damage_results.append(damage_result)

        return facility_results, damage_results

    def get_spec(self):
        return {
            'name': 'water-facility-damage',
            'description': 'water facility damage analysis',
            'input_parameters': [
                {
                    'id': 'result_name',
                    'required': False,
                    'description': 'result dataset name',
                    'type': str
                },
                {
                    'id': 'hazard_type',
                    'required': True,
                    'description': 'Hazard Type (e.g. earthquake)',
                    'type': str
                },
                {
                    'id': 'hazard_id',
                    'required': True,
                    'description': 'Hazard ID',
                    'type': str
                },
                {
                    'id': 'fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in mapping dataset',
                    'type': str
                },
                {
                    'id': 'use_liquefaction',
                    'required': False,
                    'description': 'Use liquefaction',
                    'type': bool
                },
                {
                    'id': 'liquefaction_geology_dataset_id',
                    'required': False,
                    'description': 'Liquefaction geology/susceptibility dataset id. '
                                   'If not provided, liquefaction will be ignored',
                    'type': str
                },
                {
                    'id': 'liquefaction_fragility_key',
                    'required': False,
                    'description':
                    'Fragility key to use in liquefaction mapping dataset',
                    'type': str
                },
                {
                    'id': 'use_hazard_uncertainty',
                    'required': False,
                    'description': 'Use hazard uncertainty',
                    'type': bool
                },
                {
                    'id': 'num_cpu',
                    'required': False,
                    'description':
                    'If using parallel execution, the number of cpus to request',
                    'type': int
                },
            ],
            'input_datasets': [{
                'id': 'water_facilities',
                'required': True,
                'description': 'Water Facility Inventory',
                'type': ['ergo:waterFacilityTopo'],
            }, {
                'id': 'dfr3_mapping_set',
                'required': True,
                'description': 'DFR3 Mapping Set Object',
                'type': ['incore:dfr3MappingSet'],
            }],
            'output_datasets': [{
                'id': 'result',
                'parent_type': 'water_facilities',
                'description': 'A csv file with limit state probabilities and damage states '
                               'for each water facility',
                'type': 'ergo:waterFacilityDamageVer6'
            }, {
                'id': 'metadata',
                'parent_type': 'water_facilities',
                'description': 'additional metadata in json file about applied hazard value and '
                               'fragility',
                'type': 'incore:waterFacilityDamageSupplement'
            }]
        }
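
A minimal usage sketch for WaterFacilityDamage, following the standard pyincore client workflow; the dataset, mapping, and hazard ids below are hypothetical placeholders, and an authenticated IncoreClient is assumed:

from pyincore import IncoreClient, FragilityService, MappingSet

client = IncoreClient()

wf_dmg = WaterFacilityDamage(client)
wf_dmg.load_remote_input_dataset("water_facilities", "<facility-dataset-id>")  # hypothetical id

# Attach the DFR3 mapping set that pairs facilities with fragility curves
fragility_service = FragilityService(client)
mapping_set = MappingSet(fragility_service.get_mapping("<mapping-id>"))  # hypothetical id
wf_dmg.set_input_dataset("dfr3_mapping_set", mapping_set)

wf_dmg.set_parameter("result_name", "wf_earthquake_dmg")
wf_dmg.set_parameter("hazard_type", "earthquake")
wf_dmg.set_parameter("hazard_id", "<hazard-id>")  # hypothetical id
wf_dmg.set_parameter("num_cpu", 4)
wf_dmg.run_analysis()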
Example #2
class PipelineDamageRepairRate(BaseAnalysis):
    """Computes pipeline damage for a hazard.

    Args:
        incore_client: Service client with authentication info

    """
    def __init__(self, incore_client):
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(PipelineDamageRepairRate, self).__init__(incore_client)

    def run(self):
        """Execute pipeline damage analysis """
        # Pipeline dataset
        pipeline_dataset = self.get_input_dataset(
            "pipeline").get_inventory_reader()

        # Get hazard type
        hazard_type = self.get_parameter("hazard_type")

        # Get hazard input
        hazard_dataset_id = self.get_parameter("hazard_id")
        user_defined_cpu = 1

        if self.get_parameter("num_cpu") is not None and self.get_parameter(
                "num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        dataset_size = len(pipeline_dataset)
        num_workers = AnalysisUtil.determine_parallelism_locally(
            self, dataset_size, user_defined_cpu)

        avg_bulk_input_size = int(dataset_size / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(pipeline_dataset)
        while count < len(inventory_list):
            inventory_args.append(inventory_list[count:count +
                                                 avg_bulk_input_size])
            count += avg_bulk_input_size

        (ds_results, damage_results) = self.pipeline_damage_concurrent_future(
            self.pipeline_damage_analysis_bulk_input, num_workers,
            inventory_args, repeat(hazard_type), repeat(hazard_dataset_id))

        self.set_result_csv_data("result",
                                 ds_results,
                                 name=self.get_parameter("result_name"))
        self.set_result_json_data("metadata",
                                  damage_results,
                                  name=self.get_parameter("result_name") +
                                  "_additional_info")

        return True

    def pipeline_damage_concurrent_future(self, function_name, num_workers,
                                          *args):
        """Utilizes concurrent.future module.

        Args:
            function_name (function): The function to be parallelized.
            num_workers (int): Maximum number of workers in parallelization.
            *args: All the arguments in order to pass into parameter function_name.

        Returns:
            list: A list of ordered dictionaries with pipeline damage values and other data/metadata.

        """
        output_ds = []
        output_dmg = []
        with concurrent.futures.ProcessPoolExecutor(
                max_workers=num_workers) as executor:
            for ret1, ret2 in executor.map(function_name, *args):
                output_ds.extend(ret1)
                output_dmg.extend(ret2)

        return output_ds, output_dmg

    def pipeline_damage_analysis_bulk_input(self, pipelines, hazard_type,
                                            hazard_dataset_id):
        """Run pipeline damage analysis for multiple pipelines.

        Args:
            pipelines (list): Multiple pipelines from the pipeline dataset.
            hazard_type (str): Hazard type
            hazard_dataset_id (str): An id of the hazard exposure.

        Returns:
            ds_results (list): A list of ordered dictionaries with pipeline damage values and other data/metadata.
            damage_results (list): A list of ordered dictionaries with pipeline damage metadata.
        """
        # Get Fragility key
        fragility_key = self.get_parameter("fragility_key")
        if fragility_key is None:
            fragility_key = PipelineUtil.DEFAULT_TSU_FRAGILITY_KEY if hazard_type == 'tsunami' else \
                PipelineUtil.DEFAULT_EQ_FRAGILITY_KEY
            self.set_parameter("fragility_key", fragility_key)

        # get fragility set
        fragility_sets = self.fragilitysvc.match_inventory(
            self.get_input_dataset("dfr3_mapping_set"), pipelines,
            fragility_key)

        # Get Liquefaction Fragility Key
        liquefaction_fragility_key = self.get_parameter(
            "liquefaction_fragility_key")
        if hazard_type == "earthquake" and liquefaction_fragility_key is None:
            liquefaction_fragility_key = PipelineUtil.LIQ_FRAGILITY_KEY

        # Liquefaction
        use_liquefaction = False
        if hazard_type == "earthquake" and self.get_parameter(
                "use_liquefaction") is not None:
            use_liquefaction = self.get_parameter("use_liquefaction")

        # Get geology dataset id
        geology_dataset_id = self.get_parameter(
            "liquefaction_geology_dataset_id")
        fragility_sets_liq = None
        if geology_dataset_id is not None:
            fragility_sets_liq = self.fragilitysvc.match_inventory(
                self.get_input_dataset("dfr3_mapping_set"), pipelines,
                liquefaction_fragility_key)

        values_payload = []
        values_payload_liq = []  # for liquefaction if used
        unmapped_pipelines = []
        mapped_pipelines = []
        for pipeline in pipelines:
            # if a matching fragility is found for that pipeline
            if pipeline["id"] in fragility_sets.keys():
                fragility_set = fragility_sets[pipeline["id"]]
                location = GeoUtil.get_location(pipeline)
                loc = str(location.y) + "," + str(location.x)
                demands = fragility_set.demand_types
                units = fragility_set.demand_units
                value = {"demands": demands, "units": units, "loc": loc}
                values_payload.append(value)
                mapped_pipelines.append(pipeline)

                # Check if liquefaction is applicable
                if use_liquefaction and \
                        geology_dataset_id is not None and \
                        fragility_sets_liq is not None and \
                        pipeline["id"] in fragility_sets_liq:
                    fragility_set_liq = fragility_sets_liq[pipeline["id"]]
                    demands_liq = fragility_set_liq.demand_types
                    units_liq = fragility_set_liq.demand_units
                    value_liq = {
                        "demands": demands_liq,
                        "units": units_liq,
                        "loc": loc
                    }
                    values_payload_liq.append(value_liq)
            else:
                unmapped_pipelines.append(pipeline)
        del pipelines

        if hazard_type == 'earthquake':
            hazard_resp = self.hazardsvc.post_earthquake_hazard_values(
                hazard_dataset_id, values_payload)
        elif hazard_type == 'tsunami':
            hazard_resp = self.hazardsvc.post_tsunami_hazard_values(
                hazard_dataset_id, values_payload)
        else:
            raise ValueError(
                "The provided hazard type is not supported yet by this analysis"
            )

        # Check if liquefaction is applicable
        if use_liquefaction is True and \
                fragility_sets_liq is not None and \
                geology_dataset_id is not None:
            liquefaction_resp = self.hazardsvc.post_liquefaction_values(
                hazard_dataset_id, geology_dataset_id, values_payload_liq)

        # calculate LS and DS
        ds_results = []
        damage_results = []
        for i, pipeline in enumerate(mapped_pipelines):
            # default
            pgv_repairs = None
            pgd_repairs = 0.0
            total_repair_rate = None
            break_rate = None
            leak_rate = None
            failure_probability = None
            num_pgv_repairs = None
            num_pgd_repairs = 0.0
            num_repairs = None

            liq_hazard_vals = None
            liq_demand_types = None
            liq_demand_units = None
            liquefaction_prob = None

            ds_result = dict()
            damage_result = dict()
            ds_result['guid'] = pipeline['properties']['guid']
            damage_result['guid'] = pipeline['properties']['guid']

            fragility_set = fragility_sets[pipeline["id"]]
            # TODO assume there is only one curve
            fragility_curve = fragility_set.fragility_curves[0]

            hazard_vals = AnalysisUtil.update_precision_of_lists(
                hazard_resp[i]["hazardValues"])
            demand_types = hazard_resp[i]["demands"]
            demand_units = hazard_resp[i]["units"]

            hval_dict = dict()
            for j, d in enumerate(fragility_set.demand_types):
                hval_dict[d] = hazard_vals[j]

            if not AnalysisUtil.do_hazard_values_have_errors(
                    hazard_resp[i]["hazardValues"]):
                pipeline_args = fragility_set.construct_expression_args_from_inventory(
                    pipeline)
                pgv_repairs = \
                    fragility_curve.solve_curve_expression(
                        hval_dict, fragility_set.curve_parameters, **pipeline_args)
                # Convert PGV repairs to SI units
                pgv_repairs = PipelineUtil.convert_result_unit(
                    fragility_curve.return_type["unit"], pgv_repairs)

                length = PipelineUtil.get_pipe_length(pipeline)

                # Number of PGV repairs
                num_pgv_repairs = pgv_repairs * length

                # Check if liquefaction is applicable
                if use_liquefaction is True \
                        and fragility_sets_liq is not None \
                        and geology_dataset_id is not None \
                        and liquefaction_resp is not None:
                    fragility_set_liq = fragility_sets_liq[pipeline["id"]]

                    # TODO assume there is only one curve
                    liq_fragility_curve = fragility_set_liq.fragility_curves[0]

                    liq_hazard_vals = AnalysisUtil.update_precision_of_lists(
                        liquefaction_resp[i]["pgdValues"])
                    liq_demand_types = liquefaction_resp[i]["demands"]
                    liq_demand_units = liquefaction_resp[i]["units"]
                    liquefaction_prob = liquefaction_resp[i]['liqProbability']
                    liq_hval_dict = dict()
                    for j, d in enumerate(liquefaction_resp[i]["demands"]):
                        liq_hval_dict[d] = liq_hazard_vals[j]

                    # !important! remove liqProbability and pass in "diameter";
                    # no fragility actually uses liqProbability
                    pipeline_args = fragility_set_liq.construct_expression_args_from_inventory(
                        pipeline)
                    pgd_repairs = \
                        liq_fragility_curve.solve_curve_expression(
                            liq_hval_dict, fragility_set_liq.curve_parameters, **pipeline_args)
                    # Convert PGD repairs to SI units
                    pgd_repairs = PipelineUtil.convert_result_unit(
                        liq_fragility_curve.return_type["unit"], pgd_repairs)
                    num_pgd_repairs = pgd_repairs * length

                    # record results
                    if 'pipetype' in pipeline['properties']:
                        damage_result['pipeclass'] = pipeline['properties'][
                            'pipetype']
                    elif 'pipelinesc' in pipeline['properties']:
                        damage_result['pipeclass'] = pipeline['properties'][
                            'pipelinesc']
                    else:
                        damage_result['pipeclass'] = ""

                break_rate = 0.2 * pgv_repairs + 0.8 * pgd_repairs
                leak_rate = 0.8 * pgv_repairs + 0.2 * pgd_repairs
                total_repair_rate = pgd_repairs + pgv_repairs
                failure_probability = 1 - math.exp(-1.0 * break_rate * length)
                num_repairs = num_pgd_repairs + num_pgv_repairs

            ds_result['pgvrepairs'] = pgv_repairs
            ds_result['pgdrepairs'] = pgd_repairs
            ds_result['repairspkm'] = total_repair_rate
            ds_result['breakrate'] = break_rate
            ds_result['leakrate'] = leak_rate
            ds_result['failprob'] = failure_probability
            ds_result['numpgvrpr'] = num_pgv_repairs
            ds_result['numpgdrpr'] = num_pgd_repairs
            ds_result['numrepairs'] = num_repairs
            ds_result[
                'haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(
                    hazard_vals, hazard_type)

            damage_result['fragility_id'] = fragility_set.id
            damage_result['demandtypes'] = demand_types
            damage_result['demandunits'] = demand_units
            damage_result['hazardtype'] = hazard_type
            damage_result['hazardval'] = hazard_vals

            # Check if liquefaction is applicable
            if use_liquefaction is True \
                    and fragility_sets_liq is not None \
                    and geology_dataset_id is not None:
                damage_result['liq_fragility_id'] = fragility_sets_liq[
                    pipeline["id"]].id
                damage_result['liqdemandtypes'] = liq_demand_types
                damage_result['liqdemandunits'] = liq_demand_units
                damage_result['liqhazval'] = liq_hazard_vals
                damage_result['liqprobability'] = liquefaction_prob
            else:
                damage_result['liq_fragility_id'] = None
                damage_result['liqdemandtypes'] = None
                damage_result['liqdemandunits'] = None
                damage_result['liqhazval'] = None
                damage_result['liqprobability'] = None

            ds_results.append(ds_result)
            damage_results.append(damage_result)

        # pipelines that do not have matched mappings
        for pipeline in unmapped_pipelines:
            ds_result = dict()
            ds_result['guid'] = pipeline['properties']['guid']

            damage_result = dict()
            damage_result['guid'] = pipeline['properties']['guid']
            if 'pipetype' in pipeline['properties']:
                damage_result['pipeclass'] = pipeline['properties']['pipetype']
            elif 'pipelinesc' in pipeline['properties']:
                damage_result['pipeclass'] = pipeline['properties'][
                    'pipelinesc']
            else:
                damage_result['pipeclass'] = ""

            damage_result['fragility_id'] = None
            damage_result['demandtypes'] = None
            damage_result['demandunits'] = None
            damage_result['hazardtype'] = None
            damage_result['hazardval'] = None
            damage_result['liq_fragility_id'] = None
            damage_result['liqdemandtypes'] = None
            damage_result['liqdemandunits'] = None
            damage_result['liqhazval'] = None
            damage_result['liqprobability'] = None

            ds_results.append(ds_result)
            damage_results.append(damage_result)

        return ds_results, damage_results

    def get_spec(self):
        """Get specifications of the pipeline damage analysis.

        Returns:
            obj: A JSON object of specifications of the pipeline damage analysis.

        """
        return {
            'name': 'pipeline-damage',
            'description': 'buried pipeline damage analysis',
            'input_parameters': [{
                'id': 'result_name',
                'required': True,
                'description': 'result dataset name',
                'type': str
            }, {
                'id': 'hazard_type',
                'required': True,
                'description': 'Hazard Type (e.g. earthquake)',
                'type': str
            }, {
                'id': 'hazard_id',
                'required': True,
                'description': 'Hazard ID',
                'type': str
            }, {
                'id': 'fragility_key',
                'required': False,
                'description': 'Fragility key to use in mapping dataset',
                'type': str
            }, {
                'id': 'use_liquefaction',
                'required': False,
                'description': 'Use liquefaction',
                'type': bool
            }, {
                'id': 'liquefaction_fragility_key',
                'required': False,
                'description':
                'Fragility key to use in liquefaction mapping dataset',
                'type': str
            }, {
                'id': 'num_cpu',
                'required': False,
                'description':
                'If using parallel execution, the number of cpus to request',
                'type': int
            }, {
                'id': 'liquefaction_geology_dataset_id',
                'required': False,
                'description': 'Geology dataset id',
                'type': str,
            }],
            'input_datasets': [{
                'id': 'pipeline',
                'required': True,
                'description': 'Pipeline Inventory',
                'type': ['ergo:buriedPipelineTopology', 'ergo:pipeline'],
            }, {
                'id': 'dfr3_mapping_set',
                'required': True,
                'description': 'DFR3 Mapping Set Object',
                'type': ['incore:dfr3MappingSet'],
            }],
            'output_datasets': [{
                'id': 'result',
                'parent_type': 'pipeline',
                'type': 'ergo:pipelineDamageVer3'
            }, {
                'id': 'metadata',
                'parent_type': 'pipeline',
                'description': 'additional metadata in json file about applied hazard value and '
                               'fragility',
                'type': 'incore:pipelineDamageSupplement'
            }]
        }
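
PipelineDamageRepairRate is wired the same way; this sketch additionally enables liquefaction, which only takes effect when a geology dataset id is supplied (all ids are hypothetical placeholders):

from pyincore import IncoreClient, FragilityService, MappingSet

client = IncoreClient()

pipeline_dmg = PipelineDamageRepairRate(client)
pipeline_dmg.load_remote_input_dataset("pipeline", "<pipeline-dataset-id>")  # hypothetical id

fragility_service = FragilityService(client)
mapping_set = MappingSet(fragility_service.get_mapping("<mapping-id>"))  # hypothetical id
pipeline_dmg.set_input_dataset("dfr3_mapping_set", mapping_set)

pipeline_dmg.set_parameter("result_name", "pipeline_eq_dmg")
pipeline_dmg.set_parameter("hazard_type", "earthquake")
pipeline_dmg.set_parameter("hazard_id", "<hazard-id>")  # hypothetical id
# Both parameters below are needed for liquefaction to be applied
pipeline_dmg.set_parameter("use_liquefaction", True)
pipeline_dmg.set_parameter("liquefaction_geology_dataset_id", "<geology-dataset-id>")  # hypothetical id
pipeline_dmg.run_analysis()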
Example #3
class RoadDamage(BaseAnalysis):
    """Road Damage Analysis calculates the probability of road damage based on an earthquake or tsunami hazard.

    Args:
        incore_client (IncoreClient): Service authentication.

    """
    DEFAULT_FRAGILITY_KEY = "Non-Retrofit Fragility ID Code"

    def __init__(self, incore_client):
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(RoadDamage, self).__init__(incore_client)

    def run(self):
        """Executes road damage analysis."""
        # Road dataset
        road_set = self.get_input_dataset("roads").get_inventory_reader()

        # Get Fragility key
        fragility_key = self.get_parameter("fragility_key")
        if fragility_key is None:
            fragility_key = self.DEFAULT_FRAGILITY_KEY

        # Get hazard input
        hazard_dataset_id = self.get_parameter("hazard_id")

        # Get hazard type
        hazard_type = self.get_parameter("hazard_type")

        # Liquefaction
        use_liquefaction = False
        if self.get_parameter("use_liquefaction") is not None:
            use_liquefaction = self.get_parameter("use_liquefaction")

        # Get geology dataset for liquefaction
        geology_dataset_id = None
        if self.get_parameter("liquefaction_geology_dataset_id") is not None:
            geology_dataset_id = self.get_parameter("liquefaction_geology_dataset_id")

        # Hazard Uncertainty
        use_hazard_uncertainty = False
        if self.get_parameter("use_hazard_uncertainty") is not None:
            use_hazard_uncertainty = self.get_parameter("use_hazard_uncertainty")

        user_defined_cpu = 1
        if self.get_parameter("num_cpu") is not None and self.get_parameter("num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(self, len(road_set), user_defined_cpu)

        avg_bulk_input_size = int(len(road_set) / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(road_set)
        while count < len(inventory_list):
            inventory_args.append(inventory_list[count:count + avg_bulk_input_size])
            count += avg_bulk_input_size

        (ds_results, damage_results) = self.road_damage_concurrent_future(self.road_damage_analysis_bulk_input,
                                                                          num_workers,
                                                                          inventory_args,
                                                                          repeat(hazard_type),
                                                                          repeat(hazard_dataset_id),
                                                                          repeat(use_hazard_uncertainty),
                                                                          repeat(geology_dataset_id),
                                                                          repeat(fragility_key),
                                                                          repeat(use_liquefaction))

        self.set_result_csv_data("result", ds_results, name=self.get_parameter("result_name"))
        self.set_result_json_data("metadata",
                                  damage_results,
                                  name=self.get_parameter("result_name") + "_additional_info")

        return True

    def road_damage_concurrent_future(self, function_name, num_workers, *args):
        """Utilizes concurrent.future module.

        Args:
            function_name (function): The function to be parallelized.
            num_workers (int): Number of workers in parallelization.
            *args: All the arguments in order to pass into parameter function_name.

        Returns:
            output_ds: A list of ordered dictionaries with road damage values
            output_dmg: A list of ordered dictionaries with other road data/metadata.

        """

        output_ds = []
        output_dmg = []
        with concurrent.futures.ProcessPoolExecutor(max_workers=num_workers) as executor:
            for ret1, ret2 in executor.map(function_name, *args):
                output_ds.extend(ret1)
                output_dmg.extend(ret2)

        return output_ds, output_dmg

    def road_damage_analysis_bulk_input(self, roads, hazard_type, hazard_dataset_id, use_hazard_uncertainty,
                                        geology_dataset_id, fragility_key, use_liquefaction):
        """Run analysis for multiple roads.

        Args:
            roads (list): Multiple roads from input inventory set.
            hazard_type (str): A hazard type of the hazard exposure (earthquake, tsunami, or hurricane).
            hazard_dataset_id (str): An id of the hazard exposure.
            use_hazard_uncertainty (bool): Flag indicating whether to use hazard uncertainty.
            geology_dataset_id (str): An id of the geology for use in liquefaction.
            fragility_key (str): Fragility key describing the type of fragility.
            use_liquefaction (bool): True to use liquefaction information to modify the damage,
                False otherwise.

        Returns:
            list: A list of ordered dictionaries with road damage values and other data/metadata.
            list: A list of ordered dictionaries with other road data/metadata.

        """
        fragility_sets = self.fragilitysvc.match_inventory(self.get_input_dataset("dfr3_mapping_set"), roads,
                                                           fragility_key)

        values_payload = []
        mapped_roads = []
        unmapped_roads = []
        pgd_flag = True  # for liquefaction
        liquefaction_resp = None

        for road in roads:
            if road["id"] in fragility_sets.keys():
                fragility_set = fragility_sets[road["id"]]
                location = GeoUtil.get_location(road)
                loc = str(location.y) + "," + str(location.x)
                demands = fragility_set.demand_types
                # liquefaction PGD values only apply when every demand type is pgd
                if any(demand.lower() != 'pgd' for demand in demands):
                    pgd_flag = False
                units = fragility_set.demand_units
                value = {
                    "demands": demands,
                    "units": units,
                    "loc": loc
                }
                values_payload.append(value)
                mapped_roads.append(road)
            else:
                unmapped_roads.append(road)
        del roads

        # get hazard and liquefaction values
        if hazard_type == 'earthquake':
            hazard_resp = self.hazardsvc.post_earthquake_hazard_values(hazard_dataset_id, values_payload)

            if pgd_flag and use_liquefaction and geology_dataset_id is not None:
                liquefaction_resp = self.hazardsvc.post_liquefaction_values(hazard_dataset_id, geology_dataset_id,
                                                                            values_payload)

        elif hazard_type == 'tsunami':
            hazard_resp = self.hazardsvc.post_tsunami_hazard_values(hazard_dataset_id, values_payload)
        elif hazard_type == 'hurricane':
            hazard_resp = self.hazardsvc.post_hurricane_hazard_values(hazard_dataset_id, values_payload)
        else:
            raise ValueError("The provided hazard type is not supported yet by this analysis")

        # calculate LS and DS
        ds_results = []
        damage_results = []
        for i, road in enumerate(mapped_roads):
            dmg_probability = dict()
            dmg_interval = dict()
            demand_types_liq = None
            demand_units_liq = None
            liq_hazard_vals = None
            liquefaction_prob = None
            selected_fragility_set = fragility_sets[road["id"]]
            hazard_std_dev = 0.0
            if use_hazard_uncertainty:
                raise ValueError("Uncertainty Not Implemented Yet.")

            if isinstance(selected_fragility_set.fragility_curves[0], DFR3Curve):
                hazard_vals = AnalysisUtil.update_precision_of_lists(hazard_resp[i]["hazardValues"])
                demand_types = hazard_resp[i]["demands"]
                demand_units = hazard_resp[i]["units"]
                hval_dict = dict()
                for j, d in enumerate(selected_fragility_set.demand_types):
                    hval_dict[d] = hazard_vals[j]

                if not AnalysisUtil.do_hazard_values_have_errors(hazard_resp[i]["hazardValues"]):
                    road_args = selected_fragility_set.construct_expression_args_from_inventory(road)
                    dmg_probability = selected_fragility_set.calculate_limit_state(
                        hval_dict, inventory_type='road', **road_args)

                    # if there is liquefaction, overwrite the hazardval with liquefaction value
                    # recalculate dmg_probability and dmg_interval
                    if liquefaction_resp is not None and len(liquefaction_resp) > 0:
                        liq_hazard_vals = AnalysisUtil.update_precision_of_lists(liquefaction_resp[i]["pgdValues"])
                        demand_types_liq = liquefaction_resp[i]['demands']
                        demand_units_liq = liquefaction_resp[i]['units']
                        liquefaction_prob = liquefaction_resp[i]['liqProbability']
                        liq_hval_dict = dict()
                        for j, d in enumerate(liquefaction_resp[i]["demands"]):
                            liq_hval_dict[d] = liq_hazard_vals[j]
                        dmg_probability = selected_fragility_set.calculate_limit_state(
                            liq_hval_dict,
                            inventory_type='road',
                            **road_args)

                    dmg_interval = selected_fragility_set.calculate_damage_interval(dmg_probability,
                                                                                    hazard_type=hazard_type,
                                                                                    inventory_type="road")
            else:
                raise ValueError("One of the fragilities is in deprecated format. This should not happen. If you are "
                                 "seeing this please report the issue.")

            ds_result = dict()
            ds_result['guid'] = road['properties']['guid']
            ds_result.update(dmg_probability)
            ds_result.update(dmg_interval)
            ds_result['haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(hazard_vals, hazard_type)

            damage_result = dict()
            damage_result['guid'] = road['properties']['guid']
            damage_result['fragility_id'] = selected_fragility_set.id
            damage_result['demandtypes'] = demand_types
            damage_result['demandunits'] = demand_units
            damage_result['hazardtype'] = hazard_type
            damage_result['hazardvals'] = hazard_vals
            damage_result['liqdemandtypes'] = demand_types_liq
            damage_result['liqdemandunits'] = demand_units_liq
            damage_result['liqhazvals'] = liq_hazard_vals
            damage_result['liqprobability'] = liquefaction_prob

            ds_results.append(ds_result)
            damage_results.append(damage_result)

        for road in unmapped_roads:
            ds_result = dict()
            damage_result = dict()

            ds_result['guid'] = road['properties']['guid']

            damage_result['guid'] = road['properties']['guid']
            damage_result['fragility_id'] = None
            damage_result['demandtypes'] = None
            damage_result['demandunits'] = None
            damage_result['hazardtype'] = None
            damage_result['hazardvals'] = None
            damage_result['liqdemandtypes'] = None
            damage_result['liqdemandunits'] = None
            damage_result['liqhazvals'] = None
            damage_result['liqprobability'] = None

            ds_results.append(ds_result)
            damage_results.append(damage_result)

        return ds_results, damage_results

    def get_spec(self):
        """Get specifications of the road damage analysis.

        Returns:
            obj: A JSON object of specifications of the road damage analysis.

        """

        return {
            'name': 'road-damage',
            'description': 'road damage analysis',
            'input_parameters': [
                {
                    'id': 'result_name',
                    'required': True,
                    'description': 'result dataset name',
                    'type': str
                },
                {
                    'id': 'hazard_type',
                    'required': True,
                    'description': 'Hazard Type (e.g. earthquake)',
                    'type': str
                },
                {
                    'id': 'hazard_id',
                    'required': True,
                    'description': 'Hazard ID',
                    'type': str
                },
                {
                    'id': 'fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in mapping dataset',
                    'type': str
                },
                {
                    'id': 'use_liquefaction',
                    'required': False,
                    'description': 'Use liquefaction',
                    'type': bool
                },
                {
                    'id': 'liquefaction_geology_dataset_id',
                    'required': False,
                    'description': 'Liquefaction geology/susceptibility dataset id. '
                                   'If not provided, liquefaction will be ignored',
                    'type': str
                },
                {
                    'id': 'use_hazard_uncertainty',
                    'required': False,
                    'description': 'Use hazard uncertainty',
                    'type': bool
                },
                {
                    'id': 'num_cpu',
                    'required': False,
                    'description': 'If using parallel execution, the number of cpus to request',
                    'type': int
                },
            ],
            'input_datasets': [
                {
                    'id': 'roads',
                    'required': True,
                    'description': 'Road Inventory',
                    'type': ['ergo:roadLinkTopo', 'incore:roads', 'ergo:roadLinkTopoVer2']
                },
                {
                    'id': 'dfr3_mapping_set',
                    'required': True,
                    'description': 'DFR3 Mapping Set Object',
                    'type': ['incore:dfr3MappingSet'],
                }
            ],
            'output_datasets': [
                {
                    'id': 'result',
                    'parent_type': 'roads',
                    'description': 'CSV file of road structural damage',
                    'type': 'ergo:roadDamageVer3'
                },
                {
                    'id': 'metadata',
                    'parent_type': 'roads',
                    'description': 'additional metadata in json file about applied hazard value and '
                                   'fragility',
                    'type': 'incore:roadDamageSupplement'
                }
            ]
        }
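
A sketch for RoadDamage that also shows how the two outputs declared in get_spec() can be read back after the run; ids are hypothetical placeholders:

from pyincore import IncoreClient, FragilityService, MappingSet

client = IncoreClient()

road_dmg = RoadDamage(client)
road_dmg.load_remote_input_dataset("roads", "<road-dataset-id>")  # hypothetical id

fragility_service = FragilityService(client)
mapping_set = MappingSet(fragility_service.get_mapping("<mapping-id>"))  # hypothetical id
road_dmg.set_input_dataset("dfr3_mapping_set", mapping_set)

road_dmg.set_parameter("result_name", "road_tsu_dmg")
road_dmg.set_parameter("hazard_type", "tsunami")
road_dmg.set_parameter("hazard_id", "<hazard-id>")  # hypothetical id
road_dmg.run_analysis()

# "result" is the CSV dataset and "metadata" the JSON supplement from get_spec()
damage_df = road_dmg.get_output_dataset("result").get_dataframe_from_csv()
print(damage_df.head())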
Example #4
class BridgeDamage(BaseAnalysis):
    """Computes bridge structural damage for earthquake, tsunami, tornado, and hurricane hazards.

    Args:
        incore_client (IncoreClient): Service authentication.

    """
    def __init__(self, incore_client):
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(BridgeDamage, self).__init__(incore_client)

    def run(self):
        """Executes bridge damage analysis."""
        # Bridge dataset
        bridge_set = self.get_input_dataset("bridges").get_inventory_reader()

        # Get hazard input
        hazard_type = self.get_parameter("hazard_type")
        hazard_dataset_id = self.get_parameter("hazard_id")
        user_defined_cpu = 1

        if self.get_parameter("num_cpu") is not None and self.get_parameter(
                "num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(
            self, len(bridge_set), user_defined_cpu)

        avg_bulk_input_size = int(len(bridge_set) / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(bridge_set)
        while count < len(inventory_list):
            inventory_args.append(inventory_list[count:count +
                                                 avg_bulk_input_size])
            count += avg_bulk_input_size

        (ds_results, damage_results) = self.bridge_damage_concurrent_future(
            self.bridge_damage_analysis_bulk_input, num_workers,
            inventory_args, repeat(hazard_type), repeat(hazard_dataset_id))

        self.set_result_csv_data("result",
                                 ds_results,
                                 name=self.get_parameter("result_name"))
        self.set_result_json_data("metadata",
                                  damage_results,
                                  name=self.get_parameter("result_name") +
                                  "_additional_info")

        return True

    def bridge_damage_concurrent_future(self, function_name, num_workers,
                                        *args):
        """Utilizes concurrent.future module.

        Args:
            function_name (function): The function to be parallelized.
            num_workers (int): Maximum number of workers in parallelization.
            *args: All the arguments in order to pass into parameter function_name.

        Returns:
            list: A list of ordered dictionaries with bridge damage values and other data/metadata.

        """
        output_ds = []
        output_dmg = []
        with concurrent.futures.ProcessPoolExecutor(
                max_workers=num_workers) as executor:
            for ret1, ret2 in executor.map(function_name, *args):
                output_ds.extend(ret1)
                output_dmg.extend(ret2)

        return output_ds, output_dmg

    def bridge_damage_analysis_bulk_input(self, bridges, hazard_type,
                                          hazard_dataset_id):
        """Run analysis for multiple bridges.

        Args:
            bridges (list): Multiple bridges from input inventory set.
            hazard_type (str): Hazard type, either earthquake, tornado, tsunami, hurricane, or flood.
            hazard_dataset_id (str): An id of the hazard exposure.

        Returns:
            list: A list of ordered dictionaries with bridge damage values and other data/metadata.

        """
        # Get Fragility key
        fragility_key = self.get_parameter("fragility_key")
        if fragility_key is None:
            fragility_key = BridgeUtil.DEFAULT_TSUNAMI_HMAX_FRAGILITY_KEY if hazard_type == 'tsunami' else \
                BridgeUtil.DEFAULT_FRAGILITY_KEY
            self.set_parameter("fragility_key", fragility_key)

        # Hazard Uncertainty
        use_hazard_uncertainty = False
        if hazard_type == "earthquake" and self.get_parameter(
                "use_hazard_uncertainty") is not None:
            use_hazard_uncertainty = self.get_parameter(
                "use_hazard_uncertainty")

        # Liquefaction
        use_liquefaction = False
        if hazard_type == "earthquake" and self.get_parameter(
                "use_liquefaction") is not None:
            use_liquefaction = self.get_parameter("use_liquefaction")

        fragility_set = self.fragilitysvc.match_inventory(
            self.get_input_dataset("dfr3_mapping_set"), bridges, fragility_key)

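        # Build the hazard-service payload: one entry per mapped bridge with its
        # demand types, demand units, and "lat,lon" location string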
        values_payload = []
        unmapped_bridges = []
        mapped_bridges = []
        for b in bridges:
            bridge_id = b["id"]
            if bridge_id in fragility_set:
                location = GeoUtil.get_location(b)
                loc = str(location.y) + "," + str(location.x)

                demands = fragility_set[bridge_id].demand_types
                units = fragility_set[bridge_id].demand_units
                value = {"demands": demands, "units": units, "loc": loc}
                values_payload.append(value)
                mapped_bridges.append(b)

            else:
                unmapped_bridges.append(b)

        # not needed anymore as they are already split into mapped and unmapped
        del bridges

        if hazard_type == 'earthquake':
            hazard_vals = self.hazardsvc.post_earthquake_hazard_values(
                hazard_dataset_id, values_payload)
        elif hazard_type == 'tornado':
            hazard_vals = self.hazardsvc.post_tornado_hazard_values(
                hazard_dataset_id, values_payload)
        elif hazard_type == 'tsunami':
            hazard_vals = self.hazardsvc.post_tsunami_hazard_values(
                hazard_dataset_id, values_payload)
        elif hazard_type == 'hurricane':
            hazard_vals = self.hazardsvc.post_hurricane_hazard_values(
                hazard_dataset_id, values_payload)
        elif hazard_type == 'flood':
            hazard_vals = self.hazardsvc.post_flood_hazard_values(
                hazard_dataset_id, values_payload)
        else:
            raise ValueError(
                "The provided hazard type is not supported yet by this analysis"
            )

        ds_results = []
        damage_results = []

        for i, bridge in enumerate(mapped_bridges):
            ds_result = dict()
            damage_result = dict()
            dmg_probability = dict()
            dmg_intervals = dict()
            selected_fragility_set = fragility_set[bridge["id"]]

            if isinstance(selected_fragility_set.fragility_curves[0],
                          DFR3Curve):
                # Supports multiple demand types in same fragility
                hazard_val = AnalysisUtil.update_precision_of_lists(
                    hazard_vals[i]["hazardValues"])
                input_demand_types = hazard_vals[i]["demands"]
                input_demand_units = hazard_vals[i]["units"]

                hval_dict = dict()
                for j, d in enumerate(selected_fragility_set.demand_types):
                    hval_dict[d] = hazard_val[j]

                if not AnalysisUtil.do_hazard_values_have_errors(
                        hazard_vals[i]["hazardValues"]):
                    bridge_args = selected_fragility_set.construct_expression_args_from_inventory(
                        bridge)
                    dmg_probability = \
                        selected_fragility_set.calculate_limit_state(hval_dict,
                                                                     inventory_type="bridge",
                                                                     **bridge_args)
                    dmg_intervals = selected_fragility_set.calculate_damage_interval(
                        dmg_probability,
                        hazard_type=hazard_type,
                        inventory_type="bridge")
            else:
                raise ValueError(
                    "One of the fragilities is in deprecated format. This should not happen. If you are "
                    "seeing this please report the issue.")

            retrofit_cost = BridgeUtil.get_retrofit_cost(fragility_key)
            retrofit_type = BridgeUtil.get_retrofit_type(fragility_key)

            ds_result['guid'] = bridge['properties']['guid']
            ds_result.update(dmg_probability)
            ds_result.update(dmg_intervals)
            ds_result['haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(
                hazard_val, hazard_type)

            damage_result['guid'] = bridge['properties']['guid']
            damage_result['fragility_id'] = selected_fragility_set.id
            damage_result["retrofit"] = retrofit_type
            damage_result["retrocost"] = retrofit_cost
            damage_result["demandtypes"] = input_demand_types
            damage_result["demandunits"] = input_demand_units
            damage_result["hazardtype"] = hazard_type
            damage_result["hazardval"] = hazard_val

            # add spans to bridge output so mean damage calculation can use that info
            if "spans" in bridge["properties"] and bridge["properties"][
                    "spans"] is not None:
                if isinstance(bridge["properties"]["spans"],
                              str) and bridge["properties"]["spans"].isdigit():
                    damage_result['spans'] = int(bridge["properties"]["spans"])
                elif isinstance(bridge["properties"]["spans"], int):
                    damage_result['spans'] = bridge["properties"]["spans"]
            elif "SPANS" in bridge["properties"] and bridge["properties"][
                    "SPANS"] is not None:
                if isinstance(bridge["properties"]["SPANS"],
                              str) and bridge["properties"]["SPANS"].isdigit():
                    damage_result['SPANS'] = int(bridge["properties"]["SPANS"])
                elif isinstance(bridge["properties"]["SPANS"], int):
                    damage_result['SPANS'] = bridge["properties"]["SPANS"]
            else:
                damage_result['spans'] = 1

            ds_results.append(ds_result)
            damage_results.append(damage_result)

        for bridge in unmapped_bridges:
            ds_result = dict()
            damage_result = dict()

            ds_result['guid'] = bridge['properties']['guid']

            damage_result['guid'] = bridge['properties']['guid']
            damage_result["retrofit"] = None
            damage_result["retrocost"] = None
            damage_result["demandtypes"] = None
            damage_result['demandunits'] = None
            damage_result["hazardtype"] = None
            damage_result['hazardval'] = None
            damage_result['spans'] = None

            ds_results.append(ds_result)
            damage_results.append(damage_result)

        return ds_results, damage_results

    def get_spec(self):
        """Get specifications of the bridge damage analysis.

        Returns:
            obj: A JSON object of specifications of the bridge damage analysis.

        """
        return {
            'name': 'bridge-damage',
            'description': 'bridge damage analysis',
            'input_parameters': [
                {
                    'id': 'result_name',
                    'required': True,
                    'description': 'result dataset name',
                    'type': str
                },
                {
                    'id': 'hazard_type',
                    'required': True,
                    'description': 'Hazard Type (e.g. earthquake)',
                    'type': str
                },
                {
                    'id': 'hazard_id',
                    'required': True,
                    'description': 'Hazard ID',
                    'type': str
                },
                {
                    'id': 'fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in mapping dataset',
                    'type': str
                },
                {
                    'id': 'use_liquefaction',
                    'required': False,
                    'description': 'Use liquefaction',
                    'type': bool
                },
                {
                    'id': 'use_hazard_uncertainty',
                    'required': False,
                    'description': 'Use hazard uncertainty',
                    'type': bool
                },
                {
                    'id': 'num_cpu',
                    'required': False,
                    'description': 'If using parallel execution, the number of cpus to request',
                    'type': int
                },
            ],
            'input_datasets': [{
                'id': 'bridges',
                'required': True,
                'description': 'Bridge Inventory',
                'type': ['ergo:bridges', 'ergo:bridgesVer2', 'ergo:bridgesVer3'],
            }, {
                'id': 'dfr3_mapping_set',
                'required': True,
                'description': 'DFR3 Mapping Set Object',
                'type': ['incore:dfr3MappingSet'],
            }],
            'output_datasets': [{
                'id': 'result',
                'parent_type': 'bridges',
                'description': 'CSV file of bridge structural damage',
                'type': 'ergo:bridgeDamageVer3'
            }, {
                'id': 'metadata',
                'parent_type': 'bridges',
                'description': 'additional metadata in json file about applied hazard value and '
                               'fragility',
                'type': 'incore:bridgeDamageSupplement'
            }]
        }
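
A minimal usage sketch for the BridgeDamage analysis above, assuming the usual pyincore client workflow (load inputs, set parameters, run); every id string below is a placeholder, not a real IN-CORE id:

from pyincore import IncoreClient, FragilityService, MappingSet
from pyincore.analyses.bridgedamage import BridgeDamage

client = IncoreClient()
bridge_dmg = BridgeDamage(client)

# Placeholder ids; substitute real IN-CORE dataset, mapping, and hazard ids
bridge_dmg.load_remote_input_dataset("bridges", "bridge_inventory_id")
mapping_set = MappingSet(FragilityService(client).get_mapping("bridge_mapping_id"))
bridge_dmg.set_input_dataset("dfr3_mapping_set", mapping_set)

bridge_dmg.set_parameter("result_name", "bridge_dmg_result")
bridge_dmg.set_parameter("hazard_type", "earthquake")
bridge_dmg.set_parameter("hazard_id", "earthquake_hazard_id")
bridge_dmg.set_parameter("num_cpu", 4)
bridge_dmg.run_analysis()
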
Example #5
class BuildingDamage(BaseAnalysis):
    """Building Damage Analysis calculates the probability of building damage based on
    different hazard types such as earthquake, tsunami, and tornado.

    Args:
        incore_client (IncoreClient): Service authentication.

    """

    def __init__(self, incore_client):
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(BuildingDamage, self).__init__(incore_client)

    def run(self):
        """Executes building damage analysis."""
        # Building dataset
        bldg_set = self.get_input_dataset("buildings").get_inventory_reader()

        # building retrofit strategy
        retrofit_strategy_dataset = self.get_input_dataset("retrofit_strategy")
        if retrofit_strategy_dataset is not None:
            retrofit_strategy = list(retrofit_strategy_dataset.get_csv_reader())
        else:
            retrofit_strategy = None

        # Get hazard input
        hazard_dataset_id = self.get_parameter("hazard_id")

        # Hazard type of the exposure
        hazard_type = self.get_parameter("hazard_type")

        # Get Fragility key
        fragility_key = self.get_parameter("fragility_key")
        if fragility_key is None:
            fragility_key = BuildingUtil.DEFAULT_TSUNAMI_MMAX_FRAGILITY_KEY if hazard_type == 'tsunami' else \
                BuildingUtil.DEFAULT_FRAGILITY_KEY
            self.set_parameter("fragility_key", fragility_key)

        user_defined_cpu = 1

        if self.get_parameter("num_cpu") is not None and self.get_parameter("num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(self, len(bldg_set), user_defined_cpu)

        avg_bulk_input_size = int(len(bldg_set) / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(bldg_set)
        while count < len(inventory_list):
            inventory_args.append(inventory_list[count:count + avg_bulk_input_size])
            count += avg_bulk_input_size

        (ds_results, damage_results) = self.building_damage_concurrent_future(self.building_damage_analysis_bulk_input,
                                                                              num_workers,
                                                                              inventory_args,
                                                                              repeat(retrofit_strategy),
                                                                              repeat(hazard_type),
                                                                              repeat(hazard_dataset_id))

        self.set_result_csv_data("ds_result", ds_results, name=self.get_parameter("result_name"))
        self.set_result_json_data("damage_result",
                                  damage_results,
                                  name=self.get_parameter("result_name") + "_additional_info")

        return True

    def building_damage_concurrent_future(self, function_name, parallelism, *args):
        """Utilizes the concurrent.futures module.

        Args:
            function_name (function): The function to be parallelized.
            parallelism (int): Number of workers in parallelization.
            *args: All the arguments in order to pass into parameter function_name.

        Returns:
            list: A list of ordered dictionaries with building damage values.
            list: A list of ordered dictionaries with other building data/metadata.

        """
        output_ds = []
        output_dmg = []
        with concurrent.futures.ProcessPoolExecutor(max_workers=parallelism) as executor:
            for ret1, ret2 in executor.map(function_name, *args):
                output_ds.extend(ret1)
                output_dmg.extend(ret2)

        return output_ds, output_dmg

    def building_damage_analysis_bulk_input(self, buildings, retrofit_strategy, hazard_type, hazard_dataset_id):
        """Run analysis for multiple buildings.

        Args:
            buildings (list): Multiple buildings from input inventory set.
            retrofit_strategy (list): Building guids and their retrofit levels (0, 1, 2, etc.). Optional.
            hazard_type (str): Hazard type, either earthquake, tornado, tsunami, hurricane, or flood.
            hazard_dataset_id (str): An id of the hazard exposure.

        Returns:
            list: A list of ordered dictionaries with building damage values.
            list: A list of ordered dictionaries with other building data/metadata.

        """

        fragility_key = self.get_parameter("fragility_key")
        fragility_sets = self.fragilitysvc.match_inventory(self.get_input_dataset("dfr3_mapping_set"), buildings,
                                                           fragility_key, retrofit_strategy)
        values_payload = []
        unmapped_buildings = []
        mapped_buildings = []
        for b in buildings:
            bldg_id = b["id"]
            if bldg_id in fragility_sets:
                location = GeoUtil.get_location(b)
                loc = str(location.y) + "," + str(location.x)
                demands = AnalysisUtil.get_hazard_demand_types(b, fragility_sets[bldg_id], hazard_type)
                units = fragility_sets[bldg_id].demand_units
                value = {
                    "demands": demands,
                    "units": units,
                    "loc": loc
                }
                values_payload.append(value)
                mapped_buildings.append(b)
            else:
                unmapped_buildings.append(b)

        # not needed anymore as they are already split into mapped and unmapped
        del buildings

        if hazard_type == 'earthquake':
            hazard_vals = self.hazardsvc.post_earthquake_hazard_values(hazard_dataset_id, values_payload)
        elif hazard_type == 'tornado':
            hazard_vals = self.hazardsvc.post_tornado_hazard_values(hazard_dataset_id, values_payload,
                                                                    self.get_parameter('seed'))
        elif hazard_type == 'tsunami':
            hazard_vals = self.hazardsvc.post_tsunami_hazard_values(hazard_dataset_id, values_payload)
        elif hazard_type == 'hurricane':
            hazard_vals = self.hazardsvc.post_hurricane_hazard_values(hazard_dataset_id, values_payload)
        elif hazard_type == 'flood':
            hazard_vals = self.hazardsvc.post_flood_hazard_values(hazard_dataset_id, values_payload)
        else:
            raise ValueError("The provided hazard type is not supported yet by this analysis")

        ds_results = []
        damage_results = []

        for i, b in enumerate(mapped_buildings):
            ds_result = dict()
            damage_result = dict()
            dmg_probability = dict()
            dmg_interval = dict()
            b_id = b["id"]
            selected_fragility_set = fragility_sets[b_id]

            # TODO: Once all fragilities are migrated to new format, we can remove this condition
            if isinstance(selected_fragility_set.fragility_curves[0], DFR3Curve):
                # Supports multiple demand types in same fragility
                b_haz_vals = AnalysisUtil.update_precision_of_lists(hazard_vals[i]["hazardValues"])
                b_demands = hazard_vals[i]["demands"]
                b_units = hazard_vals[i]["units"]

                hval_dict = dict()

                # To calculate damage, use the demand type name from the fragility (the name used in the
                # expression) instead of what the hazard service returns; the two can differ, e.g. "SA"
                # in DFR3 vs "1.07 SA" from the hazard service.
                for j, d in enumerate(selected_fragility_set.demand_types):
                    hval_dict[d] = b_haz_vals[j]
                if not AnalysisUtil.do_hazard_values_have_errors(hazard_vals[i]["hazardValues"]):
                    building_args = selected_fragility_set.construct_expression_args_from_inventory(b)

                    building_period = selected_fragility_set.fragility_curves[0].get_building_period(
                        selected_fragility_set.curve_parameters, **building_args)

                    dmg_probability = selected_fragility_set.calculate_limit_state(
                        hval_dict, **building_args, period=building_period)
                    dmg_interval = selected_fragility_set.calculate_damage_interval(
                        dmg_probability, hazard_type=hazard_type, inventory_type="building")
            else:
                raise ValueError("One of the fragilities is in deprecated format. This should not happen. If you are "
                                 "seeing this please report the issue.")

            ds_result['guid'] = b['properties']['guid']
            damage_result['guid'] = b['properties']['guid']

            ds_result.update(dmg_probability)
            ds_result.update(dmg_interval)
            ds_result['haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(b_haz_vals, hazard_type)

            damage_result['fragility_id'] = selected_fragility_set.id
            damage_result['demandtype'] = b_demands
            damage_result['demandunits'] = b_units
            damage_result['hazardval'] = b_haz_vals

            ds_results.append(ds_result)
            damage_results.append(damage_result)

        for b in unmapped_buildings:
            ds_result = dict()
            damage_result = dict()
            ds_result['guid'] = b['properties']['guid']
            damage_result['guid'] = b['properties']['guid']
            damage_result['fragility_id'] = None
            damage_result['demandtype'] = None
            damage_result['demandunits'] = None
            damage_result['hazardval'] = None

            ds_results.append(ds_result)
            damage_results.append(damage_result)

        return ds_results, damage_results

    def get_spec(self):
        """Get specifications of the building damage analysis.

        Returns:
            obj: A JSON object of specifications of the building damage analysis.

        """
        return {
            'name': 'building-damage',
            'description': 'building damage analysis',
            'input_parameters': [
                {
                    'id': 'result_name',
                    'required': True,
                    'description': 'result dataset name',
                    'type': str
                },
                {
                    'id': 'hazard_type',
                    'required': True,
                    'description': 'Hazard Type (e.g. earthquake)',
                    'type': str
                },
                {
                    'id': 'hazard_id',
                    'required': True,
                    'description': 'Hazard ID',
                    'type': str
                },
                {
                    'id': 'fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in mapping dataset',
                    'type': str
                },
                {
                    'id': 'use_liquefaction',
                    'required': False,
                    'description': 'Use liquefaction',
                    'type': bool
                },
                {
                    'id': 'use_hazard_uncertainty',
                    'required': False,
                    'description': 'Use hazard uncertainty',
                    'type': bool
                },
                {
                    'id': 'num_cpu',
                    'required': False,
                    'description': 'If using parallel execution, the number of cpus to request',
                    'type': int
                },
                {
                    'id': 'seed',
                    'required': False,
                    'description': 'Initial seed for the tornado hazard value',
                    'type': int
                }
            ],
            'input_datasets': [
                {
                    'id': 'buildings',
                    'required': True,
                    'description': 'Building Inventory',
                    'type': ['ergo:buildingInventoryVer4', 'ergo:buildingInventoryVer5',
                             'ergo:buildingInventoryVer6', 'ergo:buildingInventoryVer7'],
                },
                {
                    'id': 'dfr3_mapping_set',
                    'required': True,
                    'description': 'DFR3 Mapping Set Object',
                    'type': ['incore:dfr3MappingSet'],
                },
                {
                    'id': 'retrofit_strategy',
                    'required': False,
                    'description': 'Building retrofit strategy that contains guid and retrofit method',
                    'type': ['incore:retrofitStrategy']
                }
            ],
            'output_datasets': [
                {
                    'id': 'ds_result',
                    'parent_type': 'buildings',
                    'description': 'CSV file of damage states for building structural damage',
                    'type': 'ergo:buildingDamageVer6'
                },
                {
                    'id': 'damage_result',
                    'parent_type': 'buildings',
                    'description': 'Json file with information about applied hazard value and fragility',
                    'type': 'incore:buildingDamageSupplement'
                }
            ]
        }
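
A minimal usage sketch for BuildingDamage in the same style; all ids are placeholders, and the optional retrofit_strategy dataset is omitted here:

from pyincore import IncoreClient, FragilityService, MappingSet
from pyincore.analyses.buildingdamage import BuildingDamage

client = IncoreClient()
bldg_dmg = BuildingDamage(client)

# Placeholder ids; substitute real IN-CORE dataset, mapping, and hazard ids
bldg_dmg.load_remote_input_dataset("buildings", "building_inventory_id")
mapping_set = MappingSet(FragilityService(client).get_mapping("building_mapping_id"))
bldg_dmg.set_input_dataset("dfr3_mapping_set", mapping_set)

bldg_dmg.set_parameter("result_name", "bldg_dmg_result")
bldg_dmg.set_parameter("hazard_type", "tornado")
bldg_dmg.set_parameter("hazard_id", "tornado_hazard_id")
bldg_dmg.set_parameter("seed", 1000)  # only consumed for tornado hazard values
bldg_dmg.set_parameter("num_cpu", 4)
bldg_dmg.run_analysis()
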
Example #6
class EpfDamage(BaseAnalysis):
    """Computes electric power facility structural damage for earthquake, tsunami, tornado, and hurricane hazards.

    Args:
        incore_client (IncoreClient): Service authentication.

    """

    DEFAULT_LIQ_FRAGILITY_KEY = "pgd"
    DEFAULT_FRAGILITY_KEY = "pga"

    def __init__(self, incore_client):
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(EpfDamage, self).__init__(incore_client)

    def run(self):
        """Executes electric power facility damage analysis."""
        epf_set = self.get_input_dataset("epfs").get_inventory_reader()

        # Get Fragility key
        fragility_key = self.get_parameter("fragility_key")
        if fragility_key is None:
            fragility_key = self.DEFAULT_FRAGILITY_KEY
            self.set_parameter("fragility_key", fragility_key)

        # Get hazard input
        hazard_dataset_id = self.get_parameter("hazard_id")

        # Hazard type, note this is here for future use if additional hazards are supported by this analysis
        hazard_type = self.get_parameter("hazard_type")

        # Hazard Uncertainty
        use_hazard_uncertainty = False
        if self.get_parameter("use_hazard_uncertainty") is not None:
            use_hazard_uncertainty = self.get_parameter(
                "use_hazard_uncertainty")

        if use_hazard_uncertainty:
            raise ValueError("Uncertainty is not implemented yet.")

        user_defined_cpu = 1

        if self.get_parameter("num_cpu") is not None and self.get_parameter(
                "num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(
            self, len(epf_set), user_defined_cpu)

        avg_bulk_input_size = int(len(epf_set) / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(epf_set)
        while count < len(inventory_list):
            inventory_args.append(inventory_list[count:count +
                                                 avg_bulk_input_size])
            count += avg_bulk_input_size

        (ds_results, damage_results) = self.epf_damage_concurrent_future(
            self.epf_damage_analysis_bulk_input, num_workers, inventory_args,
            repeat(hazard_type), repeat(hazard_dataset_id))

        self.set_result_csv_data("result",
                                 ds_results,
                                 name=self.get_parameter("result_name"))
        self.set_result_json_data("metadata",
                                  damage_results,
                                  name=self.get_parameter("result_name") +
                                  "_additional_info")

        return True

    def epf_damage_concurrent_future(self, function_name, num_workers, *args):
        """Utilizes the concurrent.futures module.

        Args:
            function_name (function): The function to be parallelized.
            num_workers (int): Maximum number of workers in the parallelization.
            *args: All the arguments in order to pass into parameter function_name.

        Returns:
            list: A list of ordered dictionaries with epf damage values.
            list: A list of ordered dictionaries with other epf data/metadata.

        """

        output_ds = []
        output_dmg = []
        with concurrent.futures.ProcessPoolExecutor(
                max_workers=num_workers) as executor:
            for ret1, ret2 in executor.map(function_name, *args):
                output_ds.extend(ret1)
                output_dmg.extend(ret2)

        return output_ds, output_dmg

    def epf_damage_analysis_bulk_input(self, epfs, hazard_type,
                                       hazard_dataset_id):
        """Run analysis for multiple epfs.

        Args:
            epfs (list): Multiple epfs from input inventory set.
            hazard_type (str): A type of hazard exposure (earthquake, tsunami, tornado, or hurricane).
            hazard_dataset_id (str): An id of the hazard exposure.

        Returns:
            list: A list of ordered dictionaries with epf damage values.
            list: A list of ordered dictionaries with other epf data/metadata.

        """

        use_liquefaction = False
        liquefaction_available = False

        fragility_key = self.get_parameter("fragility_key")

        fragility_set = self.fragilitysvc.match_inventory(
            self.get_input_dataset("dfr3_mapping_set"), epfs, fragility_key)

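        # Liquefaction applies only to earthquake hazards and requires both the
        # use_liquefaction flag and a liquefaction geology dataset id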
        if hazard_type == "earthquake":
            liquefaction_fragility_key = self.get_parameter(
                "liquefaction_fragility_key")
            if self.get_parameter("use_liquefaction") is True:
                if liquefaction_fragility_key is None:
                    liquefaction_fragility_key = self.DEFAULT_LIQ_FRAGILITY_KEY

                use_liquefaction = self.get_parameter("use_liquefaction")

                # Obtain the geology dataset
                geology_dataset_id = self.get_parameter(
                    "liquefaction_geology_dataset_id")

                if geology_dataset_id is not None:
                    fragility_sets_liq = self.fragilitysvc.match_inventory(
                        self.get_input_dataset("dfr3_mapping_set"), epfs,
                        liquefaction_fragility_key)

                    if fragility_sets_liq is not None:
                        liquefaction_available = True

        values_payload = []
        values_payload_liq = []
        unmapped_epfs = []
        mapped_epfs = []
        for epf in epfs:
            epf_id = epf["id"]
            if epf_id in fragility_set:
                location = GeoUtil.get_location(epf)
                loc = str(location.y) + "," + str(location.x)
                demands = fragility_set[epf_id].demand_types
                units = fragility_set[epf_id].demand_units
                value = {"demands": demands, "units": units, "loc": loc}
                values_payload.append(value)
                mapped_epfs.append(epf)

                if liquefaction_available and epf["id"] in fragility_sets_liq:
                    fragility_set_liq = fragility_sets_liq[epf["id"]]
                    demands_liq = fragility_set_liq.demand_types
                    units_liq = fragility_set_liq.demand_units
                    value_liq = {
                        "demands": demands_liq,
                        "units": units_liq,
                        "loc": loc
                    }
                    values_payload_liq.append(value_liq)
            else:
                unmapped_epfs.append(epf)

        if hazard_type == 'earthquake':
            hazard_vals = self.hazardsvc.post_earthquake_hazard_values(
                hazard_dataset_id, values_payload)
        elif hazard_type == 'tornado':
            hazard_vals = self.hazardsvc.post_tornado_hazard_values(
                hazard_dataset_id, values_payload)
        elif hazard_type == 'hurricane':
            # TODO: implement hurricane
            raise ValueError('Hurricane hazard has not yet been implemented!')
        elif hazard_type == 'tsunami':
            hazard_vals = self.hazardsvc.post_tsunami_hazard_values(
                hazard_dataset_id, values_payload)
        else:
            raise ValueError(
                "The provided hazard type is not supported yet by this analysis"
            )

        liquefaction_resp = None
        if liquefaction_available:
            liquefaction_resp = self.hazardsvc.post_liquefaction_values(
                hazard_dataset_id, geology_dataset_id, values_payload_liq)

        ds_results = []
        damage_results = []

        for i, epf in enumerate(mapped_epfs):
            ds_result = dict()
            damage_result = dict()
            selected_fragility_set = fragility_set[epf["id"]]

            if isinstance(selected_fragility_set.fragility_curves[0],
                          DFR3Curve):
                hazard_val = AnalysisUtil.update_precision_of_lists(
                    hazard_vals[i]["hazardValues"])
                input_demand_types = hazard_vals[i]["demands"]
                input_demand_units = hazard_vals[i]["units"]

                hval_dict = dict()
                for j, d in enumerate(selected_fragility_set.demand_types):
                    hval_dict[d] = hazard_val[j]

                epf_args = selected_fragility_set.construct_expression_args_from_inventory(
                    epf)
                limit_states = selected_fragility_set.calculate_limit_state(
                    hval_dict, inventory_type='electric_facility', **epf_args)

                if liquefaction_resp is not None:
                    fragility_set_liq = fragility_sets_liq[epf["id"]]

                    if isinstance(fragility_set_liq.fragility_curves[0],
                                  DFR3Curve):
                        liq_hazard_vals = AnalysisUtil.update_precision_of_lists(
                            liquefaction_resp[i]["pgdValues"])
                        liq_demand_types = liquefaction_resp[i]["demands"]
                        liq_demand_units = liquefaction_resp[i]["units"]
                        liquefaction_prob = liquefaction_resp[i][
                            'liqProbability']

                        hval_dict_liq = dict()

                        for j, d in enumerate(fragility_set_liq.demand_types):
                            hval_dict_liq[d] = liq_hazard_vals[j]

                        facility_liq_args = fragility_set_liq.construct_expression_args_from_inventory(
                            epf)
                        pgd_limit_states = \
                            fragility_set_liq.calculate_limit_state(
                                hval_dict_liq, inventory_type="electric_facility",
                                **facility_liq_args)
                    else:
                        raise ValueError(
                            "One of the fragilities is in deprecated format. "
                            "This should not happen. If you are seeing this please report the issue."
                        )

                    limit_states = AnalysisUtil.adjust_limit_states_for_pgd(
                        limit_states, pgd_limit_states)

                dmg_interval = selected_fragility_set.calculate_damage_interval(
                    limit_states,
                    hazard_type=hazard_type,
                    inventory_type='electric_facility')
            else:
                raise ValueError(
                    "One of the fragilities is in deprecated format. This should not happen. If you are "
                    "seeing this please report the issue.")

            ds_result["guid"] = epf["properties"]["guid"]
            ds_result.update(limit_states)
            ds_result.update(dmg_interval)
            ds_result['haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(
                hazard_val, hazard_type)

            damage_result['guid'] = epf['properties']['guid']
            damage_result['fragility_id'] = selected_fragility_set.id
            damage_result["demandtypes"] = input_demand_types
            damage_result["demandunits"] = input_demand_units
            damage_result["hazardtype"] = hazard_type
            damage_result["hazardvals"] = hazard_val

            if hazard_type == "earthquake" and use_liquefaction is True:
                if liquefaction_available:
                    damage_result['liq_fragility_id'] = fragility_sets_liq[
                        epf["id"]].id
                    damage_result['liqdemandtypes'] = liq_demand_types
                    damage_result['liqdemandunits'] = liq_demand_units
                    damage_result['liqhazval'] = liq_hazard_vals
                    damage_result['liqprobability'] = liquefaction_prob
                else:
                    damage_result['liq_fragility_id'] = None
                    damage_result['liqdemandtypes'] = None
                    damage_result['liqdemandunits'] = None
                    damage_result['liqhazval'] = None
                    damage_result['liqprobability'] = None

            ds_results.append(ds_result)
            damage_results.append(damage_result)

        # For epfs without a matching fragility, emit default (None) results
        for epf in unmapped_epfs:
            ds_result = dict()
            damage_result = dict()
            ds_result['guid'] = epf['properties']['guid']
            damage_result['guid'] = epf['properties']['guid']
            damage_result['fragility_id'] = None
            damage_result["demandtypes"] = None
            damage_result['demandunits'] = None
            damage_result["hazardtype"] = None
            damage_result['hazardvals'] = None
            if hazard_type == "earthquake" and use_liquefaction is True:
                damage_result['liq_fragility_id'] = None
                damage_result['liqdemandtypes'] = None
                damage_result['liqdemandunits'] = None
                damage_result['liqhazval'] = None
                damage_result['liqprobability'] = None

            ds_results.append(ds_result)
            damage_results.append(damage_result)

        return ds_results, damage_results

    def get_spec(self):
        """Get specifications of the epf damage analysis.

        Returns:
            obj: A JSON object of specifications of the epf damage analysis.

        """
        return {
            'name': 'epf-damage',
            'description': 'Electric Power Facility damage analysis.',
            'input_parameters': [
                {
                    'id': 'result_name',
                    'required': True,
                    'description': 'A name of the resulting dataset',
                    'type': str
                },
                {
                    'id': 'hazard_type',
                    'required': True,
                    'description': 'Hazard type (e.g. earthquake).',
                    'type': str
                },
                {
                    'id': 'hazard_id',
                    'required': True,
                    'description': 'Hazard ID which defines the particular hazard (e.g. New Madrid earthquake '
                                   'using Atkinson Boore 1995).',
                    'type': str
                },
                {
                    'id': 'fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in mapping dataset',
                    'type': str
                },
                {
                    'id': 'liquefaction_fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in liquefaction mapping dataset',
                    'type': str
                },
                {
                    'id': 'use_liquefaction',
                    'required': False,
                    'description': 'Use ground liquefaction to modify damage interval.',
                    'type': bool
                },
                {
                    'id': 'liquefaction_geology_dataset_id',
                    'required': False,
                    'description': 'Liquefaction geology/susceptibility dataset id. '
                                   'If not provided, liquefaction will be ignored',
                    'type': str
                },
                {
                    'id': 'use_hazard_uncertainty',
                    'required': False,
                    'description': 'Use hazard uncertainty',
                    'type': bool
                },
                {
                    'id': 'num_cpu',
                    'required': False,
                    'description': 'If using parallel execution, the number of cpus to request.',
                    'type': int
                },
            ],
            'input_datasets': [{
                'id': 'epfs',
                'required': True,
                'description': 'Electric Power Facility Inventory',
                'type': ['incore:epf', 'ergo:epf'],
            }, {
                'id': 'dfr3_mapping_set',
                'required': True,
                'description': 'DFR3 Mapping Set Object',
                'type': ['incore:dfr3MappingSet'],
            }],
            'output_datasets': [{
                'id': 'result',
                'parent_type': 'epfs',
                'type': 'incore:epfDamageVer3'
            }, {
                'id': 'metadata',
                'parent_type': 'epfs',
                'description': 'additional metadata in json file about applied hazard value and '
                               'fragility',
                'type': 'incore:epfDamageSupplement'
            }]
        }
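
A minimal usage sketch for EpfDamage, here with the optional earthquake liquefaction adjustment switched on; all ids are placeholders:

from pyincore import IncoreClient, FragilityService, MappingSet
from pyincore.analyses.epfdamage import EpfDamage

client = IncoreClient()
epf_dmg = EpfDamage(client)

# Placeholder ids; substitute real IN-CORE dataset, mapping, and hazard ids
epf_dmg.load_remote_input_dataset("epfs", "epf_inventory_id")
mapping_set = MappingSet(FragilityService(client).get_mapping("epf_mapping_id"))
epf_dmg.set_input_dataset("dfr3_mapping_set", mapping_set)

epf_dmg.set_parameter("result_name", "epf_dmg_result")
epf_dmg.set_parameter("hazard_type", "earthquake")
epf_dmg.set_parameter("hazard_id", "earthquake_hazard_id")
# Liquefaction needs both the flag and a geology dataset id to take effect
epf_dmg.set_parameter("use_liquefaction", True)
epf_dmg.set_parameter("liquefaction_geology_dataset_id", "geology_dataset_id")
epf_dmg.run_analysis()
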
Example #7
class PipelineDamage(BaseAnalysis):
    """Computes pipeline damage for an earthquake or a tsunami.

    Args:
        incore_client: Service client with authentication info.

    """
    def __init__(self, incore_client):
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(PipelineDamage, self).__init__(incore_client)

    def run(self):
        """Executes pipeline damage analysis."""

        pipeline_dataset = self.get_input_dataset(
            "pipeline").get_inventory_reader()

        # Get hazard input
        hazard_type = self.get_parameter("hazard_type")
        hazard_dataset_id = self.get_parameter("hazard_id")
        user_defined_cpu = 1

        if self.get_parameter("num_cpu") is not None and self.get_parameter(
                "num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        dataset_size = len(pipeline_dataset)
        num_workers = AnalysisUtil.determine_parallelism_locally(
            self, dataset_size, user_defined_cpu)
        avg_bulk_input_size = int(dataset_size / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(pipeline_dataset)
        while count < len(inventory_list):
            inventory_args.append(inventory_list[count:count +
                                                 avg_bulk_input_size])
            count += avg_bulk_input_size

        (results, damage_results) = self.pipeline_damage_concurrent_future(
            self.pipeline_damage_analysis_bulk_input, num_workers,
            inventory_args, repeat(hazard_type), repeat(hazard_dataset_id))

        self.set_result_csv_data("result",
                                 results,
                                 name=self.get_parameter("result_name"))
        self.set_result_json_data("metadata",
                                  damage_results,
                                  name=self.get_parameter("result_name") +
                                  "_additional_info")
        return True

    def pipeline_damage_concurrent_future(self, function_name, num_workers,
                                          *args):
        """Utilizes the concurrent.futures module.

        Args:
            function_name (function): The function to be parallelized.
            num_workers (int): Maximum number of workers in the parallelization.
            *args: All the arguments in order to pass into parameter function_name.

        Returns:
            list: A list of ordered dictionaries with pipeline damage values.
            list: A list of ordered dictionaries with other pipeline data/metadata.

        """
        output_ds = []
        output_dmg = []
        with concurrent.futures.ProcessPoolExecutor(
                max_workers=num_workers) as executor:
            for ret1, ret2 in executor.map(function_name, *args):
                output_ds.extend(ret1)
                output_dmg.extend(ret2)

        return output_ds, output_dmg

    def pipeline_damage_analysis_bulk_input(self, pipelines, hazard_type,
                                            hazard_dataset_id):
        """Run pipeline damage analysis for multiple pipelines.

        Args:
            pipelines (list): Multiple pipelines from pipeline dataset.
            hazard_type (str): Hazard type (earthquake or tsunami).
            hazard_dataset_id (str): An id of the hazard exposure.

        Returns:
            list: A list of ordered dictionaries with pipeline damage values.
            list: A list of ordered dictionaries with other pipeline data/metadata.

        """

        # Get Fragility key
        fragility_key = self.get_parameter("fragility_key")
        if fragility_key is None:
            fragility_key = "Non-Retrofit inundationDepth Fragility ID Code" if hazard_type == 'tsunami' else "pgv"
            self.set_parameter("fragility_key", fragility_key)

        # get fragility set
        fragility_sets = self.fragilitysvc.match_inventory(
            self.get_input_dataset("dfr3_mapping_set"), pipelines,
            fragility_key)

        values_payload = []
        unmapped_pipelines = []
        mapped_pipelines = []
        for pipeline in pipelines:
            # if a matching fragility exists for this pipeline
            if pipeline["id"] in fragility_sets.keys():
                fragility_set = fragility_sets[pipeline["id"]]
                location = GeoUtil.get_location(pipeline)
                loc = str(location.y) + "," + str(location.x)
                demands = AnalysisUtil.get_hazard_demand_types(
                    pipeline, fragility_set, hazard_type)
                units = fragility_sets[pipeline["id"]].demand_units
                value = {"demands": demands, "units": units, "loc": loc}
                values_payload.append(value)
                mapped_pipelines.append(pipeline)

            else:
                unmapped_pipelines.append(pipeline)

        # not needed anymore as they are already split into mapped and unmapped
        del pipelines

        if hazard_type == 'earthquake':
            hazard_vals = self.hazardsvc.post_earthquake_hazard_values(
                hazard_dataset_id, values_payload)
        elif hazard_type == 'tsunami':
            hazard_vals = self.hazardsvc.post_tsunami_hazard_values(
                hazard_dataset_id, values_payload)
        else:
            # tornado, hurricane, and flood are not yet supported here
            raise ValueError(
                "The provided hazard type is not supported yet by this analysis"
            )

        pipeline_results = []
        damage_results = []
        for i, pipeline in enumerate(mapped_pipelines):
            limit_states = dict()
            dmg_intervals = dict()
            pipeline_result = dict()
            fragility_set = fragility_sets[pipeline["id"]]

            # TODO: Once all fragilities are migrated to new format, we can remove this condition
            if isinstance(fragility_set.fragility_curves[0], DFR3Curve):
                # Supports multiple demand types in same fragility
                haz_vals = AnalysisUtil.update_precision_of_lists(
                    hazard_vals[i]["hazardValues"])
                demand_types = hazard_vals[i]["demands"]
                demand_units = hazard_vals[i]["units"]

                # construct hazard_value dictionary {"demand_type":"hazard_value", ...}
                hval_dict = dict()
                for j, d in enumerate(fragility_set.demand_types):
                    hval_dict[d] = haz_vals[j]

                if not AnalysisUtil.do_hazard_values_have_errors(
                        hazard_vals[i]["hazardValues"]):
                    pipeline_args = fragility_set.construct_expression_args_from_inventory(
                        pipeline)
                    limit_states = fragility_set.calculate_limit_state(
                        hval_dict, inventory_type="pipeline", **pipeline_args)
                    dmg_intervals = fragility_set.calculate_damage_interval(
                        limit_states,
                        hazard_type=hazard_type,
                        inventory_type="pipeline")

            else:
                raise ValueError(
                    "One of the fragilities is in deprecated format. This should not happen. If you are "
                    "seeing this please report the issue.")

            pipeline_result['guid'] = pipeline['properties']['guid']
            pipeline_result.update(limit_states)
            pipeline_result.update(dmg_intervals)
            pipeline_result['haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(
                haz_vals, hazard_type)
            damage_result = dict()
            damage_result['guid'] = pipeline['properties']['guid']
            damage_result['fragility_id'] = fragility_set.id
            damage_result['demandtypes'] = demand_types
            damage_result['demandunits'] = demand_units
            damage_result['hazardtype'] = hazard_type
            damage_result['hazardval'] = haz_vals

            pipeline_results.append(pipeline_result)
            damage_results.append(damage_result)

        # For pipelines without matching fragility curves, default outputs to None
        for pipeline in unmapped_pipelines:
            pipeline_result = dict()
            damage_result = dict()
            pipeline_result['guid'] = pipeline['properties']['guid']
            damage_result['guid'] = pipeline['properties']['guid']
            damage_result['fragility_id'] = None
            damage_result['demandtypes'] = None
            damage_result['demandunits'] = None
            damage_result['hazardtype'] = None
            damage_result['hazardval'] = None

            pipeline_results.append(pipeline_result)
            damage_results.append(damage_result)

        return pipeline_results, damage_results

    def get_spec(self):
        """Get specifications of the pipeline damage analysis.

        Returns:
            obj: A JSON object of specifications of the pipeline damage analysis.

        """
        return {
            'name': 'pipeline-damage',
            'description': 'Buried pipeline damage analysis',
            'input_parameters': [{
                'id': 'result_name',
                'required': True,
                'description': 'Result dataset name',
                'type': str
            }, {
                'id': 'hazard_type',
                'required': True,
                'description': 'Hazard Type',
                'type': str
            }, {
                'id': 'hazard_id',
                'required': True,
                'description': 'Hazard ID',
                'type': str
            }, {
                'id': 'fragility_key',
                'required': False,
                'description': 'Fragility key to use in mapping dataset',
                'type': str
            }, {
                'id': 'num_cpu',
                'required': False,
                'description': 'If using parallel execution, the number of cpus to request',
                'type': int
            }, {
                'id': 'liquefaction_geology_dataset_id',
                'required': False,
                'description': 'Geology dataset id',
                'type': str,
            }],
            'input_datasets': [{
                'id': 'pipeline',
                'required': True,
                'description': 'Pipeline Inventory',
                'type': ['ergo:buriedPipelineTopology', 'ergo:pipeline'],
            }, {
                'id': 'dfr3_mapping_set',
                'required': True,
                'description': 'DFR3 Mapping Set Object',
                'type': ['incore:dfr3MappingSet'],
            }],
            'output_datasets': [{
                'id': 'result',
                'parent_type': 'pipeline',
                'description': 'CSV file of damage states for pipeline damage',
                'type': 'incore:pipelineDamageVer3'
            }, {
                'id': 'metadata',
                'parent_type': 'pipeline',
                'description': 'Json file with information about applied hazard value and fragility',
                'type': 'incore:pipelineDamageSupplement'
            }]
        }
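
A minimal usage sketch for PipelineDamage, following the same pyincore workflow; all ids are placeholders:

from pyincore import IncoreClient, FragilityService, MappingSet
from pyincore.analyses.pipelinedamage import PipelineDamage

client = IncoreClient()
pipeline_dmg = PipelineDamage(client)

# Placeholder ids; substitute real IN-CORE dataset, mapping, and hazard ids
pipeline_dmg.load_remote_input_dataset("pipeline", "pipeline_inventory_id")
mapping_set = MappingSet(FragilityService(client).get_mapping("pipeline_mapping_id"))
pipeline_dmg.set_input_dataset("dfr3_mapping_set", mapping_set)

pipeline_dmg.set_parameter("result_name", "pipeline_dmg_result")
pipeline_dmg.set_parameter("hazard_type", "tsunami")
pipeline_dmg.set_parameter("hazard_id", "tsunami_hazard_id")
pipeline_dmg.set_parameter("num_cpu", 4)
pipeline_dmg.run_analysis()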