Example #1
class BridgeDamage(BaseAnalysis):
    """Computes bridge structural damage for earthquake, tsunami, tornado, and hurricane hazards.

    Args:
        incore_client (IncoreClient): Service authentication.

    """

    def __init__(self, incore_client):
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(BridgeDamage, self).__init__(incore_client)

    def run(self):
        """Executes bridge damage analysis."""
        # Bridge dataset
        bridge_set = self.get_input_dataset("bridges").get_inventory_reader()

        # Get hazard input
        hazard_type = self.get_parameter("hazard_type")
        hazard_dataset_id = self.get_parameter("hazard_id")
        user_defined_cpu = 1

        if self.get_parameter("num_cpu") is not None \
                and self.get_parameter("num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(
            self, len(bridge_set), user_defined_cpu)

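        # Partition the inventory into roughly equal-size batches, one per worker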
        avg_bulk_input_size = int(len(bridge_set) / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(bridge_set)
        while count < len(inventory_list):
            inventory_args.append(
                inventory_list[count:count + avg_bulk_input_size])
            count += avg_bulk_input_size

        results = self.bridge_damage_concurrent_future(
            self.bridge_damage_analysis_bulk_input, num_workers,
            inventory_args, repeat(hazard_type),
            repeat(hazard_dataset_id))

        self.set_result_csv_data("result", results,
                                 name=self.get_parameter("result_name"))

        return True

    def bridge_damage_concurrent_future(self, function_name, num_workers,
                                        *args):
        """Utilizes concurrent.future module.

        Args:
            function_name (function): The function to be parallelized.
            num_workers (int): Maximum number of workers in parallelization.
            *args: All the arguments in order to pass into parameter function_name.

        Returns:
            list: A list of ordered dictionaries with bridge damage values and other data/metadata.

        """
        output = []
        with concurrent.futures.ProcessPoolExecutor(
                max_workers=num_workers) as executor:
            for ret in executor.map(function_name, *args):
                output.extend(ret)

        return output

    def bridge_damage_analysis_bulk_input(self, bridges, hazard_type,
                                          hazard_dataset_id):
        """Run analysis for multiple bridges.

        Args:
            bridges (list): Multiple bridges from input inventory set.
            hazard_type (str): Hazard type, either earthquake, tornado, tsunami, or hurricane.
            hazard_dataset_id (str): An id of the hazard exposure.

        Returns:
            list: A list of ordered dictionaries with bridge damage values and other data/metadata.

        """
        # Get Fragility key
        fragility_key = self.get_parameter("fragility_key")
        if fragility_key is None:
            fragility_key = BridgeUtil.DEFAULT_TSUNAMI_HMAX_FRAGILITY_KEY if hazard_type == 'tsunami' else \
                BridgeUtil.DEFAULT_FRAGILITY_KEY
            self.set_parameter("fragility_key", fragility_key)

        # Hazard Uncertainty
        use_hazard_uncertainty = False
        if hazard_type == "earthquake" and self.get_parameter(
                "use_hazard_uncertainty") is not None:
            use_hazard_uncertainty = self.get_parameter(
                "use_hazard_uncertainty")

        # Liquefaction
        use_liquefaction = False
        if hazard_type == "earthquake" and self.get_parameter(
                "use_liquefaction") is not None:
            use_liquefaction = self.get_parameter("use_liquefaction")

        fragility_set = self.fragilitysvc.match_inventory(
            self.get_input_dataset("dfr3_mapping_set"), bridges, fragility_key)

        bridge_results = []
        list_bridges = bridges

        # Converting list of bridges into a dictionary for ease of reference
        bridges = dict()
        for br in list_bridges:
            bridges[br["id"]] = br
        list_bridges = None  # Clear as it's not needed anymore

        processed_bridges = []
        grouped_bridges = AnalysisUtil.group_by_demand_type(bridges, fragility_set)

        for demand, grouped_brs in grouped_bridges.items():

            input_demand_type = demand[0]
            input_demand_units = demand[1]

            # For every group of unique demand and demand unit, call the end-point once
            br_chunks = list(AnalysisUtil.chunks(grouped_brs, 50))  # TODO: Move to globals?
            for brs in br_chunks:
                points = []
                for br_id in brs:
                    location = GeoUtil.get_location(bridges[br_id])
                    points.append(str(location.y) + "," + str(location.x))

                if hazard_type == "earthquake":
                    hazard_vals = \
                        self.hazardsvc.get_earthquake_hazard_values(
                            hazard_dataset_id,
                            input_demand_type,
                            input_demand_units,
                            points)
                elif hazard_type == "tsunami":
                    hazard_vals = self.hazardsvc.get_tsunami_hazard_values(
                        hazard_dataset_id, input_demand_type, input_demand_units, points)
                elif hazard_type == "tornado":
                    hazard_vals = self.hazardsvc.get_tornado_hazard_values(
                        hazard_dataset_id, input_demand_units, points)
                elif hazard_type == "hurricane":
                    hazard_vals = self.hazardsvc.get_hurricanewf_values(
                        hazard_dataset_id, input_demand_type, input_demand_units, points)
                else:
                    raise ValueError("We only support Earthquake, Tornado, Tsunami, and Hurricane at the moment!")

                # Parse the batch hazard value results and map them back to the bridge and fragility.
                # This is a potential pitfall, as we are relying on the order of the returned results.
                i = 0
                for br_id in brs:
                    bridge_result = collections.OrderedDict()
                    bridge = bridges[br_id]
                    selected_fragility_set = fragility_set[br_id]

                    hazard_val = hazard_vals[i]['hazardValue']

                    hazard_std_dev = 0.0
                    if use_hazard_uncertainty:
                        # TODO Get this from API once implemented
                        raise ValueError("Uncertainty Not Implemented!")

                    adjusted_fragility_set = copy.deepcopy(selected_fragility_set)
                    if use_liquefaction and 'liq' in bridge['properties']:
                        for fragility in adjusted_fragility_set.fragility_curves:
                            fragility.adjust_fragility_for_liquefaction(bridge['properties']['liq'])

                    dmg_probability = adjusted_fragility_set.calculate_limit_state(hazard_val, std_dev=hazard_std_dev)
                    retrofit_cost = BridgeUtil.get_retrofit_cost(fragility_key)
                    retrofit_type = BridgeUtil.get_retrofit_type(fragility_key)

                    dmg_intervals = AnalysisUtil.calculate_damage_interval(dmg_probability)

                    bridge_result['guid'] = bridge['properties']['guid']
                    bridge_result.update(dmg_probability)
                    bridge_result.update(dmg_intervals)
                    bridge_result["retrofit"] = retrofit_type
                    bridge_result["retrocost"] = retrofit_cost
                    bridge_result["demandtype"] = input_demand_type
                    bridge_result["demandunits"] = input_demand_units
                    bridge_result["hazardtype"] = hazard_type
                    bridge_result["hazardval"] = hazard_val

                    # add spans to bridge output so mean damage calculation can use that info
                    if "spans" in bridge["properties"] and bridge["properties"]["spans"] \
                            is not None and bridge["properties"]["spans"].isdigit():
                        bridge_result['spans'] = int(bridge["properties"]["spans"])
                    elif "SPANS" in bridge["properties"] and bridge["properties"]["SPANS"] \
                            is not None and bridge["properties"]["SPANS"].isdigit():
                        bridge_result['spans'] = int(bridge["properties"]["SPANS"])
                    else:
                        bridge_result['spans'] = 1

                    bridge_results.append(bridge_result)
                    processed_bridges.append(br_id)  # track processed bridges
                    i = i + 1

        unmapped_dmg_probability = {"ls-slight": 0.0, "ls-moderat": 0.0,
                                    "ls-extensi": 0.0, "ls-complet": 0.0}
        unmapped_dmg_intervals = AnalysisUtil.calculate_damage_interval(unmapped_dmg_probability)
        for br_id, br in bridges.items():
            if br_id not in processed_bridges:
                unmapped_br_result = collections.OrderedDict()
                unmapped_br_result['guid'] = br['properties']['guid']
                unmapped_br_result.update(unmapped_dmg_probability)
                unmapped_br_result.update(unmapped_dmg_intervals)
                unmapped_br_result["retrofit"] = "Non-Retrofit"
                unmapped_br_result["retrocost"] = 0.0
                unmapped_br_result["demandtype"] = "None"
                unmapped_br_result['demandunits'] = "None"
                unmapped_br_result["hazardtype"] = "None"
                unmapped_br_result['hazardval'] = 0.0
                bridge_results.append(unmapped_br_result)

        return bridge_results

    def get_spec(self):
        """Get specifications of the bridge damage analysis.

        Returns:
            obj: A JSON object of specifications of the bridge damage analysis.

        """
        return {
            'name': 'bridge-damage',
            'description': 'bridge damage analysis',
            'input_parameters': [
                {
                    'id': 'result_name',
                    'required': True,
                    'description': 'result dataset name',
                    'type': str
                },
                {
                    'id': 'hazard_type',
                    'required': True,
                    'description': 'Hazard Type (e.g. earthquake)',
                    'type': str
                },
                {
                    'id': 'hazard_id',
                    'required': True,
                    'description': 'Hazard ID',
                    'type': str
                },
                {
                    'id': 'fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in mapping dataset',
                    'type': str
                },
                {
                    'id': 'use_liquefaction',
                    'required': False,
                    'description': 'Use liquefaction',
                    'type': bool
                },
                {
                    'id': 'use_hazard_uncertainty',
                    'required': False,
                    'description': 'Use hazard uncertainty',
                    'type': bool
                },
                {
                    'id': 'num_cpu',
                    'required': False,
                    'description': 'If using parallel execution, the number of cpus to request',
                    'type': int
                },
            ],
            'input_datasets': [
                {
                    'id': 'bridges',
                    'required': True,
                    'description': 'Bridge Inventory',
                    'type': ['ergo:bridges'],
                },
                {
                    'id': 'dfr3_mapping_set',
                    'required': True,
                    'description': 'DFR3 Mapping Set Object',
                    'type': ['incore:dfr3MappingSet'],
                }
            ],
            'output_datasets': [
                {
                    'id': 'result',
                    'parent_type': 'bridges',
                    'description': 'CSV file of bridge structural damage',
                    'type': 'ergo:bridgeDamage'
                }
            ]
        }
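
For orientation, here is a minimal usage sketch for this analysis, assuming the usual pyincore workflow (IncoreClient, load_remote_input_dataset, MappingSet, run_analysis); the angle-bracketed IDs are placeholders, not real INCORE dataset IDs:

from pyincore import IncoreClient, FragilityService, MappingSet
from pyincore.analyses.bridgedamage import BridgeDamage

client = IncoreClient()
bridge_dmg = BridgeDamage(client)

# Placeholder IDs; substitute real INCORE service IDs
bridge_dmg.load_remote_input_dataset("bridges", "<bridge-inventory-id>")
mapping_set = MappingSet(FragilityService(client).get_mapping("<dfr3-mapping-id>"))
bridge_dmg.set_input_dataset("dfr3_mapping_set", mapping_set)

bridge_dmg.set_parameter("result_name", "bridge_damage_result")
bridge_dmg.set_parameter("hazard_type", "earthquake")
bridge_dmg.set_parameter("hazard_id", "<hazard-id>")
bridge_dmg.set_parameter("num_cpu", 4)

bridge_dmg.run_analysis()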
Example #2
class EpfDamage(BaseAnalysis):
    """Computes electric power facility structural damage for an earthquake, tsunami, tornado, and hurricane hazards.

    Args:
        incore_client (IncoreClient): Service authentication.

    """

    DEFAULT_LIQ_FRAGILITY_KEY = "pgd"
    DEFAULT_FRAGILITY_KEY = "pga"

    def __init__(self, incore_client):
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(EpfDamage, self).__init__(incore_client)

    def run(self):
        """Executes electric power facility damage analysis."""
        epf_set = self.get_input_dataset("epfs").get_inventory_reader()

        # Get Fragility key
        fragility_key = self.get_parameter("fragility_key")
        if fragility_key is None:
            fragility_key = self.DEFAULT_FRAGILITY_KEY
            self.set_parameter("fragility_key", fragility_key)

        # Get hazard input
        hazard_dataset_id = self.get_parameter("hazard_id")

        # Hazard type, note this is here for future use if additional hazards are supported by this analysis
        hazard_type = self.get_parameter("hazard_type")

        # Hazard Uncertainty
        use_hazard_uncertainty = False
        if self.get_parameter("use_hazard_uncertainty") is not None:
            use_hazard_uncertainty = self.get_parameter(
                "use_hazard_uncertainty")

        # Liquefaction
        use_liquefaction = False
        if self.get_parameter("use_liquefaction") is not None:
            use_liquefaction = self.get_parameter("use_liquefaction")
        liq_geology_dataset_id = self.get_parameter(
            "liquefaction_geology_dataset_id")

        user_defined_cpu = 1

        if self.get_parameter("num_cpu") is not None \
                and self.get_parameter("num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(
            self, len(epf_set), user_defined_cpu)

        avg_bulk_input_size = int(len(epf_set) / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(epf_set)
        while count < len(inventory_list):
            inventory_args.append(inventory_list[count:count +
                                                 avg_bulk_input_size])
            count += avg_bulk_input_size

        results = self.epf_damage_concurrent_future(
            self.epf_damage_analysis_bulk_input, num_workers, inventory_args,
            repeat(hazard_type), repeat(hazard_dataset_id),
            repeat(use_hazard_uncertainty), repeat(use_liquefaction),
            repeat(liq_geology_dataset_id))

        self.set_result_csv_data("result",
                                 results,
                                 name=self.get_parameter("result_name"))

        return True

    def epf_damage_concurrent_future(self, function_name, num_workers, *args):
        """Utilizes concurrent.future module.

        Args:
            function_name (function): The function to be parallelized.
            num_workers (int): Maximum number of workers in parallelization.
            *args: All the arguments in order to pass into parameter function_name.

        Returns:
            list: A list of ordered dictionaries with epf damage values and other data/metadata.

        """

        output = []
        with concurrent.futures.ProcessPoolExecutor(
                max_workers=num_workers) as executor:
            for ret in executor.map(function_name, *args):
                output.extend(ret)

        return output

    def epf_damage_analysis_bulk_input(self, epfs, hazard_type,
                                       hazard_dataset_id,
                                       use_hazard_uncertainty,
                                       use_liquefaction,
                                       liq_geology_dataset_id):
        """Run analysis for multiple epfs.

        Args:
            epfs (list): Multiple epfs from input inventory set.
            hazard_type (str): A type of hazard exposure (earthquake, tsunami, tornado, or hurricane).
            hazard_dataset_id (str): An id of the hazard exposure.
            use_hazard_uncertainty (bool):  Hazard uncertainty. True for using uncertainty when computing damage,
                False otherwise.
            use_liquefaction (bool): Liquefaction. True for using liquefaction information to modify the damage,
                False otherwise.
            liq_geology_dataset_id (str): A dataset id of the geology dataset used for liquefaction.

        Returns:
            list: A list of ordered dictionaries with epf damage values and other data/metadata.

        """
        fragility_key = self.get_parameter("fragility_key")

        fragility_set = self.fragilitysvc.match_inventory(
            self.get_input_dataset("dfr3_mapping_set"), epfs, fragility_key)
        epf_results = []

        # Converting list of epfs into a dictionary for ease of reference
        list_epfs = epfs
        epfs = dict()
        for epf in list_epfs:
            epfs[epf["id"]] = epf
        del list_epfs  # Clear as it's not needed anymore

        processed_epf = []
        grouped_epfs = AnalysisUtil.group_by_demand_type(epfs, fragility_set)
        for demand, grouped_epf_items in grouped_epfs.items():
            input_demand_type = demand[0]
            input_demand_units = demand[1]

            # For every group of unique demand and demand unit, call the end-point once
            epf_chunks = list(AnalysisUtil.chunks(grouped_epf_items, 50))
            for epf_chunk in epf_chunks:
                points = []
                for epf_id in epf_chunk:
                    location = GeoUtil.get_location(epfs[epf_id])
                    points.append(str(location.y) + "," + str(location.x))

                if hazard_type == 'earthquake':
                    hazard_vals = self.hazardsvc.get_earthquake_hazard_values(
                        hazard_dataset_id, input_demand_type,
                        input_demand_units, points)
                elif hazard_type == 'tornado':
                    hazard_vals = self.hazardsvc.get_tornado_hazard_values(
                        hazard_dataset_id, input_demand_units, points)
                elif hazard_type == 'hurricane':
                    # TODO: implement hurricane
                    raise ValueError(
                        'Hurricane hazard has not yet been implemented!')

                elif hazard_type == 'tsunami':
                    hazard_vals = self.hazardsvc.get_tsunami_hazard_values(
                        hazard_dataset_id, input_demand_type,
                        input_demand_units, points)
                else:
                    raise ValueError("Unsupported hazard type: " + str(hazard_type))

                # Parse the batch hazard value results and map them back to the facility and fragility.
                # This is a potential pitfall, as we are relying on the order of the returned results.
                i = 0
                for epf_id in epf_chunk:
                    epf_result = collections.OrderedDict()
                    epf = epfs[epf_id]
                    hazard_val = hazard_vals[i]['hazardValue']

                    # Sometimes the geotiffs give large negative values for out of bounds instead of 0
                    if hazard_val <= 0.0:
                        hazard_val = 0.0

                    std_dev = 0.0
                    if use_hazard_uncertainty:
                        raise ValueError("Uncertainty Not Implemented!")

                    selected_fragility_set = fragility_set[epf_id]
                    limit_states = selected_fragility_set.calculate_limit_state(
                        hazard_val, std_dev=std_dev)
                    dmg_interval = AnalysisUtil.calculate_damage_interval(
                        limit_states)

                    epf_result['guid'] = epf['properties']['guid']
                    epf_result.update(limit_states)
                    epf_result.update(dmg_interval)
                    epf_result['demandtype'] = input_demand_type
                    epf_result['demandunits'] = input_demand_units
                    epf_result['hazardtype'] = hazard_type
                    epf_result['hazardval'] = hazard_val

                    epf_results.append(epf_result)
                    processed_epf.append(epf_id)
                    i = i + 1

        # When liquefaction is used, the limit states need to be modified
        if hazard_type == 'earthquake' and use_liquefaction and liq_geology_dataset_id is not None:
            liq_fragility_key = self.get_parameter(
                "liquefaction_fragility_key")
            if liq_fragility_key is None:
                liq_fragility_key = self.DEFAULT_LIQ_FRAGILITY_KEY
            liq_fragility_set = self.fragilitysvc.match_inventory(
                self.get_input_dataset("dfr3_mapping_set"), epfs,
                liq_fragility_key)
            grouped_liq_epfs = AnalysisUtil.group_by_demand_type(
                epfs, liq_fragility_set)

            for liq_demand, grouped_liq_epf_items in grouped_liq_epfs.items():
                liq_input_demand_type = liq_demand[0]
                liq_input_demand_units = liq_demand[1]

                # For every group of unique demand and demand unit, call the end-point once
                liq_epf_chunks = list(
                    AnalysisUtil.chunks(grouped_liq_epf_items, 50))
                for liq_epf_chunk in liq_epf_chunks:
                    points = []
                    for liq_epf_id in liq_epf_chunk:
                        location = GeoUtil.get_location(epfs[liq_epf_id])
                        points.append(str(location.y) + "," + str(location.x))
                    liquefaction_vals = self.hazardsvc.get_liquefaction_values(
                        hazard_dataset_id, liq_geology_dataset_id,
                        liq_input_demand_units, points)

                    # Parse the batch liquefaction results and map them back to the facility and fragility.
                    # This is a potential pitfall, as we are relying on the order of the returned results.
                    i = 0
                    for liq_epf_id in liq_epf_chunk:
                        liq_hazard_val = liquefaction_vals[i][
                            liq_input_demand_type]

                        std_dev = 0.0
                        if use_hazard_uncertainty:
                            raise ValueError("Uncertainty Not Implemented!")

                        liquefaction_prob = liquefaction_vals[i][
                            'liqProbability']

                        selected_liq_fragility = liq_fragility_set[liq_epf_id]
                        pgd_limit_states = selected_liq_fragility.calculate_limit_state(
                            liq_hazard_val, std_dev=std_dev)

                        # match id and add liqhaztype, liqhazval, liqprobability field as well as rewrite limit
                        # states and dmg_interval
                        for epf_result in epf_results:
                            if epf_result['guid'] == epfs[liq_epf_id]['guid']:
                                limit_states = {
                                    "ls-slight": epf_result['ls-slight'],
                                    "ls-moderat": epf_result['ls-moderat'],
                                    "ls-extensi": epf_result['ls-extensi'],
                                    "ls-complet": epf_result['ls-complet']
                                }
                                liq_limit_states = AnalysisUtil.adjust_limit_states_for_pgd(
                                    limit_states, pgd_limit_states)
                                liq_dmg_interval = AnalysisUtil.calculate_damage_interval(
                                    liq_limit_states)
                                epf_result.update(liq_limit_states)
                                epf_result.update(liq_dmg_interval)
                                epf_result[
                                    'liqhaztype'] = liq_input_demand_type
                                epf_result['liqhazval'] = liq_hazard_val
                                epf_result[
                                    'liqprobability'] = liquefaction_prob
                        i = i + 1

        unmapped_limit_states = {
            "ls-slight": 0.0,
            "ls-moderat": 0.0,
            "ls-extensi": 0.0,
            "ls-complet": 0.0
        }
        unmapped_dmg_intervals = AnalysisUtil.calculate_damage_interval(
            unmapped_limit_states)
        for epf_id, epf in epfs.items():
            if epf_id not in processed_epf:
                unmapped_epf_result = collections.OrderedDict()
                unmapped_epf_result['guid'] = epf['properties']['guid']
                unmapped_epf_result.update(unmapped_limit_states)
                unmapped_epf_result.update(unmapped_dmg_intervals)
                unmapped_epf_result["demandtype"] = "None"
                unmapped_epf_result['demandunits'] = "None"
                unmapped_epf_result["hazardtype"] = "None"
                unmapped_epf_result['hazardval'] = 0.0
                unmapped_epf_result['liqhaztype'] = "NA"
                unmapped_epf_result['liqhazval'] = "NA"
                unmapped_epf_result['liqprobability'] = "NA"
                epf_results.append(unmapped_epf_result)

        return epf_results

    def get_spec(self):
        """Get specifications of the epf damage analysis.

        Returns:
            obj: A JSON object of specifications of the epf damage analysis.

        """
        return {
            'name': 'epf-damage',
            'description': 'Electric Power Facility damage analysis.',
            'input_parameters': [
                {
                    'id': 'result_name',
                    'required': True,
                    'description': 'A name of the resulting dataset',
                    'type': str
                },
                {
                    'id': 'hazard_type',
                    'required': True,
                    'description': 'Hazard type (e.g. earthquake).',
                    'type': str
                },
                {
                    'id': 'hazard_id',
                    'required': True,
                    'description': 'Hazard ID which defines the particular hazard (e.g. New Madrid earthquake '
                                   'using Atkinson Boore 1995).',
                    'type': str
                },
                {
                    'id': 'fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in mapping dataset',
                    'type': str
                },
                {
                    'id': 'use_liquefaction',
                    'required': False,
                    'description': 'Use ground liquefaction to modify the damage interval.',
                    'type': bool
                },
                {
                    'id': 'liquefaction_geology_dataset_id',
                    'required': False,
                    'description': 'Liquefaction geology/susceptibility dataset id. '
                                   'If not provided, liquefaction will be ignored',
                    'type': str
                },
                {
                    'id': 'use_hazard_uncertainty',
                    'required': False,
                    'description': 'Use hazard uncertainty',
                    'type': bool
                },
                {
                    'id': 'num_cpu',
                    'required': False,
                    'description': 'If using parallel execution, the number of cpus to request.',
                    'type': int
                },
            ],
            'input_datasets': [
                {
                    'id': 'epfs',
                    'required': True,
                    'description': 'Electric Power Facility Inventory',
                    'type': ['incore:epf', 'ergo:epf'],
                },
                {
                    'id': 'dfr3_mapping_set',
                    'required': True,
                    'description': 'DFR3 Mapping Set Object',
                    'type': ['incore:dfr3MappingSet'],
                }
            ],
            'output_datasets': [
                {
                    'id': 'result',
                    'parent_type': 'epfs',
                    'type': 'incore:epfDamage'
                }
            ]
        }
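
The same pattern applies to EpfDamage; a sketch under the same assumptions, here additionally enabling the optional liquefaction adjustment, again with placeholder IDs:

from pyincore import IncoreClient, FragilityService, MappingSet
from pyincore.analyses.epfdamage import EpfDamage

client = IncoreClient()
epf_dmg = EpfDamage(client)

epf_dmg.load_remote_input_dataset("epfs", "<epf-inventory-id>")
mapping_set = MappingSet(FragilityService(client).get_mapping("<dfr3-mapping-id>"))
epf_dmg.set_input_dataset("dfr3_mapping_set", mapping_set)

epf_dmg.set_parameter("result_name", "epf_damage_result")
epf_dmg.set_parameter("hazard_type", "earthquake")
epf_dmg.set_parameter("hazard_id", "<hazard-id>")
# Optional: the liquefaction adjustment requires a geology dataset
epf_dmg.set_parameter("use_liquefaction", True)
epf_dmg.set_parameter("liquefaction_geology_dataset_id", "<geology-dataset-id>")
epf_dmg.set_parameter("num_cpu", 4)

epf_dmg.run_analysis()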
Example #3
class BuildingDamage(BaseAnalysis):
    """Building Damage Analysis calculates the probability of building damage based on
    different hazard type such as earthquake, tsunami, and tornado.

    Args:
        incore_client (IncoreClient): Service authentication.

    """
    def __init__(self, incore_client):
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(BuildingDamage, self).__init__(incore_client)

    def run(self):
        """Executes building damage analysis."""
        # Building dataset
        bldg_set = self.get_input_dataset("buildings").get_inventory_reader()

        # Get hazard input
        hazard_dataset_id = self.get_parameter("hazard_id")

        # Hazard type of the exposure
        hazard_type = self.get_parameter("hazard_type")

        # Get Fragility key
        fragility_key = self.get_parameter("fragility_key")
        if fragility_key is None:
            fragility_key = BuildingUtil.DEFAULT_TSUNAMI_MMAX_FRAGILITY_KEY if hazard_type == 'tsunami' else \
                BuildingUtil.DEFAULT_FRAGILITY_KEY
            self.set_parameter("fragility_key", fragility_key)

        user_defined_cpu = 1

        if self.get_parameter("num_cpu") is not None \
                and self.get_parameter("num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(
            self, len(bldg_set), user_defined_cpu)

        avg_bulk_input_size = int(len(bldg_set) / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(bldg_set)
        while count < len(inventory_list):
            inventory_args.append(inventory_list[count:count +
                                                 avg_bulk_input_size])
            count += avg_bulk_input_size

        results = self.building_damage_concurrent_future(
            self.building_damage_analysis_bulk_input, num_workers,
            inventory_args, repeat(hazard_type), repeat(hazard_dataset_id))

        self.set_result_csv_data("result",
                                 results,
                                 name=self.get_parameter("result_name"))

        return True

    def building_damage_concurrent_future(self, function_name, parallelism,
                                          *args):
        """Utilizes concurrent.future module.

        Args:
            function_name (function): The function to be parallelized.
            parallelism (int): Number of workers in parallelization.
            *args: All the arguments in order to pass into parameter function_name.

        Returns:
            list: A list of ordered dictionaries with building damage values and other data/metadata.

        """
        output = []
        with concurrent.futures.ProcessPoolExecutor(
                max_workers=parallelism) as executor:
            for ret in executor.map(function_name, *args):
                output.extend(ret)

        return output

    def building_damage_analysis_bulk_input(self, buildings, hazard_type,
                                            hazard_dataset_id):
        """Run analysis for multiple buildings.

        Args:
            buildings (list): Multiple buildings from input inventory set.
            hazard_type (str): Hazard type, either earthquake, tornado, or tsunami.
            hazard_dataset_id (str): An id of the hazard exposure.

        Returns:
            list: A list of ordered dictionaries with building damage values and other data/metadata.

        """
        fragility_key = self.get_parameter("fragility_key")

        fragility_sets = self.fragilitysvc.match_inventory(
            self.get_input_dataset("dfr3_mapping_set"), buildings,
            fragility_key)

        bldg_results = []
        list_buildings = buildings

        buildings = dict()
        # Converting list of buildings into a dictionary for ease of reference
        for b in list_buildings:
            buildings[b["id"]] = b

        list_buildings = None  # Clear as it's not needed anymore

        grouped_buildings = AnalysisUtil.group_by_demand_type(buildings,
                                                              fragility_sets,
                                                              hazard_type,
                                                              is_building=True)

        for demand, grouped_bldgs in grouped_buildings.items():

            input_demand_type = demand[0]
            input_demand_units = demand[1]

            # For every group of unique demand and demand unit, call the end-point once
            bldg_chunks = list(AnalysisUtil.chunks(
                grouped_bldgs, 50))  # TODO: Move to globals?
            for bldgs in bldg_chunks:
                points = []
                for bldg_id in bldgs:
                    location = GeoUtil.get_location(buildings[bldg_id])
                    points.append(str(location.y) + "," + str(location.x))

                if hazard_type == 'earthquake':
                    hazard_vals = self.hazardsvc.get_earthquake_hazard_values(
                        hazard_dataset_id, input_demand_type,
                        input_demand_units, points)
                elif hazard_type == 'tornado':
                    hazard_vals = self.hazardsvc.get_tornado_hazard_values(
                        hazard_dataset_id, input_demand_units, points)
                elif hazard_type == 'tsunami':
                    hazard_vals = self.hazardsvc.get_tsunami_hazard_values(
                        hazard_dataset_id, input_demand_type,
                        input_demand_units, points)
                elif hazard_type == 'hurricane':
                    # TODO: implement hurricane
                    raise ValueError(
                        'Hurricane hazard has not yet been implemented!')
                else:
                    raise ValueError(
                        "Unsupported hazard type: " + str(hazard_type))

                # Parse the batch hazard value results and map them back to the building and fragility.
                # This is a potential pitfall, as we are relying on the order of the returned results
                i = 0
                for bldg_id in bldgs:
                    bldg_result = collections.OrderedDict()
                    building = buildings[bldg_id]
                    hazard_val = hazard_vals[i]['hazardValue']
                    output_demand_type = hazard_vals[i]['demand']
                    if hazard_type == 'earthquake':
                        period = float(hazard_vals[i]['period'])
                        if period > 0:
                            output_demand_type = str(
                                hazard_vals[i]
                                ['period']) + " " + output_demand_type

                    num_stories = building['properties']['no_stories']
                    selected_fragility_set = fragility_sets[bldg_id]
                    building_period = selected_fragility_set.fragility_curves[
                        0].get_building_period(num_stories)
                    dmg_probability = selected_fragility_set.calculate_limit_state(
                        hazard_val, building_period)
                    dmg_interval = AnalysisUtil.calculate_damage_interval(
                        dmg_probability)

                    bldg_result['guid'] = building['properties']['guid']
                    bldg_result.update(dmg_probability)
                    bldg_result.update(dmg_interval)
                    bldg_result['demandtype'] = output_demand_type
                    bldg_result['demandunits'] = input_demand_units
                    bldg_result['hazardval'] = hazard_val

                    bldg_results.append(bldg_result)
                    del buildings[bldg_id]
                    i = i + 1

        unmapped_hazard_val = 0.0
        unmapped_output_demand_type = "None"
        unmapped_output_demand_unit = "None"
        for unmapped_bldg_id, unmapped_bldg in buildings.items():
            unmapped_bldg_result = collections.OrderedDict()
            unmapped_bldg_result['guid'] = unmapped_bldg['properties']['guid']
            unmapped_bldg_result['demandtype'] = unmapped_output_demand_type
            unmapped_bldg_result['demandunits'] = unmapped_output_demand_unit
            unmapped_bldg_result['hazardval'] = unmapped_hazard_val
            bldg_results.append(unmapped_bldg_result)

        return bldg_results

    def get_spec(self):
        """Get specifications of the building damage analysis.

        Returns:
            obj: A JSON object of specifications of the building damage analysis.

        """
        return {
            'name': 'building-damage',
            'description': 'building damage analysis',
            'input_parameters': [
                {
                    'id': 'result_name',
                    'required': True,
                    'description': 'result dataset name',
                    'type': str
                },
                {
                    'id': 'hazard_type',
                    'required': True,
                    'description': 'Hazard Type (e.g. earthquake)',
                    'type': str
                },
                {
                    'id': 'hazard_id',
                    'required': True,
                    'description': 'Hazard ID',
                    'type': str
                },
                {
                    'id': 'fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in mapping dataset',
                    'type': str
                },
                {
                    'id': 'use_liquefaction',
                    'required': False,
                    'description': 'Use liquefaction',
                    'type': bool
                },
                {
                    'id': 'use_hazard_uncertainty',
                    'required': False,
                    'description': 'Use hazard uncertainty',
                    'type': bool
                },
                {
                    'id': 'num_cpu',
                    'required': False,
                    'description': 'If using parallel execution, the number of cpus to request',
                    'type': int
                },
            ],
            'input_datasets': [
                {
                    'id': 'buildings',
                    'required': True,
                    'description': 'Building Inventory',
                    'type': ['ergo:buildingInventoryVer4',
                             'ergo:buildingInventoryVer5',
                             'ergo:buildingInventoryVer6'],
                },
                {
                    'id': 'dfr3_mapping_set',
                    'required': True,
                    'description': 'DFR3 Mapping Set Object',
                    'type': ['incore:dfr3MappingSet'],
                }
            ],
            'output_datasets': [
                {
                    'id': 'result',
                    'parent_type': 'buildings',
                    'description': 'CSV file of building structural damage',
                    'type': 'ergo:buildingDamageVer4'
                }
            ]
        }
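
A sketch of running BuildingDamage against a tornado hazard, assuming the same pyincore client workflow; all IDs are placeholders:

from pyincore import IncoreClient, FragilityService, MappingSet
from pyincore.analyses.buildingdamage import BuildingDamage

client = IncoreClient()
bldg_dmg = BuildingDamage(client)

bldg_dmg.load_remote_input_dataset("buildings", "<building-inventory-id>")
mapping_set = MappingSet(FragilityService(client).get_mapping("<dfr3-mapping-id>"))
bldg_dmg.set_input_dataset("dfr3_mapping_set", mapping_set)

bldg_dmg.set_parameter("result_name", "building_damage_result")
bldg_dmg.set_parameter("hazard_type", "tornado")
bldg_dmg.set_parameter("hazard_id", "<tornado-hazard-id>")
bldg_dmg.set_parameter("num_cpu", 4)

bldg_dmg.run_analysis()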
Example #4
class WaterFacilityDamage(BaseAnalysis):
    """Computes water facility damage for an earthquake tsunami, tornado, or hurricane exposure.

    """

    DEFAULT_EQ_FRAGILITY_KEY = "pga"
    DEFAULT_TSU_FRAGILITY_KEY = "Non-Retrofit inundationDepth Fragility ID Code"
    DEFAULT_LIQ_FRAGILITY_KEY = "pgd"

    def __init__(self, incore_client):
        # Create Hazard and Fragility service
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(WaterFacilityDamage, self).__init__(incore_client)

    def get_spec(self):
        return {
            'name': 'water-facility-damage',
            'description': 'water facility damage analysis',
            'input_parameters': [
                {
                    'id': 'result_name',
                    'required': False,
                    'description': 'result dataset name',
                    'type': str
                },
                {
                    'id': 'hazard_type',
                    'required': True,
                    'description': 'Hazard Type (e.g. earthquake)',
                    'type': str
                },
                {
                    'id': 'hazard_id',
                    'required': True,
                    'description': 'Hazard ID',
                    'type': str
                },
                {
                    'id': 'fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in mapping dataset',
                    'type': str
                },
                {
                    'id': 'use_liquefaction',
                    'required': False,
                    'description': 'Use liquefaction',
                    'type': bool
                },

                {
                    'id': 'liquefaction_geology_dataset_id',
                    'required': False,
                    'description': 'Liquefaction geology/susceptibility dataset id. '
                                   'If not provided, liquefaction will be ignored',
                    'type': str
                },
                {
                    'id': 'liquefaction_fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in liquefaction mapping dataset',
                    'type': str
                },
                {
                    'id': 'use_hazard_uncertainty',
                    'required': False,
                    'description': 'Use hazard uncertainty',
                    'type': bool
                },
                {
                    'id': 'num_cpu',
                    'required': False,
                    'description': 'If using parallel execution, the number of cpus to request',
                    'type': int
                },
            ],
            'input_datasets': [
                {
                    'id': 'water_facilities',
                    'required': True,
                    'description': 'Water Facility Inventory',
                    'type': ['ergo:waterFacilityTopo'],
                },
                {
                    'id': 'dfr3_mapping_set',
                    'required': True,
                    'description': 'DFR3 Mapping Set Object',
                    'type': ['incore:dfr3MappingSet'],
                }
            ],
            'output_datasets': [
                {
                    'id': 'result',
                    'parent_type': 'water_facilities',
                    'description': 'A csv file with limit state probabilities and damage states '
                                   'for each water facility',
                    'type': 'ergo:waterFacilityDamageVer4'
                }
            ]
        }

    def run(self):
        """Performs Water facility damage analysis by using the parameters from the spec
        and creates an output dataset in csv format

        Returns:
            bool: True if successful, False otherwise
        """
        # Facility dataset
        inventory_set = self.get_input_dataset(
            "water_facilities").get_inventory_reader()

        # Get hazard input
        hazard_dataset_id = self.get_parameter("hazard_id")

        # Hazard type of the exposure
        hazard_type = self.get_parameter("hazard_type")

        user_defined_cpu = 1

        if self.get_parameter("num_cpu") is not None \
                and self.get_parameter("num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(
            self, len(inventory_set), user_defined_cpu)

        avg_bulk_input_size = int(len(inventory_set) / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(inventory_set)
        while count < len(inventory_list):
            inventory_args.append(
                inventory_list[count:count + avg_bulk_input_size])
            count += avg_bulk_input_size

        results = self.waterfacility_damage_concurrent_execution(
            self.waterfacilityset_damage_analysis, num_workers,
            inventory_args, repeat(hazard_type), repeat(hazard_dataset_id))

        self.set_result_csv_data("result", results,
                                 name=self.get_parameter("result_name"))

        return True

    def waterfacility_damage_concurrent_execution(self, function_name,
                                                  parallel_processes,
                                                  *args):
        """Utilizes concurrent.future module.

            Args:
                function_name (function): The function to be parallelized.
                parallel_processes (int): Number of workers in parallelization.
                *args: All the arguments in order to pass into parameter function_name.

            Returns:
                list: A list of ordered dictionaries with damage results and other data/metadata.

        """
        output = []
        with concurrent.futures.ProcessPoolExecutor(
                max_workers=parallel_processes) as executor:
            for ret in executor.map(function_name, *args):
                output.extend(ret)

        return output

    def waterfacilityset_damage_analysis(self, facilities, hazard_type,
                                         hazard_dataset_id):
        """Gets applicable fragilities and calculates damage

        Args:
            facilities (list): Multiple water facilities from input inventory set.
            hazard_type (str): A hazard type of the hazard exposure (earthquake, tsunami, tornado, or hurricane).
            hazard_dataset_id (str): An id of the hazard exposure.

        Returns:
             list: A list of ordered dictionaries with water facility damage values and metadata.
        """
        result = []
        liq_fragility = None
        use_liquefaction = self.get_parameter("use_liquefaction")
        liq_geology_dataset_id = self.get_parameter(
            "liquefaction_geology_dataset_id")
        uncertainty = self.get_parameter("use_hazard_uncertainty")
        fragility_key = self.get_parameter("fragility_key")

        if hazard_type == 'earthquake':
            if fragility_key is None:
                fragility_key = self.DEFAULT_EQ_FRAGILITY_KEY

            pga_fragility_set = self.fragilitysvc.match_inventory(self.get_input_dataset("dfr3_mapping_set"),
                                                                  facilities, fragility_key)

            liq_fragility_set = []
            if use_liquefaction and liq_geology_dataset_id is not None:
                liq_fragility_key = self.get_parameter(
                    "liquefaction_fragility_key")
                if liq_fragility_key is None:
                    liq_fragility_key = self.DEFAULT_LIQ_FRAGILITY_KEY
                liq_fragility_set = self.fragilitysvc.match_inventory(self.get_input_dataset(
                    "dfr3_mapping_set"), facilities, liq_fragility_key)

            for facility in facilities:
                fragility = pga_fragility_set[facility["id"]]
                if facility["id"] in liq_fragility_set:
                    liq_fragility = liq_fragility_set[facility["id"]]

                result.append(
                    self.waterfacility_damage_analysis(facility, fragility,
                                                       liq_fragility,
                                                       hazard_type,
                                                       hazard_dataset_id,
                                                       liq_geology_dataset_id,
                                                       uncertainty))

        elif hazard_type == 'tsunami':
            if fragility_key is None:
                fragility_key = self.DEFAULT_TSU_FRAGILITY_KEY

            inundation_fragility_set = self.fragilitysvc.match_inventory(
                self.get_input_dataset("dfr3_mapping_set"), facilities, fragility_key)

            for facility in facilities:
                fragility = inundation_fragility_set[facility["id"]]
                result.append(
                    self.waterfacility_damage_analysis(facility, fragility, [],
                                                       hazard_type,
                                                       hazard_dataset_id, "",
                                                       False))
        else:
            raise ValueError(
                "Hazard types other than earthquake and tsunami are not currently supported.")

        return result

    def waterfacility_damage_analysis(self, facility, fragility, liq_fragility,
                                      hazard_type, hazard_dataset_id,
                                      liq_geology_dataset_id, uncertainty):
        """Computes damage analysis for a single facility

        Args:
            facility (obj): A JSON mapping of a facility based on mapping attributes
            fragility (obj): A JSON description of fragility mapped to the building.
            liq_fragility (obj): A JSON description of liquefaction fragility mapped to the building.
            hazard_type (str): A string that indicates the hazard type
            hazard_dataset_id (str): Hazard id from the hazard service
            liq_geology_dataset_id (str): Geology dataset id from data service to use for liquefaction calculation, if
                applicable
            uncertainty (bool): Whether to use hazard standard deviation values for uncertainty

        Returns:
            OrderedDict: A dictionary with water facility damage values and other data/metadata.
        """
        std_dev = 0
        if uncertainty:
            # Placeholder: a random standard deviation stands in for
            # service-provided hazard uncertainty values
            std_dev = random.random()

        hazard_demand_type = fragility.demand_type
        demand_units = fragility.demand_units
        liq_hazard_type = ""
        liq_hazard_val = 0.0
        liquefaction_prob = 0.0
        location = GeoUtil.get_location(facility)

        point = str(location.y) + "," + str(location.x)

        if hazard_type == "earthquake":
            hazard_val_set = self.hazardsvc.get_earthquake_hazard_values(
                hazard_dataset_id, hazard_demand_type,
                demand_units, [point])
        elif hazard_type == "tsunami":
            hazard_val_set = self.hazardsvc.get_tsunami_hazard_values(
                hazard_dataset_id, hazard_demand_type, demand_units, [point])
        else:
            raise ValueError(
                "Hazard types other than earthquake and tsunami are not currently supported.")
        hazard_val = hazard_val_set[0]['hazardValue']
        if hazard_val < 0:
            hazard_val = 0

        limit_states = fragility.calculate_limit_state(hazard_val, std_dev)

        if liq_fragility is not None and liq_geology_dataset_id:
            liq_hazard_type = liq_fragility.demand_type
            pgd_demand_units = liq_fragility.demand_units
            point = str(location.y) + "," + str(location.x)

            liquefaction = self.hazardsvc.get_liquefaction_values(
                hazard_dataset_id, liq_geology_dataset_id,
                pgd_demand_units, [point])
            liq_hazard_val = liquefaction[0][liq_hazard_type]
            liquefaction_prob = liquefaction[0]['liqProbability']
            pgd_limit_states = liq_fragility.calculate_limit_state(liq_hazard_val, std_dev)

            limit_states = AnalysisUtil.adjust_limit_states_for_pgd(
                limit_states, pgd_limit_states)

        dmg_intervals = AnalysisUtil.calculate_damage_interval(limit_states)

        result = {**limit_states, **dmg_intervals}  # Needs Python 3.5+
        metadata = collections.OrderedDict()
        metadata['guid'] = facility['properties']['guid']
        metadata['hazardtype'] = hazard_type
        metadata['demandtype'] = hazard_demand_type
        metadata['hazardval'] = hazard_val
        metadata['liqhaztype'] = liq_hazard_type
        metadata['liqhazval'] = liq_hazard_val
        metadata['liqprobability'] = liquefaction_prob

        result = {**metadata, **result}
        return result
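
WaterFacilityDamage follows the same pattern; this sketch assumes an earthquake hazard with the optional liquefaction geology dataset, all IDs being placeholders:

from pyincore import IncoreClient, FragilityService, MappingSet
from pyincore.analyses.waterfacilitydamage import WaterFacilityDamage

client = IncoreClient()
wf_dmg = WaterFacilityDamage(client)

wf_dmg.load_remote_input_dataset("water_facilities", "<facility-inventory-id>")
mapping_set = MappingSet(FragilityService(client).get_mapping("<dfr3-mapping-id>"))
wf_dmg.set_input_dataset("dfr3_mapping_set", mapping_set)

wf_dmg.set_parameter("result_name", "wf_damage_result")
wf_dmg.set_parameter("hazard_type", "earthquake")
wf_dmg.set_parameter("hazard_id", "<hazard-id>")
wf_dmg.set_parameter("use_liquefaction", True)
wf_dmg.set_parameter("liquefaction_geology_dataset_id", "<geology-dataset-id>")
wf_dmg.set_parameter("num_cpu", 4)

wf_dmg.run_analysis()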
Example #5
class NonStructBuildingDamage(BaseAnalysis):
    """Computes non-structural structural building damage for an earthquake hazard.

    Args:
        incore_client (IncoreClient): Service authentication.

    """
    def __init__(self, incore_client):
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(NonStructBuildingDamage, self).__init__(incore_client)

    def run(self):
        """Executes building damage analysis."""
        # Building dataset
        building_set = self.get_input_dataset("buildings").get_inventory_reader()

        # set Default Fragility key
        fragility_key_as = self.get_parameter("fragility_key_as")
        if fragility_key_as is None:
            self.set_parameter("fragility_key_as", NonStructBuildingUtil.DEFAULT_FRAGILITY_KEY_AS)

        fragility_key_ds = self.get_parameter("fragility_key_ds")
        if fragility_key_ds is None:
            self.set_parameter("fragility_key_ds", NonStructBuildingUtil.DEFAULT_FRAGILITY_KEY_DS)

        # Set Default Hazard Uncertainty
        use_hazard_uncertainty = self.get_parameter("use_hazard_uncertainty")
        if use_hazard_uncertainty is None:
            self.set_parameter("use_hazard_uncertainty", False)

        # Set Default Liquefaction
        use_liquefaction = self.get_parameter("use_liquefaction")
        if use_liquefaction is None:
            self.set_parameter("use_liquefaction", False)

        user_defined_cpu = 1

        if self.get_parameter("num_cpu") is not None and self.get_parameter("num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(self, len(building_set), user_defined_cpu)

        avg_bulk_input_size = int(len(building_set) / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(building_set)

        while count < len(inventory_list):
            inventory_args.append(inventory_list[count:count + avg_bulk_input_size])
            count += avg_bulk_input_size

        results = self.building_damage_concurrent_future(self.building_damage_analysis_bulk_input,
                                                         num_workers,
                                                         inventory_args)

        self.set_result_csv_data("result", results, name=self.get_parameter("result_name"))

        return True

    def building_damage_concurrent_future(self, function_name, num_workers, *args):
        """Utilizes concurrent.future module.

        Args:
            function_name (function): The function to be parallelized.
            num_workers (int): Maximum number workers in parallelization.
            *args: All the arguments in order to pass into parameter function_name.

        Returns:
            list: A list of ordered dictionaries with building damage values and other data/metadata.

        """
        output = []
        with concurrent.futures.ProcessPoolExecutor(max_workers=num_workers) as executor:
            for ret in executor.map(function_name, *args):
                output.extend(ret)

        return output

    def building_damage_analysis_bulk_input(self, buildings):
        """Run analysis for multiple buildings.

        Args:
            buildings (list): Multiple buildings from input inventory set.

        Returns:
            list: A list of ordered dictionaries with building damage values and other data/metadata.

        """
        result = []
        fragility_sets_as = self.fragilitysvc.match_inventory(self.get_input_dataset("dfr3_mapping_set"), buildings,
                                                              self.get_parameter("fragility_key_as"))
        fragility_sets_ds = self.fragilitysvc.match_inventory(self.get_input_dataset("dfr3_mapping_set"), buildings,
                                                              self.get_parameter("fragility_key_ds"))

        for building in buildings:
            fragility_set_as = None
            fragility_set_ds = None

            if building["id"] in fragility_sets_as \
                    and building["id"] in fragility_sets_ds:
                fragility_set_as = fragility_sets_as[building["id"]]
                fragility_set_ds = fragility_sets_ds[building["id"]]

            result.append(self.building_damage_analysis(building,
                                                        fragility_set_as,
                                                        fragility_set_ds))

        return result

    def building_damage_analysis(self, building, fragility_set_as, fragility_set_ds):
        """Calculates bridge damage results for a single building.

        Args:
            building (obj): A JSON mapping of a geometric object from the inventory: the current building.
            fragility_set_as (obj): A JSON description of acceleration-sensitive (AS) fragility
                assigned to the building.
            fragility_set_ds (obj): A JSON description of drift-sensitive (DS) fragility
                assigned to the building.

        Returns:
            OrderedDict: A dictionary with building damage values and other data/metadata.

        """
        building_results = collections.OrderedDict()
        dmg_probability_as = collections.OrderedDict()
        dmg_probability_ds = collections.OrderedDict()
        hazard_demand_type_as = None
        hazard_demand_type_ds = None
        hazard_val_as = 0.0
        hazard_val_ds = 0.0

        # read static parameters from object self
        hazard_dataset_id = self.get_parameter("hazard_id")
        liq_geology_dataset_id = self.get_parameter("liq_geology_dataset_id")
        use_liquefaction = self.get_parameter("use_liquefaction")
        use_hazard_uncertainty = self.get_parameter("use_hazard_uncertainty")

        # Acceleration-Sensitive Fragility ID Code
        if fragility_set_as is not None:
            hazard_demand_type_as = AnalysisUtil.get_hazard_demand_type(building, fragility_set_as, 'earthquake')
            demand_units_as = fragility_set_as.demand_units
            location = GeoUtil.get_location(building)

            point = str(location.y) + "," + str(location.x)

            hazard_val_as = self.hazardsvc.get_earthquake_hazard_values(
                hazard_dataset_id, hazard_demand_type_as,
                demand_units_as,
                points=[point])[0]['hazardValue']

            dmg_probability_as = fragility_set_as.calculate_limit_state(hazard_val_as)
            # adjust dmg probability for liquefaction
            if use_liquefaction:
                if liq_geology_dataset_id is not None:
                    liquefaction_dmg = self.hazardsvc.get_liquefaction_values(
                        hazard_dataset_id, liq_geology_dataset_id,
                        'in', points=[point])[0]['groundFailureProb']
                else:
                    raise ValueError('Hazard does not support liquefaction! '
                                     'Check to make sure you defined the liquefaction '
                                     'portion of your scenario earthquake.')
                dmg_probability_as = NonStructBuildingUtil.adjust_damage_for_liquefaction(
                    dmg_probability_as, liquefaction_dmg)

            # TODO this value needs to come from the hazard service
            # adjust dmg probability for hazard uncertainty
            if use_hazard_uncertainty:
                raise ValueError('Uncertainty has not yet been implemented!')
        else:
            dmg_probability_as['immocc'] = 0.0
            dmg_probability_as['lifesfty'] = 0.0
            dmg_probability_as['collprev'] = 0.0

        dmg_interval_as = AnalysisUtil.calculate_damage_interval(dmg_probability_as)

        # Drift-Sensitive Fragility ID Code
        if fragility_set_ds is not None:
            hazard_demand_type_ds = AnalysisUtil.get_hazard_demand_type(building, fragility_set_ds, 'earthquake')
            demand_units_ds = fragility_set_ds.demand_units
            location = GeoUtil.get_location(building)

            point = str(location.y) + "," + str(location.x)

            hazard_val_ds = self.hazardsvc.get_earthquake_hazard_values(
                hazard_dataset_id, hazard_demand_type_ds,
                demand_units_ds, points=[point])[0]['hazardValue']

            dmg_probability_ds = fragility_set_ds.calculate_limit_state(hazard_val_ds)

            # adjust dmg probability for liquefaction
            if use_liquefaction:
                if liq_geology_dataset_id is not None:
                    liquefaction_dmg = self.hazardsvc.get_liquefaction_values(
                        hazard_dataset_id, liq_geology_dataset_id,
                        'in', points=[point])[0]['groundFailureProb']
                else:
                    raise ValueError('Hazard does not support liquefaction! '
                                     'Check to make sure you defined the liquefaction '
                                     'portion of your scenario earthquake.')
                dmg_probability_ds = NonStructBuildingUtil.adjust_damage_for_liquefaction(
                    dmg_probability_ds, liquefaction_dmg)

            # TODO this value needs to come from the hazard service
            # adjust dmg probability for hazard uncertainty
            if use_hazard_uncertainty:
                raise ValueError('Uncertainty has not yet been implemented!')
        else:
            dmg_probability_ds['immocc'] = 0.0
            dmg_probability_ds['lifesfty'] = 0.0
            dmg_probability_ds['collprev'] = 0.0

        dmg_interval_ds = AnalysisUtil.calculate_damage_interval(dmg_probability_ds)

        # put results in dictionary
        building_results['guid'] = building['properties']['guid']
        building_results['immocc_as'] = dmg_probability_as['immocc']
        building_results['lifsfty_as'] = dmg_probability_as['lifesfty']
        building_results['collpre_as'] = dmg_probability_as['collprev']
        building_results['insig_as'] = dmg_interval_as['insignific']
        building_results['mod_as'] = dmg_interval_as['moderate']
        building_results['heavy_as'] = dmg_interval_as['heavy']
        building_results['comp_as'] = dmg_interval_as['complete']
        building_results['immocc_ds'] = dmg_probability_ds['immocc']
        building_results['lifsfty_ds'] = dmg_probability_ds['lifesfty']
        building_results['collpre_ds'] = dmg_probability_ds['collprev']
        building_results['insig_ds'] = dmg_interval_ds['insignific']
        building_results['mod_ds'] = dmg_interval_ds['moderate']
        building_results['heavy_ds'] = dmg_interval_ds['heavy']
        building_results['comp_ds'] = dmg_interval_ds['complete']
        building_results["hzrdtyp_as"] = hazard_demand_type_as
        building_results["hzrdval_as"] = hazard_val_as
        building_results["hzrdtyp_ds"] = hazard_demand_type_ds
        building_results["hzrdval_ds"] = hazard_val_ds

        return building_results

    def get_spec(self):
        """Get specifications of the building damage analysis.

        Returns:
            obj: A JSON object of specifications of the building damage analysis.

        """
        return {
            'name': 'building-damage',
            'description': 'building damage analysis',
            'input_parameters': [
                {
                    'id': 'result_name',
                    'required': True,
                    'description': 'result dataset name',
                    'type': str
                },
                {
                    'id': 'hazard_type',
                    'required': True,
                    'description': 'Hazard Type (e.g. earthquake)',
                    'type': str
                },
                {
                    'id': 'hazard_id',
                    'required': True,
                    'description': 'Hazard ID',
                    'type': str
                },
                {
                    'id': 'fragility_key_as',
                    'required': False,
                    'description': 'AS Fragility key to use in mapping dataset',
                    'type': str
                },
                {
                    'id': 'fragility_key_ds',
                    'required': False,
                    'description': 'DS Fragility key to use in mapping dataset',
                    'type': str
                },
                {
                    'id': 'use_liquefaction',
                    'required': False,
                    'description': 'Use liquefaction',
                    'type': bool
                },
                {
                    'id': 'liq_geology_dataset_id',
                    'required': False,
                    'description': 'liquefaction geology dataset id; '
                                   'if using liquefaction, you have to provide this id',
                    'type': str
                },
                {
                    'id': 'use_hazard_uncertainty',
                    'required': False,
                    'description': 'Use hazard uncertainty',
                    'type': bool
                },
                {
                    'id': 'num_cpu',
                    'required': False,
                    'description': 'If using parallel execution, the number of cpus to request',
                    'type': int
                },
            ],
            'input_datasets': [
                {
                    'id': 'buildings',
                    'required': True,
                    'description': 'building Inventory',
                    'type': ['ergo:buildingInventoryVer4'],
                },
                {
                    'id': 'dfr3_mapping_set',
                    'required': True,
                    'description': 'DFR3 Mapping Set Object',
                    'type': ['incore:dfr3MappingSet'],
                }
            ],
            'output_datasets': [
                {
                    'id': 'result',
                    'parent_type': 'buildings',
                    'description': 'CSV file of building non-structural damage',
                    'type': 'ergo:nsBuildingInventoryDamage'
                }
            ]
        }
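Since the class above is driven entirely through parameters and input datasets, a typical invocation looks like the sketch below. This is a minimal, hypothetical usage example: the dataset, mapping, and hazard IDs are placeholders, and it assumes the standard pyincore BaseAnalysis helpers (load_remote_input_dataset, set_input_dataset, run_analysis) and the MappingSet/FragilityService classes behave as in other pyincore examples.

from pyincore import IncoreClient, FragilityService, MappingSet

client = IncoreClient()

# Placeholder IDs -- substitute real dataset/mapping/hazard IDs from the INCORE services.
building_dataset_id = "5a284f0bc7d30d13bc081a28"
mapping_id = "5b47b350337d4a3629076f2c"
hazard_id = "5b902cb273c3371e1236b36b"

non_structural_dmg = NonStructBuildingDamage(client)
non_structural_dmg.load_remote_input_dataset("buildings", building_dataset_id)

# Wire up the DFR3 fragility mapping required by get_spec().
fragility_service = FragilityService(client)
mapping_set = MappingSet(fragility_service.get_mapping(mapping_id))
non_structural_dmg.set_input_dataset("dfr3_mapping_set", mapping_set)

non_structural_dmg.set_parameter("result_name", "non_structural_building_dmg_result")
non_structural_dmg.set_parameter("hazard_type", "earthquake")
non_structural_dmg.set_parameter("hazard_id", hazard_id)
non_structural_dmg.set_parameter("num_cpu", 4)

# run_analysis() validates parameters/datasets against get_spec() and then calls run().
non_structural_dmg.run_analysis()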
Example No. 6
class PipelineDamageRepairRate(BaseAnalysis):
    """Computes pipeline damage for a hazard.

    Args:
        incore_client (IncoreClient): Service authentication.

    """
    def __init__(self, incore_client):
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(PipelineDamageRepairRate, self).__init__(incore_client)

    def run(self):
        """Execute pipeline damage analysis """
        # Pipeline dataset
        pipeline_dataset = self.get_input_dataset(
            "pipeline").get_inventory_reader()

        # Get hazard type
        hazard_type = self.get_parameter("hazard_type")

        # Get hazard input
        hazard_dataset_id = self.get_parameter("hazard_id")
        user_defined_cpu = 1

        if self.get_parameter("num_cpu") is not None and self.get_parameter(
                "num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        dataset_size = len(pipeline_dataset)
        num_workers = AnalysisUtil.determine_parallelism_locally(
            self, dataset_size, user_defined_cpu)

        avg_bulk_input_size = int(dataset_size / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(pipeline_dataset)
        while count < len(inventory_list):
            inventory_args.append(inventory_list[count:count +
                                                 avg_bulk_input_size])
            count += avg_bulk_input_size

        results = self.pipeline_damage_concurrent_future(
            self.pipeline_damage_analysis_bulk_input, num_workers,
            inventory_args, repeat(hazard_type), repeat(hazard_dataset_id))

        self.set_result_csv_data("result",
                                 results,
                                 name=self.get_parameter("result_name"))

        return True

    def pipeline_damage_concurrent_future(self, function_name, num_workers,
                                          *args):
        """Utilizes concurrent.future module.

        Args:
            function_name (function): The function to be parallelized.
            num_workers (int): Maximum number workers in parallelization.
            *args: All the arguments in order to pass into parameter function_name.

        Returns:
            list: A list of ordered dictionaries with building damage values and other data/metadata.

        """
        output = []
        with concurrent.futures.ProcessPoolExecutor(
                max_workers=num_workers) as executor:
            for ret in executor.map(function_name, *args):
                output.extend(ret)

        return output

    def pipeline_damage_analysis_bulk_input(self, pipelines, hazard_type,
                                            hazard_dataset_id):
        """Run pipeline damage analysis for multiple pipelines.

        Args:
            pipelines (list): Multiple pipelines from the pipeline dataset.
            hazard_type (str): Hazard type
            hazard_dataset_id (str): An id of the hazard exposure.

        Returns:
            list: A list of ordered dictionaries with pipeline damage values and other data/metadata.

        """
        result = []

        # Get Fragility key
        fragility_key = self.get_parameter("fragility_key")
        if fragility_key is None:
            fragility_key = PipelineUtil.DEFAULT_TSU_FRAGILITY_KEY if hazard_type == 'tsunami' else \
                PipelineUtil.DEFAULT_EQ_FRAGILITY_KEY
            self.set_parameter("fragility_key", fragility_key)

        # get fragility set
        fragility_sets = self.fragilitysvc.match_inventory(
            self.get_input_dataset("dfr3_mapping_set"), pipelines,
            fragility_key)

        # Get Liquefaction Fragility Key
        liquefaction_fragility_key = self.get_parameter(
            "liquefaction_fragility_key")
        if hazard_type == "earthquake" and liquefaction_fragility_key is None:
            liquefaction_fragility_key = PipelineUtil.LIQ_FRAGILITY_KEY

        # Liquefaction
        use_liquefaction = False
        if hazard_type == "earthquake" and self.get_parameter(
                "use_liquefaction") is not None:
            use_liquefaction = self.get_parameter("use_liquefaction")

        # Get geology dataset id
        geology_dataset_id = self.get_parameter(
            "liquefaction_geology_dataset_id")
        fragility_sets_liq = None
        if geology_dataset_id is not None:
            fragility_sets_liq = self.fragilitysvc.match_inventory(
                self.get_input_dataset("dfr3_mapping_set"), pipelines,
                liquefaction_fragility_key)

        for pipeline in pipelines:
            if pipeline["id"] in fragility_sets.keys():
                liq_fragility_set = None
                # Check if mapping contains liquefaction fragility
                if geology_dataset_id is not None and \
                        fragility_sets_liq is not None and \
                        pipeline["id"] in fragility_sets_liq:
                    liq_fragility_set = fragility_sets_liq[pipeline["id"]]

                result.append(
                    self.pipeline_damage_analysis(
                        pipeline, hazard_type, fragility_sets[pipeline["id"]],
                        liq_fragility_set, hazard_dataset_id,
                        geology_dataset_id, use_liquefaction))

        return result

    def pipeline_damage_analysis(self, pipeline, hazard_type, fragility_set,
                                 fragility_set_liq, hazard_dataset_id,
                                 geology_dataset_id, use_liquefaction):
        """Run pipeline damage for a single pipeline.

        Args:
            pipeline (obj): A single pipeline.
            hazard_type (str): Hazard type.
            fragility_set (obj): A JSON description of fragility assigned to the pipeline.
            fragility_set_liq (obj): A JSON description of fragility assigned to the pipeline
                with liquefaction.
            hazard_dataset_id (str): A hazard dataset to use.
            geology_dataset_id (str): A dataset id of the geology dataset for liquefaction.
            use_liquefaction (bool): Liquefaction. True for using liquefaction information to modify the damage,
                False otherwise.

        Returns:
            OrderedDict: A dictionary with pipeline damage values and other data/metadata.
        """

        pipeline_results = collections.OrderedDict()
        pgv_repairs = 0.0
        pgd_repairs = 0.0
        liq_hazard_type = ""
        liq_hazard_val = 0.0
        liquefaction_prob = 0.0

        if fragility_set is not None:
            demand_type = fragility_set.demand_type.lower()
            demand_units = fragility_set.demand_units
            location = GeoUtil.get_location(pipeline)
            point = str(location.y) + "," + str(location.x)

            if hazard_type == 'earthquake':
                hazard_resp = self.hazardsvc.get_earthquake_hazard_values(
                    hazard_dataset_id, demand_type, demand_units, [point])
            elif hazard_type == 'tsunami':
                hazard_resp = self.hazardsvc.get_tsunami_hazard_values(
                    hazard_dataset_id, demand_type, demand_units, [point])
            elif hazard_type == 'tornado':
                hazard_resp = self.hazardsvc.get_tornado_hazard_values(
                    hazard_dataset_id, demand_units, [point])
            elif hazard_type == 'hurricane':
                hazard_resp = self.hazardsvc.get_hurricanewf_values(
                    hazard_dataset_id, demand_type, demand_units, [point])
            else:
                raise ValueError("Hazard type is not currently supported.")

            hazard_val = hazard_resp[0]['hazardValue']
            # Out-of-bounds raster queries can return large negative values; clamp to zero
            if hazard_val <= 0.0:
                hazard_val = 0.0

            diameter = PipelineUtil.get_pipe_diameter(pipeline)
            fragility_vars = {'x': hazard_val, 'y': diameter}
            fragility_curve = fragility_set.fragility_curves[0]

            # TODO: here assume that custom fragility set only has one limit state
            pgv_repairs = fragility_set.calculate_custom_limit_state(
                fragility_vars)['failure']

            # Convert PGV repairs to SI units
            pgv_repairs = PipelineUtil.convert_result_unit(
                fragility_curve.description, pgv_repairs)

            if use_liquefaction is True and fragility_set_liq is not None and geology_dataset_id is not None:
                liq_fragility_curve = fragility_set_liq.fragility_curves[0]
                liq_hazard_type = fragility_set_liq.demand_type
                pgd_demand_units = fragility_set_liq.demand_units

                # Get PGD hazard value from hazard service, reusing the point computed above
                liquefaction = self.hazardsvc.get_liquefaction_values(
                    hazard_dataset_id, geology_dataset_id, pgd_demand_units,
                    [point])
                liq_hazard_val = liquefaction[0]['pgd']
                liquefaction_prob = liquefaction[0]['liqProbability']

                liq_fragility_vars = {
                    'x': liq_hazard_val,
                    'y': liquefaction_prob
                }
                pgd_repairs = liq_fragility_curve.compute_custom_limit_state_probability(
                    liq_fragility_vars)
                # Convert PGD repairs to SI units
                pgd_repairs = PipelineUtil.convert_result_unit(
                    liq_fragility_curve.description, pgd_repairs)

            # HAZUS-style assumption: PGV-driven repairs are mostly leaks and
            # PGD-driven repairs mostly breaks (80/20 weighting in each case)
            total_repair_rate = pgd_repairs + pgv_repairs
            break_rate = 0.2 * pgv_repairs + 0.8 * pgd_repairs
            leak_rate = 0.8 * pgv_repairs + 0.2 * pgd_repairs

            length = PipelineUtil.get_pipe_length(pipeline)

            # Assuming breaks follow a Poisson process along the pipe,
            # P(at least one break) = 1 - exp(-break_rate * length)
            failure_probability = 1 - math.exp(-1.0 * break_rate * length)
            num_pgd_repairs = pgd_repairs * length
            num_pgv_repairs = pgv_repairs * length
            num_repairs = num_pgd_repairs + num_pgv_repairs

            pipeline_results['guid'] = pipeline['properties']['guid']
            if 'pipetype' in pipeline['properties']:
                pipeline_results['pipeclass'] = pipeline['properties'][
                    'pipetype']
            elif 'pipelinesc' in pipeline['properties']:
                pipeline_results['pipeclass'] = pipeline['properties'][
                    'pipelinesc']
            else:
                pipeline_results['pipeclass'] = ""

            pipeline_results['pgvrepairs'] = pgv_repairs
            pipeline_results['pgdrepairs'] = pgd_repairs
            pipeline_results['repairspkm'] = total_repair_rate
            pipeline_results['breakrate'] = break_rate
            pipeline_results['leakrate'] = leak_rate
            pipeline_results['failprob'] = failure_probability
            pipeline_results['demandtype'] = demand_type
            pipeline_results['hazardtype'] = hazard_type
            pipeline_results['hazardval'] = hazard_val
            pipeline_results['liqhaztype'] = liq_hazard_type
            pipeline_results['liqhazval'] = liq_hazard_val
            pipeline_results['liqprobability'] = liquefaction_prob
            pipeline_results['numpgvrpr'] = num_pgv_repairs
            pipeline_results['numpgdrpr'] = num_pgd_repairs
            pipeline_results['numrepairs'] = num_repairs

        return pipeline_results

    def get_spec(self):
        """Get specifications of the pipeline damage analysis.

        Returns:
            obj: A JSON object of specifications of the pipeline damage analysis.

        """
        return {
            'name': 'pipeline-damage',
            'description': 'buried pipeline damage analysis',
            'input_parameters': [{
                'id': 'result_name',
                'required': True,
                'description': 'result dataset name',
                'type': str
            }, {
                'id': 'hazard_type',
                'required': True,
                'description': 'Hazard Type (e.g. earthquake)',
                'type': str
            }, {
                'id': 'hazard_id',
                'required': True,
                'description': 'Hazard ID',
                'type': str
            }, {
                'id': 'fragility_key',
                'required': False,
                'description': 'Fragility key to use in mapping dataset',
                'type': str
            }, {
                'id': 'use_liquefaction',
                'required': False,
                'description': 'Use liquefaction',
                'type': bool
            }, {
                'id': 'liquefaction_fragility_key',
                'required': False,
                'description':
                'Fragility key to use in liquefaction mapping dataset',
                'type': str
            }, {
                'id': 'num_cpu',
                'required': False,
                'description':
                'If using parallel execution, the number of cpus to request',
                'type': int
            }, {
                'id': 'liquefaction_geology_dataset_id',
                'required': False,
                'description': 'Geology dataset id',
                'type': str,
            }],
            'input_datasets': [{
                'id': 'pipeline',
                'required': True,
                'description': 'Pipeline Inventory',
                'type': ['ergo:buriedPipelineTopology', 'ergo:pipeline'],
            }, {
                'id': 'dfr3_mapping_set',
                'required': True,
                'description': 'DFR3 Mapping Set Object',
                'type': ['incore:dfr3MappingSet'],
            }],
            'output_datasets': [{
                'id': 'result',
                'parent_type': 'pipeline',
                'type': 'ergo:pipelineDamage'
            }]
        }
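The derived measures at the end of pipeline_damage_analysis are plain arithmetic on the two per-km repair rates, so they can be checked in isolation. Below is a minimal, self-contained sketch of the same formulas; the function and argument names are hypothetical, and the 80/20 break/leak weighting simply mirrors the values hard-coded in the method above.

import math

def pipeline_repair_summary(pgv_repairs, pgd_repairs, length_km):
    """Recompute the derived pipeline damage measures from per-km repair rates.

    pgv_repairs / pgd_repairs are repairs per km attributable to ground
    shaking (PGV) and ground deformation (PGD); length_km is pipe length.
    """
    break_rate = 0.2 * pgv_repairs + 0.8 * pgd_repairs
    leak_rate = 0.8 * pgv_repairs + 0.2 * pgd_repairs

    return {
        'repairspkm': pgv_repairs + pgd_repairs,
        'breakrate': break_rate,
        'leakrate': leak_rate,
        # Poisson assumption: probability of at least one break on the segment
        'failprob': 1 - math.exp(-break_rate * length_km),
        'numrepairs': (pgv_repairs + pgd_repairs) * length_km,
    }

# Example: 0.05 PGV repairs/km and 0.02 PGD repairs/km on a 3 km segment
print(pipeline_repair_summary(0.05, 0.02, 3.0))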
Example No. 7
class RoadDamage(BaseAnalysis):
    """Road Damage Analysis calculates the probability of road damage based on an earthquake or tsunami hazard.

    Args:
        incore_client (IncoreClient): Service authentication.

    """
    DEFAULT_FRAGILITY_KEY = "Non-Retrofit Fragility ID Code"

    def __init__(self, incore_client):
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(RoadDamage, self).__init__(incore_client)

    def run(self):
        """Executes road damage analysis."""
        # Road dataset
        road_set = self.get_input_dataset("roads").get_inventory_reader()

        # Get Fragility key
        fragility_key = self.get_parameter("fragility_key")
        if fragility_key is None:
            fragility_key = self.DEFAULT_FRAGILITY_KEY

        # Get hazard input
        hazard_dataset_id = self.get_parameter("hazard_id")

        # Get hazard type
        hazard_type = self.get_parameter("hazard_type")

        # Liquefaction
        use_liquefaction = False
        if self.get_parameter("use_liquefaction") is not None:
            use_liquefaction = self.get_parameter("use_liquefaction")

        # Get geology dataset for liquefaction
        geology_dataset_id = None
        if self.get_parameter("liquefaction_geology_dataset_id") is not None:
            geology_dataset_id = self.get_parameter(
                "liquefaction_geology_dataset_id")

        # Hazard Uncertainty
        use_hazard_uncertainty = False
        if self.get_parameter("use_hazard_uncertainty") is not None:
            use_hazard_uncertainty = self.get_parameter(
                "use_hazard_uncertainty")

        user_defined_cpu = 1
        if self.get_parameter(
                "num_cpu") is not None and self.get_parameter("num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(
            self, len(road_set), user_defined_cpu)

        avg_bulk_input_size = int(len(road_set) / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(road_set)
        while count < len(inventory_list):
            inventory_args.append(inventory_list[count:count +
                                                 avg_bulk_input_size])
            count += avg_bulk_input_size

        results = self.road_damage_concurrent_future(
            self.road_damage_analysis_bulk_input, num_workers, inventory_args,
            repeat(hazard_type), repeat(hazard_dataset_id),
            repeat(use_hazard_uncertainty), repeat(geology_dataset_id),
            repeat(fragility_key), repeat(use_liquefaction))

        self.set_result_csv_data("result",
                                 results,
                                 name=self.get_parameter("result_name"))

        return True

    def road_damage_concurrent_future(self, function_name, parallelism, *args):
        """Utilizes concurrent.future module.

        Args:
            function_name (function): The function to be parallelized.
            parallelism (int): Number of workers in parallelization.
            *args: All the arguments in order to pass into parameter function_name.

        Returns:
            list: A list of ordered dictionaries with road damage values and other data/metadata.

        """

        output = []
        with concurrent.futures.ProcessPoolExecutor(
                max_workers=parallelism) as executor:
            for ret in executor.map(function_name, *args):
                output.extend(ret)

        return output

    def road_damage_analysis_bulk_input(self, roads, hazard_type,
                                        hazard_dataset_id,
                                        use_hazard_uncertainty,
                                        geology_dataset_id, fragility_key,
                                        use_liquefaction):
        """Run analysis for multiple roads.

        Args:
            roads (list): Multiple roads from input inventory set.
            hazard_type (str): A hazard type of the hazard exposure (earthquake or tsunami).
            hazard_dataset_id (str): An id of the hazard exposure.
            use_hazard_uncertainty (bool): Flag indicating whether to use hazard uncertainty.
            geology_dataset_id (str): An id of the geology for use in liquefaction.
            fragility_key (str): Fragility key describing the type of fragility.
            use_liquefaction (bool): Liquefaction. True for using liquefaction information to modify the damage,
                False otherwise.

        Returns:
            list: A list of ordered dictionaries with road damage values and other data/metadata.

        """
        road_results = []
        fragility_sets = self.fragilitysvc.match_inventory(
            self.get_input_dataset("dfr3_mapping_set"), roads, fragility_key)

        # Convert the list of roads into a dictionary keyed by road id for ease of reference
        roads = {rd["id"]: rd for rd in roads}

        processed_roads = []
        grouped_roads = AnalysisUtil.group_by_demand_type(
            roads, fragility_sets)
        for demand, grouped_road_items in grouped_roads.items():
            input_demand_type = demand[0]
            input_demand_units = demand[1]

            # For every group of unique demand and demand unit, call the end-point once
            road_chunks = list(AnalysisUtil.chunks(grouped_road_items, 50))
            for road_chunk in road_chunks:
                points = []
                for road_id in road_chunk:
                    location = GeoUtil.get_location(roads[road_id])
                    points.append(str(location.y) + "," + str(location.x))

                liquefaction = []
                if hazard_type == 'earthquake':
                    hazard_vals = self.hazardsvc.get_earthquake_hazard_values(
                        hazard_dataset_id, input_demand_type,
                        input_demand_units, points)

                    if input_demand_type.lower() == 'pgd' and use_liquefaction \
                            and geology_dataset_id is not None:
                        liquefaction = self.hazardsvc.get_liquefaction_values(
                            hazard_dataset_id, geology_dataset_id,
                            input_demand_units, points)
                elif hazard_type in ('tornado', 'hurricane'):
                    raise ValueError(
                        'Earthquakes and tsunamis are the only hazards supported for road damage'
                    )
                elif hazard_type == 'tsunami':
                    hazard_vals = self.hazardsvc.get_tsunami_hazard_values(
                        hazard_dataset_id, input_demand_type,
                        input_demand_units, points)
                else:
                    raise ValueError("Missing or unsupported hazard type.")

                # Parse the batch hazard value results and map them back to the road and fragility.
                # This is a potential pitfall as we are relying on the order of the returned results
                for i, road_id in enumerate(road_chunk):
                    road_result = collections.OrderedDict()
                    road = roads[road_id]
                    hazard_val = hazard_vals[i]['hazardValue']

                    # Sometimes the geotiffs give large negative values for out of bounds instead of 0
                    if hazard_val <= 0.0:
                        hazard_val = 0.0

                    std_dev = 0.0
                    if use_hazard_uncertainty:
                        raise ValueError("Uncertainty Not Implemented Yet.")

                    selected_fragility_set = fragility_sets[road_id]
                    dmg_probability = selected_fragility_set.calculate_limit_state(
                        hazard_val, std_dev=std_dev)
                    dmg_interval = AnalysisUtil.calculate_damage_interval(
                        dmg_probability)

                    road_result['guid'] = road['properties']['guid']
                    road_result.update(dmg_probability)
                    road_result.update(dmg_interval)
                    road_result['demandtype'] = input_demand_type
                    road_result['demandunits'] = input_demand_units
                    road_result['hazardtype'] = hazard_type
                    road_result['hazardval'] = hazard_val

                    # if there is liquefaction, overwrite the hazardval with liquefaction value
                    # recalculate dmg_probability and dmg_interval
                    if len(liquefaction) > 0:
                        if input_demand_type in liquefaction[i]:
                            liquefaction_val = liquefaction[i][
                                input_demand_type]
                        elif input_demand_type.lower() in liquefaction[i]:
                            liquefaction_val = liquefaction[i][
                                input_demand_type.lower()]
                        elif input_demand_type.upper() in liquefaction[i]:
                            liquefaction_val = liquefaction[i][
                                input_demand_type.upper()]
                        else:
                            liquefaction_val = 0.0
                        dmg_probability = selected_fragility_set.calculate_limit_state(
                            liquefaction_val, std_dev=std_dev)
                        dmg_interval = AnalysisUtil.calculate_damage_interval(
                            dmg_probability)

                        road_result['hazardval'] = liquefaction_val
                        road_result.update(dmg_probability)
                        road_result.update(dmg_interval)

                    road_results.append(road_result)
                    processed_roads.append(road_id)

        unmapped_dmg_probability = {
            "ls-slight": 0.0,
            "ls-moderat": 0.0,
            "ls-extensi": 0.0,
            "ls-complet": 0.0
        }
        unmapped_dmg_intervals = AnalysisUtil.calculate_damage_interval(
            unmapped_dmg_probability)
        for road_id, rd in roads.items():
            if road_id not in processed_roads:
                unmapped_rd_result = collections.OrderedDict()
                unmapped_rd_result['guid'] = rd['properties']['guid']
                unmapped_rd_result.update(unmapped_dmg_probability)
                unmapped_rd_result.update(unmapped_dmg_intervals)
                unmapped_rd_result['demandtype'] = "None"
                unmapped_rd_result['demandunits'] = "None"
                unmapped_rd_result['hazardtype'] = "None"
                unmapped_rd_result['hazardval'] = 0.0
                road_results.append(unmapped_rd_result)

        return road_results

    def get_spec(self):
        """Get specifications of the road damage analysis.

        Returns:
            obj: A JSON object of specifications of the road damage analysis.

        """

        return {
            'name': 'road-damage',
            'description': 'road damage analysis',
            'input_parameters': [
                {
                    'id': 'result_name',
                    'required': True,
                    'description': 'result dataset name',
                    'type': str
                },
                {
                    'id': 'hazard_type',
                    'required': True,
                    'description': 'Hazard Type (e.g. earthquake)',
                    'type': str
                },
                {
                    'id': 'hazard_id',
                    'required': True,
                    'description': 'Hazard ID',
                    'type': str
                },
                {
                    'id': 'fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in mapping dataset',
                    'type': str
                },
                {
                    'id': 'use_liquefaction',
                    'required': False,
                    'description': 'Use liquefaction',
                    'type': bool
                },
                {
                    'id': 'liquefaction_geology_dataset_id',
                    'required': False,
                    'description': 'Liquefaction geology/susceptibility dataset id. '
                                   'If not provided, liquefaction will be ignored',
                    'type': str
                },
                {
                    'id': 'use_hazard_uncertainty',
                    'required': False,
                    'description': 'Use hazard uncertainty',
                    'type': bool
                },
                {
                    'id': 'num_cpu',
                    'required': False,
                    'description':
                    'If using parallel execution, the number of cpus to request',
                    'type': int
                },
            ],
            'input_datasets': [{
                'id': 'roads',
                'required': True,
                'description': 'Road Inventory',
                'type': ['ergo:roadLinkTopo', 'incore:roads']
            }, {
                'id': 'dfr3_mapping_set',
                'required': True,
                'description': 'DFR3 Mapping Set Object',
                'type': ['incore:dfr3MappingSet'],
            }],
            'output_datasets': [{
                'id': 'result',
                'parent_type': 'roads',
                'description': 'CSV file of road structural damage',
                'type': 'ergo:roadDamage'
            }]
        }
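As with the other analyses, RoadDamage is configured through parameters and datasets and then executed. A minimal, hypothetical invocation is sketched below; the IDs are placeholders, and the BaseAnalysis helpers (load_remote_input_dataset, set_input_dataset, run_analysis) are assumed to behave as in the other pyincore examples above.

from pyincore import IncoreClient, FragilityService, MappingSet

client = IncoreClient()

# Placeholder IDs -- substitute real dataset/mapping/hazard IDs.
road_dataset_id = "5a284f2bc7d30d13bc081eb6"
mapping_id = "5d545b0bb9219c0689f1f3f4"
hazard_id = "5b902cb273c3371e1236b36b"

road_dmg = RoadDamage(client)
road_dmg.load_remote_input_dataset("roads", road_dataset_id)

# DFR3 fragility mapping, as required by get_spec().
fragility_service = FragilityService(client)
road_dmg.set_input_dataset("dfr3_mapping_set", MappingSet(fragility_service.get_mapping(mapping_id)))

road_dmg.set_parameter("result_name", "road_dmg_result")
road_dmg.set_parameter("hazard_type", "earthquake")
road_dmg.set_parameter("hazard_id", hazard_id)
road_dmg.set_parameter("use_liquefaction", False)
road_dmg.set_parameter("num_cpu", 4)

road_dmg.run_analysis()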