# Imports assumed by this example (standard pyincore layout):
from pyincore import IncoreClient, FragilityService, MappingSet
from pyincore.analyses.waterfacilitydamage import WaterFacilityDamage
from pyincore.globals import INCORE_API_DEV_URL


def run_with_base_class():
    client = IncoreClient(INCORE_API_DEV_URL)
    hazard_type = "earthquake"
    hazard_id = "5b902cb273c3371e1236b36b"
    facility_datasetid = "5a284f2ac7d30d13bc081e52"

    mapping_id = "5b47c3b1337d4a387e85564b"  # Hazus Potable Water Facility Fragility Mapping - Only PGA

    liq_geology_dataset_id = "5a284f53c7d30d13bc08249c"

    uncertainty = False
    liquefaction = False
    liq_fragility_key = "pgd"

    wf_dmg = WaterFacilityDamage(client)
    wf_dmg.load_remote_input_dataset("water_facilities", facility_datasetid)

    # Load fragility mapping
    fragility_service = FragilityService(client)
    mapping_set = MappingSet(fragility_service.get_mapping(mapping_id))
    wf_dmg.set_input_dataset('dfr3_mapping_set', mapping_set)

    result_name = "wf-dmg-results.csv"
    wf_dmg.set_parameter("result_name", result_name)
    wf_dmg.set_parameter("hazard_type", hazard_type)
    wf_dmg.set_parameter("hazard_id", hazard_id)
    wf_dmg.set_parameter("fragility_key", "pga")
    wf_dmg.set_parameter("use_liquefaction", liquefaction)
    wf_dmg.set_parameter("liquefaction_geology_dataset_id",
                         liq_geology_dataset_id)
    wf_dmg.set_parameter("liquefaction_fragility_key", liq_fragility_key)
    wf_dmg.set_parameter("use_hazard_uncertainty", uncertainty)
    wf_dmg.set_parameter("num_cpu", 4)

    wf_dmg.run_analysis()
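
After run_analysis() completes, the damage table can be pulled back out of the
analysis object. A minimal sketch, assuming the output dataset ids "result" and
"metadata" declared in this analysis's get_spec() (shown in full in Example #15):

    # retrieve the CSV output as a pandas DataFrame (same pattern as Example #6)
    wf_result = wf_dmg.get_output_dataset("result")
    wf_df = wf_result.get_dataframe_from_csv()
    print(wf_df.head())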
Example #2
def test_road_failure():
    client = IncoreClient(INCORE_API_DEV_URL, token_file_name=".incrtesttoken")

    # road inventory for Galveston island
    road_dataset_id = "5f0dd5ecb922f96f4e962caf"
    # distance table for Galveston island
    distance_dataset_id = "5f1883abfeef2d758c4e857d"
    # road damage by hurricane inundation mapping
    mapping_id = "5f0cb04fe392b24d4800f316"
    # Galveston Deterministic Hurricane - Kriging inundationDuration
    hazard_type = "hurricane"
    hazard_id = "5f10837c01d3241d77729a4f"

    # Create road damage
    road_failure = RoadFailure(client)
    # Load input datasets
    road_failure.load_remote_input_dataset("roads", road_dataset_id)
    road_failure.load_remote_input_dataset("distance_table",
                                           distance_dataset_id)
    # Load fragility mapping
    fragility_service = FragilityService(client)
    mapping_set = MappingSet(fragility_service.get_mapping(mapping_id))
    road_failure.set_input_dataset('dfr3_mapping_set', mapping_set)
    # Specify the result name
    result_name = "road_result"
    # Set analysis parameters
    road_failure.set_parameter("result_name", result_name)
    road_failure.set_parameter("hazard_type", hazard_type)
    road_failure.set_parameter("hazard_id", hazard_id)
    road_failure.set_parameter("num_cpu", 4)

    # Run road damage by hurricane inundation analysis
    result = road_failure.run_analysis()

    assert result is True
Example #3
def run_with_base_class():
    client = IncoreClient(pyglobals.INCORE_API_DEV_URL)
    pipeline_dmg = PipelineDamage(client)

    # test tsunami pipeline
    pipeline_dmg.load_remote_input_dataset("pipeline",
                                           "5ef1171b2367ff111d082f0c")

    # Load fragility mapping
    fragility_service = FragilityService(client)
    mapping_set = MappingSet(
        fragility_service.get_mapping(
            "60b124e01f2b7d4a916ba456"))  # new format fragility curves
    # mapping_set = MappingSet(fragility_service.get_mapping("5ef11888da15730b13b84353")) # legacy fragility curves
    pipeline_dmg.set_input_dataset('dfr3_mapping_set', mapping_set)

    pipeline_dmg.set_parameter("result_name",
                               "seaside_tsunami_pipeline_result")
    pipeline_dmg.set_parameter("hazard_type", "tsunami")
    pipeline_dmg.set_parameter(
        "fragility_key", "Non-Retrofit inundationDepth Fragility ID Code")
    pipeline_dmg.set_parameter("hazard_id", "5bc9eaf7f7b08533c7e610e1")
    pipeline_dmg.set_parameter("num_cpu", 4)

    # Run pipeline damage analysis
    result = pipeline_dmg.run_analysis()
Example #4
def test_pipeline_dmg():
    client = IncoreClient(service_url=INCORE_API_DEV_URL, token_file_name=".incrtesttoken")
    pipeline_dmg = PipelineDamage(client)

    # test tsunami pipeline
    pipeline_dmg.load_remote_input_dataset("pipeline",
                                           "5ef1171b2367ff111d082f0c")

    # Load fragility mapping
    fragility_service = FragilityService(client)
    mapping_set = MappingSet(fragility_service.get_mapping("5ef11888da15730b13b84353"))
    pipeline_dmg.set_input_dataset('dfr3_mapping_set', mapping_set)

    pipeline_dmg.set_parameter("result_name",
                               "seaside_tsunami_pipeline_result")
    pipeline_dmg.set_parameter("hazard_type", "tsunami")
    pipeline_dmg.set_parameter("fragility_key",
                               "Non-Retrofit inundationDepth Fragility ID Code")
    pipeline_dmg.set_parameter("hazard_id", "5bc9eaf7f7b08533c7e610e1")
    pipeline_dmg.set_parameter("num_cpu", 4)

    # Run pipeline damage analysis
    result = pipeline_dmg.run_analysis()

    assert result is True
Example #5
def run_with_base_class():
    client = IncoreClient(INCORE_API_DEV_URL)

    # EQ Road Dataset - Seaside roads
    road_dataset_id = "5ee7af50772cf80008577ae3"

    hazard_type = "tsunami"
    liq_geology_dataset_id = None

    if hazard_type == 'earthquake':
        # Seaside Earthquake
        hazard_id = "5ba8f379ec2309043520906f"

        # Earthquake mapping
        mapping_id = "5ee7b145c54361000148dcc5"

        fragility_key = "pgd"
        liquefaction = False
    elif hazard_type == 'tsunami':
        # Seaside Tsunami
        hazard_id = "5bc9eaf7f7b08533c7e610e1"

        # Tsunami Mapping for Seaside
        mapping_id = "5ee7b2c9c54361000148de37"

        fragility_key = "Non-Retrofit inundationDepth Fragility ID Code"
        liquefaction = False
    else:
        raise ValueError(
            "Earthquake and tsunami are currently the only hazards supported by this road damage example"
        )

    uncertainty = False

    # Run Seaside road damage for the selected hazard
    road_dmg = RoadDamage(client)
    road_dmg.load_remote_input_dataset("roads", road_dataset_id)

    # Load fragility mapping
    fragility_service = FragilityService(client)
    mapping_set = MappingSet(fragility_service.get_mapping(mapping_id))
    road_dmg.set_input_dataset('dfr3_mapping_set', mapping_set)

    road_dmg.set_parameter("result_name", "seaside_road_dmg_" + hazard_type)
    road_dmg.set_parameter("hazard_type", hazard_type)
    road_dmg.set_parameter("hazard_id", hazard_id)
    if fragility_key is not None:
        road_dmg.set_parameter("fragility_key", fragility_key)
    road_dmg.set_parameter("num_cpu", 1)
    road_dmg.set_parameter("use_liquefaction", liquefaction)
    if liquefaction and liq_geology_dataset_id is not None:
        road_dmg.set_parameter("liquefaction_geology_dataset_id",
                               liq_geology_dataset_id)
    road_dmg.set_parameter("use_hazard_uncertainty", uncertainty)

    # Run Analysis
    road_dmg.run_analysis()
Example #6
def run_with_base_class():
    client = IncoreClient(pyglobals.INCORE_API_DEV_URL)

    # Joplin tornado building damage
    bldg_dataset_id = "5df7d0de425e0b00092d0082"  # joplin building v6

    bldg_dmg = BuildingDamage(client)
    bldg_dmg.load_remote_input_dataset("buildings", bldg_dataset_id)

    mapping_id = "5e8e3a21eaa8b80001f04f1c"  # 19 archetype, non-retrofit
    fragility_service = FragilityService(client)
    mapping_set = MappingSet(fragility_service.get_mapping(mapping_id))
    bldg_dmg.set_input_dataset("dfr3_mapping_set", mapping_set)
    bldg_dmg.set_parameter("fragility_key", "Non-Retrofit Fragility ID Code")

    # The simulated EF-5 tornado shows geographical locations and the range of wind speed
    # of tornado hazard in Joplin.
    hazard_type = "tornado"
    hazard_id = "5dfa32bbc0601200080893fb"

    result_name = "joplin_tornado_dmg_result"
    bldg_dmg.set_parameter("result_name", result_name)
    bldg_dmg.set_parameter("hazard_type", hazard_type)
    bldg_dmg.set_parameter("hazard_id", hazard_id)
    bldg_dmg.set_parameter("num_cpu", 4)
    bldg_dmg.set_parameter("seed", 1000)
    bldg_dmg.run_analysis()
    # end of Building damage analysis

    # get csv results from Building damage analysis
    building_dmg_result = bldg_dmg.get_output_dataset("ds_result")
    building_dmg_result.get_dataframe_from_csv()

    restoration = JoplinEmpiricalRestoration(client)
    # Building final target level dataset defining custom final restoration values
    building_fl_id = "61a68979d5b02930aa43ae39"

    restoration.load_remote_input_dataset("buildings", bldg_dataset_id)
    # restoration.load_remote_input_dataset("building_dmg", building_dmg_result)
    restoration.set_input_dataset("building_dmg", building_dmg_result)
    restoration.load_remote_input_dataset("building_functionality_level", building_fl_id)

    result_name = "Joplin_empirical_restoration_result"
    restoration.set_parameter("result_name", result_name)
    restoration.set_parameter("target_functionality_level", 0)
    # restoration.set_parameter("seed", 1234)

    # Run Analysis
    restoration.run_analysis()
Example #7
def pytest_sessionstart(session):
    """
    Called after the Session object has been created and
    before performing collection and entering the run test loop.
    """
    try:
        with open(
                os.path.join(os.path.dirname(__file__), "pyincore/.incorepw"),
                'r') as f:
            cred = f.read().splitlines()
    except EnvironmentError:
        assert False
    # cred[0] is a JWT containing the credentials; cred[1] is the decode key
    credentials = jwt.decode(cred[0], cred[1])

    monkeypatch = MonkeyPatch()
    monkeypatch.setattr("builtins.input", lambda x: credentials["username"])
    monkeypatch.setattr("getpass.getpass", lambda y: credentials["password"])
    client = IncoreClient(service_url=pyglobals.INCORE_API_DEV_URL,
                          token_file_name=".incrtesttoken")
    pytest.client = client
    pytest.datasvc = DataService(client)
    pytest.fragilitysvc = FragilityService(client)
    pytest.repairsvc = RepairService(client)
    pytest.restorationsvc = RestorationService(client)
    pytest.hazardsvc = HazardService(client)
    pytest.spacesvc = SpaceService(client)
    print(
        f"Successfully initialized Incore client and services. Using {pyglobals.INCORE_API_DEV_URL}"
    )
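
With this session hook in place, individual tests can reach the pre-built services
through the attributes stashed on the pytest module. A minimal sketch (assuming
DataService.get_dataset_metadata, with a dataset id borrowed from Example #10):

    def test_services_ready():
        metadata = pytest.datasvc.get_dataset_metadata("5a284f0bc7d30d13bc081a28")
        assert metadata is not None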
Example #8
def run_with_base_class():
    client = IncoreClient(pyglobals.INCORE_API_DEV_URL)

    # Memphis 7.9 AB-95
    hazard_type = "earthquake"
    hazard_id = "5b902cb273c3371e1236b36b"

    # Shelby County Essential Facilities
    building_dataset_id = "5a284f42c7d30d13bc0821ba"

    # Default Building Fragility Mapping v1.0
    mapping_id = "5b47b350337d4a3629076f2c"

    non_structural_building_dmg = NonStructBuildingDamage(client)

    # Load input datasets
    non_structural_building_dmg.load_remote_input_dataset(
        "buildings", building_dataset_id)

    # Load fragility mapping
    fragility_service = FragilityService(client)
    mapping_set = MappingSet(fragility_service.get_mapping(mapping_id))
    non_structural_building_dmg.set_input_dataset('dfr3_mapping_set',
                                                  mapping_set)

    # Specify the result name
    result_name = "non_structural_building_dmg_result"

    # Set analysis parameters
    non_structural_building_dmg.set_parameter("result_name", result_name)
    non_structural_building_dmg.set_parameter("hazard_type", hazard_type)
    non_structural_building_dmg.set_parameter("hazard_id", hazard_id)
    non_structural_building_dmg.set_parameter("num_cpu", 4)

    # use liquefaction (slow)
    # Shelby County Liquefaction Susceptibility
    use_liquefaction = True
    liq_geology_dataset_id = "5a284f55c7d30d13bc0824ba"

    non_structural_building_dmg.set_parameter("use_liquefaction",
                                              use_liquefaction)
    non_structural_building_dmg.set_parameter("liq_geology_dataset_id",
                                              liq_geology_dataset_id)

    # Run analysis
    non_structural_building_dmg.run_analysis()
Example #9
def test_pipeline_dmg_w_repair_rate():
    client = IncoreClient(pyglobals.INCORE_API_DEV_URL)

    # This is the Memphis Water Buried Pipeline with Topology dataset from the Ergo repository
    pipeline_dataset_id = "5a284f28c7d30d13bc081d14"
    # pipeline mapping
    mapping_id = "5b47c227337d4a38464efea8"

    # New Madrid earthquake using Atkinson Boore 1995
    hazard_type = "earthquake"
    hazard_id = "5b902cb273c3371e1236b36b"

    # Geology dataset
    liq_geology_dataset_id = "5a284f53c7d30d13bc08249c"

    # Create pipeline damage
    test_pipeline_dmg_w_rr = PipelineDamageRepairRate(client)
    # Load input datasets
    test_pipeline_dmg_w_rr.load_remote_input_dataset("pipeline",
                                                     pipeline_dataset_id)
    # Load fragility mapping
    fragility_service = FragilityService(client)
    mapping_set = MappingSet(fragility_service.get_mapping(mapping_id))
    test_pipeline_dmg_w_rr.set_input_dataset('dfr3_mapping_set', mapping_set)
    # Specify the result name
    result_name = "pipeline_result"
    # Set analysis parameters
    test_pipeline_dmg_w_rr.set_parameter("result_name", result_name)
    test_pipeline_dmg_w_rr.set_parameter("hazard_type", hazard_type)
    test_pipeline_dmg_w_rr.set_parameter("hazard_id", hazard_id)
    test_pipeline_dmg_w_rr.set_parameter("liquefaction_fragility_key", "pgd")
    # test_pipeline_dmg_w_rr.set_parameter("use_liquefaction", False)
    test_pipeline_dmg_w_rr.set_parameter(
        "use_liquefaction", True)  # toggle on and off to see liquefaction
    test_pipeline_dmg_w_rr.set_parameter("num_cpu", 4)
    test_pipeline_dmg_w_rr.set_parameter("liquefaction_geology_dataset_id",
                                         liq_geology_dataset_id)
    # Run pipeline damage analysis
    result = test_pipeline_dmg_w_rr.run_analysis()

    assert result is True
Example #10
def run_with_base_class():
    client = IncoreClient(INCORE_API_DEV_URL)

    # Memphis Earthquake damage
    # New Madrid earthquake using Atkinson Boore 1995
    hazard_type = "earthquake"
    hazard_id = "5b902cb273c3371e1236b36b"

    # Building dataset
    # 5a284f0bc7d30d13bc081a28  5kb
    # 5bcf2fcbf242fe047ce79dad 300kb
    # 5a284f37c7d30d13bc08219c 20mb
    bldg_dataset_id = "5a284f0bc7d30d13bc081a28"

    bldg_dmg = BuildingDamage(client)
    bldg_dmg.load_remote_input_dataset("buildings", bldg_dataset_id)

    # Earthquake mapping
    mapping_id = "5b47b350337d4a3629076f2c"
    fragility_service = FragilityService(client)
    mapping_set = MappingSet(fragility_service.get_mapping(mapping_id))
    bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set)

    result_name = "memphis_eq_bldg_dmg_result"
    bldg_dmg.set_parameter("result_name", result_name)
    bldg_dmg.set_parameter("hazard_type", hazard_type)
    bldg_dmg.set_parameter("hazard_id", hazard_id)
    bldg_dmg.set_parameter("num_cpu", 4)

    # Run Analysis
    bldg_dmg.run_analysis()

    # TSUNAMI

    hazard_type = "tsunami"
    hazard_id = "5bc9e25ef7b08533c7e610dc"

    # Seaside building dataset
    bldg_dataset_id = "5bcf2fcbf242fe047ce79dad"

    # Run seaside tsunami building damage
    bldg_dmg = BuildingDamage(client)
    bldg_dmg.load_remote_input_dataset("buildings", bldg_dataset_id)

    # Tsunami mapping
    mapping_id = "5b48fb1f337d4a478e7bd54d"
    fragility_service = FragilityService(client)
    mapping_set = MappingSet(fragility_service.get_mapping(mapping_id))
    bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set)

    result_name = "seaside_tsunami_dmg_result"
    bldg_dmg.set_parameter("result_name", result_name)
    bldg_dmg.set_parameter("hazard_type", hazard_type)
    bldg_dmg.set_parameter("hazard_id", hazard_id)
    bldg_dmg.set_parameter("num_cpu", 4)
    bldg_dmg.run_analysis()
Example #11
def run_with_base_class():
    client = IncoreClient(INCORE_API_DEV_URL)

    hazard_type = "earthquake"

    hazard_id = "5eebcbb08f80fe3899ad6039"

    epf_dataset_id = "5eebcaa17a00803abc85ec11"

    # Earthquake mapping
    mapping_id = "5eebcc13e7226233ce4ef0d7"

    # Run epf damage
    epf_dmg = EpfDamage(client)

    epf_dmg.load_remote_input_dataset("epfs", epf_dataset_id)

    # Load fragility mapping
    fragility_service = FragilityService(client)
    mapping_set = MappingSet(fragility_service.get_mapping(mapping_id))
    epf_dmg.set_input_dataset('dfr3_mapping_set', mapping_set)

    epf_dmg.set_parameter("result_name", "earthquake_epf_dmg_result")
    epf_dmg.set_parameter("hazard_type", hazard_type)
    epf_dmg.set_parameter("hazard_id", hazard_id)
    epf_dmg.set_parameter("num_cpu", 1)

    # Run Analysis
    epf_dmg.run_analysis()

    hazard_type = "tsunami"

    hazard_id = "5bc9eaf7f7b08533c7e610e1"

    epf_dataset_id = "5eebcaa17a00803abc85ec11"

    # Tsunami mapping
    mapping_id = "5eebce11e7226233ce4ef305"

    # Run epf damage
    epf_dmg = EpfDamage(client)
    epf_dmg.load_remote_input_dataset("epfs", epf_dataset_id)

    # Load fragility mapping
    fragility_service = FragilityService(client)
    mapping_set = MappingSet(fragility_service.get_mapping(mapping_id))
    epf_dmg.set_input_dataset('dfr3_mapping_set', mapping_set)

    epf_dmg.set_parameter("fragility_key", "Non-Retrofit inundationDepth Fragility ID Code")
    epf_dmg.set_parameter("result_name", "tsunami_epf_dmg_result")
    epf_dmg.set_parameter("hazard_type", hazard_type)
    epf_dmg.set_parameter("hazard_id", hazard_id)
    epf_dmg.set_parameter("num_cpu", 1)

    # Run Analysis
    epf_dmg.run_analysis()
Example #12
    def __init__(self, incore_client):
        # Create Hazard and Fragility service
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(WaterFacilityDamage, self).__init__(incore_client)
Example #13
def fragilitysvc(monkeypatch):
    client = IncoreClient(service_url=pyglobals.INCORE_API_DEV_URL, token_file_name=".incrtesttoken")
    return FragilityService(client)
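
Since this is a pytest fixture (the @pytest.fixture decorator is presumably stripped
in this excerpt), tests receive it by argument name. A minimal sketch, reusing the
mapping id from Example #8:

    def test_get_mapping(fragilitysvc):
        mapping = fragilitysvc.get_mapping("5b47b350337d4a3629076f2c")
        assert mapping is not None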
Example #14
    def __init__(self, incore_client):
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(RoadDamage, self).__init__(incore_client)
Example #15
# Imports assumed by this class (standard pyincore layout; exact module paths
# may vary across pyincore versions):
import concurrent.futures
import random
from itertools import repeat

from pyincore import BaseAnalysis, HazardService, FragilityService, \
    AnalysisUtil, GeoUtil
from pyincore.models.dfr3curve import DFR3Curve


class WaterFacilityDamage(BaseAnalysis):
    """Computes water facility damage for an earthquake tsunami, tornado, or hurricane exposure.

    """

    DEFAULT_EQ_FRAGILITY_KEY = "pga"
    DEFAULT_TSU_FRAGILITY_KEY = "Non-Retrofit inundationDepth Fragility ID Code"
    DEFAULT_LIQ_FRAGILITY_KEY = "pgd"

    def __init__(self, incore_client):
        # Create Hazard and Fragility service
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(WaterFacilityDamage, self).__init__(incore_client)

    def run(self):
        """Performs Water facility damage analysis by using the parameters from the spec
        and creates an output dataset in csv format

        Returns:
            bool: True if successful, False otherwise
        """
        # Facility dataset
        inventory_set = self.get_input_dataset(
            "water_facilities").get_inventory_reader()

        # Get hazard input
        hazard_dataset_id = self.get_parameter("hazard_id")

        # Hazard type of the exposure
        hazard_type = self.get_parameter("hazard_type")

        user_defined_cpu = 1

        if self.get_parameter("num_cpu") is not None and self.get_parameter(
                "num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(
            self, len(inventory_set), user_defined_cpu)

        avg_bulk_input_size = int(len(inventory_set) / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(inventory_set)
        while count < len(inventory_list):
            inventory_args.append(inventory_list[count:count +
                                                 avg_bulk_input_size])
            count += avg_bulk_input_size

        (ds_results,
         damage_results) = self.waterfacility_damage_concurrent_futures(
             self.waterfacilityset_damage_analysis_bulk_input, num_workers,
             inventory_args, repeat(hazard_type), repeat(hazard_dataset_id))

        self.set_result_csv_data("result",
                                 ds_results,
                                 name=self.get_parameter("result_name"))
        self.set_result_json_data("metadata",
                                  damage_results,
                                  name=self.get_parameter("result_name") +
                                  "_additional_info")

        return True

    def waterfacility_damage_concurrent_futures(self, function_name,
                                                parallel_processes, *args):
        """Utilizes concurrent.future module.

            Args:
                function_name (function): The function to be parallelized.
                parallel_processes (int): Number of workers in parallelization.
                *args: All the arguments in order to pass into parameter function_name.

            Returns:
                list: A list of ordered dictionaries with water facility damage values
                list: A list of ordered dictionaries with other water facility data/metadata

        """
        output_ds = []
        output_dmg = []

        with concurrent.futures.ProcessPoolExecutor(
                max_workers=parallel_processes) as executor:
            for ret1, ret2 in executor.map(function_name, *args):
                output_ds.extend(ret1)
                output_dmg.extend(ret2)

        return output_ds, output_dmg

    def waterfacilityset_damage_analysis_bulk_input(self, facilities,
                                                    hazard_type,
                                                    hazard_dataset_id):
        """Gets applicable fragilities and calculates damage

        Args:
            facilities (list): Multiple water facilities from input inventory set.
            hazard_type (str): A hazard type of the hazard exposure (earthquake, tsunami, tornado, or hurricane).
            hazard_dataset_id (str): An id of the hazard exposure.

        Returns:
            list: A list of ordered dictionaries with water facility damage values
            list: A list of ordered dictionaries with other water facility data/metadata
        """

        # Liquefaction related variables
        use_liquefaction = False
        liquefaction_available = False
        fragility_sets_liq = None
        liquefaction_resp = None
        geology_dataset_id = None
        liq_hazard_vals = None
        liq_demand_types = None
        liq_demand_units = None
        liquefaction_prob = None
        loc = None

        # Obtain the fragility key
        fragility_key = self.get_parameter("fragility_key")

        if fragility_key is None:
            if hazard_type == 'tsunami':
                fragility_key = self.DEFAULT_TSU_FRAGILITY_KEY
            elif hazard_type == 'earthquake':
                fragility_key = self.DEFAULT_EQ_FRAGILITY_KEY
            else:
                raise ValueError(
                    "Hazard types other than earthquake and tsunami are not currently supported."
                )

            self.set_parameter("fragility_key", fragility_key)

        # Obtain the fragility set
        fragility_sets = self.fragilitysvc.match_inventory(
            self.get_input_dataset("dfr3_mapping_set"), facilities,
            fragility_key)

        # Obtain the liquefaction fragility Key
        liquefaction_fragility_key = self.get_parameter(
            "liquefaction_fragility_key")

        if hazard_type == "earthquake":
            if self.get_parameter("use_liquefaction") is True:
                if liquefaction_fragility_key is None:
                    liquefaction_fragility_key = self.DEFAULT_LIQ_FRAGILITY_KEY

                use_liquefaction = self.get_parameter("use_liquefaction")

                # Obtain the geology dataset
                geology_dataset_id = self.get_parameter(
                    "liquefaction_geology_dataset_id")

                if geology_dataset_id is not None:
                    fragility_sets_liq = self.fragilitysvc.match_inventory(
                        self.get_input_dataset("dfr3_mapping_set"), facilities,
                        liquefaction_fragility_key)

                    if fragility_sets_liq is not None:
                        liquefaction_available = True

        # Determine whether to use hazard uncertainty
        uncertainty = self.get_parameter("use_hazard_uncertainty")

        # Setup fragility translation structures
        values_payload = []
        values_payload_liq = []
        unmapped_waterfacilities = []
        mapped_waterfacilities = []

        for facility in facilities:
            if facility["id"] in fragility_sets.keys():
                # Fill in generic details
                fragility_set = fragility_sets[facility["id"]]
                location = GeoUtil.get_location(facility)
                loc = str(location.y) + "," + str(location.x)
                demands = fragility_set.demand_types
                units = fragility_set.demand_units
                value = {"demands": demands, "units": units, "loc": loc}
                values_payload.append(value)
                mapped_waterfacilities.append(facility)

                # Fill in liquefaction parameters
                if liquefaction_available and facility[
                        "id"] in fragility_sets_liq:
                    fragility_set_liq = fragility_sets_liq[facility["id"]]
                    demands_liq = fragility_set_liq.demand_types
                    units_liq = fragility_set_liq.demand_units
                    value_liq = {
                        "demands": demands_liq,
                        "units": units_liq,
                        "loc": loc
                    }
                    values_payload_liq.append(value_liq)
            else:
                unmapped_waterfacilities.append(facility)

        del facilities

        if hazard_type == 'earthquake':
            hazard_resp = self.hazardsvc.post_earthquake_hazard_values(
                hazard_dataset_id, values_payload)
        elif hazard_type == 'tsunami':
            hazard_resp = self.hazardsvc.post_tsunami_hazard_values(
                hazard_dataset_id, values_payload)
        else:
            raise ValueError(
                "The provided hazard type is not supported yet by this analysis"
            )

        # Check if liquefaction is applicable
        if liquefaction_available:
            liquefaction_resp = self.hazardsvc.post_liquefaction_values(
                hazard_dataset_id, geology_dataset_id, values_payload_liq)

        # Calculate LS and DS
        facility_results = []
        damage_results = []

        for i, facility in enumerate(mapped_waterfacilities):
            fragility_set = fragility_sets[facility["id"]]
            limit_states = dict()
            dmg_intervals = dict()

            # Setup conditions for the analysis
            hazard_std_dev = 0

            if uncertainty:
                hazard_std_dev = random.random()

            if isinstance(fragility_set.fragility_curves[0], DFR3Curve):
                hazard_vals = AnalysisUtil.update_precision_of_lists(
                    hazard_resp[i]["hazardValues"])
                demand_types = hazard_resp[i]["demands"]
                demand_units = hazard_resp[i]["units"]

                hval_dict = dict()

                for j, d in enumerate(fragility_set.demand_types):
                    hval_dict[d] = hazard_vals[j]

                if not AnalysisUtil.do_hazard_values_have_errors(
                        hazard_resp[i]["hazardValues"]):
                    facility_args = fragility_set.construct_expression_args_from_inventory(
                        facility)
                    limit_states = \
                        fragility_set.calculate_limit_state(hval_dict,
                                                            std_dev=hazard_std_dev,
                                                            inventory_type='water_facility',
                                                            **facility_args)
                    # Evaluate liquefaction: a non-None response means liquefaction values are available
                    if liquefaction_resp is not None:
                        fragility_set_liq = fragility_sets_liq[facility["id"]]

                        if isinstance(fragility_set_liq.fragility_curves[0],
                                      DFR3Curve):
                            liq_hazard_vals = AnalysisUtil.update_precision_of_lists(
                                liquefaction_resp[i]["pgdValues"])
                            liq_demand_types = liquefaction_resp[i]["demands"]
                            liq_demand_units = liquefaction_resp[i]["units"]
                            liquefaction_prob = liquefaction_resp[i][
                                'liqProbability']

                            hval_dict_liq = dict()

                            for j, d in enumerate(
                                    fragility_set_liq.demand_types):
                                hval_dict_liq[d] = liq_hazard_vals[j]

                            facility_liq_args = fragility_set_liq.construct_expression_args_from_inventory(
                                facility)
                            pgd_limit_states = \
                                fragility_set_liq.calculate_limit_state(
                                    hval_dict_liq, std_dev=hazard_std_dev, inventory_type="water_facility",
                                    **facility_liq_args)
                        else:
                            raise ValueError(
                                "One of the fragilities is in a deprecated format. "
                                "This should not happen. If you are seeing this, please report the issue."
                            )

                        limit_states = AnalysisUtil.adjust_limit_states_for_pgd(
                            limit_states, pgd_limit_states)

                    dmg_intervals = fragility_set.calculate_damage_interval(
                        limit_states,
                        hazard_type=hazard_type,
                        inventory_type='water_facility')
            else:
                raise ValueError(
                    "One of the fragilities is in a deprecated format. This should not happen. "
                    "If you are seeing this, please report the issue.")

            # TODO: ideally, this goes into a single variable declaration section

            facility_result = {
                'guid': facility['properties']['guid'],
                **limit_states,
                **dmg_intervals
            }
            facility_result[
                'haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(
                    hazard_vals, hazard_type)
            damage_result = dict()
            damage_result['guid'] = facility['properties']['guid']
            damage_result['fragility_id'] = fragility_set.id
            damage_result['demandtypes'] = demand_types
            damage_result['demandunits'] = demand_units
            damage_result['hazardtype'] = hazard_type
            damage_result['hazardvals'] = hazard_vals

            if use_liquefaction and fragility_sets_liq and geology_dataset_id:
                damage_result['liq_fragility_id'] = fragility_sets_liq[
                    facility["id"]].id
                damage_result['liqdemandtypes'] = liq_demand_types
                damage_result['liqdemandunits'] = liq_demand_units
                damage_result['liqhazval'] = liq_hazard_vals
                damage_result['liqprobability'] = liquefaction_prob
            else:
                damage_result['liq_fragility_id'] = None
                damage_result['liqdemandtypes'] = None
                damage_result['liqdemandunits'] = None
                damage_result['liqhazval'] = None
                damage_result['liqprobability'] = None

            facility_results.append(facility_result)
            damage_results.append(damage_result)

        for facility in unmapped_waterfacilities:
            facility_result = dict()
            damage_result = dict()
            facility_result['guid'] = facility['properties']['guid']
            damage_result['guid'] = facility['properties']['guid']
            damage_result['fragility_id'] = None
            damage_result['demandtypes'] = None
            damage_result['demandunits'] = None
            damage_result['hazardtype'] = None
            damage_result['hazardvals'] = None
            damage_result['liq_fragility_id'] = None
            damage_result['liqdemandtypes'] = None
            damage_result['liqdemandunits'] = None
            damage_result['liqhazval'] = None
            damage_result['liqprobability'] = None

            facility_results.append(facility_result)
            damage_results.append(damage_result)

        return facility_results, damage_results

    def get_spec(self):
        """Get specifications of the water facility damage analysis.

        Returns:
            obj: A JSON object of specifications of the water facility damage analysis.

        """
        return {
            'name': 'water-facility-damage',
            'description': 'water facility damage analysis',
            'input_parameters': [
                {
                    'id': 'result_name',
                    'required': False,
                    'description': 'result dataset name',
                    'type': str
                },
                {
                    'id': 'hazard_type',
                    'required': True,
                    'description': 'Hazard Type (e.g. earthquake)',
                    'type': str
                },
                {
                    'id': 'hazard_id',
                    'required': True,
                    'description': 'Hazard ID',
                    'type': str
                },
                {
                    'id': 'fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in mapping dataset',
                    'type': str
                },
                {
                    'id': 'use_liquefaction',
                    'required': False,
                    'description': 'Use liquefaction',
                    'type': bool
                },
                {
                    'id': 'liquefaction_geology_dataset_id',
                    'required': False,
                    'description': 'Liquefaction geology/susceptibility dataset id. '
                                   'If not provided, liquefaction will be ignored',
                    'type': str
                },
                {
                    'id': 'liquefaction_fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in liquefaction mapping dataset',
                    'type': str
                },
                {
                    'id': 'use_hazard_uncertainty',
                    'required': False,
                    'description': 'Use hazard uncertainty',
                    'type': bool
                },
                {
                    'id': 'num_cpu',
                    'required': False,
                    'description': 'If using parallel execution, the number of cpus to request',
                    'type': int
                },
            ],
            'input_datasets': [{
                'id': 'water_facilities',
                'required': True,
                'description': 'Water Facility Inventory',
                'type': ['ergo:waterFacilityTopo'],
            }, {
                'id': 'dfr3_mapping_set',
                'required': True,
                'description': 'DFR3 Mapping Set Object',
                'type': ['incore:dfr3MappingSet'],
            }],
            'output_datasets': [{
                'id': 'result',
                'parent_type': 'water_facilities',
                'description': 'A csv file with limit state probabilities and damage states '
                               'for each water facility',
                'type': 'ergo:waterFacilityDamageVer6'
            }, {
                'id': 'metadata',
                'parent_type': 'water_facilities',
                'description': 'additional metadata in json file about applied hazard value and '
                               'fragility',
                'type': 'incore:waterFacilityDamageSupplement'
            }]
        }
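
The values_payload assembled in waterfacilityset_damage_analysis_bulk_input holds
one entry per mapped facility: the fragility's demand types and units plus a
"lat,lon" location string. A sketch of what gets posted to the hazard service (the
demand name and unit below are illustrative, not read from a real fragility):

    values_payload = [
        {
            "demands": ["PGA"],          # fragility_set.demand_types
            "units": ["g"],              # fragility_set.demand_units
            "loc": "35.1497,-90.0490"    # "y,x" from GeoUtil.get_location()
        },
        # ... one entry per mapped facility
    ]
    hazard_resp = hazardsvc.post_earthquake_hazard_values(hazard_dataset_id, values_payload)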
Example #16
class BridgeDamage(BaseAnalysis):
    """Computes bridge structural damage for earthquake, tsunami, tornado, and hurricane hazards.

    Args:
        incore_client (IncoreClient): Service authentication.

    """
    def __init__(self, incore_client):
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(BridgeDamage, self).__init__(incore_client)

    def run(self):
        """Executes bridge damage analysis."""
        # Bridge dataset
        bridge_set = self.get_input_dataset("bridges").get_inventory_reader()

        # Get hazard input
        hazard_type = self.get_parameter("hazard_type")
        hazard_dataset_id = self.get_parameter("hazard_id")
        user_defined_cpu = 1

        if self.get_parameter("num_cpu") is not None and self.get_parameter(
                "num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(
            self, len(bridge_set), user_defined_cpu)

        avg_bulk_input_size = int(len(bridge_set) / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(bridge_set)
        while count < len(inventory_list):
            inventory_args.append(inventory_list[count:count +
                                                 avg_bulk_input_size])
            count += avg_bulk_input_size

        (ds_results, damage_results) = self.bridge_damage_concurrent_future(
            self.bridge_damage_analysis_bulk_input, num_workers,
            inventory_args, repeat(hazard_type), repeat(hazard_dataset_id))

        self.set_result_csv_data("result",
                                 ds_results,
                                 name=self.get_parameter("result_name"))
        self.set_result_json_data("metadata",
                                  damage_results,
                                  name=self.get_parameter("result_name") +
                                  "_additional_info")

        return True

    def bridge_damage_concurrent_future(self, function_name, num_workers,
                                        *args):
        """Utilizes concurrent.future module.

        Args:
            function_name (function): The function to be parallelized.
            num_workers (int): Maximum number workers in parallelization.
            *args: All the arguments in order to pass into parameter function_name.

        Returns:
            list: A list of ordered dictionaries with bridge damage values and other data/metadata.

        """
        output_ds = []
        output_dmg = []
        with concurrent.futures.ProcessPoolExecutor(
                max_workers=num_workers) as executor:
            for ret1, ret2 in executor.map(function_name, *args):
                output_ds.extend(ret1)
                output_dmg.extend(ret2)

        return output_ds, output_dmg

    def bridge_damage_analysis_bulk_input(self, bridges, hazard_type,
                                          hazard_dataset_id):
        """Run analysis for multiple bridges.

        Args:
            bridges (list): Multiple bridges from input inventory set.
            hazard_type (str): Hazard type, either earthquake, tornado, tsunami, or hurricane.
            hazard_dataset_id (str): An id of the hazard exposure.

        Returns:
            list: A list of ordered dictionaries with bridge damage values and other data/metadata.

        """
        # Get Fragility key
        fragility_key = self.get_parameter("fragility_key")
        if fragility_key is None:
            fragility_key = BridgeUtil.DEFAULT_TSUNAMI_HMAX_FRAGILITY_KEY if hazard_type == 'tsunami' else \
                BridgeUtil.DEFAULT_FRAGILITY_KEY
            self.set_parameter("fragility_key", fragility_key)

        # Hazard Uncertainty
        use_hazard_uncertainty = False
        if hazard_type == "earthquake" and self.get_parameter(
                "use_hazard_uncertainty") is not None:
            use_hazard_uncertainty = self.get_parameter(
                "use_hazard_uncertainty")

        # Liquefaction
        use_liquefaction = False
        if hazard_type == "earthquake" and self.get_parameter(
                "use_liquefaction") is not None:
            use_liquefaction = self.get_parameter("use_liquefaction")

        fragility_set = self.fragilitysvc.match_inventory(
            self.get_input_dataset("dfr3_mapping_set"), bridges, fragility_key)

        values_payload = []
        unmapped_bridges = []
        mapped_bridges = []
        for b in bridges:
            bridge_id = b["id"]
            if bridge_id in fragility_set:
                location = GeoUtil.get_location(b)
                loc = str(location.y) + "," + str(location.x)

                demands = fragility_set[bridge_id].demand_types
                units = fragility_set[bridge_id].demand_units
                value = {"demands": demands, "units": units, "loc": loc}
                values_payload.append(value)
                mapped_bridges.append(b)

            else:
                unmapped_bridges.append(b)

        # not needed anymore as they are already split into mapped and unmapped
        del bridges

        if hazard_type == 'earthquake':
            hazard_vals = self.hazardsvc.post_earthquake_hazard_values(
                hazard_dataset_id, values_payload)
        elif hazard_type == 'tornado':
            hazard_vals = self.hazardsvc.post_tornado_hazard_values(
                hazard_dataset_id, values_payload)
        elif hazard_type == 'tsunami':
            hazard_vals = self.hazardsvc.post_tsunami_hazard_values(
                hazard_dataset_id, values_payload)
        elif hazard_type == 'hurricane':
            hazard_vals = self.hazardsvc.post_hurricane_hazard_values(
                hazard_dataset_id, values_payload)
        elif hazard_type == 'flood':
            hazard_vals = self.hazardsvc.post_flood_hazard_values(
                hazard_dataset_id, values_payload)
        else:
            raise ValueError(
                "The provided hazard type is not supported yet by this analysis"
            )

        ds_results = []
        damage_results = []

        for i, bridge in enumerate(mapped_bridges):
            ds_result = dict()
            damage_result = dict()
            dmg_probability = dict()
            dmg_intervals = dict()
            selected_fragility_set = fragility_set[bridge["id"]]

            if isinstance(selected_fragility_set.fragility_curves[0],
                          DFR3Curve):
                # Supports multiple demand types in same fragility
                hazard_val = AnalysisUtil.update_precision_of_lists(
                    hazard_vals[i]["hazardValues"])
                input_demand_types = hazard_vals[i]["demands"]
                input_demand_units = hazard_vals[i]["units"]

                hval_dict = dict()
                for j, d in enumerate(selected_fragility_set.demand_types):
                    hval_dict[d] = hazard_val[j]

                if not AnalysisUtil.do_hazard_values_have_errors(
                        hazard_vals[i]["hazardValues"]):
                    bridge_args = selected_fragility_set.construct_expression_args_from_inventory(
                        bridge)
                    dmg_probability = \
                        selected_fragility_set.calculate_limit_state(hval_dict,
                                                                     inventory_type="bridge",
                                                                     **bridge_args)
                    dmg_intervals = selected_fragility_set.calculate_damage_interval(
                        dmg_probability,
                        hazard_type=hazard_type,
                        inventory_type="bridge")
            else:
                raise ValueError(
                    "One of the fragilities is in a deprecated format. This should not happen. "
                    "If you are seeing this, please report the issue.")

            retrofit_cost = BridgeUtil.get_retrofit_cost(fragility_key)
            retrofit_type = BridgeUtil.get_retrofit_type(fragility_key)

            ds_result['guid'] = bridge['properties']['guid']
            ds_result.update(dmg_probability)
            ds_result.update(dmg_intervals)
            ds_result[
                'haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(
                    hazard_val, hazard_type)

            damage_result['guid'] = bridge['properties']['guid']
            damage_result['fragility_id'] = selected_fragility_set.id
            damage_result["retrofit"] = retrofit_type
            damage_result["retrocost"] = retrofit_cost
            damage_result["demandtypes"] = input_demand_types
            damage_result["demandunits"] = input_demand_units
            damage_result["hazardtype"] = hazard_type
            damage_result["hazardval"] = hazard_val

            # add spans to bridge output so mean damage calculation can use that info
            if "spans" in bridge["properties"] and bridge["properties"][
                    "spans"] is not None:
                if isinstance(bridge["properties"]["spans"],
                              str) and bridge["properties"]["spans"].isdigit():
                    damage_result['spans'] = int(bridge["properties"]["spans"])
                elif isinstance(bridge["properties"]["spans"], int):
                    damage_result['spans'] = bridge["properties"]["spans"]
            elif "SPANS" in bridge["properties"] and bridge["properties"][
                    "SPANS"] is not None:
                if isinstance(bridge["properties"]["SPANS"],
                              str) and bridge["properties"]["SPANS"].isdigit():
                    damage_result['SPANS'] = int(bridge["properties"]["SPANS"])
                elif isinstance(bridge["properties"]["SPANS"], int):
                    damage_result['SPANS'] = bridge["properties"]["SPANS"]
            else:
                damage_result['spans'] = 1

            ds_results.append(ds_result)
            damage_results.append(damage_result)

        for bridge in unmapped_bridges:
            ds_result = dict()
            damage_result = dict()

            ds_result['guid'] = bridge['properties']['guid']

            damage_result['guid'] = bridge['properties']['guid']
            damage_result["retrofit"] = None
            damage_result["retrocost"] = None
            damage_result["demandtypes"] = None
            damage_result['demandunits'] = None
            damage_result["hazardtype"] = None
            damage_result['hazardval'] = None
            damage_result['spans'] = None

            ds_results.append(ds_result)
            damage_results.append(damage_result)

        return ds_results, damage_results

    def get_spec(self):
        """Get specifications of the bridge damage analysis.

        Returns:
            obj: A JSON object of specifications of the bridge damage analysis.

        """
        return {
            'name': 'bridge-damage',
            'description': 'bridge damage analysis',
            'input_parameters': [
                {
                    'id': 'result_name',
                    'required': True,
                    'description': 'result dataset name',
                    'type': str
                },
                {
                    'id': 'hazard_type',
                    'required': True,
                    'description': 'Hazard Type (e.g. earthquake)',
                    'type': str
                },
                {
                    'id': 'hazard_id',
                    'required': True,
                    'description': 'Hazard ID',
                    'type': str
                },
                {
                    'id': 'fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in mapping dataset',
                    'type': str
                },
                {
                    'id': 'use_liquefaction',
                    'required': False,
                    'description': 'Use liquefaction',
                    'type': bool
                },
                {
                    'id': 'use_hazard_uncertainty',
                    'required': False,
                    'description': 'Use hazard uncertainty',
                    'type': bool
                },
                {
                    'id': 'num_cpu',
                    'required': False,
                    'description': 'If using parallel execution, the number of cpus to request',
                    'type': int
                },
            ],
            'input_datasets': [{
                'id': 'bridges',
                'required': True,
                'description': 'Bridge Inventory',
                'type': ['ergo:bridges', 'ergo:bridgesVer2', 'ergo:bridgesVer3'],
            }, {
                'id': 'dfr3_mapping_set',
                'required': True,
                'description': 'DFR3 Mapping Set Object',
                'type': ['incore:dfr3MappingSet'],
            }],
            'output_datasets': [{
                'id': 'result',
                'parent_type': 'bridges',
                'description': 'CSV file of bridge structural damage',
                'type': 'ergo:bridgeDamageVer3'
            }, {
                'id': 'metadata',
                'parent_type': 'bridges',
                'description': 'additional metadata in json file about applied hazard value and '
                               'fragility',
                'type': 'incore:bridgeDamageSupplement'
            }]
        }
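
Both run() methods above slice the inventory into roughly num_workers equal chunks
before handing them to executor.map. The slicing loop behaves like this standalone
sketch with toy data:

    inventory_list = list(range(10))  # stand-in for an inventory of 10 bridges
    num_workers = 3
    avg_bulk_input_size = int(len(inventory_list) / num_workers)  # -> 3
    inventory_args = []
    count = 0
    while count < len(inventory_list):
        inventory_args.append(inventory_list[count:count + avg_bulk_input_size])
        count += avg_bulk_input_size
    # inventory_args == [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
    # the remainder yields one extra small chunk, which executor.map simply
    # processes as an additional task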
Example #17
class RoadDamage(BaseAnalysis):
    """Road Damage Analysis calculates the probability of road damage based on an earthquake or tsunami hazard.

    Args:
        incore_client (IncoreClient): Service authentication.

    """
    DEFAULT_FRAGILITY_KEY = "Non-Retrofit Fragility ID Code"

    def __init__(self, incore_client):
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(RoadDamage, self).__init__(incore_client)

    def run(self):
        """Executes road damage analysis."""
        # Road dataset
        road_set = self.get_input_dataset("roads").get_inventory_reader()

        # Get Fragility key
        fragility_key = self.get_parameter("fragility_key")
        if fragility_key is None:
            fragility_key = self.DEFAULT_FRAGILITY_KEY

        # Get hazard input
        hazard_dataset_id = self.get_parameter("hazard_id")

        # Get hazard type
        hazard_type = self.get_parameter("hazard_type")

        # Liquefaction
        use_liquefaction = False
        if self.get_parameter("use_liquefaction") is not None:
            use_liquefaction = self.get_parameter("use_liquefaction")

        # Get geology dataset for liquefaction
        geology_dataset_id = None
        if self.get_parameter("liquefaction_geology_dataset_id") is not None:
            geology_dataset_id = self.get_parameter("liquefaction_geology_dataset_id")

        # Hazard Uncertainty
        use_hazard_uncertainty = False
        if self.get_parameter("use_hazard_uncertainty") is not None:
            use_hazard_uncertainty = self.get_parameter("use_hazard_uncertainty")

        user_defined_cpu = 1
        if self.get_parameter("num_cpu") is not None and self.get_parameter("num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(self, len(road_set), user_defined_cpu)

        avg_bulk_input_size = int(len(road_set) / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(road_set)
        while count < len(inventory_list):
            inventory_args.append(inventory_list[count:count + avg_bulk_input_size])
            count += avg_bulk_input_size

        (ds_results, damage_results) = self.road_damage_concurrent_future(self.road_damage_analysis_bulk_input,
                                                                          num_workers,
                                                                          inventory_args,
                                                                          repeat(hazard_type),
                                                                          repeat(hazard_dataset_id),
                                                                          repeat(use_hazard_uncertainty),
                                                                          repeat(geology_dataset_id),
                                                                          repeat(fragility_key),
                                                                          repeat(use_liquefaction))

        self.set_result_csv_data("result", ds_results, name=self.get_parameter("result_name"))
        self.set_result_json_data("metadata",
                                  damage_results,
                                  name=self.get_parameter("result_name") + "_additional_info")

        return True

    def road_damage_concurrent_future(self, function_name, num_workers, *args):
        """Utilizes concurrent.future module.

        Args:
            function_name (function): The function to be parallelized.
            num_workers (int): Number of workers in parallelization.
            *args: All the arguments in order to pass into parameter function_name.

        Returns:
            output_ds: A list of ordered dictionaries with road damage values.
            output_dmg: A list of ordered dictionaries with other road data/metadata.

        """

        output_ds = []
        output_dmg = []
        with concurrent.futures.ProcessPoolExecutor(max_workers=num_workers) as executor:
            for ret1, ret2 in executor.map(function_name, *args):
                output_ds.extend(ret1)
                output_dmg.extend(ret2)

        return output_ds, output_dmg

    def road_damage_analysis_bulk_input(self, roads, hazard_type, hazard_dataset_id, use_hazard_uncertainty,
                                        geology_dataset_id, fragility_key, use_liquefaction):
        """Run analysis for multiple roads.

        Args:
            roads (list): Multiple roads from input inventory set.
            hazard_type (str): A hazard type of the hazard exposure (earthquake or tsunami).
            hazard_dataset_id (str): An id of the hazard exposure.
            use_hazard_uncertainty (bool): Flag indicating whether to use hazard uncertainty.
            geology_dataset_id (str): An id of the geology for use in liquefaction.
            fragility_key (str): Fragility key describing the type of fragility.
            use_liquefaction (bool): True to use liquefaction information to modify the damage,
                False otherwise.

        Returns:
            list: A list of ordered dictionaries with road damage values and other data/metadata.
            list: A list of ordered dictionaries with other road data/metadata.

        """
        fragility_sets = self.fragilitysvc.match_inventory(self.get_input_dataset("dfr3_mapping_set"), roads,
                                                           fragility_key)

        values_payload = []
        mapped_roads = []
        unmapped_roads = []
        pgd_flag = True  # for liquefaction
        liquefaction_resp = None

        for road in roads:
            if road["id"] in fragility_sets.keys():
                fragility_set = fragility_sets[road["id"]]
                location = GeoUtil.get_location(road)
                loc = str(location.y) + "," + str(location.x)
                demands = fragility_set.demand_types
                # liquefaction (PGD) is only applied when every demand type is PGD
                if any(demand.lower() != 'pgd' for demand in demands):
                    pgd_flag = False
                units = fragility_set.demand_units
                value = {
                    "demands": demands,
                    "units": units,
                    "loc": loc
                }
                values_payload.append(value)
                mapped_roads.append(road)
            else:
                unmapped_roads.append(road)
        del roads

        # get hazard and liquefaction values
        if hazard_type == 'earthquake':
            hazard_resp = self.hazardsvc.post_earthquake_hazard_values(hazard_dataset_id, values_payload)

            if pgd_flag and use_liquefaction and geology_dataset_id is not None:
                liquefaction_resp = self.hazardsvc.post_liquefaction_values(hazard_dataset_id, geology_dataset_id,
                                                                            values_payload)

        elif hazard_type == 'tsunami':
            hazard_resp = self.hazardsvc.post_tsunami_hazard_values(hazard_dataset_id, values_payload)
        elif hazard_type == 'hurricane':
            hazard_resp = self.hazardsvc.post_hurricane_hazard_values(hazard_dataset_id, values_payload)
        else:
            raise ValueError("The provided hazard type is not supported yet by this analysis")

        # calculate LS and DS
        ds_results = []
        damage_results = []
        for i, road in enumerate(mapped_roads):
            dmg_probability = dict()
            dmg_interval = dict()
            demand_types_liq = None
            demand_units_liq = None
            liq_hazard_vals = None
            liquefaction_prob = None
            selected_fragility_set = fragility_sets[road["id"]]
            hazard_std_dev = 0.0
            if use_hazard_uncertainty:
                raise ValueError("Uncertainty Not Implemented Yet.")

            if isinstance(selected_fragility_set.fragility_curves[0], DFR3Curve):
                hazard_vals = AnalysisUtil.update_precision_of_lists(hazard_resp[i]["hazardValues"])
                demand_types = hazard_resp[i]["demands"]
                demand_units = hazard_resp[i]["units"]
                hval_dict = dict()
                for j, d in enumerate(selected_fragility_set.demand_types):
                    hval_dict[d] = hazard_vals[j]

                if not AnalysisUtil.do_hazard_values_have_errors(hazard_resp[i]["hazardValues"]):
                    road_args = selected_fragility_set.construct_expression_args_from_inventory(road)
                    dmg_probability = selected_fragility_set.calculate_limit_state(
                        hval_dict, inventory_type='road', **road_args)

                    # if there is liquefaction, overwrite the hazard values with the
                    # liquefaction values and recalculate dmg_probability and dmg_interval
                    if liquefaction_resp is not None and len(liquefaction_resp) > 0:
                        liq_hazard_vals = AnalysisUtil.update_precision_of_lists(liquefaction_resp[i]["pgdValues"])
                        demand_types_liq = liquefaction_resp[i]['demands']
                        demand_units_liq = liquefaction_resp[i]['units']
                        liquefaction_prob = liquefaction_resp[i]['liqProbability']
                        liq_hval_dict = dict()
                        for j, d in enumerate(liquefaction_resp[i]["demands"]):
                            liq_hval_dict[d] = liq_hazard_vals[j]
                        dmg_probability = selected_fragility_set.calculate_limit_state(
                            liq_hval_dict,
                            inventory_type='road',
                            **road_args)

                    dmg_interval = selected_fragility_set.calculate_damage_interval(dmg_probability,
                                                                                    hazard_type=hazard_type,
                                                                                    inventory_type="road")
            else:
                raise ValueError("One of the fragilities is in deprecated format. This should not happen. If you are "
                                 "seeing this please report the issue.")

            ds_result = dict()
            ds_result['guid'] = road['properties']['guid']
            ds_result.update(dmg_probability)
            ds_result.update(dmg_interval)
            ds_result['haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(hazard_vals, hazard_type)

            damage_result = dict()
            damage_result['guid'] = road['properties']['guid']
            damage_result['fragility_id'] = selected_fragility_set.id
            damage_result['demandtypes'] = demand_types
            damage_result['demandunits'] = demand_units
            damage_result['hazardtype'] = hazard_type
            damage_result['hazardvals'] = hazard_vals
            damage_result['liqdemandtypes'] = demand_types_liq
            damage_result['liqdemandunits'] = demand_units_liq
            damage_result['liqhazvals'] = liq_hazard_vals
            damage_result['liqprobability'] = liquefaction_prob

            ds_results.append(ds_result)
            damage_results.append(damage_result)

        for road in unmapped_roads:
            ds_result = dict()
            damage_result = dict()

            ds_result['guid'] = road['properties']['guid']

            damage_result['guid'] = road['properties']['guid']
            damage_result['fragility_id'] = None
            damage_result['demandtypes'] = None
            damage_result['demandunits'] = None
            damage_result['hazardtype'] = None
            damage_result['hazardvals'] = None
            damage_result['liqdemandtypes'] = None
            damage_result['liqdemandunits'] = None
            damage_result['liqhazvals'] = None
            damage_result['liqprobability'] = None

            ds_results.append(ds_result)
            damage_results.append(damage_result)

        return ds_results, damage_results

    def get_spec(self):
        """Get specifications of the road damage analysis.

        Returns:
            obj: A JSON object of specifications of the road damage analysis.

        """

        return {
            'name': 'road-damage',
            'description': 'road damage analysis',
            'input_parameters': [
                {
                    'id': 'result_name',
                    'required': True,
                    'description': 'result dataset name',
                    'type': str
                },
                {
                    'id': 'hazard_type',
                    'required': True,
                    'description': 'Hazard Type (e.g. earthquake)',
                    'type': str
                },
                {
                    'id': 'hazard_id',
                    'required': True,
                    'description': 'Hazard ID',
                    'type': str
                },
                {
                    'id': 'fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in mapping dataset',
                    'type': str
                },
                {
                    'id': 'use_liquefaction',
                    'required': False,
                    'description': 'Use liquefaction',
                    'type': bool
                },
                {
                    'id': 'liquefaction_geology_dataset_id',
                    'required': False,
                    'description': 'Liquefaction geology/susceptibility dataset id. '
                                   'If not provided, liquefaction will be ignored',
                    'type': str
                },
                {
                    'id': 'use_hazard_uncertainty',
                    'required': False,
                    'description': 'Use hazard uncertainty',
                    'type': bool
                },
                {
                    'id': 'num_cpu',
                    'required': False,
                    'description': 'If using parallel execution, the number of cpus to request',
                    'type': int
                },
            ],
            'input_datasets': [
                {
                    'id': 'roads',
                    'required': True,
                    'description': 'Road Inventory',
                    'type': ['ergo:roadLinkTopo', 'incore:roads', 'ergo:roadLinkTopoVer2']
                },
                {
                    'id': 'dfr3_mapping_set',
                    'required': True,
                    'description': 'DFR3 Mapping Set Object',
                    'type': ['incore:dfr3MappingSet'],
                }
            ],
            'output_datasets': [
                {
                    'id': 'result',
                    'parent_type': 'roads',
                    'description': 'CSV file of road structural damage',
                    'type': 'ergo:roadDamageVer3'
                },
                {
                    'id': 'metadata',
                    'parent_type': 'roads',
                    'description': 'additional metadata in json file about applied hazard value and '
                                   'fragility',
                    'type': 'incore:roadDamageSupplement'
                }
            ]
        }
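
# A minimal usage sketch for the road damage analysis defined above, mirroring the
# other examples in this document. The class name (RoadDamage) is assumed from the
# spec name, and every ID below is a placeholder rather than a real dataset.
def run_road_damage_example():
    client = IncoreClient(INCORE_API_DEV_URL)
    road_dmg = RoadDamage(client)
    road_dmg.load_remote_input_dataset("roads", "<road-inventory-dataset-id>")
    fragility_service = FragilityService(client)
    mapping_set = MappingSet(fragility_service.get_mapping("<road-fragility-mapping-id>"))
    road_dmg.set_input_dataset('dfr3_mapping_set', mapping_set)
    road_dmg.set_parameter("result_name", "road-dmg-results")
    road_dmg.set_parameter("hazard_type", "earthquake")
    road_dmg.set_parameter("hazard_id", "<earthquake-hazard-id>")
    road_dmg.set_parameter("num_cpu", 4)
    road_dmg.run_analysis()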
Example #18
class BridgeDamage(BaseAnalysis):
    """Computes bridge structural damage for earthquake, tsunami, tornado, and hurricane hazards.

    Args:
        incore_client (IncoreClient): Service authentication.

    """

    def __init__(self, incore_client):
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(BridgeDamage, self).__init__(incore_client)

    def run(self):
        """Executes bridge damage analysis."""
        # Bridge dataset
        bridge_set = self.get_input_dataset("bridges").get_inventory_reader()

        # Get hazard input
        hazard_type = self.get_parameter("hazard_type")
        hazard_dataset_id = self.get_parameter("hazard_id")
        user_defined_cpu = 1

        if self.get_parameter("num_cpu") is not None and self.get_parameter(
                "num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(self, len(
            bridge_set), user_defined_cpu)

        avg_bulk_input_size = int(len(bridge_set) / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(bridge_set)
        while count < len(inventory_list):
            inventory_args.append(
                inventory_list[count:count + avg_bulk_input_size])
            count += avg_bulk_input_size

        results = self.bridge_damage_concurrent_future(
            self.bridge_damage_analysis_bulk_input, num_workers,
            inventory_args, repeat(hazard_type),
            repeat(hazard_dataset_id))

        self.set_result_csv_data("result", results,
                                 name=self.get_parameter("result_name"))

        return True

    def bridge_damage_concurrent_future(self, function_name, num_workers,
                                        *args):
        """Utilizes concurrent.future module.

        Args:
            function_name (function): The function to be parallelized.
            num_workers (int): Maximum number of workers in parallelization.
            *args: All the arguments in order to pass into parameter function_name.

        Returns:
            list: A list of ordered dictionaries with bridge damage values and other data/metadata.

        """
        output = []
        with concurrent.futures.ProcessPoolExecutor(
                max_workers=num_workers) as executor:
            for ret in executor.map(function_name, *args):
                output.extend(ret)

        return output

    def bridge_damage_analysis_bulk_input(self, bridges, hazard_type,
                                          hazard_dataset_id):
        """Run analysis for multiple bridges.

        Args:
            bridges (list): Multiple bridges from input inventory set.
            hazard_type (str): Hazard type, either earthquake, tornado, tsunami, or hurricane.
            hazard_dataset_id (str): An id of the hazard exposure.

        Returns:
            list: A list of ordered dictionaries with bridge damage values and other data/metadata.

        """
        # Get Fragility key
        fragility_key = self.get_parameter("fragility_key")
        if fragility_key is None:
            fragility_key = BridgeUtil.DEFAULT_TSUNAMI_HMAX_FRAGILITY_KEY if hazard_type == 'tsunami' else \
                BridgeUtil.DEFAULT_FRAGILITY_KEY
            self.set_parameter("fragility_key", fragility_key)

        # Hazard Uncertainty
        use_hazard_uncertainty = False
        if hazard_type == "earthquake" and self.get_parameter(
                "use_hazard_uncertainty") is not None:
            use_hazard_uncertainty = self.get_parameter(
                "use_hazard_uncertainty")

        # Liquefaction
        use_liquefaction = False
        if hazard_type == "earthquake" and self.get_parameter(
                "use_liquefaction") is not None:
            use_liquefaction = self.get_parameter("use_liquefaction")

        fragility_set = self.fragilitysvc.match_inventory(self.get_input_dataset("dfr3_mapping_set"), bridges,
                                                          fragility_key)

        bridge_results = []
        list_bridges = bridges

        # Converting list of bridges into a dictionary for ease of reference
        bridges = dict()
        for br in list_bridges:
            bridges[br["id"]] = br
        list_bridges = None  # Clear as it's not needed anymore

        processed_bridges = []
        grouped_bridges = AnalysisUtil.group_by_demand_type(bridges, fragility_set)
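        # grouped_bridges maps (demand_type, demand_units) pairs to lists of bridge
        # ids, so each unique demand group can be sent to the hazard service together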

        for demand, grouped_brs in grouped_bridges.items():

            input_demand_type = demand[0]
            input_demand_units = demand[1]

            # For every group of unique demand and demand unit, call the end-point once
            br_chunks = list(AnalysisUtil.chunks(grouped_brs, 50))  # TODO: Move to globals?
            for brs in br_chunks:
                points = []
                for br_id in brs:
                    location = GeoUtil.get_location(bridges[br_id])
                    points.append(str(location.y) + "," + str(location.x))

                if hazard_type == "earthquake":
                    hazard_vals = \
                        self.hazardsvc.get_earthquake_hazard_values(
                            hazard_dataset_id,
                            input_demand_type,
                            input_demand_units,
                            points)
                elif hazard_type == "tsunami":
                    hazard_vals = self.hazardsvc.get_tsunami_hazard_values(
                        hazard_dataset_id, input_demand_type, input_demand_units, points)
                elif hazard_type == "tornado":
                    hazard_vals = self.hazardsvc.get_tornado_hazard_values(
                        hazard_dataset_id, input_demand_units, points)
                elif hazard_type == "hurricane":
                    hazard_vals = self.hazardsvc.get_hurricanewf_values(
                        hazard_dataset_id, input_demand_type, input_demand_units, points)
                else:
                    raise ValueError("We only support Earthquake, Tornado, Tsunami, and Hurricane at the moment!")

                # Parse the batch hazard value results and map them back to the bridge and fragility.
                # This is a potential pitfall as we are relying on the order of the returned results
                i = 0
                for br_id in brs:
                    bridge_result = collections.OrderedDict()
                    bridge = bridges[br_id]
                    selected_fragility_set = fragility_set[br_id]

                    hazard_val = hazard_vals[i]['hazardValue']

                    hazard_std_dev = 0.0
                    if use_hazard_uncertainty:
                        # TODO Get this from API once implemented
                        raise ValueError("Uncertainty Not Implemented!")

                    adjusted_fragility_set = copy.deepcopy(selected_fragility_set)
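                    # the deep copy above keeps the liquefaction adjustment below from
                    # mutating the fragility set shared by other bridges mapped to it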
                    if use_liquefaction and 'liq' in bridge['properties']:
                        for fragility in adjusted_fragility_set.fragility_curves:
                            fragility.adjust_fragility_for_liquefaction(bridge['properties']['liq'])

                    dmg_probability = adjusted_fragility_set.calculate_limit_state(hazard_val, std_dev=hazard_std_dev)
                    retrofit_cost = BridgeUtil.get_retrofit_cost(fragility_key)
                    retrofit_type = BridgeUtil.get_retrofit_type(fragility_key)

                    dmg_intervals = AnalysisUtil.calculate_damage_interval(dmg_probability)

                    bridge_result['guid'] = bridge['properties']['guid']
                    bridge_result.update(dmg_probability)
                    bridge_result.update(dmg_intervals)
                    bridge_result["retrofit"] = retrofit_type
                    bridge_result["retrocost"] = retrofit_cost
                    bridge_result["demandtype"] = input_demand_type
                    bridge_result["demandunits"] = input_demand_units
                    bridge_result["hazardtype"] = hazard_type
                    bridge_result["hazardval"] = hazard_val

                    # add spans to bridge output so mean damage calculation can use that info
                    if "spans" in bridge["properties"] and bridge["properties"]["spans"] \
                            is not None and bridge["properties"]["spans"].isdigit():
                        bridge_result['spans'] = int(bridge["properties"]["spans"])
                    elif "SPANS" in bridge["properties"] and bridge["properties"]["SPANS"] \
                            is not None and bridge["properties"]["SPANS"].isdigit():
                        bridge_result['spans'] = int(bridge["properties"]["SPANS"])
                    else:
                        bridge_result['spans'] = 1

                    bridge_results.append(bridge_result)
                    processed_bridges.append(br_id)  # record processed bridges
                    i = i + 1

        unmapped_dmg_probability = {"ls-slight": 0.0, "ls-moderat": 0.0,
                                    "ls-extensi": 0.0, "ls-complet": 0.0}
        unmapped_dmg_intervals = AnalysisUtil.calculate_damage_interval(unmapped_dmg_probability)
        for br_id, br in bridges.items():
            if br_id not in processed_bridges:
                unmapped_br_result = collections.OrderedDict()
                unmapped_br_result['guid'] = br['properties']['guid']
                unmapped_br_result.update(unmapped_dmg_probability)
                unmapped_br_result.update(unmapped_dmg_intervals)
                unmapped_br_result["retrofit"] = "Non-Retrofit"
                unmapped_br_result["retrocost"] = 0.0
                unmapped_br_result["demandtype"] = "None"
                unmapped_br_result['demandunits'] = "None"
                unmapped_br_result["hazardtype"] = "None"
                unmapped_br_result['hazardval'] = 0.0
                bridge_results.append(unmapped_br_result)

        return bridge_results

    def get_spec(self):
        """Get specifications of the bridge damage analysis.

        Returns:
            obj: A JSON object of specifications of the bridge damage analysis.

        """
        return {
            'name': 'bridge-damage',
            'description': 'bridge damage analysis',
            'input_parameters': [
                {
                    'id': 'result_name',
                    'required': True,
                    'description': 'result dataset name',
                    'type': str
                },
                {
                    'id': 'hazard_type',
                    'required': True,
                    'description': 'Hazard Type (e.g. earthquake)',
                    'type': str
                },
                {
                    'id': 'hazard_id',
                    'required': True,
                    'description': 'Hazard ID',
                    'type': str
                },
                {
                    'id': 'fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in mapping dataset',
                    'type': str
                },
                {
                    'id': 'use_liquefaction',
                    'required': False,
                    'description': 'Use liquefaction',
                    'type': bool
                },
                {
                    'id': 'use_hazard_uncertainty',
                    'required': False,
                    'description': 'Use hazard uncertainty',
                    'type': bool
                },
                {
                    'id': 'num_cpu',
                    'required': False,
                    'description': 'If using parallel execution, the number of cpus to request',
                    'type': int
                },
            ],
            'input_datasets': [
                {
                    'id': 'bridges',
                    'required': True,
                    'description': 'Bridge Inventory',
                    'type': ['ergo:bridges'],
                },
                {
                    'id': 'dfr3_mapping_set',
                    'required': True,
                    'description': 'DFR3 Mapping Set Object',
                    'type': ['incore:dfr3MappingSet'],
                }
            ],
            'output_datasets': [
                {
                    'id': 'result',
                    'parent_type': 'bridges',
                    'description': 'CSV file of bridge structural damage',
                    'type': 'ergo:bridgeDamage'
                }
            ]
        }
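
# A minimal usage sketch for BridgeDamage, mirroring the other examples in this
# document; the dataset, mapping, and hazard IDs below are placeholders.
def run_bridge_damage_example():
    client = IncoreClient(INCORE_API_DEV_URL)
    bridge_dmg = BridgeDamage(client)
    bridge_dmg.load_remote_input_dataset("bridges", "<bridge-inventory-dataset-id>")
    fragility_service = FragilityService(client)
    mapping_set = MappingSet(fragility_service.get_mapping("<bridge-fragility-mapping-id>"))
    bridge_dmg.set_input_dataset('dfr3_mapping_set', mapping_set)
    bridge_dmg.set_parameter("result_name", "bridge-dmg-results")
    # with no explicit fragility_key, a tsunami run falls back to
    # BridgeUtil.DEFAULT_TSUNAMI_HMAX_FRAGILITY_KEY (see the analysis above)
    bridge_dmg.set_parameter("hazard_type", "tsunami")
    bridge_dmg.set_parameter("hazard_id", "<tsunami-hazard-id>")
    bridge_dmg.set_parameter("num_cpu", 4)
    bridge_dmg.run_analysis()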
Example #19
    def __init__(self, incore_client):
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(PipelineDamageRepairRate, self).__init__(incore_client)
Example #20
class BuildingDamage(BaseAnalysis):
    """Building Damage Analysis calculates the probability of building damage based on
    different hazard type such as earthquake, tsunami, and tornado.

    Args:
        incore_client (IncoreClient): Service authentication.

    """
    def __init__(self, incore_client):
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(BuildingDamage, self).__init__(incore_client)

    def run(self):
        """Executes building damage analysis."""
        # Building dataset
        bldg_set = self.get_input_dataset("buildings").get_inventory_reader()

        # Get hazard input
        hazard_dataset_id = self.get_parameter("hazard_id")

        # Hazard type of the exposure
        hazard_type = self.get_parameter("hazard_type")

        # Get Fragility key
        fragility_key = self.get_parameter("fragility_key")
        if fragility_key is None:
            fragility_key = BuildingUtil.DEFAULT_TSUNAMI_MMAX_FRAGILITY_KEY if hazard_type == 'tsunami' else \
                BuildingUtil.DEFAULT_FRAGILITY_KEY
            self.set_parameter("fragility_key", fragility_key)

        user_defined_cpu = 1

        if self.get_parameter("num_cpu") is not None and self.get_parameter(
                "num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(
            self, len(bldg_set), user_defined_cpu)

        avg_bulk_input_size = int(len(bldg_set) / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(bldg_set)
        while count < len(inventory_list):
            inventory_args.append(inventory_list[count:count +
                                                 avg_bulk_input_size])
            count += avg_bulk_input_size

        results = self.building_damage_concurrent_future(
            self.building_damage_analysis_bulk_input, num_workers,
            inventory_args, repeat(hazard_type), repeat(hazard_dataset_id))

        self.set_result_csv_data("result",
                                 results,
                                 name=self.get_parameter("result_name"))

        return True

    def building_damage_concurrent_future(self, function_name, parallelism,
                                          *args):
        """Utilizes concurrent.future module.

        Args:
            function_name (function): The function to be parallelized.
            parallelism (int): Number of workers in parallelization.
            *args: All the arguments in order to pass into parameter function_name.

        Returns:
            list: A list of ordered dictionaries with building damage values and other data/metadata.

        """
        output = []
        with concurrent.futures.ProcessPoolExecutor(
                max_workers=parallelism) as executor:
            for ret in executor.map(function_name, *args):
                output.extend(ret)

        return output

    def building_damage_analysis_bulk_input(self, buildings, hazard_type,
                                            hazard_dataset_id):
        """Run analysis for multiple buildings.

        Args:
            buildings (list): Multiple buildings from input inventory set.
            hazard_type (str): Hazard type, either earthquake, tornado, or tsunami.
            hazard_dataset_id (str): An id of the hazard exposure.

        Returns:
            list: A list of ordered dictionaries with building damage values and other data/metadata.

        """
        fragility_key = self.get_parameter("fragility_key")

        fragility_sets = self.fragilitysvc.match_inventory(
            self.get_input_dataset("dfr3_mapping_set"), buildings,
            fragility_key)

        bldg_results = []
        list_buildings = buildings

        buildings = dict()
        # Converting list of buildings into a dictionary for ease of reference
        for b in list_buildings:
            buildings[b["id"]] = b

        list_buildings = None  # Clear as it's not needed anymore

        grouped_buildings = AnalysisUtil.group_by_demand_type(buildings,
                                                              fragility_sets,
                                                              hazard_type,
                                                              is_building=True)

        for demand, grouped_bldgs in grouped_buildings.items():

            input_demand_type = demand[0]
            input_demand_units = demand[1]

            # For every group of unique demand and demand unit, call the end-point once
            bldg_chunks = list(AnalysisUtil.chunks(
                grouped_bldgs, 50))  # TODO: Move to globals?
            for bldgs in bldg_chunks:
                points = []
                for bldg_id in bldgs:
                    location = GeoUtil.get_location(buildings[bldg_id])
                    points.append(str(location.y) + "," + str(location.x))

                if hazard_type == 'earthquake':
                    hazard_vals = self.hazardsvc.get_earthquake_hazard_values(
                        hazard_dataset_id, input_demand_type,
                        input_demand_units, points)
                elif hazard_type == 'tornado':
                    hazard_vals = self.hazardsvc.get_tornado_hazard_values(
                        hazard_dataset_id, input_demand_units, points)
                elif hazard_type == 'tsunami':
                    hazard_vals = self.hazardsvc.get_tsunami_hazard_values(
                        hazard_dataset_id, input_demand_type,
                        input_demand_units, points)
                elif hazard_type == 'hurricane':
                    # TODO implement hurricane; raise instead of falling through,
                    # which would leave hazard_vals undefined below
                    raise ValueError("Hurricane is not yet implemented")

                # Parse the batch hazard value results and map them back to the building and fragility.
                # This is a potential pitfall as we are relying on the order of the returned results
                i = 0
                for bldg_id in bldgs:
                    bldg_result = collections.OrderedDict()
                    building = buildings[bldg_id]
                    hazard_val = hazard_vals[i]['hazardValue']
                    output_demand_type = hazard_vals[i]['demand']
                    if hazard_type == 'earthquake':
                        period = float(hazard_vals[i]['period'])
                        if period > 0:
                            output_demand_type = str(
                                hazard_vals[i]['period']) + " " + output_demand_type

                    num_stories = building['properties']['no_stories']
                    selected_fragility_set = fragility_sets[bldg_id]
                    building_period = selected_fragility_set.fragility_curves[
                        0].get_building_period(num_stories)
                    dmg_probability = selected_fragility_set.calculate_limit_state(
                        hazard_val, building_period)
                    dmg_interval = AnalysisUtil.calculate_damage_interval(
                        dmg_probability)

                    bldg_result['guid'] = building['properties']['guid']
                    bldg_result.update(dmg_probability)
                    bldg_result.update(dmg_interval)
                    bldg_result['demandtype'] = output_demand_type
                    bldg_result['demandunits'] = input_demand_units
                    bldg_result['hazardval'] = hazard_val

                    bldg_results.append(bldg_result)
                    del buildings[bldg_id]
                    i = i + 1

        unmapped_hazard_val = 0.0
        unmapped_output_demand_type = "None"
        unmapped_output_demand_unit = "None"
        for unmapped_bldg_id, unmapped_bldg in buildings.items():
            unmapped_bldg_result = collections.OrderedDict()
            unmapped_bldg_result['guid'] = unmapped_bldg['properties']['guid']
            unmapped_bldg_result['demandtype'] = unmapped_output_demand_type
            unmapped_bldg_result['demandunits'] = unmapped_output_demand_unit
            unmapped_bldg_result['hazardval'] = unmapped_hazard_val
            bldg_results.append(unmapped_bldg_result)

        return bldg_results

    def get_spec(self):
        """Get specifications of the building damage analysis.

        Returns:
            obj: A JSON object of specifications of the building damage analysis.

        """
        return {
            'name': 'building-damage',
            'description': 'building damage analysis',
            'input_parameters': [
                {
                    'id': 'result_name',
                    'required': True,
                    'description': 'result dataset name',
                    'type': str
                },
                {
                    'id': 'hazard_type',
                    'required': True,
                    'description': 'Hazard Type (e.g. earthquake)',
                    'type': str
                },
                {
                    'id': 'hazard_id',
                    'required': True,
                    'description': 'Hazard ID',
                    'type': str
                },
                {
                    'id': 'fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in mapping dataset',
                    'type': str
                },
                {
                    'id': 'use_liquefaction',
                    'required': False,
                    'description': 'Use liquefaction',
                    'type': bool
                },
                {
                    'id': 'use_hazard_uncertainty',
                    'required': False,
                    'description': 'Use hazard uncertainty',
                    'type': bool
                },
                {
                    'id': 'num_cpu',
                    'required': False,
                    'description': 'If using parallel execution, the number of cpus to request',
                    'type': int
                },
            ],
            'input_datasets': [
                {
                    'id': 'buildings',
                    'required': True,
                    'description': 'Building Inventory',
                    'type': ['ergo:buildingInventoryVer4', 'ergo:buildingInventoryVer5',
                             'ergo:buildingInventoryVer6'],
                },
                {
                    'id': 'dfr3_mapping_set',
                    'required': True,
                    'description': 'DFR3 Mapping Set Object',
                    'type': ['incore:dfr3MappingSet'],
                }
            ],
            'output_datasets': [
                {
                    'id': 'result',
                    'parent_type': 'buildings',
                    'description': 'CSV file of building structural damage',
                    'type': 'ergo:buildingDamageVer4'
                }
            ]
        }
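
# A minimal usage sketch for BuildingDamage, following the same pattern; all IDs
# below are placeholders rather than real dataset/hazard ids.
def run_building_damage_example():
    client = IncoreClient(INCORE_API_DEV_URL)
    bldg_dmg = BuildingDamage(client)
    bldg_dmg.load_remote_input_dataset("buildings", "<building-inventory-dataset-id>")
    fragility_service = FragilityService(client)
    mapping_set = MappingSet(fragility_service.get_mapping("<building-fragility-mapping-id>"))
    bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set)
    bldg_dmg.set_parameter("result_name", "bldg-dmg-results")
    bldg_dmg.set_parameter("hazard_type", "tornado")
    bldg_dmg.set_parameter("hazard_id", "<tornado-hazard-id>")
    bldg_dmg.set_parameter("num_cpu", 4)
    bldg_dmg.run_analysis()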
Example #21
class TornadoEpnDamage(BaseAnalysis):
    """
    Computes electric power network (EPN) probability of damage based on a tornado hazard.
    The process for computing the structural damage is similar to other parts of the built environment.
    First, fragilities are obtained based on the hazard type and attributes of the network tower and network pole.
    Based on the fragility, the hazard intensity at the location of the infrastructure is computed. Using this
    information, the probability of exceeding each limit state is computed, along with the probability of damage.
    """
    def __init__(self, incore_client):
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)
        self.datasetsvc = DataService(incore_client)
        self.fragility_tower_id = '5b201b41b1cf3e336de8fa67'
        self.fragility_pole_id = '5b201d91b1cf3e336de8fa68'

        # this decides whether to use the indpnode field; not using it can be safer for general datasets
        self.use_indpnode = False
        self.nnode = 0
        self.highest_node_num = 0
        self.EF = 0
        self.nint = []
        self.indpnode = []
        self.mcost = 1435  # mean repair cost for a single distribution pole
        self.vcost = (0.1 * self.mcost) ** 2
        # convert to the gaussian std deviation used in logncdf
        self.sigmad = math.sqrt(math.log(self.vcost / (self.mcost ** 2) + 1))
        self.mud = math.log((self.mcost ** 2) / math.sqrt(self.vcost + self.mcost ** 2))

        self.mcost = 400000  # mean repair cost for a single transmission pole
        self.vcost = (0.1 * self.mcost) ** 2
        # convert to the gaussian std deviation used in logncdf
        self.sigmat = math.sqrt(math.log(self.vcost / (self.mcost ** 2) + 1))
        self.mut = math.log((self.mcost ** 2) / math.sqrt(self.vcost + self.mcost ** 2))
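
        # the conversions above moment-match a lognormal distribution: for a target
        # mean m and variance v, sigma^2 = ln(1 + v / m^2) and mu = ln(m^2 / sqrt(v + m^2)),
        # so numpy.random.lognormal(mu, sigma) reproduces the intended repair-cost
        # mean with the 10% coefficient of variation set by vcost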

        self.tmut = 72  # mean repair time for a transmission tower in hrs
        self.tsigmat = 36  # std dev

        self.tmud = 5  # mean repair time for poles in hrs
        self.tsigmad = 2.5

        self.totalcost2repairpath = []
        self.totalpoles2repair = []

        self.tornado_sim_field_name = 'SIMULATION'
        self.tornado_ef_field_name = 'EF_RATING'

        # tornado number of simulations and EF rate
        self.nmcs = 0
        self.tornado_ef_rate = 0

        self.pole_distance = 38.1

        # node variables
        self.nodenwid_fld_name = "NODENWID"
        self.indpnode_fld_name = "INDPNODE"
        self.guid_fldname = 'GUID'

        # link variables
        self.tonode_fld_name = "TONODE"
        self.fromnode_fld_name = "FROMNODE"
        self.linetype_fld_name = "LINETYPE"

        # line type variable
        self.line_transmission = "transmission"
        self.line_distribution = "distribution"

        super(TornadoEpnDamage, self).__init__(incore_client)

    def run(self):
        node_dataset = self.get_input_dataset(
            "epn_node").get_inventory_reader()
        link_dataset = self.get_input_dataset(
            "epn_link").get_inventory_reader()
        tornado_id = self.get_parameter('tornado_id')
        tornado_metadata = self.hazardsvc.get_tornado_hazard_metadata(
            tornado_id)
        self.load_remote_input_dataset("tornado",
                                       tornado_metadata["datasetId"])
        tornado_dataset = self.get_input_dataset(
            "tornado").get_inventory_reader()
        ds_results, damage_results = self.get_damage(node_dataset,
                                                     link_dataset,
                                                     tornado_dataset,
                                                     tornado_id)

        self.set_result_csv_data("result",
                                 ds_results,
                                 name=self.get_parameter("result_name"))
        self.set_result_json_data("metadata",
                                  damage_results,
                                  name=self.get_parameter("result_name") +
                                  "_additional_info")

        return True

    def get_damage(self, node_dataset, link_dataset, tornado_dataset,
                   tornado_id):
        """

        Args:
            node_dataset (obj): Node dataset.
            link_dataset (obj): Link dataset.
            tornado_dataset (obj): Tornado dataset.
            tornado_id (str): Tornado id.

        """
        self.set_tornado_variables(tornado_dataset)
        self.set_node_variables(node_dataset)

        # get fragility curves set - tower for transmission, pole for distribution
        fragility_set_tower = FragilityCurveSet(
            self.fragilitysvc.get_dfr3_set(self.fragility_tower_id))
        assert fragility_set_tower.id == self.fragility_tower_id
        fragility_set_pole = FragilityCurveSet(
            self.fragilitysvc.get_dfr3_set(self.fragility_pole_id))
        assert fragility_set_pole.id == self.fragility_pole_id

        # network test
        node_id_validation = NetworkUtil.validate_network_node_ids(
            node_dataset, link_dataset, self.fromnode_fld_name,
            self.tonode_fld_name, self.nodenwid_fld_name)
        if node_id_validation is False:
            print("ID in from or to node field doesn't exist in the node dataset")
            sys.exit(0)

        # getting network graph and node coordinates
        is_directed_graph = True

        graph, node_coords = NetworkUtil.create_network_graph_from_field(
            link_dataset, self.fromnode_fld_name, self.tonode_fld_name,
            is_directed_graph)

        # reverse the graph to accumulate the damage onto the "to" node
        graph = nx.DiGraph.reverse(graph, copy=True)

        # collect the connected components as a list
        connection_sets = []
        if is_directed_graph:
            connection_sets = list(nx.weakly_connected_components(graph))
        else:
            connection_sets = list(nx.connected_components(graph))

        # check the first node of each network component; this first node should lead each separated network
        # also convert each connection set to a list
        first_node_list = []
        connection_list = []
        for c in connection_sets:
            connection_list.append(list(c))
            first_node_list.append(list(c)[0])

        intersection_list = []
        poly_list = []
        totalcost2repair = []
        totalpoles2repair = []
        totaltime2repair = []

        # construct guid field
        guid_list = []
        nodenwid_list = []
        for node_feature in node_dataset:
            # get guid column
            guid_fld_val = ''
            if self.guid_fldname.lower() in node_feature['properties']:
                guid_fld_val = node_feature['properties'][
                    self.guid_fldname.lower()]
            elif self.guid_fldname in node_feature['properties']:
                guid_fld_val = node_feature['properties'][self.guid_fldname]
            guid_list.append(guid_fld_val)

            # get nodenwid column
            nodenwid_fld_val = ''
            if self.nodenwid_fld_name.lower() in node_feature['properties']:
                nodenwid_fld_val = int(
                    node_feature['properties'][self.nodenwid_fld_name.lower()])
            elif self.nodenwid_fld_name in node_feature['properties']:
                nodenwid_fld_val = int(
                    node_feature['properties'][self.nodenwid_fld_name])
            nodenwid_list.append(nodenwid_fld_val)

        for z in range(self.nmcs):
            nodedam = [0] * self.nnode  # number of damaged poles for each node
            noderepair = [0] * self.nnode  # repair cost for each node
            poles2repair = [0] * self.nnode  # total number of poles to repair
            cost2repairpath = [0] * self.nnode  # total repair cost for the network
            time2repairpath = [0] * self.nnode  # total repair time for the network
            nodetimerep = [0] * self.nnode
            hazardval = [[0]] * self.nnode  # hazard values
            demandtypes = [[""]] * self.nnode  # demand types
            demandunits = [[""]] * self.nnode  # demand units
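            # note: [[0]] * n repeats references to a single inner list; that is
            # harmless here because the code below assigns whole elements
            # (e.g. hazardval[i] = ...) rather than mutating the shared inner list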

            # iterate link
            for line_feature in link_dataset:
                ndamage = 0  # number of damaged poles in each link
                repaircost = 0  # repair cost value
                repairtime = 0  # repair time value
                to_node_val = ""
                linetype_val = ""
                tor_hazard_values = [0]  # random wind speed in EF
                demand_types = [""]
                demand_units = [""]

                if self.tonode_fld_name.lower() in line_feature['properties']:
                    to_node_val = line_feature['properties'][
                        self.tonode_fld_name.lower()]
                elif self.tonode_fld_name in line_feature['properties']:
                    to_node_val = line_feature['properties'][
                        self.tonode_fld_name]

                if self.linetype_fld_name in line_feature['properties']:
                    linetype_val = line_feature['properties'][
                        self.linetype_fld_name]
                elif self.linetype_fld_name.lower(
                ) in line_feature['properties']:
                    linetype_val = line_feature['properties'][
                        self.linetype_fld_name.lower()]

                line = shape(line_feature['geometry'])

                # iterate tornado
                for tornado_feature in tornado_dataset:
                    resistivity_probability = 0  # resistivity value at the point of the wind speed
                    random_resistivity = 0  # random resistivity value between 0 and 1

                    sim_fld_val = ""
                    ef_fld_val = ""

                    # get EF rating and simulation number column
                    if self.tornado_sim_field_name.lower(
                    ) in tornado_feature['properties']:
                        sim_fld_val = int(tornado_feature['properties'][
                            self.tornado_sim_field_name.lower()])
                    elif self.tornado_sim_field_name in tornado_feature[
                            'properties']:
                        sim_fld_val = int(tornado_feature['properties'][
                            self.tornado_sim_field_name])

                    if self.tornado_ef_field_name.lower(
                    ) in tornado_feature['properties']:
                        ef_fld_val = tornado_feature['properties'][
                            self.tornado_ef_field_name.lower()]
                    elif self.tornado_ef_field_name in tornado_feature[
                            'properties']:
                        ef_fld_val = tornado_feature['properties'][
                            self.tornado_ef_field_name]

                    if sim_fld_val == "" or ef_fld_val == "":
                        print(
                            "unable to convert tornado simulation field value to integer"
                        )
                        sys.exit(0)

                    # get Tornado EF polygon
                    # assumes that the polygon is not a multipolygon
                    poly = shape(tornado_feature['geometry'])
                    poly_list.append(poly)

                    # loop for ef ranges
                    for f in range(self.tornado_ef_rate):
                        npoles = 0  # number of poles in tornado ef box
                        poleresist = 0  # pole's resistance value
                        # setting EF rate value string to match in the tornado dataset's attribute table
                        ef_content = "EF" + str(f)

                        # compute the intersection between the link line and the tornado EF polygon,
                        # and figure out the length of the line that overlapped with the EF box
                        if sim_fld_val == z and ef_fld_val.lower(
                        ) == ef_content.lower():
                            if poly is not None and line is not None:
                                if poly.intersects(line):
                                    intersection = poly.intersection(line)
                                    any_point = None
                                    intersection_length = intersection.length
                                    if intersection.length > 0:
                                        # print(intersection.__class__.__name__)
                                        # calculate the length of the intersected line;
                                        # since this is in geographic coordinates, it has to be
                                        # projected to meters to be calculated
                                        inter_length_meter = GeoUtil.calc_geog_distance_from_linestring(
                                            intersection)
                                        if isinstance(intersection,
                                                      MultiLineString):
                                            intersection_list.append(
                                                intersection)
                                            for inter_line in intersection.geoms:
                                                any_point = inter_line.centroid
                                                break
                                        elif isinstance(
                                                intersection, LineString):
                                            intersection_list.append(
                                                intersection)
                                            any_point = intersection.centroid

                                            # also, random point can be possible
                                            # by changing the following lines value 0.5
                                            # any_point = intersection.interpolate(0.5, normalized=True)

                                    if any_point is not None:
                                        # check if any_point is in the polygon
                                        if poly.contains(any_point) is False:
                                            # this rarely happens, but is handled just in case
                                            any_point = poly.centroid

                                    # check if the line is transmission (tower fragility) or distribution (pole fragility)
                                    if linetype_val.lower(
                                    ) == self.line_transmission:
                                        fragility_set_used = fragility_set_tower
                                    else:
                                        fragility_set_used = fragility_set_pole

                                    values_payload = [{
                                        "demands": [x.lower() for x in fragility_set_used.demand_types],
                                        "units": [x.lower() for x in fragility_set_used.demand_units],
                                        "loc": str(any_point.coords[0][1]) + "," + str(any_point.coords[0][0])
                                    }]

                                    h_vals = self.hazardsvc.post_tornado_hazard_values(
                                        tornado_id, values_payload,
                                        self.get_parameter('seed'))
                                    tor_hazard_values = AnalysisUtil.update_precision_of_lists(
                                        h_vals[0]["hazardValues"])
                                    demand_types = h_vals[0]["demands"]
                                    demand_units = h_vals[0]["units"]
                                    hval_dict = dict()
                                    for j, d in enumerate(h_vals[0]["demands"]):
                                        hval_dict[d] = tor_hazard_values[j]
                                    if isinstance(
                                            fragility_set_used.
                                            fragility_curves[0], DFR3Curve):
                                        inventory_args = fragility_set_used.construct_expression_args_from_inventory(
                                            tornado_feature)
                                        resistivity_probability = \
                                            fragility_set_used.calculate_limit_state(
                                                hval_dict,
                                                inventory_type=fragility_set_used.inventory_type, **inventory_args)
                                    else:
                                        raise ValueError(
                                            "One of the fragilities is in deprecated format. This should not happen. "
                                            "If you are seeing this please report the issue."
                                        )

                                    # randomly generated capacity of each pole; 1 m/s is 2.23694 mph
                                    poleresist = resistivity_probability.get('LS_0') * 2.23694
                                    npoles = int(
                                        round(inter_length_meter /
                                              self.pole_distance))
                                    repairtime_list = []

                                    for k in range(npoles):
                                        repair_time = 0
                                        random_resistivity = random.uniform(
                                            0, 1)

                                        if random_resistivity <= poleresist:
                                            ndamage += 1
                                            # the following code couldn't be converted from MATLAB to Python;
                                            # however, the cross product <= 3 or == 24 almost never happens.
                                            # Since the time and cost differ between pole and tower,
                                            # this could be changed by checking whether it is a tower or a pole
                                            # if numpy.cross(k, z) <= 3 or numpy.cross(k, z) == 24:
                                            if linetype_val.lower(
                                            ) == self.line_transmission:
                                                mu = self.mut
                                                sigma = self.sigmat
                                                tmu = self.tmut
                                                tsigma = self.tsigmat
                                            else:
                                                mu = self.mud
                                                sigma = self.sigmad
                                                tmu = self.tmud
                                                tsigma = self.tsigmad

                                            repairtime_list.append(
                                                numpy.random.normal(
                                                    tmu, tsigma))

                                    for k in range(ndamage):
                                        repaircost += numpy.random.lognormal(
                                            mu, sigma)

                                    # max of the repair time among different poles is taken
                                    # as the repair time for that line
                                    if len(repairtime_list) > 0:
                                        repairtime = max(repairtime_list)
                noderepair[to_node_val - 1] = repaircost
                nodedam[to_node_val - 1] = ndamage
                nodetimerep[to_node_val - 1] = repairtime
                hazardval[to_node_val - 1] = tor_hazard_values
                demandtypes[to_node_val - 1] = demand_types
                demandunits[to_node_val - 1] = demand_units

            # Calculate damage and repair cost based on network
            for i in range(len(first_node_list)):
                for j in range(len(connection_list[i])):
                    # print(connection_list[i][j], first_node_list[i])
                    pathij = list(
                        nx.all_simple_paths(graph, connection_list[i][j],
                                            first_node_list[i]))
                    poler = 0
                    coster = 0
                    timer = []
                    # print(pathij)
                    if len(pathij) > 0:
                        for k in range(len(pathij)):
                            for var1 in range(len(pathij[k])):
                                poler = poler + nodedam[pathij[k][var1]]
                                coster = coster + noderepair[pathij[k][var1]]
                                # max of the time for different lines is taken as the repair time for that path.
                                # -- path is constituted of different lines.
                                timer.append(nodetimerep[pathij[k][var1]])
                    poles2repair[connection_list[i][j]] = poler
                    cost2repairpath[connection_list[i][j]] = coster
                    if len(timer) > 0:
                        time2repairpath[connection_list[i][j]] = max(timer)
                    else:
                        time2repairpath[connection_list[i][j]] = 0
            totalcost2repair.append(cost2repairpath)
            totalpoles2repair.append(poles2repair)
            totaltime2repair.append(time2repairpath)

        # calculate mean and standard deviation
        meanpoles = numpy.mean(numpy.asarray(totalpoles2repair), axis=0)
        stdpoles = numpy.std(numpy.asarray(totalpoles2repair), axis=0)
        meancost = numpy.mean(numpy.asarray(totalcost2repair), axis=0)
        stdcost = numpy.std(numpy.asarray(totalcost2repair), axis=0)
        meantime = numpy.mean(numpy.asarray(totaltime2repair), axis=0)
        stdtime = numpy.std(numpy.asarray(totaltime2repair), axis=0)

        # create result
        ds_results = []
        damage_results = []

        for i in range(len(meanpoles)):
            ds_result = dict()
            damage_result = dict()

            ds_result['guid'] = guid_list[i]
            ds_result["meanpoles"] = meanpoles[i]
            ds_result["stdpoles"] = stdpoles[i]
            ds_result["meancost"] = meancost[i]
            ds_result["stdcost"] = stdcost[i]
            ds_result["meantime"] = meantime[i]
            ds_result["stdtime"] = stdtime[i]
            ds_result['haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(
                hazardval[i], "tornado")

            damage_result['guid'] = guid_list[i]
            damage_result["fragility_tower_id"] = self.fragility_tower_id
            damage_result["fragility_pole_id"] = self.fragility_pole_id
            damage_result["hazardtype"] = "Tornado"
            damage_result['hazardvals'] = hazardval[i]
            damage_result['demandtypes'] = demandtypes[i]
            damage_result['demandunits'] = demandunits[i]

            ds_results.append(ds_result)
            damage_results.append(damage_result)

        return ds_results, damage_results

    """
    align coordinate values in a list as a single pair in order
    """

    def align_list_cooridnate(self, coord_list):
        coord_iterator = iter(coord_list)
        first = prev = next(coord_iterator)
        for coord in coord_iterator:
            yield prev, coord
            prev = coord

            # if it is polygon the following line is needed to close the polygon geometry
            # yield coord, first

    def set_tornado_variables(self, tornado_dataset):
        sim_num_list = []
        ef_rate_list = []

        for ef_poly in tornado_dataset:
            ef_string = ''
            if self.tornado_sim_field_name.lower() in ef_poly['properties']:
                sim_num_list.append(
                    int(ef_poly['properties'][
                        self.tornado_sim_field_name.lower()]))
            elif self.tornado_sim_field_name in ef_poly['properties']:
                sim_num_list.append(
                    int(ef_poly['properties'][self.tornado_sim_field_name]))

            if self.tornado_ef_field_name.lower() in ef_poly['properties']:
                ef_string = ef_poly['properties'][
                    self.tornado_ef_field_name.lower()]
            elif self.tornado_ef_field_name in ef_poly['properties']:
                ef_string = ef_poly['properties'][self.tornado_ef_field_name]
            # parse the number out of the EF rating; the format should be "EF0", "EF1", etc.
            ef_rate_list.append(int(ef_string.lower().split("ef", 1)[1]))

        if len(sim_num_list) == 0 or len(ef_string) == 0:
            print("Could not convert tornado simulation value")
            sys.exit(1)

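        # simulation numbers and EF ratings appear to be zero-based indices,
        # so the counts are the maxima plus one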
        self.nmcs = max(sim_num_list) + 1
        self.tornado_ef_rate = max(ef_rate_list) + 1

    def set_node_variables(self, node_dataset):
        i = 0

        for node_point in node_dataset:
            node_id = None
            indpnode_val = None
            if self.nodenwid_fld_name.lower() in node_point['properties']:
                node_id = int(
                    node_point['properties'][self.nodenwid_fld_name.lower()])
            elif self.nodenwid_fld_name in node_point['properties']:
                node_id = int(node_point['properties'][self.nodenwid_fld_name])

            if self.use_indpnode is True:
                if self.indpnode_fld_name.lower() in node_point['properties']:
                    indpnode_val = int(node_point['properties'][
                        self.indpnode_fld_name.lower()])
                elif self.indpnode_fld_name in node_point['properties']:
                    indpnode_val = int(
                        node_point['properties'][self.indpnode_fld_name])

            if node_id is None and indpnode_val is None:
                print("problem getting the value")
                sys.exit(1)

            if self.use_indpnode is True:
                if indpnode_val > 0:
                    self.indpnode.append(node_id)
                else:
                    self.nint.append(node_id)
            else:
                self.nint.append(node_id)

            if node_id > self.highest_node_num:
                self.highest_node_num = node_id
            i += 1

        self.nnode = i

    def get_spec(self):
        return {
            'name': 'tornado-epn-damage',
            'description': 'tornado epn damage analysis',
            'input_parameters': [{
                'id': 'result_name',
                'required': True,
                'description': 'result dataset name',
                'type': str
            }, {
                'id': 'tornado_id',
                'required': True,
                'description': 'Tornado hazard id',
                'type': str
            }, {
                'id': 'seed',
                'required': False,
                'description': 'Initial seed for the tornado hazard value',
                'type': int
            }],
            'input_datasets': [{
                'id': 'epn_node',
                'required': True,
                'description': 'EPN Node',
                'type': ['incore:epnNodeVer1'],
            }, {
                'id': 'epn_link',
                'required': True,
                'description': 'EPN Link',
                'type': ['incore:epnLinkeVer1'],
            }, {
                'id': 'tornado',
                'required': False,
                'description': 'Tornado Dataset',
                'type': ['incore:tornadoWindfield'],
            }],
            'output_datasets': [{
                'id': 'result',
                'parent_type': 'epn_node',
                'description': 'CSV file of damages for electric power network by tornado',
                'type': 'incore:tornadoEPNDamageVer3'
            }, {
                'id': 'metadata',
                'parent_type': 'epn_node',
                'description': 'Json file with information about applied hazard value and fragility',
                'type': 'incore:tornadoEPNDamageSupplement'
            }]
        }
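
# A minimal usage sketch for this analysis in the style of the other examples
# here. INCORE_API_DEV_URL is assumed to be defined as elsewhere in this
# document; every id below is a placeholder, not a real dataset or hazard id.
def run_tornado_epn_damage_example():
    client = IncoreClient(INCORE_API_DEV_URL)

    tornado_epn_dmg = TornadoEpnDamage(client)

    # hypothetical ids: substitute real EPN node/link datasets and a tornado hazard
    tornado_epn_dmg.load_remote_input_dataset("epn_node", "<epn-node-dataset-id>")
    tornado_epn_dmg.load_remote_input_dataset("epn_link", "<epn-link-dataset-id>")

    tornado_epn_dmg.set_parameter("result_name", "tornado_epn_dmg_result")
    tornado_epn_dmg.set_parameter("tornado_id", "<tornado-hazard-id>")
    tornado_epn_dmg.set_parameter("seed", 1001)

    tornado_epn_dmg.run_analysis()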
Example #22
class TornadoEpnDamage(BaseAnalysis):
    def __init__(self, incore_client):
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)
        self.datasetsvc = DataService(incore_client)
        self.fragility_tower_id = '5b201b41b1cf3e336de8fa67'
        self.fragility_pole_id = '5b201d91b1cf3e336de8fa68'

        # decides whether to use the indpnode field; not using it can be safer for a general dataset
        self.use_indpnode = False
        self.nnode = 0
        self.highest_node_num = 0
        self.EF = 0
        self.nint = []
        self.indpnode = []
        self.mcost = 1435  # mean repair cost for single distribution pole
        self.vcost = (0.1 * self.mcost)**2
        self.sigmad = math.sqrt(math.log(
            self.vcost / (self.mcost**2) +
            1))  # Gaussian std deviation of log(cost), for use with the lognormal CDF
        self.mud = math.log(
            (self.mcost**2) / math.sqrt(self.vcost + self.mcost**2))

        self.mcost = 400000  # mean repair cost for single transmission pole
        self.vcost = (0.1 * self.mcost)**2
        self.sigmat = math.sqrt(math.log(
            self.vcost / (self.mcost**2) +
            1))  # Gaussian std deviation of log(cost), for use with the lognormal CDF
        self.mut = math.log(
            (self.mcost**2) / math.sqrt(self.vcost + self.mcost**2))
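        # moment matching for the lognormal cost model: given mean m and variance v,
        #   sigma = sqrt(ln(1 + v / m**2))  and  mu = ln(m**2 / sqrt(v + m**2)),
        # so numpy.random.lognormal(mu, sigma) reproduces the target mean and variance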

        self.tmut = 72  # mean repair time for a transmission tower in hrs
        self.tsigmat = 36  # std dev

        self.tmud = 5  # mean repair time for a distribution pole in hrs
        self.tsigmad = 2.5

        self.totalcost2repairpath = []
        self.totalpoles2repair = []

        self.tornado_sim_field_name = 'SIMULATION'
        self.tornado_ef_field_name = 'EF_RATING'

        # tornado number of simulation and ef_rate
        self.nmcs = 0
        self.tornado_ef_rate = 0

        self.pole_distance = 38.1

        # node variables
        self.nodenwid_fld_name = "NODENWID"
        self.indpnode_fld_name = "INDPNODE"
        self.guid_fldname = 'GUID'

        # link variables
        self.tonode_fld_name = "TONODE"
        self.fromnode_fld_name = "FROMNODE"
        self.linetype_fld_name = "LINETYPE"

        # line type variable
        self.line_transmission = "transmission"
        self.line_distribution = "distribution"

        super(TornadoEpnDamage, self).__init__(incore_client)
Example #23
class BuildingDamage(BaseAnalysis):
    """Building Damage Analysis calculates the probability of building damage based on
    different hazard type such as earthquake, tsunami, and tornado.

    Args:
        incore_client (IncoreClient): Service authentication.

    """

    def __init__(self, incore_client):
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(BuildingDamage, self).__init__(incore_client)

    def run(self):
        """Executes building damage analysis."""
        # Building dataset
        bldg_set = self.get_input_dataset("buildings").get_inventory_reader()

        # building retrofit strategy
        retrofit_strategy_dataset = self.get_input_dataset("retrofit_strategy")
        if retrofit_strategy_dataset is not None:
            retrofit_strategy = list(retrofit_strategy_dataset.get_csv_reader())
        else:
            retrofit_strategy = None

        # Get hazard input
        hazard_dataset_id = self.get_parameter("hazard_id")

        # Hazard type of the exposure
        hazard_type = self.get_parameter("hazard_type")

        # Get Fragility key
        fragility_key = self.get_parameter("fragility_key")
        if fragility_key is None:
            fragility_key = BuildingUtil.DEFAULT_TSUNAMI_MMAX_FRAGILITY_KEY if hazard_type == 'tsunami' else \
                BuildingUtil.DEFAULT_FRAGILITY_KEY
            self.set_parameter("fragility_key", fragility_key)

        user_defined_cpu = 1

        if self.get_parameter("num_cpu") is not None and self.get_parameter("num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(self, len(bldg_set), user_defined_cpu)

        avg_bulk_input_size = int(len(bldg_set) / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(bldg_set)
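        # split the inventory into contiguous, roughly equal-sized batches,
        # one per parallel worker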
        while count < len(inventory_list):
            inventory_args.append(inventory_list[count:count + avg_bulk_input_size])
            count += avg_bulk_input_size

        (ds_results, damage_results) = self.building_damage_concurrent_future(self.building_damage_analysis_bulk_input,
                                                                              num_workers,
                                                                              inventory_args,
                                                                              repeat(retrofit_strategy),
                                                                              repeat(hazard_type),
                                                                              repeat(hazard_dataset_id))

        self.set_result_csv_data("ds_result", ds_results, name=self.get_parameter("result_name"))
        self.set_result_json_data("damage_result",
                                  damage_results,
                                  name=self.get_parameter("result_name") + "_additional_info")

        return True

    def building_damage_concurrent_future(self, function_name, parallelism, *args):
        """Utilizes concurrent.future module.

        Args:
            function_name (function): The function to be parallelized.
            parallelism (int): Number of workers in parallelization.
            *args: All the arguments in order to pass into parameter function_name.

        Returns:
            list: A list of ordered dictionaries with building damage values and other data/metadata.

        """
        output_ds = []
        output_dmg = []
        with concurrent.futures.ProcessPoolExecutor(max_workers=parallelism) as executor:
            for ret1, ret2 in executor.map(function_name, *args):
                output_ds.extend(ret1)
                output_dmg.extend(ret2)

        return output_ds, output_dmg

    def building_damage_analysis_bulk_input(self, buildings, retrofit_strategy, hazard_type, hazard_dataset_id):
        """Run analysis for multiple buildings.

        Args:
            buildings (list): Multiple buildings from input inventory set.
            retrofit_strategy (list): Building guid and its retrofit level (0, 1, 2, etc.). This is optional.
            hazard_type (str): Hazard type, either earthquake, tornado, or tsunami.
            hazard_dataset_id (str): An id of the hazard exposure.

        Returns:
            list: A list of ordered dictionaries with building damage values and other data/metadata.

        """

        fragility_key = self.get_parameter("fragility_key")
        fragility_sets = self.fragilitysvc.match_inventory(self.get_input_dataset("dfr3_mapping_set"), buildings,
                                                           fragility_key, retrofit_strategy)
        values_payload = []
        unmapped_buildings = []
        mapped_buildings = []
        for b in buildings:
            bldg_id = b["id"]
            if bldg_id in fragility_sets:
                location = GeoUtil.get_location(b)
                loc = str(location.y) + "," + str(location.x)
                demands = AnalysisUtil.get_hazard_demand_types(b, fragility_sets[bldg_id], hazard_type)
                units = fragility_sets[bldg_id].demand_units
                value = {
                    "demands": demands,
                    "units": units,
                    "loc": loc
                }
                values_payload.append(value)
                mapped_buildings.append(b)
            else:
                unmapped_buildings.append(b)

        # not needed anymore as they are already split into mapped and unmapped
        del buildings

        if hazard_type == 'earthquake':
            hazard_vals = self.hazardsvc.post_earthquake_hazard_values(hazard_dataset_id, values_payload)
        elif hazard_type == 'tornado':
            hazard_vals = self.hazardsvc.post_tornado_hazard_values(hazard_dataset_id, values_payload,
                                                                    self.get_parameter('seed'))
        elif hazard_type == 'tsunami':
            hazard_vals = self.hazardsvc.post_tsunami_hazard_values(hazard_dataset_id, values_payload)
        elif hazard_type == 'hurricane':
            hazard_vals = self.hazardsvc.post_hurricane_hazard_values(hazard_dataset_id, values_payload)
        elif hazard_type == 'flood':
            hazard_vals = self.hazardsvc.post_flood_hazard_values(hazard_dataset_id, values_payload)
        else:
            raise ValueError("The provided hazard type is not supported yet by this analysis")

        ds_results = []
        damage_results = []

        i = 0
        for b in mapped_buildings:
            ds_result = dict()
            damage_result = dict()
            dmg_probability = dict()
            dmg_interval = dict()
            b_id = b["id"]
            selected_fragility_set = fragility_sets[b_id]

            # TODO: Once all fragilities are migrated to new format, we can remove this condition
            if isinstance(selected_fragility_set.fragility_curves[0], DFR3Curve):
                # Supports multiple demand types in same fragility
                b_haz_vals = AnalysisUtil.update_precision_of_lists(hazard_vals[i]["hazardValues"])
                b_demands = hazard_vals[i]["demands"]
                b_units = hazard_vals[i]["units"]

                hval_dict = dict()
                j = 0

                # To calculate damage, use the demand type names from the fragility (these appear in its
                # expression) rather than what the hazard service returns; e.g. the DFR3 curve may use
                # "SA" while the hazard service returns "1.07 SA"
                for d in selected_fragility_set.demand_types:
                    hval_dict[d] = b_haz_vals[j]
                    j += 1
                if not AnalysisUtil.do_hazard_values_have_errors(hazard_vals[i]["hazardValues"]):
                    building_args = selected_fragility_set.construct_expression_args_from_inventory(b)

                    building_period = selected_fragility_set.fragility_curves[0].get_building_period(
                        selected_fragility_set.curve_parameters, **building_args)

                    dmg_probability = selected_fragility_set.calculate_limit_state(
                        hval_dict, **building_args, period=building_period)
                    dmg_interval = selected_fragility_set.calculate_damage_interval(
                        dmg_probability, hazard_type=hazard_type, inventory_type="building")
            else:
                raise ValueError("One of the fragilities is in deprecated format. This should not happen. If you are "
                                 "seeing this please report the issue.")

            ds_result['guid'] = b['properties']['guid']
            damage_result['guid'] = b['properties']['guid']

            ds_result.update(dmg_probability)
            ds_result.update(dmg_interval)
            ds_result['haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(b_haz_vals, hazard_type)

            damage_result['fragility_id'] = selected_fragility_set.id
            damage_result['demandtype'] = b_demands
            damage_result['demandunits'] = b_units
            damage_result['hazardval'] = b_haz_vals

            ds_results.append(ds_result)
            damage_results.append(damage_result)
            i += 1

        for b in unmapped_buildings:
            ds_result = dict()
            damage_result = dict()
            ds_result['guid'] = b['properties']['guid']
            damage_result['guid'] = b['properties']['guid']
            damage_result['fragility_id'] = None
            damage_result['demandtype'] = None
            damage_result['demandunits'] = None
            damage_result['hazardval'] = None

            ds_results.append(ds_result)
            damage_results.append(damage_result)

        return ds_results, damage_results

    def get_spec(self):
        """Get specifications of the building damage analysis.

        Returns:
            obj: A JSON object of specifications of the building damage analysis.

        """
        return {
            'name': 'building-damage',
            'description': 'building damage analysis',
            'input_parameters': [
                {
                    'id': 'result_name',
                    'required': True,
                    'description': 'result dataset name',
                    'type': str
                },
                {
                    'id': 'hazard_type',
                    'required': True,
                    'description': 'Hazard Type (e.g. earthquake)',
                    'type': str
                },
                {
                    'id': 'hazard_id',
                    'required': True,
                    'description': 'Hazard ID',
                    'type': str
                },
                {
                    'id': 'fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in mapping dataset',
                    'type': str
                },
                {
                    'id': 'use_liquefaction',
                    'required': False,
                    'description': 'Use liquefaction',
                    'type': bool
                },
                {
                    'id': 'use_hazard_uncertainty',
                    'required': False,
                    'description': 'Use hazard uncertainty',
                    'type': bool
                },
                {
                    'id': 'num_cpu',
                    'required': False,
                    'description': 'If using parallel execution, the number of cpus to request',
                    'type': int
                },
                {
                    'id': 'seed',
                    'required': False,
                    'description': 'Initial seed for the tornado hazard value',
                    'type': int
                }
            ],
            'input_datasets': [
                {
                    'id': 'buildings',
                    'required': True,
                    'description': 'Building Inventory',
                    'type': ['ergo:buildingInventoryVer4', 'ergo:buildingInventoryVer5',
                             'ergo:buildingInventoryVer6', 'ergo:buildingInventoryVer7'],
                },
                {
                    'id': 'dfr3_mapping_set',
                    'required': True,
                    'description': 'DFR3 Mapping Set Object',
                    'type': ['incore:dfr3MappingSet'],
                },
                {
                    'id': 'retrofit_strategy',
                    'required': False,
                    'description': 'Building retrofit strategy that contains guid and retrofit method',
                    'type': ['incore:retrofitStrategy']
                }
            ],
            'output_datasets': [
                {
                    'id': 'ds_result',
                    'parent_type': 'buildings',
                    'description': 'CSV file of damage states for building structural damage',
                    'type': 'ergo:buildingDamageVer6'
                },
                {
                    'id': 'damage_result',
                    'parent_type': 'buildings',
                    'description': 'Json file with information about applied hazard value and fragility',
                    'type': 'incore:buildingDamageSupplement'
                }
            ]
        }
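
# A minimal usage sketch for BuildingDamage in the same style as the other
# examples in this document; all ids below are placeholders, not real datasets.
def run_building_damage_example():
    client = IncoreClient(INCORE_API_DEV_URL)

    bldg_dmg = BuildingDamage(client)
    bldg_dmg.load_remote_input_dataset("buildings", "<building-inventory-id>")

    # Load fragility mapping
    fragility_service = FragilityService(client)
    mapping_set = MappingSet(fragility_service.get_mapping("<mapping-id>"))
    bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set)

    bldg_dmg.set_parameter("result_name", "bldg_dmg_result")
    bldg_dmg.set_parameter("hazard_type", "earthquake")
    bldg_dmg.set_parameter("hazard_id", "<hazard-id>")
    bldg_dmg.set_parameter("num_cpu", 4)

    bldg_dmg.run_analysis()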
Example #24
class RoadFailure(BaseAnalysis):
    """Computes road damage by hurricane inundation.

    Args:
        incore_client: Service client with authentication info

    """

    DEFAULT_HURRICANE_FRAGILITY_KEY = "inundationDuration"

    def __init__(self, incore_client):
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(RoadFailure, self).__init__(incore_client)

    def run(self):
        """Execute road damage analysis """
        # road dataset
        road_dataset = self.get_input_dataset("roads").get_inventory_reader()

        # distance to shore table data frame
        distance_df = self.get_input_dataset(
            "distance_table").get_dataframe_from_csv()

        # TODO: this should be changed once the semantic service is launched
        # set distance field name in the table
        distance_field_name = "distance"

        # Get hazard type
        hazard_type = self.get_parameter("hazard_type")

        # Get hazard input
        hazard_dataset_id = self.get_parameter("hazard_id")
        user_defined_cpu = 1

        if self.get_parameter("num_cpu") is not None and self.get_parameter(
                "num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        dataset_size = len(road_dataset)
        num_workers = AnalysisUtil.determine_parallelism_locally(
            self, dataset_size, user_defined_cpu)

        avg_bulk_input_size = int(dataset_size / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(road_dataset)
        while count < len(inventory_list):
            inventory_args.append(inventory_list[count:count +
                                                 avg_bulk_input_size])
            count += avg_bulk_input_size

        results = self.road_damage_concurrent_future(
            self.road_damage_analysis_bulk_input, num_workers, inventory_args,
            repeat(distance_df), repeat(distance_field_name),
            repeat(hazard_type), repeat(hazard_dataset_id))

        self.set_result_csv_data("result",
                                 results,
                                 name=self.get_parameter("result_name"))

        return True

    def road_damage_concurrent_future(self, function_name, num_workers, *args):
        """Utilizes concurrent.future module.

        Args:
            function_name (function): The function to be parallelized.
            num_workers (int): Maximum number workers in parallelization.
            *args: All the arguments in order to pass into parameter function_name.

        Returns:
            list: A list of ordered dictionaries with road damage values and other data/metadata.

        """
        output = []
        with concurrent.futures.ProcessPoolExecutor(
                max_workers=num_workers) as executor:
            for ret in executor.map(function_name, *args):
                output.extend(ret)

        return output

    def road_damage_analysis_bulk_input(self, roads, distance_df,
                                        distance_field_name, hazard_type,
                                        hazard_dataset_id):
        """Run road damage analysis by hurricane inundation.

        Args:
            roads (list): multiple roads from road dataset.
            distance_df (object): data frame for distance to shore table
            distance_field_name (str): field name representing the distance to shore
            hazard_type (str): Hazard type
            hazard_dataset_id (str): An id of the hazard exposure.

        Returns:
            list: A list of ordered dictionaries with failure probability of road and other data/metadata.

        """
        result = []

        # Get Fragility key
        fragility_key = self.get_parameter("fragility_key")
        if fragility_key is None:
            fragility_key = self.DEFAULT_HURRICANE_FRAGILITY_KEY
            self.set_parameter("fragility_key", fragility_key)

        # get fragility set
        fragility_sets = self.fragilitysvc.match_inventory(
            self.get_input_dataset("dfr3_mapping_set"), roads, fragility_key)

        for road in roads:
            if road["id"] in fragility_sets.keys():
                # find out distance value
                distance = float(
                    distance_df.loc[distance_df['guid'] == road['properties']
                                    ["guid"]][distance_field_name])

                result.append(
                    self.road_damage_analysis(road, distance, hazard_type,
                                              fragility_sets[road["id"]],
                                              hazard_dataset_id))

        return result

    def road_damage_analysis(self, road, distance, hazard_type, fragility_set,
                             hazard_dataset_id):
        """Run road damage for a single road segment.

        Args:
            road (obj): a single road feature.
            distance (float): distance to shore from the road
            hazard_type (str): hazard type.
            fragility_set (obj): A JSON description of fragility assigned to the road.
            hazard_dataset_id (str): A hazard dataset to use.

        Returns:
            OrderedDict: A dictionary with probability of failure values and other data/metadata.
        """

        road_results = collections.OrderedDict()

        if fragility_set is not None:
            demand_type = fragility_set.demand_type.lower()
            demand_units = fragility_set.demand_units
            location = GeoUtil.get_location(road)
            point = str(location.y) + "," + str(location.x)

            if hazard_type == 'hurricane':
                hazard_resp = self.hazardsvc.get_hurricane_values(
                    hazard_dataset_id, "inundationDuration", demand_units,
                    [point])
            else:
                raise ValueError("Hazard type are not currently supported.")

            dur_q = hazard_resp[0]['hazardValue']

            if dur_q <= 0.0:
                dur_q = 0.0

            fragility_vars = {'x': dur_q, 'y': distance}
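            # the custom fragility expression is evaluated with x bound to inundation
            # duration and y to distance to shore; the result dict's 'failure' entry
            # is the probability of failure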
            pf = fragility_set.calculate_custom_limit_state(
                fragility_vars)['failure']

            road_results['guid'] = road['properties']['guid']
            road_results['failprob'] = pf
            road_results['demandtype'] = demand_type
            road_results['demandunits'] = demand_units
            road_results['hazardtype'] = hazard_type
            road_results['hazardval'] = dur_q

        return road_results

    def get_spec(self):
        """Get specifications of the road damage analysis.

        Returns:
            obj: A JSON object of specifications of the road damage analysis.

        """
        return {
            'name': 'road-damage',
            'description': 'road damage analysis',
            'input_parameters': [{
                'id': 'result_name',
                'required': True,
                'description': 'result dataset name',
                'type': str
            }, {
                'id': 'hazard_type',
                'required': True,
                'description': 'Hazard Type (e.g. earthquake)',
                'type': str
            }, {
                'id': 'hazard_id',
                'required': True,
                'description': 'Hazard ID',
                'type': str
            }, {
                'id': 'fragility_key',
                'required': False,
                'description': 'Fragility key to use in mapping dataset',
                'type': str
            }, {
                'id': 'num_cpu',
                'required': False,
                'description': 'If using parallel execution, the number of cpus to request',
                'type': int
            }],
            'input_datasets': [{
                'id': 'roads',
                'required': True,
                'description': 'Road Inventory',
                'type': ['ergo:roadLinkTopo', 'ergo:roads'],
            }, {
                'id': 'distance_table',
                'required': True,
                'description': 'Distance to Shore Table',
                'type': ['incore:distanceToShore'],
            }, {
                'id': 'dfr3_mapping_set',
                'required': True,
                'description': 'DFR3 Mapping Set Object',
                'type': ['incore:dfr3MappingSet'],
            }],
            'output_datasets': [{
                'id': 'result',
                'parent_type': 'roads',
                'type': 'incore:roadFailure'
            }]
        }
Example #25
class NonStructBuildingDamage(BaseAnalysis):
    def __init__(self, incore_client):
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(NonStructBuildingDamage, self).__init__(incore_client)
Example #26
class EpfDamage(BaseAnalysis):
    """Computes electric power facility structural damage for an earthquake, tsunami, tornado, and hurricane hazards.

    Args:
        incore_client (IncoreClient): Service authentication.

    """

    DEFAULT_LIQ_FRAGILITY_KEY = "pgd"
    DEFAULT_FRAGILITY_KEY = "pga"

    def __init__(self, incore_client):
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(EpfDamage, self).__init__(incore_client)

    def run(self):
        """Executes electric power facility damage analysis."""
        epf_set = self.get_input_dataset("epfs").get_inventory_reader()

        # Get Fragility key
        fragility_key = self.get_parameter("fragility_key")
        if fragility_key is None:
            fragility_key = self.DEFAULT_FRAGILITY_KEY
            self.set_parameter("fragility_key", fragility_key)

        # Get hazard input
        hazard_dataset_id = self.get_parameter("hazard_id")

        # Hazard type, note this is here for future use if additional hazards are supported by this analysis
        hazard_type = self.get_parameter("hazard_type")

        # Hazard Uncertainty
        use_hazard_uncertainty = False
        if self.get_parameter("use_hazard_uncertainty") is not None:
            use_hazard_uncertainty = self.get_parameter(
                "use_hazard_uncertainty")

        if use_hazard_uncertainty:
            raise ValueError("Uncertainty is not implemented yet.")

        user_defined_cpu = 1

        if self.get_parameter("num_cpu") is not None and self.get_parameter(
                "num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(
            self, len(epf_set), user_defined_cpu)

        avg_bulk_input_size = int(len(epf_set) / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(epf_set)
        while count < len(inventory_list):
            inventory_args.append(inventory_list[count:count +
                                                 avg_bulk_input_size])
            count += avg_bulk_input_size

        (ds_results, damage_results) = self.epf_damage_concurrent_future(
            self.epf_damage_analysis_bulk_input, num_workers, inventory_args,
            repeat(hazard_type), repeat(hazard_dataset_id))

        self.set_result_csv_data("result",
                                 ds_results,
                                 name=self.get_parameter("result_name"))
        self.set_result_json_data("metadata",
                                  damage_results,
                                  name=self.get_parameter("result_name") +
                                  "_additional_info")

        return True

    def epf_damage_concurrent_future(self, function_name, num_workers, *args):
        """Utilizes concurrent.future module.

        Args:
            function_name (function): The function to be parallelized.
            num_workers (int): Maximum number workers in parallelization.
            *args: All the arguments in order to pass into parameter function_name.

        Returns:
            list: A list of ordered dictionaries with epf damage values and other data/metadata.

        """

        output_ds = []
        output_dmg = []
        with concurrent.futures.ProcessPoolExecutor(
                max_workers=num_workers) as executor:
            for ret1, ret2 in executor.map(function_name, *args):
                output_ds.extend(ret1)
                output_dmg.extend(ret2)

        return output_ds, output_dmg

    def epf_damage_analysis_bulk_input(self, epfs, hazard_type,
                                       hazard_dataset_id):
        """Run analysis for multiple epfs.

        Args:
            epfs (list): Multiple epfs from input inventory set.
            hazard_type (str): A type of hazard exposure (earthquake, tsunami, tornado, or hurricane).
            hazard_dataset_id (str): An id of the hazard exposure.

        Returns:
            list: A list of ordered dictionaries with epf damage values and other data/metadata.

        """

        use_liquefaction = False
        liquefaction_available = False

        fragility_key = self.get_parameter("fragility_key")

        fragility_set = self.fragilitysvc.match_inventory(
            self.get_input_dataset("dfr3_mapping_set"), epfs, fragility_key)

        if hazard_type == "earthquake":
            liquefaction_fragility_key = self.get_parameter(
                "liquefaction_fragility_key")
            if self.get_parameter("use_liquefaction") is True:
                if liquefaction_fragility_key is None:
                    liquefaction_fragility_key = self.DEFAULT_LIQ_FRAGILITY_KEY

                use_liquefaction = self.get_parameter("use_liquefaction")

                # Obtain the geology dataset
                geology_dataset_id = self.get_parameter(
                    "liquefaction_geology_dataset_id")

                if geology_dataset_id is not None:
                    fragility_sets_liq = self.fragilitysvc.match_inventory(
                        self.get_input_dataset("dfr3_mapping_set"), epfs,
                        liquefaction_fragility_key)

                    if fragility_sets_liq is not None:
                        liquefaction_available = True

        values_payload = []
        values_payload_liq = []
        unmapped_epfs = []
        mapped_epfs = []
        for epf in epfs:
            epf_id = epf["id"]
            if epf_id in fragility_set:
                location = GeoUtil.get_location(epf)
                loc = str(location.y) + "," + str(location.x)
                demands = fragility_set[epf_id].demand_types
                units = fragility_set[epf_id].demand_units
                value = {"demands": demands, "units": units, "loc": loc}
                values_payload.append(value)
                mapped_epfs.append(epf)

                if liquefaction_available and epf["id"] in fragility_sets_liq:
                    fragility_set_liq = fragility_sets_liq[epf["id"]]
                    demands_liq = fragility_set_liq.demand_types
                    units_liq = fragility_set_liq.demand_units
                    value_liq = {
                        "demands": demands_liq,
                        "units": units_liq,
                        "loc": loc
                    }
                    values_payload_liq.append(value_liq)
            else:
                unmapped_epfs.append(epf)

        if hazard_type == 'earthquake':
            hazard_vals = self.hazardsvc.post_earthquake_hazard_values(
                hazard_dataset_id, values_payload)
        elif hazard_type == 'tornado':
            hazard_vals = self.hazardsvc.post_tornado_hazard_values(
                hazard_dataset_id, values_payload)
        elif hazard_type == 'hurricane':
            # TODO: implement hurricane
            raise ValueError('Hurricane hazard has not yet been implemented!')
        elif hazard_type == 'tsunami':
            hazard_vals = self.hazardsvc.post_tsunami_hazard_values(
                hazard_dataset_id, values_payload)
        else:
            raise ValueError("Missing hazard type.")

        liquefaction_resp = None
        if liquefaction_available:
            liquefaction_resp = self.hazardsvc.post_liquefaction_values(
                hazard_dataset_id, geology_dataset_id, values_payload_liq)

        ds_results = []
        damage_results = []

        i = 0
        for epf in mapped_epfs:
            ds_result = dict()
            damage_result = dict()
            selected_fragility_set = fragility_set[epf["id"]]

            if isinstance(selected_fragility_set.fragility_curves[0],
                          DFR3Curve):
                hazard_val = AnalysisUtil.update_precision_of_lists(
                    hazard_vals[i]["hazardValues"])
                input_demand_types = hazard_vals[i]["demands"]
                input_demand_units = hazard_vals[i]["units"]

                hval_dict = dict()
                j = 0
                for d in selected_fragility_set.demand_types:
                    hval_dict[d] = hazard_val[j]
                    j += 1

                epf_args = selected_fragility_set.construct_expression_args_from_inventory(
                    epf)
                limit_states = selected_fragility_set.calculate_limit_state(
                    hval_dict, inventory_type='electric_facility', **epf_args)

                if liquefaction_resp is not None:
                    fragility_set_liq = fragility_sets_liq[epf["id"]]

                    if isinstance(fragility_set_liq.fragility_curves[0],
                                  DFR3Curve):
                        liq_hazard_vals = AnalysisUtil.update_precision_of_lists(
                            liquefaction_resp[i]["pgdValues"])
                        liq_demand_types = liquefaction_resp[i]["demands"]
                        liq_demand_units = liquefaction_resp[i]["units"]
                        liquefaction_prob = liquefaction_resp[i][
                            'liqProbability']

                        hval_dict_liq = dict()

                        for j, d in enumerate(fragility_set_liq.demand_types):
                            hval_dict_liq[d] = liq_hazard_vals[j]

                        facility_liq_args = fragility_set_liq.construct_expression_args_from_inventory(
                            epf)
                        pgd_limit_states = \
                            fragility_set_liq.calculate_limit_state(
                                hval_dict_liq, inventory_type="electric_facility",
                                **facility_liq_args)
                    else:
                        raise ValueError(
                            "One of the fragilities is in deprecated format. "
                            "This should not happen. If you are seeing this please report the issue."
                        )

                    limit_states = AnalysisUtil.adjust_limit_states_for_pgd(
                        limit_states, pgd_limit_states)

                dmg_interval = selected_fragility_set.calculate_damage_interval(
                    limit_states,
                    hazard_type=hazard_type,
                    inventory_type='electric_facility')
            else:
                raise ValueError(
                    "One of the fragilities is in deprecated format. This should not happen. If you are "
                    "seeing this please report the issue.")

            ds_result["guid"] = epf["properties"]["guid"]
            ds_result.update(limit_states)
            ds_result.update(dmg_interval)
            ds_result['haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(
                hazard_val, hazard_type)

            damage_result['guid'] = epf['properties']['guid']
            damage_result['fragility_id'] = selected_fragility_set.id
            damage_result["demandtypes"] = input_demand_types
            damage_result["demandunits"] = input_demand_units
            damage_result["hazardtype"] = hazard_type
            damage_result["hazardvals"] = hazard_val

            if hazard_type == "earthquake" and use_liquefaction is True:
                if liquefaction_available:
                    damage_result['liq_fragility_id'] = fragility_sets_liq[
                        epf["id"]].id
                    damage_result['liqdemandtypes'] = liq_demand_types
                    damage_result['liqdemandunits'] = liq_demand_units
                    damage_result['liqhazval'] = liq_hazard_vals
                    damage_result['liqprobability'] = liquefaction_prob
                else:
                    damage_result['liq_fragility_id'] = None
                    damage_result['liqdemandtypes'] = None
                    damage_result['liqdemandunits'] = None
                    damage_result['liqhazval'] = None
                    damage_result['liqprobability'] = None

            ds_results.append(ds_result)
            damage_results.append(damage_result)

            i += 1

        #############################################################

        # unmapped
        for epf in unmapped_epfs:
            ds_result = dict()
            damage_result = dict()
            ds_result['guid'] = epf['properties']['guid']
            damage_result['guid'] = epf['properties']['guid']
            damage_result['fragility_id'] = None
            damage_result["demandtypes"] = None
            damage_result['demandunits'] = None
            damage_result["hazardtype"] = None
            damage_result['hazardval'] = None
            if hazard_type == "earthquake" and use_liquefaction is True:
                damage_result['liq_fragility_id'] = None
                damage_result['liqdemandtypes'] = None
                damage_result['liqdemandunits'] = None
                damage_result['liqhazval'] = None
                damage_result['liqprobability'] = None

            ds_results.append(ds_result)
            damage_results.append(damage_result)

        return ds_results, damage_results

    def get_spec(self):
        """Get specifications of the epf damage analysis.

        Returns:
            obj: A JSON object of specifications of the epf damage analysis.

        """
        return {
            'name': 'epf-damage',
            'description': 'Electric Power Facility damage analysis.',
            'input_parameters': [
                {
                    'id': 'result_name',
                    'required': True,
                    'description': 'A name of the resulting dataset',
                    'type': str
                },
                {
                    'id': 'hazard_type',
                    'required': True,
                    'description': 'Hazard type (e.g. earthquake).',
                    'type': str
                },
                {
                    'id': 'hazard_id',
                    'required': True,
                    'description': 'Hazard ID which defines the particular hazard (e.g. New Madrid earthquake '
                                   'using Atkinson Boore 1995).',
                    'type': str
                },
                {
                    'id': 'fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in mapping dataset',
                    'type': str
                },
                {
                    'id': 'liquefaction_fragility_key',
                    'required': False,
                    'description': 'Fragility key to use in liquefaction mapping dataset',
                    'type': str
                },
                {
                    'id': 'use_liquefaction',
                    'required': False,
                    'description': 'Use ground liquefaction to modify damage interval.',
                    'type': bool
                },
                {
                    'id': 'liquefaction_geology_dataset_id',
                    'required': False,
                    'description': 'Liquefaction geology/susceptibility dataset id. '
                                   'If not provided, liquefaction will be ignored',
                    'type': str
                },
                {
                    'id': 'use_hazard_uncertainty',
                    'required': False,
                    'description': 'Use hazard uncertainty',
                    'type': bool
                },
                {
                    'id': 'num_cpu',
                    'required': False,
                    'description': 'If using parallel execution, the number of cpus to request.',
                    'type': int
                },
            ],
            'input_datasets': [{
                'id': 'epfs',
                'required': True,
                'description': 'Electric Power Facility Inventory',
                'type': ['incore:epf', 'ergo:epf'],
            }, {
                'id': 'dfr3_mapping_set',
                'required': True,
                'description': 'DFR3 Mapping Set Object',
                'type': ['incore:dfr3MappingSet'],
            }],
            'output_datasets': [{
                'id': 'result',
                'parent_type': 'epfs',
                'type': 'incore:epfDamageVer3'
            }, {
                'id': 'metadata',
                'parent_type': 'epfs',
                'description': 'additional metadata in json file about applied hazard value and '
                               'fragility',
                'type': 'incore:epfDamageSupplement'
            }]
        }
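
# A minimal usage sketch for EpfDamage; every id below is a placeholder.
# Liquefaction and hazard uncertainty are left off, matching the defaults
# in the spec above.
def run_epf_damage_example():
    client = IncoreClient(INCORE_API_DEV_URL)

    epf_dmg = EpfDamage(client)
    epf_dmg.load_remote_input_dataset("epfs", "<epf-inventory-id>")

    # Load fragility mapping
    fragility_service = FragilityService(client)
    mapping_set = MappingSet(fragility_service.get_mapping("<mapping-id>"))
    epf_dmg.set_input_dataset('dfr3_mapping_set', mapping_set)

    epf_dmg.set_parameter("result_name", "epf_dmg_result")
    epf_dmg.set_parameter("hazard_type", "earthquake")
    epf_dmg.set_parameter("hazard_id", "<hazard-id>")
    epf_dmg.set_parameter("num_cpu", 4)

    epf_dmg.run_analysis()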
Example #27
class PipelineDamageRepairRate(BaseAnalysis):
    """Computes pipeline damage for a hazard.

    Args:
        incore_client: Service client with authentication info

    """
    def __init__(self, incore_client):
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(PipelineDamageRepairRate, self).__init__(incore_client)

    def run(self):
        """Execute pipeline damage analysis """
        # Pipeline dataset
        pipeline_dataset = self.get_input_dataset(
            "pipeline").get_inventory_reader()

        # Get hazard type
        hazard_type = self.get_parameter("hazard_type")

        # Get hazard input
        hazard_dataset_id = self.get_parameter("hazard_id")
        user_defined_cpu = 1

        if self.get_parameter("num_cpu") is not None and self.get_parameter(
                "num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        dataset_size = len(pipeline_dataset)
        num_workers = AnalysisUtil.determine_parallelism_locally(
            self, dataset_size, user_defined_cpu)

        avg_bulk_input_size = int(dataset_size / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(pipeline_dataset)
        while count < len(inventory_list):
            inventory_args.append(inventory_list[count:count +
                                                 avg_bulk_input_size])
            count += avg_bulk_input_size

        (ds_results, damage_results) = self.pipeline_damage_concurrent_future(
            self.pipeline_damage_analysis_bulk_input, num_workers,
            inventory_args, repeat(hazard_type), repeat(hazard_dataset_id))

        self.set_result_csv_data("result",
                                 ds_results,
                                 name=self.get_parameter("result_name"))
        self.set_result_json_data("metadata",
                                  damage_results,
                                  name=self.get_parameter("result_name") +
                                  "_additional_info")

        return True

    def pipeline_damage_concurrent_future(self, function_name, num_workers,
                                          *args):
        """Utilizes concurrent.future module.

        Args:
            function_name (function): The function to be parallelized.
            num_workers (int): Maximum number workers in parallelization.
            *args: All the arguments in order to pass into parameter function_name.

        Returns:
            list: A list of ordered dictionaries with building damage values and other data/metadata.

        """
        output_ds = []
        output_dmg = []
        with concurrent.futures.ProcessPoolExecutor(
                max_workers=num_workers) as executor:
            for ret1, ret2 in executor.map(function_name, *args):
                output_ds.extend(ret1)
                output_dmg.extend(ret2)

        return output_ds, output_dmg

    def pipeline_damage_analysis_bulk_input(self, pipelines, hazard_type,
                                            hazard_dataset_id):
        """Run pipeline damage analysis for multiple pipelines.

        Args:
            pipelines (list): Multiple pipelines from the pipeline dataset.
            hazard_type (str): Hazard type
            hazard_dataset_id (str): An id of the hazard exposure.

        Returns:
            ds_results (list): A list of ordered dictionaries with pipeline damage values and other data/metadata.
            damage_results (list): A list of ordered dictionaries with pipeline damage metadata.
        """
        # Get Fragility key
        fragility_key = self.get_parameter("fragility_key")
        if fragility_key is None:
            fragility_key = PipelineUtil.DEFAULT_TSU_FRAGILITY_KEY if hazard_type == 'tsunami' else \
                PipelineUtil.DEFAULT_EQ_FRAGILITY_KEY
            self.set_parameter("fragility_key", fragility_key)

        # get fragility set
        fragility_sets = self.fragilitysvc.match_inventory(
            self.get_input_dataset("dfr3_mapping_set"), pipelines,
            fragility_key)

        # Get Liquefaction Fragility Key
        liquefaction_fragility_key = self.get_parameter(
            "liquefaction_fragility_key")
        if hazard_type == "earthquake" and liquefaction_fragility_key is None:
            liquefaction_fragility_key = PipelineUtil.LIQ_FRAGILITY_KEY

        # Liquefaction
        use_liquefaction = False
        if hazard_type == "earthquake" and self.get_parameter(
                "use_liquefaction") is not None:
            use_liquefaction = self.get_parameter("use_liquefaction")

        # Get geology dataset id
        geology_dataset_id = self.get_parameter(
            "liquefaction_geology_dataset_id")
        fragility_sets_liq = None
        if geology_dataset_id is not None:
            fragility_sets_liq = self.fragilitysvc.match_inventory(
                self.get_input_dataset("dfr3_mapping_set"), pipelines,
                liquefaction_fragility_key)

        values_payload = []
        values_payload_liq = []  # for liquefaction if used
        unmapped_pipelines = []
        mapped_pipelines = []
        for pipeline in pipelines:
            # if find a match fragility for that pipeline
            if pipeline["id"] in fragility_sets.keys():
                fragility_set = fragility_sets[pipeline["id"]]
                location = GeoUtil.get_location(pipeline)
                loc = str(location.y) + "," + str(location.x)
                demands = fragility_set.demand_types
                units = fragility_set.demand_units
                value = {"demands": demands, "units": units, "loc": loc}
                values_payload.append(value)
                mapped_pipelines.append(pipeline)

                # Check if liquefaction is applicable
                if use_liquefaction and \
                        geology_dataset_id is not None and \
                        fragility_sets_liq is not None and \
                        pipeline["id"] in fragility_sets_liq:
                    fragility_set_liq = fragility_sets_liq[pipeline["id"]]
                    demands_liq = fragility_set_liq.demand_types
                    units_liq = fragility_set_liq.demand_units
                    value_liq = {
                        "demands": demands_liq,
                        "units": units_liq,
                        "loc": loc
                    }
                    values_payload_liq.append(value_liq)
            else:
                unmapped_pipelines.append(pipeline)
        del pipelines

        if hazard_type == 'earthquake':
            hazard_resp = self.hazardsvc.post_earthquake_hazard_values(
                hazard_dataset_id, values_payload)
        elif hazard_type == 'tsunami':
            hazard_resp = self.hazardsvc.post_tsunami_hazard_values(
                hazard_dataset_id, values_payload)
        else:
            raise ValueError(
                "The provided hazard type is not supported yet by this analysis"
            )

        # Check if liquefaction is applicable
        if use_liquefaction is True and \
                fragility_sets_liq is not None and \
                geology_dataset_id is not None:
            liquefaction_resp = self.hazardsvc.post_liquefaction_values(
                hazard_dataset_id, geology_dataset_id, values_payload_liq)

        # calculate LS and DS
        ds_results = []
        damage_results = []
        for i, pipeline in enumerate(mapped_pipelines):
            # default
            pgv_repairs = None
            pgd_repairs = 0.0
            total_repair_rate = None
            break_rate = None
            leak_rate = None
            failure_probability = None
            num_pgv_repairs = None
            num_pgd_repairs = 0.0
            num_repairs = None

            liq_hazard_vals = None
            liq_demand_types = None
            liq_demand_units = None
            liquefaction_prob = None
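            # the defaults above are kept as-is (None / 0.0) whenever the returned
            # hazard values contain errors, in which case the damage computation
            # below is skipped for this pipeline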

            ds_result = dict()
            damage_result = dict()
            ds_result['guid'] = pipeline['properties']['guid']
            damage_result['guid'] = pipeline['properties']['guid']

            # record the pipe class in the results
            if 'pipetype' in pipeline['properties']:
                damage_result['pipeclass'] = pipeline['properties']['pipetype']
            elif 'pipelinesc' in pipeline['properties']:
                damage_result['pipeclass'] = pipeline['properties']['pipelinesc']
            else:
                damage_result['pipeclass'] = ""

            fragility_set = fragility_sets[pipeline["id"]]
            # TODO assume there is only one curve
            fragility_curve = fragility_set.fragility_curves[0]

            hazard_vals = AnalysisUtil.update_precision_of_lists(
                hazard_resp[i]["hazardValues"])
            demand_types = hazard_resp[i]["demands"]
            demand_units = hazard_resp[i]["units"]

            hval_dict = dict()
            for j, d in enumerate(fragility_set.demand_types):
                hval_dict[d] = hazard_vals[j]

            if not AnalysisUtil.do_hazard_values_have_errors(
                    hazard_resp[i]["hazardValues"]):
                pipeline_args = fragility_set.construct_expression_args_from_inventory(
                    pipeline)
                pgv_repairs = \
                    fragility_curve.solve_curve_expression(
                        hval_dict, fragility_set.curve_parameters, **pipeline_args)
                # Convert PGV repairs to SI units
                pgv_repairs = PipelineUtil.convert_result_unit(
                    fragility_curve.return_type["unit"], pgv_repairs)

                length = PipelineUtil.get_pipe_length(pipeline)

                # Number of PGV repairs
                num_pgv_repairs = pgv_repairs * length

                # Check if liquefaction is applicable
                if use_liquefaction is True \
                        and fragility_sets_liq is not None \
                        and geology_dataset_id is not None \
                        and liquefaction_resp is not None:
                    fragility_set_liq = fragility_sets_liq[pipeline["id"]]

                    # TODO assume there is only one curve
                    liq_fragility_curve = fragility_set_liq.fragility_curves[0]

                    liq_hazard_vals = AnalysisUtil.update_precision_of_lists(
                        liquefaction_resp[i]["pgdValues"])
                    liq_demand_types = liquefaction_resp[i]["demands"]
                    liq_demand_units = liquefaction_resp[i]["units"]
                    liquefaction_prob = liquefaction_resp[i]['liqProbability']
                    liq_hval_dict = dict()
                    for j, d in enumerate(liquefaction_resp[i]["demands"]):
                        liq_hval_dict[d] = liq_hazard_vals[j]

                    # important: liqProbability is removed and "diameter" is passed in;
                    # no fragility curve actually uses liqProbability
                    pipeline_args = fragility_set_liq.construct_expression_args_from_inventory(
                        pipeline)
                    pgd_repairs = \
                        liq_fragility_curve.solve_curve_expression(
                            liq_hval_dict, fragility_set_liq.curve_parameters, **pipeline_args)
                    # Convert PGD repairs to SI units
                    pgd_repairs = PipelineUtil.convert_result_unit(
                        liq_fragility_curve.return_type["unit"], pgd_repairs)
                    num_pgd_repairs = pgd_repairs * length

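                # Split repairs into breaks and leaks using the common
                # ALA/HAZUS assumption: PGV (wave passage) repairs are ~80%
                # leaks / 20% breaks, while PGD (ground failure) repairs are
                # ~80% breaks / 20% leaks. Pipe failure is then modeled as a
                # Poisson process along the pipe length:
                # P(failure) = 1 - exp(-break_rate * length)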
                break_rate = 0.2 * pgv_repairs + 0.8 * pgd_repairs
                leak_rate = 0.8 * pgv_repairs + 0.2 * pgd_repairs
                total_repair_rate = pgd_repairs + pgv_repairs
                failure_probability = 1 - math.exp(-1.0 * break_rate * length)
                num_repairs = num_pgd_repairs + num_pgv_repairs

            ds_result['pgvrepairs'] = pgv_repairs
            ds_result['pgdrepairs'] = pgd_repairs
            ds_result['repairspkm'] = total_repair_rate
            ds_result['breakrate'] = break_rate
            ds_result['leakrate'] = leak_rate
            ds_result['failprob'] = failure_probability
            ds_result['numpgvrpr'] = num_pgv_repairs
            ds_result['numpgdrpr'] = num_pgd_repairs
            ds_result['numrepairs'] = num_repairs
            ds_result[
                'haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(
                    hazard_vals, hazard_type)

            damage_result['fragility_id'] = fragility_set.id
            damage_result['demandtypes'] = demand_types
            damage_result['demandunits'] = demand_units
            damage_result['hazardtype'] = hazard_type
            damage_result['hazardval'] = hazard_vals

            # Check if liquefaction is applicable
            if use_liquefaction is True \
                    and fragility_sets_liq is not None \
                    and geology_dataset_id is not None:
                damage_result['liq_fragility_id'] = fragility_sets_liq[
                    pipeline["id"]].id
                damage_result['liqdemandtypes'] = liq_demand_types
                damage_result['liqdemandunits'] = liq_demand_units
                damage_result['liqhazval'] = liq_hazard_vals
                damage_result['liqprobability'] = liquefaction_prob
            else:
                damage_result['liq_fragility_id'] = None
                damage_result['liqdemandtypes'] = None
                damage_result['liqdemandunits'] = None
                damage_result['liqhazval'] = None
                damage_result['liqprobability'] = None

            ds_results.append(ds_result)
            damage_results.append(damage_result)

        # pipelines that do not have a matched mapping
        for pipeline in unmapped_pipelines:
            ds_result = dict()
            ds_result['guid'] = pipeline['properties']['guid']

            damage_result = dict()
            damage_result['guid'] = pipeline['properties']['guid']
            if 'pipetype' in pipeline['properties']:
                damage_result['pipeclass'] = pipeline['properties']['pipetype']
            elif 'pipelinesc' in pipeline['properties']:
                damage_result['pipeclass'] = pipeline['properties'][
                    'pipelinesc']
            else:
                damage_result['pipeclass'] = ""

            damage_result['fragility_id'] = None
            damage_result['demandtypes'] = None
            damage_result['demandunits'] = None
            damage_result['hazardtype'] = None
            damage_result['hazardval'] = None
            damage_result['liq_fragility_id'] = None
            damage_result['liqdemandtypes'] = None
            damage_result['liqdemandunits'] = None
            damage_result['liqhazval'] = None
            damage_result['liqprobability'] = None

            ds_results.append(ds_result)
            damage_results.append(damage_result)

        return ds_results, damage_results

    def get_spec(self):
        """Get specifications of the pipeline damage analysis.

        Returns:
            obj: A JSON object of specifications of the pipeline damage analysis.

        """
        return {
            'name':
            'pipeline-damage',
            'description':
            'buried pipeline damage analysis',
            'input_parameters': [{
                'id': 'result_name',
                'required': True,
                'description': 'result dataset name',
                'type': str
            }, {
                'id': 'hazard_type',
                'required': True,
                'description': 'Hazard Type (e.g. earthquake)',
                'type': str
            }, {
                'id': 'hazard_id',
                'required': True,
                'description': 'Hazard ID',
                'type': str
            }, {
                'id': 'fragility_key',
                'required': False,
                'description': 'Fragility key to use in mapping dataset',
                'type': str
            }, {
                'id': 'use_liquefaction',
                'required': False,
                'description': 'Use liquefaction',
                'type': bool
            }, {
                'id': 'liquefaction_fragility_key',
                'required': False,
                'description':
                'Fragility key to use in liquefaction mapping dataset',
                'type': str
            }, {
                'id': 'num_cpu',
                'required': False,
                'description':
                'If using parallel execution, the number of cpus to request',
                'type': int
            }, {
                'id': 'liquefaction_geology_dataset_id',
                'required': False,
                'description': 'Geology dataset id',
                'type': str,
            }],
            'input_datasets': [{
                'id':
                'pipeline',
                'required':
                True,
                'description':
                'Pipeline Inventory',
                'type': ['ergo:buriedPipelineTopology', 'ergo:pipeline'],
            }, {
                'id': 'dfr3_mapping_set',
                'required': True,
                'description': 'DFR3 Mapping Set Object',
                'type': ['incore:dfr3MappingSet'],
            }],
            'output_datasets': [{
                'id': 'result',
                'parent_type': 'pipeline',
                'type': 'ergo:pipelineDamageVer3'
            }, {
                'id':
                'metadata',
                'parent_type':
                'pipeline',
                'description':
                'additional metadata in json file about applied hazard value and '
                'fragility',
                'type':
                'incore:pipelineDamageSupplement'
            }]
        }
Example #28
class EpfDamage(BaseAnalysis):
    """Computes electric power facility structural damage for an earthquake, tsunami, tornado, and hurricane hazards.

    Args:
        incore_client (IncoreClient): Service authentication.

    """

    DEFAULT_LIQ_FRAGILITY_KEY = "pgd"
    DEFAULT_FRAGILITY_KEY = "pga"

    def __init__(self, incore_client):
        self.hazardsvc = HazardService(incore_client)
        self.fragilitysvc = FragilityService(incore_client)

        super(EpfDamage, self).__init__(incore_client)

    def run(self):
        """Executes electric power facility damage analysis."""
        epf_set = self.get_input_dataset("epfs").get_inventory_reader()

        # Get Fragility key
        fragility_key = self.get_parameter("fragility_key")
        if fragility_key is None:
            fragility_key = self.DEFAULT_FRAGILITY_KEY
            self.set_parameter("fragility_key", fragility_key)

        # Get hazard input
        hazard_dataset_id = self.get_parameter("hazard_id")

        # Hazard type (used to select the hazard service endpoint)
        hazard_type = self.get_parameter("hazard_type")

        # Hazard Uncertainty
        use_hazard_uncertainty = False
        if self.get_parameter("use_hazard_uncertainty") is not None:
            use_hazard_uncertainty = self.get_parameter(
                "use_hazard_uncertainty")

        # Liquefaction
        use_liquefaction = False
        if self.get_parameter("use_liquefaction") is not None:
            use_liquefaction = self.get_parameter("use_liquefaction")
        liq_geology_dataset_id = self.get_parameter(
            "liquefaction_geology_dataset_id")

        user_defined_cpu = 1

        if self.get_parameter("num_cpu") is not None and self.get_parameter(
                "num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(
            self, len(epf_set), user_defined_cpu)

        avg_bulk_input_size = int(len(epf_set) / num_workers)
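        # Split the inventory into roughly equal chunks, one per worker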
        inventory_args = []
        count = 0
        inventory_list = list(epf_set)
        while count < len(inventory_list):
            inventory_args.append(inventory_list[count:count +
                                                 avg_bulk_input_size])
            count += avg_bulk_input_size

        results = self.epf_damage_concurrent_future(
            self.epf_damage_analysis_bulk_input, num_workers, inventory_args,
            repeat(hazard_type), repeat(hazard_dataset_id),
            repeat(use_hazard_uncertainty), repeat(use_liquefaction),
            repeat(liq_geology_dataset_id))

        self.set_result_csv_data("result",
                                 results,
                                 name=self.get_parameter("result_name"))

        return True

    def epf_damage_concurrent_future(self, function_name, num_workers, *args):
        """Utilizes concurrent.future module.

        Args:
            function_name (function): The function to be parallelized.
            num_workers (int): Maximum number of workers in parallelization.
            *args: All the arguments in order to pass into parameter function_name.

        Returns:
            list: A list of ordered dictionaries with epf damage values and other data/metadata.

        """

        output = []
        with concurrent.futures.ProcessPoolExecutor(
                max_workers=num_workers) as executor:
            for ret in executor.map(function_name, *args):
                output.extend(ret)

        return output

    def epf_damage_analysis_bulk_input(self, epfs, hazard_type,
                                       hazard_dataset_id,
                                       use_hazard_uncertainty,
                                       use_liquefaction,
                                       liq_geology_dataset_id):
        """Run analysis for multiple epfs.

        Args:
            epfs (list): Multiple epfs from input inventory set.
            hazard_type (str): A type of hazard exposure (earthquake, tsunami, tornado, or hurricane).
            hazard_dataset_id (str): An id of the hazard exposure.
            use_hazard_uncertainty (bool):  Hazard uncertainty. True for using uncertainty when computing damage,
                False otherwise.
            use_liquefaction (bool): Liquefaction. True for using liquefaction information to modify the damage,
                False otherwise.
            liq_geology_dataset_id (str): A dataset id of the geology dataset for liquefaction.

        Returns:
            list: A list of ordered dictionaries with epf damage values and other data/metadata.

        """
        result = []

        fragility_key = self.get_parameter("fragility_key")

        fragility_set = self.fragilitysvc.match_inventory(
            self.get_input_dataset("dfr3_mapping_set"), epfs, fragility_key)
        epf_results = []

        # Converting list of epfs into a dictionary for ease of reference
        list_epfs = epfs
        epfs = dict()
        for epf in list_epfs:
            epfs[epf["id"]] = epf
        del list_epfs  # Clear as it's not needed anymore

        processed_epf = []
        grouped_epfs = AnalysisUtil.group_by_demand_type(epfs, fragility_set)
        for demand, grouped_epf_items in grouped_epfs.items():
            input_demand_type = demand[0]
            input_demand_units = demand[1]

            # For every group of unique demand and demand unit, call the end-point once
            epf_chunks = list(AnalysisUtil.chunks(grouped_epf_items, 50))
            for epf_chunk in epf_chunks:
                points = []
                for epf_id in epf_chunk:
                    location = GeoUtil.get_location(epfs[epf_id])
                    points.append(str(location.y) + "," + str(location.x))

                if hazard_type == 'earthquake':
                    hazard_vals = self.hazardsvc.get_earthquake_hazard_values(
                        hazard_dataset_id, input_demand_type,
                        input_demand_units, points)
                elif hazard_type == 'tornado':
                    hazard_vals = self.hazardsvc.get_tornado_hazard_values(
                        hazard_dataset_id, input_demand_units, points)
                elif hazard_type == 'hurricane':
                    # TODO: implement hurricane
                    raise ValueError(
                        'Hurricane hazard has not yet been implemented!')

                elif hazard_type == 'tsunami':
                    hazard_vals = self.hazardsvc.get_tsunami_hazard_values(
                        hazard_dataset_id, input_demand_type,
                        input_demand_units, points)
                else:
                    raise ValueError("Missing hazard type.")

                # Parse the batch hazard value results and map them back to the building and fragility.
                # This is a potential pitfall as we are relying on the order of the returned results
                i = 0
                for epf_id in epf_chunk:
                    epf_result = collections.OrderedDict()
                    epf = epfs[epf_id]
                    hazard_val = hazard_vals[i]['hazardValue']

                    # Sometimes the geotiffs give large negative values for out of bounds instead of 0
                    if hazard_val <= 0.0:
                        hazard_val = 0.0

                    std_dev = 0.0
                    if use_hazard_uncertainty:
                        raise ValueError("Uncertainty Not Implemented!")

                    selected_fragility_set = fragility_set[epf_id]
                    limit_states = selected_fragility_set.calculate_limit_state(
                        hazard_val, std_dev=std_dev)
                    dmg_interval = AnalysisUtil.calculate_damage_interval(
                        limit_states)

                    epf_result['guid'] = epf['properties']['guid']
                    epf_result.update(limit_states)
                    epf_result.update(dmg_interval)
                    epf_result['demandtype'] = input_demand_type
                    epf_result['demandunits'] = input_demand_units
                    epf_result['hazardtype'] = hazard_type
                    epf_result['hazardval'] = hazard_val

                    epf_results.append(epf_result)
                    processed_epf.append(epf_id)
                    i = i + 1

        # when liquefaction is used, the limit states need to be modified
        if hazard_type == 'earthquake' and use_liquefaction and liq_geology_dataset_id is not None:
            liq_fragility_key = self.get_parameter(
                "liquefaction_fragility_key")
            if liq_fragility_key is None:
                liq_fragility_key = self.DEFAULT_LIQ_FRAGILITY_KEY
            liq_fragility_set = self.fragilitysvc.match_inventory(
                self.get_input_dataset("dfr3_mapping_set"), epfs,
                liq_fragility_key)
            grouped_liq_epfs = AnalysisUtil.group_by_demand_type(
                epfs, liq_fragility_set)

            for liq_demand, grouped_liq_epf_items in grouped_liq_epfs.items():
                liq_input_demand_type = liq_demand[0]
                liq_input_demand_units = liq_demand[1]

                # For every group of unique demand and demand unit, call the end-point once
                liq_epf_chunks = list(
                    AnalysisUtil.chunks(grouped_liq_epf_items, 50))
                for liq_epf_chunk in liq_epf_chunks:
                    points = []
                    for liq_epf_id in liq_epf_chunk:
                        location = GeoUtil.get_location(epfs[liq_epf_id])
                        points.append(str(location.y) + "," + str(location.x))
                    liquefaction_vals = self.hazardsvc.get_liquefaction_values(
                        hazard_dataset_id, liq_geology_dataset_id,
                        liq_input_demand_units, points)

                    # Parse the batch hazard value results and map them back to the building and fragility.
                    # This is a potential pitfall as we are relying on the order of the returned results
                    i = 0
                    for liq_epf_id in liq_epf_chunk:
                        liq_hazard_val = liquefaction_vals[i][
                            liq_input_demand_type]

                        std_dev = 0.0
                        if use_hazard_uncertainty:
                            raise ValueError("Uncertainty Not Implemented!")

                        liquefaction_prob = liquefaction_vals[i][
                            'liqProbability']

                        selected_liq_fragility = liq_fragility_set[liq_epf_id]
                        pgd_limit_states = selected_liq_fragility.calculate_limit_state(
                            liq_hazard_val, std_dev=std_dev)

                        # match by guid, add the liqhaztype, liqhazval and liqprobability
                        # fields, and rewrite the limit states and damage intervals
                        for epf_result in epf_results:
                            if epf_result['guid'] == \
                                    epfs[liq_epf_id]['properties']['guid']:
                                limit_states = {
                                    "ls-slight": epf_result['ls-slight'],
                                    "ls-moderat": epf_result['ls-moderat'],
                                    "ls-extensi": epf_result['ls-extensi'],
                                    "ls-complet": epf_result['ls-complet']
                                }
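                                # Combine shaking- and PGD-based limit-state
                                # probabilities (typically via the probabilistic
                                # union P = Ps + Ppgd - Ps * Ppgd)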
                                liq_limit_states = AnalysisUtil.adjust_limit_states_for_pgd(
                                    limit_states, pgd_limit_states)
                                liq_dmg_interval = AnalysisUtil.calculate_damage_interval(
                                    liq_limit_states)
                                epf_result.update(liq_limit_states)
                                epf_result.update(liq_dmg_interval)
                                epf_result[
                                    'liqhaztype'] = liq_input_demand_type
                                epf_result['liqhazval'] = liq_hazard_val
                                epf_result[
                                    'liqprobability'] = liquefaction_prob
                        i = i + 1

        unmapped_limit_states = {
            "ls-slight": 0.0,
            "ls-moderat": 0.0,
            "ls-extensi": 0.0,
            "ls-complet": 0.0
        }
        unmapped_dmg_intervals = AnalysisUtil.calculate_damage_interval(
            unmapped_limit_states)
        for epf_id, epf in epfs.items():
            if epf_id not in processed_epf:
                unmapped_epf_result = collections.OrderedDict()
                unmapped_epf_result['guid'] = epf['properties']['guid']
                unmapped_epf_result.update(unmapped_limit_states)
                unmapped_epf_result.update(unmapped_dmg_intervals)
                unmapped_epf_result["demandtype"] = "None"
                unmapped_epf_result['demandunits'] = "None"
                unmapped_epf_result["hazardtype"] = "None"
                unmapped_epf_result['hazardval'] = 0.0
                unmapped_epf_result['liqhaztype'] = "NA"
                unmapped_epf_result['liqhazval'] = "NA"
                unmapped_epf_result['liqprobability'] = "NA"
                epf_results.append(unmapped_epf_result)

        return epf_results

    def get_spec(self):
        """Get specifications of the epf damage analysis.

        Returns:
            obj: A JSON object of specifications of the epf damage analysis.

        """
        return {
            'name':
            'epf-damage',
            'description':
            'Electric Power Facility damage analysis.',
            'input_parameters': [
                {
                    'id': 'result_name',
                    'required': True,
                    'description': 'A name of the resulting dataset',
                    'type': str
                },
                {
                    'id': 'hazard_type',
                    'required': True,
                    'description': 'Hazard type (e.g. earthquake).',
                    'type': str
                },
                {
                    'id':
                    'hazard_id',
                    'required':
                    True,
                    'description':
                    'Hazard ID which defines the particular hazard (e.g. New Madrid earthquake '
                    'using Atkinson Boore 1995).',
                    'type':
                    str
                },
                {
                    'id': 'fragility_key',
                    'required': False,
                    'description':
                    'Fragility key to use in mapping dataset',
                    'type': str
                },
                {
                    'id': 'use_liquefaction',
                    'required': False,
                    'description':
                    'Use ground liquefaction to modify the damage interval.',
                    'type': bool
                },
                {
                    'id':
                    'liquefaction_geology_dataset_id',
                    'required':
                    False,
                    'description':
                    'Liquefaction geology/susceptibility dataset id. '
                    'If not provided, liquefaction will be ignored',
                    'type':
                    str
                },
                {
                    'id': 'use_hazard_uncertainty',
                    'required': False,
                    'description': 'Use hazard uncertainty',
                    'type': bool
                },
                {
                    'id': 'num_cpu',
                    'required': False,
                    'description':
                    'If using parallel execution, the number of cpus to request.',
                    'type': int
                },
            ],
            'input_datasets': [{
                'id': 'epfs',
                'required': True,
                'description': 'Electric Power Facility Inventory',
                'type': ['incore:epf', 'ergo:epf'],
            }, {
                'id': 'dfr3_mapping_set',
                'required': True,
                'description': 'DFR3 Mapping Set Object',
                'type': ['incore:dfr3MappingSet'],
            }],
            'output_datasets': [{
                'id': 'result',
                'parent_type': 'epfs',
                'type': 'incore:epfDamage'
            }]
        }
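

# A minimal usage sketch for EpfDamage, following the run_with_base_class
# pattern used elsewhere in this listing; the dataset, mapping, and hazard
# IDs below are placeholders, not real service IDs:
#
#     client = IncoreClient(pyglobals.INCORE_API_DEV_URL)
#     epf_dmg = EpfDamage(client)
#     epf_dmg.load_remote_input_dataset("epfs", "<epf inventory dataset id>")
#     fragility_service = FragilityService(client)
#     mapping_set = MappingSet(
#         fragility_service.get_mapping("<epf fragility mapping id>"))
#     epf_dmg.set_input_dataset('dfr3_mapping_set', mapping_set)
#     epf_dmg.set_parameter("result_name", "epf_dmg_result")
#     epf_dmg.set_parameter("hazard_type", "earthquake")
#     epf_dmg.set_parameter("hazard_id", "<earthquake hazard id>")
#     epf_dmg.set_parameter("num_cpu", 4)
#     epf_dmg.run_analysis()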
Example #29
def run_with_base_class():
    client = IncoreClient(pyglobals.INCORE_API_DEV_URL)

    # EQ Road Dataset - Seaside roads
    road_dataset_id = "5ee7af50772cf80008577ae3"

    hazard_type = "tsunami"
    # hazard_type = "earthquake"
    liq_geology_dataset_id = None

    if hazard_type == 'earthquake':
        # Seaside Earthquake
        hazard_id = "5ba8f379ec2309043520906f"

        # Earthquake mapping
        mapping_id = "5d545b0bb9219c0689f1f3f4"

        fragility_key = "pgd"
        liquefaction = False
    elif hazard_type == 'tsunami':
        # Seaside Tsunami
        hazard_id = "5bc9eaf7f7b08533c7e610e1"

        # Tsunami Mapping for Seaside
        mapping_id = "5ee7b2c9c54361000148de37"

        fragility_key = "Non-Retrofit inundationDepth Fragility ID Code"
        liquefaction = False
    else:
        raise ValueError(
            "Earthquake and tsunami are the only testable hazards with road damage currently"
        )

    uncertainty = False

    # Run road damage
    road_dmg = RoadDamage(client)
    road_dmg.load_remote_input_dataset("roads", road_dataset_id)

    # Load fragility mapping
    fragility_service = FragilityService(client)
    mapping_set = MappingSet(fragility_service.get_mapping(mapping_id))
    road_dmg.set_input_dataset('dfr3_mapping_set', mapping_set)

    road_dmg.set_parameter("result_name", "seaside_road_dmg_" + hazard_type)
    road_dmg.set_parameter("hazard_type", hazard_type)
    road_dmg.set_parameter("hazard_id", hazard_id)
    if fragility_key is not None:
        road_dmg.set_parameter("fragility_key", fragility_key)
    road_dmg.set_parameter("num_cpu", 1)
    road_dmg.set_parameter("use_liquefaction", liquefaction)
    if liquefaction and liq_geology_dataset_id is not None:
        road_dmg.set_parameter("liquefaction_geology_dataset_id",
                               liq_geology_dataset_id)
    road_dmg.set_parameter("use_hazard_uncertainty", uncertainty)

    # Run Analysis
    road_dmg.run_analysis()

    ######################################################################
    # test galveston hurricane road failure

    # road inventory for Galveston island
    road_dataset_id = "60ba5ee94219ee25a56b6999"
    # road damage by hurricane inundation mapping
    mapping_id = "60ba583b1f2b7d4a916faf03"
    # Galveston Deterministic Hurricane - Kriging inundationDuration
    hazard_type = "hurricane"
    hazard_id = "5f10837c01d3241d77729a4f"

    # Create road damage
    hurr_road_dmg = RoadDamage(client)
    # Load input datasets
    hurr_road_dmg.load_remote_input_dataset("roads", road_dataset_id)
    # Load fragility mapping
    fragility_service = FragilityService(client)
    mapping_set = MappingSet(fragility_service.get_mapping(mapping_id))
    hurr_road_dmg.set_input_dataset('dfr3_mapping_set', mapping_set)
    # Specify the result name
    result_name = "galveston_hurricane_road_result"
    # Set analysis parameters
    hurr_road_dmg.set_parameter("result_name", result_name)
    hurr_road_dmg.set_parameter("hazard_type", hazard_type)
    hurr_road_dmg.set_parameter(
        "fragility_key", "Non-Retrofit inundationDepth Fragility ID Code")
    hurr_road_dmg.set_parameter("hazard_id", hazard_id)
    hurr_road_dmg.set_parameter("num_cpu", 4)

    # Run road damage by hurricane inundation analysis
    hurr_road_dmg.run_analysis()
Example #30
def run_with_base_class():
    client = IncoreClient(pyglobals.INCORE_API_DEV_URL)

    # New Madrid earthquake using Atkinson Boore 1995
    hazard_type = "earthquake"
    hazard_id = "5b902cb273c3371e1236b36b"

    # NBSR bridges
    bridge_dataset_id = "5a284f2dc7d30d13bc082040"

    # Default Bridge Fragility Mapping on incore-service
    mapping_id = "5b47bcce337d4a37755e0cb2"

    # Use hazard uncertainty for computing damage
    use_hazard_uncertainty = False
    # Use liquefaction (LIQ) column of bridges to modify fragility curve
    use_liquefaction = False

    # Create bridge damage
    bridge_dmg = BridgeDamage(client)

    # Load input datasets
    bridge_dmg.load_remote_input_dataset("bridges", bridge_dataset_id)

    # Load fragility mapping
    fragility_service = FragilityService(client)
    mapping_set = MappingSet(fragility_service.get_mapping(mapping_id))
    bridge_dmg.set_input_dataset('dfr3_mapping_set', mapping_set)

    # Set analysis parameters
    bridge_dmg.set_parameter("result_name", "bridge_result")
    bridge_dmg.set_parameter("hazard_type", hazard_type)
    bridge_dmg.set_parameter("hazard_id", hazard_id)
    bridge_dmg.set_parameter("num_cpu", 4)

    # Run bridge damage analysis
    bridge_dmg.run_analysis()

    ###################################################################
    # Test liquefaction

    # south carolina eq damage
    hazard_type = "earthquake"
    hazard_id = "5ee9309bc9f1b70008fdbd26"

    # south carolina bridges
    bridge_dataset_id = "5ee92f884210b80008f9377e"

    # Default Bridge Fragility Mapping on incore-service
    mapping_id = "5b47bcce337d4a37755e0cb2"

    # Use hazard uncertainty for computing damage
    use_hazard_uncertainty = False
    # Use liquefaction (LIQ) column of bridges to modify fragility curve
    use_liquefaction = True

    # Create bridge damage
    bridge_dmg = BridgeDamage(client)

    # Load input datasets
    bridge_dmg.load_remote_input_dataset("bridges", bridge_dataset_id)

    # Load fragility mapping
    fragility_service = FragilityService(client)
    mapping_set = MappingSet(fragility_service.get_mapping(mapping_id))
    bridge_dmg.set_input_dataset('dfr3_mapping_set', mapping_set)

    # Set analysis parameters
    bridge_dmg.set_parameter("result_name", "bridge_result_w_liquefaction")
    bridge_dmg.set_parameter("hazard_type", hazard_type)
    bridge_dmg.set_parameter("hazard_id", hazard_id)
    bridge_dmg.set_parameter("use_liquefaction", use_liquefaction)
    bridge_dmg.set_parameter("num_cpu", 1)

    # Run bridge damage analysis
    bridge_dmg.run_analysis()

    ###################################################################
    # test Galveston Bridge Damage
    hazard_type = "hurricane"
    hazard_id = "5f11e50cc6491311a814584c"

    # Galveston bridge
    bridge_dataset_id = "6062058ac57ada48e48c31e3"

    # Galveston hurricane bridge mapping
    refactored_mapping_id = "6062254b618178207f66226c"

    # Create bridge damage
    bridge_dmg = BridgeDamage(client)

    # Load input datasets
    bridge_dmg.load_remote_input_dataset("bridges", bridge_dataset_id)

    # Load fragility mapping
    fragility_service = FragilityService(client)
    refactored_mapping_set = MappingSet(
        fragility_service.get_mapping(refactored_mapping_id))
    bridge_dmg.set_input_dataset('dfr3_mapping_set', refactored_mapping_set)

    # Set analysis parameters
    bridge_dmg.set_parameter(
        "fragility_key",
        "Hurricane SurgeLevel and WaveHeight Fragility ID Code")
    bridge_dmg.set_parameter("result_name", "galveston_bridge_dmg_result")
    bridge_dmg.set_parameter("hazard_type", hazard_type)
    bridge_dmg.set_parameter("hazard_id", hazard_id)
    bridge_dmg.set_parameter("num_cpu", 4)

    # Run bridge damage analysis
    bridge_dmg.run_analysis()