Example #1
    def run(self):
        """Executes Cumulative Building Damage Analysis"""
        eq_damage_set = self.get_input_dataset("eq_bldg_dmg").get_csv_reader()
        eq_damage_df = pd.DataFrame(list(eq_damage_set))
        tsunami_damage_set = self.get_input_dataset(
            "tsunami_bldg_dmg").get_csv_reader()
        tsunami_damage_df = pd.DataFrame(list(tsunami_damage_set))

        user_defined_cpu = 1

        if self.get_parameter("num_cpu") is not None and self.get_parameter(
                "num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(
            self, len(eq_damage_df), user_defined_cpu)

        avg_bulk_input_size = int(len(eq_damage_df) / num_workers)
        eq_damage_args = []
        count = 0

        while count < len(eq_damage_df):
            eq_damage_args.append(eq_damage_df[count:count +
                                               avg_bulk_input_size])
            count += avg_bulk_input_size

        results = self.cumulative_building_damage_concurrent_future(
            self.cumulative_building_damage_bulk_input, num_workers,
            eq_damage_args, repeat(tsunami_damage_df))

        self.set_result_csv_data("combined-result",
                                 results,
                                 name=self.get_parameter("result_name"))

        return True
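
All of these run() methods share the same pattern: read inputs, pick a worker count, slice the inventory into roughly equal chunks, fan the chunks out to workers, and register the results. The slicing loop can be isolated into a small standalone helper; the sketch below is illustrative only (chunk_inventory is not a pyincore function) and reproduces the same arithmetic, including the short extra chunk that absorbs the remainder.

# Illustrative helper, not part of pyincore: splits an inventory into
# chunks of int(len(inventory) / num_workers) items, exactly like the
# while-loops in these examples. Note that the remainder ends up in an
# extra short chunk, so up to num_workers + 1 chunks can be produced.
def chunk_inventory(inventory, num_workers):
    inventory = list(inventory)
    avg_bulk_input_size = int(len(inventory) / num_workers)
    chunks = []
    count = 0
    while count < len(inventory):
        chunks.append(inventory[count:count + avg_bulk_input_size])
        count += avg_bulk_input_size
    return chunks

print(chunk_inventory(range(10), 3))
# [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]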
Example #2
    def run(self):
        """Executes bridge damage analysis."""
        # Bridge dataset
        bridge_set = self.get_input_dataset("bridges").get_inventory_reader()

        # Get hazard input
        hazard_type = self.get_parameter("hazard_type")
        hazard_dataset_id = self.get_parameter("hazard_id")
        user_defined_cpu = 1

        if self.get_parameter("num_cpu") is not None and self.get_parameter(
                "num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(
            self, len(bridge_set), user_defined_cpu)

        avg_bulk_input_size = int(len(bridge_set) / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(bridge_set)
        while count < len(inventory_list):
            inventory_args.append(
                inventory_list[count:count + avg_bulk_input_size])
            count += avg_bulk_input_size

        results = self.bridge_damage_concurrent_future(
            self.bridge_damage_analysis_bulk_input, num_workers,
            inventory_args, repeat(hazard_type),
            repeat(hazard_dataset_id))

        self.set_result_csv_data("result", results,
                                 name=self.get_parameter("result_name"))

        return True
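
Every example hands its chunks to a *_concurrent_future helper (here bridge_damage_concurrent_future). Helpers of this kind in pyincore are thin wrappers around concurrent.futures; the following is a minimal sketch of that shape, assuming the bulk-input function returns a list of results per chunk, and is not a verbatim copy of the library method.

import concurrent.futures

# Sketch of the fan-out helper these examples call; the name and exact
# signature are assumptions, not the library's definitive code.
def damage_concurrent_future(function_name, parallelism, *args):
    output = []
    with concurrent.futures.ProcessPoolExecutor(
            max_workers=parallelism) as executor:
        # executor.map pairs the i-th element of each iterable in args,
        # so each worker call receives one chunk plus its repeated args.
        for ret in executor.map(function_name, *args):
            output.extend(ret)
    return output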
Example #3
    def run(self):
        """Executes mc failure probability analysis."""

        # read in file and parameters
        damage = self.get_input_dataset("damage").get_csv_reader()
        damage_result = AnalysisUtil.get_csv_table_rows(damage,
                                                        ignore_first_row=False)

        # setting number of cpus to use
        user_defined_cpu = 1
        if self.get_parameter("num_cpu") is not None and self.get_parameter(
                "num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(
            self, len(damage_result), user_defined_cpu)

        avg_bulk_input_size = int(len(damage_result) / num_workers)
        inventory_args = []
        count = 0
        inventory_list = damage_result

        seed = self.get_parameter("seed")
        seed_list = []
        while count < len(inventory_list):
            inventory_args.append(inventory_list[count:count +
                                                 avg_bulk_input_size])
            if seed is not None:
                # Seed offsets start at count - 1, so the first chunk is
                # seeded from seed - 1 (see the note after this example).
                seed_list.append([
                    seed + i
                    for i in range(count - 1, count + avg_bulk_input_size - 1)
                ])
            else:
                seed_list.append([None] * avg_bulk_input_size)
            count += avg_bulk_input_size

        fs_results, fp_results, samples_results = self.monte_carlo_failure_probability_concurrent_future(
            self.monte_carlo_failure_probability_bulk_input, num_workers,
            inventory_args, seed_list)
        self.set_result_csv_data("sample_failure_state",
                                 fs_results,
                                 name=self.get_parameter("result_name") +
                                 "_failure_state")
        self.set_result_csv_data("failure_probability",
                                 fp_results,
                                 name=self.get_parameter("result_name") +
                                 "_failure_probability")
        self.set_result_csv_data("sample_damage_states",
                                 samples_results,
                                 name=self.get_parameter("result_name") +
                                 "_sample_damage_states")
        return True
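
A detail worth flagging in this example: the per-chunk seed offsets run from count - 1, so with a user seed s the first chunk is seeded starting at s - 1, not s. The arithmetic in isolation (plain Python, no pyincore needed):

seed = 1000
avg_bulk_input_size = 3
count = 0
print([seed + i for i in range(count - 1, count + avg_bulk_input_size - 1)])
# [999, 1000, 1001] -- the first chunk's seeds begin at seed - 1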
Example #4
    def run(self):
        """Executes electric power facility damage analysis."""
        epf_set = self.get_input_dataset("epfs").get_inventory_reader()

        # Get Fragility key
        fragility_key = self.get_parameter("fragility_key")
        if fragility_key is None:
            fragility_key = self.DEFAULT_FRAGILITY_KEY
            self.set_parameter("fragility_key", fragility_key)

        # Get hazard input
        hazard_dataset_id = self.get_parameter("hazard_id")

        # Hazard type. Kept for future use, in case this analysis adds
        # support for additional hazards.
        hazard_type = self.get_parameter("hazard_type")

        # Hazard Uncertainty
        use_hazard_uncertainty = False
        if self.get_parameter("use_hazard_uncertainty") is not None:
            use_hazard_uncertainty = self.get_parameter(
                "use_hazard_uncertainty")

        # Liquefaction
        use_liquefaction = False
        if self.get_parameter("use_liquefaction") is not None:
            use_liquefaction = self.get_parameter("use_liquefaction")
        liq_geology_dataset_id = self.get_parameter(
            "liquefaction_geology_dataset_id")

        user_defined_cpu = 1

        if self.get_parameter("num_cpu") is not None and self.get_parameter(
                "num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(
            self, len(epf_set), user_defined_cpu)

        avg_bulk_input_size = int(len(epf_set) / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(epf_set)
        while count < len(inventory_list):
            inventory_args.append(inventory_list[count:count +
                                                 avg_bulk_input_size])
            count += avg_bulk_input_size

        results = self.epf_damage_concurrent_future(
            self.epf_damage_analysis_bulk_input, num_workers, inventory_args,
            repeat(hazard_type), repeat(hazard_dataset_id),
            repeat(use_hazard_uncertainty), repeat(use_liquefaction),
            repeat(liq_geology_dataset_id))

        self.set_result_csv_data("result",
                                 results,
                                 name=self.get_parameter("result_name"))

        return True
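
For context, these run() methods are not called directly; a driver script supplies the datasets and parameters and then calls run_analysis(), which checks them against the analysis spec before invoking run(). A minimal sketch assuming the usual pyincore driver workflow; the dataset and hazard IDs are placeholders:

from pyincore import IncoreClient
from pyincore.analyses.epfdamage import EpfDamage

client = IncoreClient()
epf_dmg = EpfDamage(client)
epf_dmg.load_remote_input_dataset("epfs", "<epf dataset id>")
epf_dmg.set_parameter("result_name", "epf_dmg_result")
epf_dmg.set_parameter("hazard_type", "earthquake")
epf_dmg.set_parameter("hazard_id", "<hazard id>")
epf_dmg.set_parameter("num_cpu", 4)
epf_dmg.run_analysis()  # eventually calls the run() shown above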
Example #5
    def run(self):
        """Executes building damage analysis."""
        # Building dataset
        building_set = self.get_input_dataset(
            "buildings").get_inventory_reader()

        # Set default fragility keys
        fragility_key_as = self.get_parameter("fragility_key_as")
        if fragility_key_as is None:
            self.set_parameter("fragility_key_as",
                               NonStructBuildingUtil.DEFAULT_FRAGILITY_KEY_AS)

        fragility_key_ds = self.get_parameter("fragility_key_ds")
        if fragility_key_ds is None:
            self.set_parameter("fragility_key_ds",
                               NonStructBuildingUtil.DEFAULT_FRAGILITY_KEY_DS)

        # Set Default Hazard Uncertainty
        use_hazard_uncertainty = self.get_parameter("use_hazard_uncertainty")
        if use_hazard_uncertainty is None:
            self.set_parameter("use_hazard_uncertainty", False)

        # Set Default Liquefaction
        use_liquefaction = self.get_parameter("use_liquefaction")
        if use_liquefaction is None:
            self.set_parameter("use_liquefaction", False)

        user_defined_cpu = 1

        if self.get_parameter("num_cpu") is not None and self.get_parameter(
                "num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(
            self, len(building_set), user_defined_cpu)

        avg_bulk_input_size = int(len(building_set) / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(building_set)

        while count < len(inventory_list):
            inventory_args.append(inventory_list[count:count +
                                                 avg_bulk_input_size])
            count += avg_bulk_input_size

        (ds_results, damage_results) = self.building_damage_concurrent_future(
            self.building_damage_analysis_bulk_input, num_workers,
            inventory_args)

        self.set_result_csv_data("result",
                                 ds_results,
                                 name=self.get_parameter("result_name"))
        self.set_result_json_data("damage_result",
                                  damage_results,
                                  name=self.get_parameter("result_name") +
                                  "_additional_info")
        return True
Example #6
    def run(self):
        """Executes building damage analysis."""
        # Building dataset
        bldg_set = self.get_input_dataset("buildings").get_inventory_reader()

        # building retrofit strategy
        retrofit_strategy_dataset = self.get_input_dataset("retrofit_strategy")
        if retrofit_strategy_dataset is not None:
            retrofit_strategy = list(retrofit_strategy_dataset.get_csv_reader())
        else:
            retrofit_strategy = None

        # Get hazard input
        hazard_dataset_id = self.get_parameter("hazard_id")

        # Hazard type of the exposure
        hazard_type = self.get_parameter("hazard_type")

        # Get Fragility key
        fragility_key = self.get_parameter("fragility_key")
        if fragility_key is None:
            fragility_key = BuildingUtil.DEFAULT_TSUNAMI_MMAX_FRAGILITY_KEY if hazard_type == 'tsunami' else \
                BuildingUtil.DEFAULT_FRAGILITY_KEY
            self.set_parameter("fragility_key", fragility_key)

        user_defined_cpu = 1

        if self.get_parameter("num_cpu") is not None and self.get_parameter("num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(self, len(bldg_set), user_defined_cpu)

        avg_bulk_input_size = int(len(bldg_set) / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(bldg_set)
        while count < len(inventory_list):
            inventory_args.append(inventory_list[count:count + avg_bulk_input_size])
            count += avg_bulk_input_size

        (ds_results, damage_results) = self.building_damage_concurrent_future(self.building_damage_analysis_bulk_input,
                                                                              num_workers,
                                                                              inventory_args,
                                                                              repeat(retrofit_strategy),
                                                                              repeat(hazard_type),
                                                                              repeat(hazard_dataset_id))

        self.set_result_csv_data("ds_result", ds_results, name=self.get_parameter("result_name"))
        self.set_result_json_data("damage_result",
                                  damage_results,
                                  name=self.get_parameter("result_name") + "_additional_info")

        return True
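
The repeat(...) wrappers broadcast scalar arguments alongside the chunk list: map-style fan-out stops at the shortest iterable, so an infinite repeat iterator simply pairs the same value with every chunk. A small self-contained illustration:

from itertools import repeat

chunks = [["bldg1", "bldg2"], ["bldg3"]]
# map() stops when chunks is exhausted, so repeat() is safe here.
paired = list(map(lambda chunk, h_type, h_id: (chunk, h_type, h_id),
                  chunks, repeat("earthquake"), repeat("hazard-123")))
print(paired)
# [(['bldg1', 'bldg2'], 'earthquake', 'hazard-123'),
#  (['bldg3'], 'earthquake', 'hazard-123')]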
Example #7
    def run(self):
        """Performs Water facility damage analysis by using the parameters from the spec
        and creates an output dataset in csv format

        Returns:
            bool: True if successful, False otherwise
        """
        # Facility dataset
        inventory_set = self.get_input_dataset(
            "water_facilities").get_inventory_reader()

        # Get hazard input
        hazard_dataset_id = self.get_parameter("hazard_id")

        # Hazard type of the exposure
        hazard_type = self.get_parameter("hazard_type")

        user_defined_cpu = 1

        if self.get_parameter("num_cpu") is not None and self.get_parameter(
                "num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(
            self, len(inventory_set), user_defined_cpu)

        avg_bulk_input_size = int(len(inventory_set) / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(inventory_set)
        while count < len(inventory_list):
            inventory_args.append(inventory_list[count:count +
                                                 avg_bulk_input_size])
            count += avg_bulk_input_size

        (ds_results,
         damage_results) = self.waterfacility_damage_concurrent_futures(
             self.waterfacilityset_damage_analysis_bulk_input, num_workers,
             inventory_args, repeat(hazard_type), repeat(hazard_dataset_id))

        self.set_result_csv_data("result",
                                 ds_results,
                                 name=self.get_parameter("result_name"))
        self.set_result_json_data("metadata",
                                  damage_results,
                                  name=self.get_parameter("result_name") +
                                  "_additional_info")

        return True
Example #8
    def run(self):
        """Execute road damage analysis """
        # road dataset
        road_dataset = self.get_input_dataset("roads").get_inventory_reader()

        # distance to shore table data frame
        distance_df = self.get_input_dataset(
            "distance_table").get_dataframe_from_csv()

        # TODO: update this once the semantic service is launched; the
        # distance field name should then come from it rather than being
        # hard-coded here.
        distance_field_name = "distance"

        # Get hazard type
        hazard_type = self.get_parameter("hazard_type")

        # Get hazard input
        hazard_dataset_id = self.get_parameter("hazard_id")
        user_defined_cpu = 1

        if self.get_parameter("num_cpu") is not None and self.get_parameter(
                "num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        dataset_size = len(road_dataset)
        num_workers = AnalysisUtil.determine_parallelism_locally(
            self, dataset_size, user_defined_cpu)

        avg_bulk_input_size = int(dataset_size / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(road_dataset)
        while count < len(inventory_list):
            inventory_args.append(inventory_list[count:count +
                                                 avg_bulk_input_size])
            count += avg_bulk_input_size

        results = self.road_damage_concurrent_future(
            self.road_damage_analysis_bulk_input, num_workers, inventory_args,
            repeat(distance_df), repeat(distance_field_name),
            repeat(hazard_type), repeat(hazard_dataset_id))

        self.set_result_csv_data("result",
                                 results,
                                 name=self.get_parameter("result_name"))

        return True
Example #9
    def run(self):
        """Execute pipeline damage analysis """
        # Pipeline dataset
        pipeline_dataset = self.get_input_dataset(
            "pipeline").get_inventory_reader()

        # Get hazard type
        hazard_type = self.get_parameter("hazard_type")

        # Get hazard input
        hazard_dataset_id = self.get_parameter("hazard_id")
        user_defined_cpu = 1

        if self.get_parameter("num_cpu") is not None and self.get_parameter(
                "num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        dataset_size = len(pipeline_dataset)
        num_workers = AnalysisUtil.determine_parallelism_locally(
            self, dataset_size, user_defined_cpu)

        avg_bulk_input_size = int(dataset_size / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(pipeline_dataset)
        while count < len(inventory_list):
            inventory_args.append(inventory_list[count:count +
                                                 avg_bulk_input_size])
            count += avg_bulk_input_size

        (ds_results, damage_results) = self.pipeline_damage_concurrent_future(
            self.pipeline_damage_analysis_bulk_input, num_workers,
            inventory_args, repeat(hazard_type), repeat(hazard_dataset_id))

        self.set_result_csv_data("result",
                                 ds_results,
                                 name=self.get_parameter("result_name"))
        self.set_result_json_data("metadata",
                                  damage_results,
                                  name=self.get_parameter("result_name") +
                                  "_additional_info")

        return True
Example #10
    def run(self):
        """Executes pipeline restoration analysis."""

        pipelines_df = self.get_input_dataset(
            "pipeline").get_dataframe_from_shapefile()

        pipeline_dmg = self.get_input_dataset(
            "pipeline_damage").get_csv_reader()
        pipelines_dmg_df = pd.DataFrame(list(pipeline_dmg))

        damage_result = pipelines_dmg_df.merge(pipelines_df, on='guid')
        damage_result = damage_result.to_dict(orient='records')

        user_defined_cpu = 1
        if self.get_parameter("num_cpu") is not None and self.get_parameter(
                "num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(
            self, len(damage_result), user_defined_cpu)

        avg_bulk_input_size = int(len(damage_result) / num_workers)
        inventory_args = []
        count = 0
        inventory_list = damage_result

        while count < len(inventory_list):
            inventory_args.append(inventory_list[count:count +
                                                 avg_bulk_input_size])
            count += avg_bulk_input_size

        restoration_results = self.pipeline_restoration_concurrent_future(
            self.pipeline_restoration_bulk_input, num_workers, inventory_args)
        self.set_result_csv_data("pipeline_restoration",
                                 restoration_results,
                                 name=self.get_parameter("result_name"))
        return True
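
Unlike the other examples, this one joins two inputs before chunking: pipeline attributes from the shapefile are merged with the damage rows on guid, then flattened to a list of dicts so the chunking loop can slice it. The same idea in isolation, with made-up columns:

import pandas as pd

pipelines_df = pd.DataFrame({"guid": ["a", "b"], "length": [1.2, 3.4]})
pipelines_dmg_df = pd.DataFrame({"guid": ["a", "b"], "ds_0": [0.1, 0.2]})

damage_result = pipelines_dmg_df.merge(pipelines_df, on="guid")
print(damage_result.to_dict(orient="records"))
# [{'guid': 'a', 'ds_0': 0.1, 'length': 1.2},
#  {'guid': 'b', 'ds_0': 0.2, 'length': 3.4}]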
Example #11
    def run(self):
        """Executes mean damage calculation."""

        # read in file and parameters
        damage = self.get_input_dataset("damage").get_csv_reader()
        damage_result = AnalysisUtil.get_csv_table_rows(damage,
                                                        ignore_first_row=False)

        dmg_ratio_csv = self.get_input_dataset("dmg_ratios").get_csv_reader()
        dmg_ratio_tbl = AnalysisUtil.get_csv_table_rows(dmg_ratio_csv)

        # setting number of cpus to use
        user_defined_cpu = 1
        if self.get_parameter("num_cpu") is not None and self.get_parameter(
                "num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(
            self, len(damage_result), user_defined_cpu)

        avg_bulk_input_size = int(len(damage_result) / num_workers)
        inventory_args = []
        count = 0
        inventory_list = damage_result
        while count < len(inventory_list):
            inventory_args.append(inventory_list[count:count +
                                                 avg_bulk_input_size])
            count += avg_bulk_input_size

        results = self.mean_damage_concurrent_future(
            self.mean_damage_bulk_input, num_workers, inventory_args,
            repeat(dmg_ratio_tbl))
        self.set_result_csv_data("result",
                                 results,
                                 name=self.get_parameter("result_name"))
        return True
Example #12
    def run(self):
        """Executes road damage analysis."""
        # Road dataset
        road_set = self.get_input_dataset("roads").get_inventory_reader()

        # Get Fragility key
        fragility_key = self.get_parameter("fragility_key")
        if fragility_key is None:
            fragility_key = self.DEFAULT_FRAGILITY_KEY

        # Get hazard input
        hazard_dataset_id = self.get_parameter("hazard_id")

        # Get hazard type
        hazard_type = self.get_parameter("hazard_type")

        # Liquefaction
        use_liquefaction = False
        if self.get_parameter("use_liquefaction") is not None:
            use_liquefaction = self.get_parameter("use_liquefaction")

        # Get geology dataset for liquefaction
        geology_dataset_id = None
        if self.get_parameter("liquefaction_geology_dataset_id") is not None:
            geology_dataset_id = self.get_parameter("liquefaction_geology_dataset_id")

        # Hazard Uncertainty
        use_hazard_uncertainty = False
        if self.get_parameter("use_hazard_uncertainty") is not None:
            use_hazard_uncertainty = self.get_parameter("use_hazard_uncertainty")

        user_defined_cpu = 1
        if self.get_parameter("num_cpu") is not None and self.get_parameter("num_cpu") > 0:
            user_defined_cpu = self.get_parameter("num_cpu")

        num_workers = AnalysisUtil.determine_parallelism_locally(self, len(road_set), user_defined_cpu)

        avg_bulk_input_size = int(len(road_set) / num_workers)
        inventory_args = []
        count = 0
        inventory_list = list(road_set)
        while count < len(inventory_list):
            inventory_args.append(inventory_list[count:count + avg_bulk_input_size])
            count += avg_bulk_input_size

        (ds_results, damage_results) = self.road_damage_concurrent_future(self.road_damage_analysis_bulk_input,
                                                                          num_workers,
                                                                          inventory_args,
                                                                          repeat(hazard_type),
                                                                          repeat(hazard_dataset_id),
                                                                          repeat(use_hazard_uncertainty),
                                                                          repeat(geology_dataset_id),
                                                                          repeat(fragility_key),
                                                                          repeat(use_liquefaction))

        self.set_result_csv_data("result", ds_results, name=self.get_parameter("result_name"))
        self.set_result_json_data("metadata",
                                  damage_results,
                                  name=self.get_parameter("result_name") + "_additional_info")

        return True