def test_required_sheets_exist(self):
        """Verify that every model workbook contains all required sheets."""
        for workbook_path in self.model_xlsx_files:
            rootLogger.info(workbook_path)
            # sheet_name=None loads all sheets: mapping of name -> DataFrame.
            sheet_map = pd.read_excel(workbook_path, None)
            # Every required sheet name must appear in the workbook.
            self.assertTrue(
                set(self.required_sheets).issubset(sheet_map.keys()),
                "Required sheet name not found!")
示例#2
0
def calculate_response_for_hazard(hazard, scenario, infrastructure):
    """
    Exposes the components of the infrastructure to a hazard level
    within a scenario.

    :param hazard: The hazard that the infrastructure is to be exposed to.
    :param scenario: The parameters for the scenario being simulated.
    :param infrastructure: Model containing the system components.
    :return: dict mapping the hazard scenario name to the list of results
             describing the state of the infrastructure after exposure.
    """
    # Track how long the exposure calculation takes for this hazard.
    start_time = time.time()

    # Monte-carlo damage state index per sample, per component.
    rootLogger.info("Calculate System Response")
    damage_state_samples = \
        calculate_expected_damage_state_of_components_for_n_simulations(
            infrastructure, scenario, hazard)
    rootLogger.info("System Response: ")

    # Per-sample component loss and functionality, and system-level
    # output and economic loss derived from the damage states.
    (sample_loss,
     sample_func,
     sample_output,
     sample_economic_loss) = infrastructure.calc_output_loss(
        scenario, damage_state_samples)

    # Statistics of the response, per component and per component type.
    component_response_dict, comptype_response_dict = \
        infrastructure.calc_response(
            sample_loss, sample_func, damage_state_samples)

    # Mean output of each output node across all samples.
    # NOTE(review): only the node ids are used here; the node objects
    # themselves are never read.
    infrastructure_output = {
        node_id: np.mean(sample_output[:, node_index])
        for node_index, node_id in enumerate(infrastructure.output_nodes)
    }

    # Log the elapsed time for this hazard level.
    elapsed = timedelta(seconds=(time.time() - start_time))
    rootLogger.info("Hazard {} run time: {}".format(
        hazard.hazard_scenario_name, str(elapsed)))

    # Bundle the result data under the hazard's scenario name.
    return {hazard.hazard_scenario_name: [
        damage_state_samples,
        infrastructure_output,
        component_response_dict,
        comptype_response_dict,
        sample_output,
        sample_economic_loss]}
示例#3
0
    def record_dirs(self):
        """Log the key model/input/output paths and persist them to JSON."""
        rootLogger.info("System model   : " + self.SYS_CONF_FILE_NAME)
        rootLogger.info("Input dir      : " + self.INPUT_PATH)
        rootLogger.info("Output dir     : " + self.OUTPUT_PATH)
        rootLogger.info("Raw output dir : " + self.RAW_OUTPUT_DIR + "\n")

        # Collect the paths in a single mapping so they can be serialised.
        self.dir_dict = {
            "SYS_CONF_FILE_NAME": self.SYS_CONF_FILE_NAME,
            "INPUT_PATH": self.INPUT_PATH,
            "OUTPUT_PATH": self.OUTPUT_PATH,
            "RAW_OUTPUT_DIR": self.RAW_OUTPUT_DIR,
        }

        # Write the paths next to the log file, as <logname>_dirs.json.
        log_stem = os.path.splitext(rootLogger.logfile)[0]
        self.file_with_dirs = log_stem + "_dirs.json"
        with open(self.file_with_dirs, 'w') as dirfile:
            json.dump(self.dir_dict, dirfile)
示例#4
0
def main():
    """
    Entry point: parse command-line arguments, configure logging, run
    the simulation described by the setup file, then post-process and
    visualise the results.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-s", "--setup", type=str,
                        help="Setup file for simulation scenario, and \n"
                             "locations of inputs, outputs, and system model.")
    parser.add_argument("-v", "--verbose", type=str,
                        help="Choose option for logging level from: \n"
                             "DEBUG, INFO, WARNING, ERROR, CRITICAL.")
    args = parser.parse_args()

    # Map the CLI verbosity string to a logging level. Unknown values
    # fall back to DEBUG, matching the behaviour of the previous
    # if/elif chain (which left the default untouched).
    log_levels = {
        "DEBUG": logging.DEBUG,
        "INFO": logging.INFO,
        "WARNING": logging.WARNING,
        "ERROR": logging.ERROR,
        "CRITICAL": logging.CRITICAL,
    }
    level = logging.DEBUG
    if args.verbose is not None:
        level = log_levels.get(args.verbose.upper(), logging.DEBUG)

    rootLogger.set_log_level(level)

    if args.setup is not None:
        rootLogger.info('Simulation initiated...')

        # ---------------------------------------------------------------------
        # Configure simulation model.
        # Read data and control parameters and construct objects.

        config = Configuration(args.setup)
        scenario = Scenario(config)
        hazards = HazardsContainer(config)
        infrastructure = ingest_model(config)

        # ---------------------------------------------------------------------
        # Run simulation.
        # Get the results of running a simulation
        #
        # response_list = [
        #     {},  # hazard level vs component damage state index
        #     {},  # hazard level vs infrastructure output
        #     {},  # hazard level vs component response
        #     {},  # hazard level vs component type response
        #     [],  # array of infrastructure output for each sample
        #     []]  # array infrastructure econ loss for each sample

        response_list = calculate_response(hazards, scenario, infrastructure)

        # ---------------------------------------------------------------------
        # Post simulation processing.
        # After the simulation has run the results are aggregated, saved
        # and the system fragility is calculated.

        write_system_response(response_list, infrastructure, scenario, hazards)
        economic_loss_array = response_list[5]
        plot_mean_econ_loss(scenario, economic_loss_array, hazards)

        if config.HAZARD_INPUT_METHOD == "hazard_array":
            pe_by_component_class(response_list, infrastructure,
                                  scenario, hazards)

        # ---------------------------------------------------------------------
        # Visualizations
        # Construct visualization for system topology

        sys_topology_view = SystemTopology(infrastructure, scenario)
        sys_topology_view.draw_sys_topology(viewcontext="as-built")
        # ---------------------------------------------------------------------

    else:
        # NOTE(review): reached when --setup was not supplied at all, so
        # args.setup is None here ("Input file not found: None").
        print("Input file not found: " + str(args.setup))

    rootLogger.info('End')
示例#5
0
def calculate_response(hazards, scenario, infrastructure):
    """
    Calculate the infrastructure's response to every hazard level by
    calling calculate_response_for_hazard for each hazard, either via
    parmap.map (parallel) or a plain loop (serial), as selected by the
    scenario, then merge the per-hazard results into one structure.

    :param hazards: hazards container.
    :param scenario: Parameters for the simulation.
    :param infrastructure: Model of the infrastructure.
    :return: List of results for each hazard level.
    """
    # Collect one response dict per hazard.
    hazard_responses = []

    # The scenario's parallel flag decides how the exposures are run.
    if scenario.run_parallel_proc:
        rootLogger.info("Start parallel run")
        hazard_responses.extend(
            parmap.map(calculate_response_for_hazard,
                       hazards.get_listOfhazards(),
                       scenario,
                       infrastructure,
                       parallel=scenario.run_parallel_proc))
        rootLogger.info("End parallel run")
    else:
        rootLogger.info("Start serial run")
        for single_hazard in hazards.listOfhazards:
            hazard_responses.append(
                calculate_response_for_hazard(
                    single_hazard, scenario, infrastructure))
        rootLogger.info("End serial run")

    # Slots 0-3 aggregate dicts keyed by hazard name; slots 4-5 collect
    # the per-hazard sample arrays.
    post_processing_list = [
        {},  # hazard level vs component damage state index
        {},  # hazard level vs infrastructure output
        {},  # hazard level vs component response
        {},  # hazard level vs component type response
        [],  # array of infrastructure output per sample
        [],  # array infrastructure econ loss per sample
    ]

    # Merge each hazard's six result entries into the combined structure.
    for response in hazard_responses:
        for hazard_name, results in response.items():
            for slot in range(6):
                if slot < 4:
                    post_processing_list[slot][hazard_name] = results[slot]
                else:
                    post_processing_list[slot].append(results[slot])

    # The two collected lists become numpy arrays.
    for slot in (4, 5):
        post_processing_list[slot] = np.array(post_processing_list[slot])

    # Sum the output over its last axis and orient both arrays as
    # (sample, hazard).
    post_processing_list[4] = np.sum(
        post_processing_list[4], axis=2).transpose()
    post_processing_list[5] = post_processing_list[5].transpose()

    return post_processing_list
示例#6
0
def calculate_expected_damage_state_of_components_for_n_simulations(
        infrastructure, scenario, hazard):
    """
    Calculate the probability that being exposed to a hazard level
    will exceed the given damage levels for each component. A monte
    carlo approach is taken by simulating the exposure for the number
    of samples given in the scenario.

    :param infrastructure: container of the system's components
    :param scenario: Parameters for the scenario
    :param hazard: Level of the hazard
    :return: (num_samples, num_components) int array of the damage state
             index reached by each component in each simulation sample.
    """
    # Seed from the hazard for reproducible "actual run" results;
    # otherwise a fixed seed is used.
    if scenario.run_context:
        # TODO check whether to use seed for actual runs or not
        random_number = np.random.RandomState(seed=hazard.get_seed())
    else:
        # seeding was not used
        random_number = np.random.RandomState(seed=2)

    # record the number of components in the infrastructure
    number_of_components = len(infrastructure.components)

    # zeroed array holding the damage state index per sample, per component
    component_damage_state_ind = np.zeros(
        (scenario.num_samples, number_of_components), dtype=int)

    # uniformly distributed random numbers in (0, 1), one per sample
    # per component
    rnd = random_number.uniform(
        size=(scenario.num_samples, number_of_components))
    rootLogger.debug("Hazard Intensity {}".format(hazard.hazard_scenario_name))

    # iterate through the components in sorted-key order so column index
    # assignment is deterministic
    for index, component_key in enumerate(
            sorted(infrastructure.components.keys())):
        component = infrastructure.components[component_key]

        rootLogger.info(
            "Start calculating probability of component in a damage state.")

        # probability of exceedance for each of the component's damage states
        component_pe_ds = np.zeros(len(component.damage_states))

        # The component's location -- and hence the hazard intensity it
        # experiences -- does not depend on the damage state, so look it
        # up once per component rather than once per damage state.
        # (Hoisted out of the inner loop; assumes the intensity lookup is
        # a pure function of location -- TODO confirm.)
        longitude, latitude = component.get_location()
        hazard_intensity = hazard.get_hazard_intensity_at_location(
            longitude, latitude)

        # evaluate each damage state's response function at this intensity
        for damage_state_index in component.damage_states.keys():
            component_pe_ds[damage_state_index] = \
                component.damage_states[damage_state_index].response_function(
                    hazard_intensity)

        # Drop the default state (DS0 None) because its response will
        # always be zero which will be always be greater than random variable
        component_pe_ds = component_pe_ds[1:]

        rootLogger.info("Calculate System Response")
        rootLogger.info("Component {} : pe_ds {}".format(
            component.component_id, component_pe_ds))

        # This little piece of numpy magic calculates the damage level by
        # summing how many damage states were exceeded.
        #
        # component_pe_ds is usually something like [0.01, 0.12, 0.21, 0.33]
        # rnd[:, index] gives the random numbers created for this component,
        # and [:, np.newaxis] broadcasts each random number across the
        # supplied component_pe_ds.
        # LINK:
        # https://docs.scipy.org/doc/numpy-1.13.0/user/basics.broadcasting.html
        #
        # If the last two numbers in component_pe_ds are greater than the
        # sample number, the comparison > will return:
        #       [False, False, True, True]
        # np.sum converts this to [0, 0, 1, 1] and returns 2 -- the
        # resulting damage level -- for every sample at once.

        # NaN responses are treated as never-exceeded
        component_pe_ds[np.isnan(component_pe_ds)] = -np.inf
        component_damage_state_ind[:, index] = \
            np.sum(component_pe_ds > rnd[:, index][:, np.newaxis], axis=1)

    return component_damage_state_ind
示例#7
0
def pe_by_component_class(response_list, infrastructure, scenario, hazards):
    """
    Calculate probability of exceedance based on component classes,
    compute the system's expected damage ratio, and write the
    analytical outputs to file.

    :param response_list: aggregated simulation results
                          (see calculate_response for the slot layout)
    :param infrastructure: model of the infrastructure
    :param scenario: parameters for the simulation
    :param hazards: hazards container
    :return: None -- results are written to csv/npy files
    """
    # ------------------------------------------------------------------------
    # For Probability of Exceedence calculations based on component failures
    #   Damage state boundaries for Component Type Failures (Substations) are
    #   based on HAZUS MH MR3, p 8-66 to 8-68
    # ------------------------------------------------------------------------

    cp_classes_in_system = np.unique(
        list(infrastructure.get_component_class_list()))

    # group components by their component class
    cp_class_map = {k: [] for k in cp_classes_in_system}
    for comp_id, component in infrastructure.components.items():
        cp_class_map[component.component_class].append(component)

    if infrastructure.system_class == 'Substation':
        cp_classes_costed = \
            [x for x in cp_classes_in_system
             if x not in infrastructure.uncosted_classes]

        # --- System fragility - Based on Failure of Component Classes ---
        comp_class_failures = \
            {cc: np.zeros((scenario.num_samples, hazards.num_hazard_pts))
             for cc in cp_classes_costed}

        comp_class_frag = \
            {cc: np.zeros((scenario.num_samples, hazards.num_hazard_pts))
             for cc in cp_classes_costed}

        # BUGFIX: dict.keys() has no .index() method on Python 3, so the
        # previous `components.keys().index(id)` call raised an
        # AttributeError. Precompute a component-id -> column-index map
        # once, which also removes an O(n) search from the triple loop.
        comp_index_by_id = {
            comp_id: ndx
            for ndx, comp_id in enumerate(infrastructure.components.keys())
        }

        # **************************************************
        # TODO: CHECK COMPONENTS ARE SAVED IN CORRECT ORDER
        # **************************************************
        for j, (scenario_name, hazard_data) in \
                enumerate(hazards.scenario_hazard_data.items()):
            for i in range(scenario.num_samples):
                for compclass in cp_classes_costed:
                    # mean damage state of the class's components for
                    # this sample and hazard
                    for comptype in cp_class_map[compclass]:
                        comp_ndx = comp_index_by_id[comptype.component_id]
                        comp_class_failures[compclass][i, j] += \
                            response_list[0][scenario_name][i, comp_ndx]
                    comp_class_failures[compclass][i, j] /= \
                        len(cp_class_map[compclass])

                    # number of damage state limits exceeded by the mean
                    comp_class_frag[compclass][i, j] = \
                        np.sum(comp_class_failures[compclass][i, j] >
                               infrastructure.ds_lims_compclasses[compclass])

        # Probability of Exceedence -- Based on Failure of Component Classes
        pe_sys_cpfailrate = np.zeros(
            (len(infrastructure.sys_dmg_states), hazards.num_hazard_pts))
        for p in range(hazards.num_hazard_pts):
            for d in range(len(infrastructure.sys_dmg_states)):
                ds_ss_ix = []
                for compclass in cp_classes_costed:
                    ds_ss_ix.append(
                        np.sum(comp_class_frag[compclass][:, p] >= d) /
                        float(scenario.num_samples))
                pe_sys_cpfailrate[d, p] = np.median(ds_ss_ix)

        # --- Save prob exceedance data as npy ---
        np.save(os.path.join(scenario.raw_output_dir, 'pe_sys_cpfailrate.npy'),
                pe_sys_cpfailrate)

    # ------------------------------------------------------------------------
    # Validate damage ratio of the system
    # ------------------------------------------------------------------------

    exp_damage_ratio = np.zeros(
        (len(infrastructure.components), hazards.num_hazard_pts))
    for haz_ndx, hazard in enumerate(hazards.listOfhazards):
        # compute expected damage ratio
        for comp_ndx, component in enumerate(
                infrastructure.components.values()):
            # TODO remove invalid Component accesses !!

            component_pe_ds = np.zeros(len(component.damage_states))
            # The hazard intensity at the component's location does not
            # vary with the damage state, so look it up once.
            longitude, latitude = component.get_location()
            hazard_intensity = hazard.get_hazard_intensity_at_location(
                longitude, latitude)
            for damage_state_index in component.damage_states.keys():
                component_pe_ds[damage_state_index] = \
                    component.damage_states[damage_state_index].\
                    response_function(hazard_intensity)

            # drop DS0 (None); convert exceedance probs to state probs
            component_pe_ds = component_pe_ds[1:]
            pb = pe2pb(component_pe_ds)

            dr = np.array([
                component.damage_states[int(ds)].damage_ratio
                for ds in infrastructure.sys_dmg_states
            ])
            cf = component.cost_fraction
            loss_list = dr * cf
            exp_damage_ratio[comp_ndx, haz_ndx] = np.sum(pb * loss_list)

    # ------------------------------------------------------------------------
    # Write analytical outputs to file
    # ------------------------------------------------------------------------

    # --- Output File --- summary output ---
    outfile_sys_response = os.path.join(scenario.output_path,
                                        'system_response.csv')
    out_cols = ['INTENSITY_MEASURE', 'Economic Loss', 'Mean Output']

    # create the arrays
    comp_response_list = response_list[2]
    economic_loss_array = response_list[5]
    calculated_output_array = response_list[4]

    outdat = {
        out_cols[0]: hazards.hazard_scenario_list,
        out_cols[1]: np.mean(economic_loss_array, axis=0),
        out_cols[2]: np.mean(calculated_output_array, axis=0)
    }
    df = pd.DataFrame(outdat)
    df.to_csv(outfile_sys_response, sep=',', index=False, columns=out_cols)

    # --- Output File --- response of each COMPONENT to hazard ---
    outfile_comp_resp = os.path.join(scenario.output_path,
                                     'component_response.csv')
    component_resp_df = pd.DataFrame(comp_response_list)
    component_resp_df.index.names = ['component_id', 'response']
    component_resp_df.columns = hazards.hazard_scenario_name
    component_resp_df.to_csv(outfile_comp_resp,
                             sep=',',
                             index_label=['component_id', 'response'])

    # --- Output File --- mean loss of component ---
    outfile_comp_loss = os.path.join(scenario.output_path,
                                     'component_meanloss.csv')
    component_loss_df = component_resp_df.iloc[
        component_resp_df.index.get_level_values(1) == 'loss_mean']
    component_loss_df.reset_index(level='response', inplace=True)
    component_loss_df = component_loss_df.drop('response', axis=1)
    component_loss_df.to_csv(outfile_comp_loss,
                             sep=',',
                             index_label=['component_id'])

    # ------------------------------------------------------------------------
    # *** Saving vars ***
    # ------------------------------------------------------------------------

    if scenario.save_vars_npy:
        np.save(
            os.path.join(scenario.raw_output_dir, 'economic_loss_array.npy'),
            economic_loss_array)

        np.save(
            os.path.join(scenario.raw_output_dir,
                         'calculated_output_array.npy'),
            calculated_output_array)

        np.save(os.path.join(scenario.raw_output_dir, 'exp_damage_ratio.npy'),
                exp_damage_ratio)

    # ------------------------------------------------------------------------
    # BUGFIX: this log line was previously inside the save_vars_npy branch,
    # but the csv outputs above are written unconditionally, so it is
    # logged regardless of the npy-saving option.
    rootLogger.info("Outputs saved in: " + scenario.output_path)