Example #1
    def test_economic_loss_comparison_for_system_sanity(self):

        root_dir = os.path.dirname(os.path.abspath(__file__))
        conf_file_path = os.path.join(root_dir, "simulation_setup",
                                      "test_scenario_pscoal_600MW.json")

        config = Configuration(conf_file_path)
        scenario = Scenario(config)
        hazards = HazardsContainer(config)
        infrastructure = ingest_model(config)

        response_list = calculate_response(hazards, scenario, infrastructure)
        economic_loss_array = response_list[5]

        input_pickle_filename \
            = os.path.join(root_dir, "historical_data",
                           "economic_loss_for_system_sanity_testing.p")

        with open(input_pickle_filename, 'rb') as f:
            historical_economic_loss_array = pickle.load(f)

        self.assertTrue(
            np.array_equal(economic_loss_array,
                           historical_economic_loss_array),
            "Mismatch with historical baseline: "
            "lengths {} vs {}".format(len(economic_loss_array),
                                      len(historical_economic_loss_array)))
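
If the pickled baseline ever needs to be regenerated after an intentional model change, a minimal sketch along the following lines could write the freshly computed array to the same path the test reads. The helper itself is hypothetical and not part of the test suite above.

import os
import pickle

def save_sanity_baseline(economic_loss_array, root_dir):
    # Hypothetical helper: overwrite the historical baseline that
    # test_economic_loss_comparison_for_system_sanity() compares against.
    out_path = os.path.join(root_dir, "historical_data",
                            "economic_loss_for_system_sanity_testing.p")
    with open(out_path, 'wb') as f:
        pickle.dump(economic_loss_array, f)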
Example #2
    def test_run_scenario_upper_limit(self):
        root_dir = os.path.dirname(os.path.abspath(__file__))
        conf_file_path = os.path.join(root_dir, "simulation_setup",
                                      "test_scenario_upper_limit.json")
        config = Configuration(conf_file_path)
        scenario = Scenario(config)
        hazards = HazardsContainer(config)
        infrastructure = ingest_model(config)
        response_list = calculate_response(hazards, scenario, infrastructure)

        self.assertEqual(int(response_list[4][0][0]), 0)
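
The integer cast above discards any fractional output; if the computed output can be a small non-zero float, a tolerance-based check reads more explicitly. A sketch, assuming `response_list[4]` holds the per-sample output array as in the test; the helper name is hypothetical.

import numpy as np

def assert_output_is_zero(response_list, atol=1e-9):
    # Hypothetical helper: check that the first sample's first output value
    # collapses to (numerically) zero at the upper hazard limit.
    calculated_output = np.asarray(response_list[4], dtype=float)
    assert np.allclose(calculated_output[0][0], 0.0, atol=atol)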
Example #3
    def setUp(self):

        self.conf_file_paths = []

        # Collect every scenario config file (*.json) under ./simulation_setup
        setup_root = os.path.join(os.getcwd(), 'simulation_setup')
        for dir_path, dir_names, file_names in os.walk(setup_root):
            for file_name in file_names:
                if file_name.endswith('.json'):
                    conf_file_path = os.path.join(dir_path, file_name)
                    self.conf_file_paths.append(conf_file_path)

        self.confs = []
        for conf_file_path in self.conf_file_paths:
            conf = Configuration(conf_file_path)
            self.confs.append(conf)
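
An equivalent way to collect the config paths, sketched with pathlib; this is assumed to match the os.walk version above when every *.json file under simulation_setup is wanted.

from pathlib import Path

setup_dir = Path.cwd() / 'simulation_setup'
conf_file_paths = sorted(str(p) for p in setup_dir.rglob('*.json'))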
Example #4
    def test_run_scenario_lower_limit(self):
        root_dir = os.path.dirname(os.path.abspath(__file__))
        conf_file_path = os.path.join(root_dir, "simulation_setup",
                                      "test_scenario_lower_limit.json")

        config = Configuration(conf_file_path)
        scenario = Scenario(config)
        hazards = HazardsContainer(config)
        infrastructure = ingest_model(config)
        response_list = calculate_response(hazards, scenario, infrastructure)

        # Read the designed output capacity from the system model file
        with open(config.SYS_CONF_FILE, 'r') as f:
            json_infra_model = json.load(f)
        output_node_capacity = \
            json_infra_model[
                "sysout_setup"]["output_node"]["output_node_capacity"]

        self.assertEqual(int(response_list[4][0][0]),
                         int(output_node_capacity))
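
The nested lookup above implies a system-model JSON of roughly the following shape. The key names are taken from the test; the capacity value is purely illustrative.

# Illustrative fragment only: key names mirror the lookup in the test,
# the capacity value is made up.
json_infra_model = {
    "sysout_setup": {
        "output_node": {
            "output_node_capacity": 300
        }
    }
}
capacity = \
    json_infra_model["sysout_setup"]["output_node"]["output_node_capacity"]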
Example #5
    def test_compare_economic_loss_for_existing_models(self):

        print("\n\n>> Initiating comparison with existing " "models...\n")

        conf_file_paths = []
        root_dir = os.path.dirname(os.path.abspath(__file__))
        for root, dir_names, file_names in os.walk(root_dir):
            for file_name in file_names:
                if "tests" in root:
                    if "simulation_setup" in root:
                        # adjusted to keep track of old values in old branches
                        if 'test_scenario_pscoal_600MW' not in file_name and \
                                'test_setup' not in file_name:
                            conf_file_paths.append(
                                os.path.join(root, file_name))

        for conf_file_path in conf_file_paths:
            if os.path.isfile(conf_file_path):
                print("\nMatching results for : " +
                      os.path.basename(conf_file_path))

                config = Configuration(conf_file_path)
                scenario = Scenario(config)
                hazards = HazardsContainer(config)
                infrastructure = ingest_model(config)
                response_list = calculate_response(hazards, scenario,
                                                   infrastructure)
                economic_loss_of_model = response_list[5]
                pickle_filename = os.path.join(
                    root_dir, 'historical_data',
                    "economic_loss_for_" + config.SCENARIO_NAME + '.p')

                with open(pickle_filename, 'rb') as f:
                    history_economic_loss_for_model = pickle.load(f)

                self.assertTrue(
                    np.array_equal(economic_loss_of_model,
                                   history_economic_loss_for_model),
                    conf_file_path)
                print("OK")
Example #6
def main():

    # --------------------------------------------------------------------------
    # *** BEGIN : SETUP ***

    # Read in SETUP data
    parser = argparse.ArgumentParser()
    parser.add_argument("-s",
                        "--setup",
                        type=str,
                        help="Setup file for simulation scenario, and \n"
                        "locations of inputs, outputs, and system model.")
    parser.add_argument("-v",
                        "--verbose",
                        type=str,
                        help="Choose option for logging level from: \n"
                        "DEBUG, INFO, WARNING, ERROR, CRITICAL.")
    parser.add_argument("-d",
                        "--dirfile",
                        type=str,
                        help="JSON file with location of input/output files "
                        "from past simulation that is to be analysed\n.")
    args = parser.parse_args()

    if args.setup is None:
        raise ValueError("Must provide a correct setup argument: "
                         "`-s` or `--setup`,\n"
                         "Setup file for simulation scenario, and \n"
                         "locations of inputs, outputs, and system model.\n")

    if args.dirfile is None or not os.path.exists(args.dirfile):
        raise ValueError("Could not locate the file with directory locations "
                         "of results from pre-run simulations.\n")

    # Define input files, output location, scenario inputs
    with open(args.dirfile, 'r') as dat:
        dir_dict = json.load(dat)

    # Configure simulation model.
    # Read data and control parameters and construct objects.
    config = Configuration(args.setup,
                           run_mode='analysis',
                           output_path=dir_dict["OUTPUT_PATH"])
    scenario = Scenario(config)
    hazards = HazardsContainer(config)
    infrastructure = ingest_model(config)

    if config.SYS_CONF_FILE_NAME != dir_dict["SYS_CONF_FILE_NAME"]:
        raise NameError("Names of the supplied system model files did not "
                        "match. Aborting.\n")

    SYS_CONFIG_FILE = config.SYS_CONF_FILE
    OUTPUT_PATH = dir_dict["OUTPUT_PATH"]
    RAW_OUTPUT_DIR = dir_dict["RAW_OUTPUT_DIR"]

    RESTORATION_STREAMS = scenario.restoration_streams
    FOCAL_HAZARD_SCENARIOS = hazards.focal_hazard_scenarios

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # READ in raw output files from prior analysis of system fragility

    economic_loss_array = \
        np.load(os.path.join(
            RAW_OUTPUT_DIR, 'economic_loss_array.npy'))

    calculated_output_array = \
        np.load(os.path.join(
            RAW_OUTPUT_DIR, 'calculated_output_array.npy'))

    exp_damage_ratio = \
        np.load(os.path.join(
            RAW_OUTPUT_DIR, 'exp_damage_ratio.npy'))

    sys_frag = \
        np.load(os.path.join(
            RAW_OUTPUT_DIR, 'sys_frag.npy'))

    # TODO: NEED TO IMPLEMENT.
    # output_array_given_recovery = \
    #     np.load(os.path.join(
    #         RAW_OUTPUT_DIR, 'output_array_given_recovery.npy'))
    #
    # required_time = \
    #     np.load(os.path.join(RAW_OUTPUT_DIR, 'required_time.npy'))

    if infrastructure.system_class.lower() == 'powerstation':
        pe_sys = np.load(os.path.join(RAW_OUTPUT_DIR, 'pe_sys_econloss.npy'))
    elif infrastructure.system_class.lower() == 'substation':
        pe_sys = np.load(os.path.join(RAW_OUTPUT_DIR, 'pe_sys_cpfailrate.npy'))
    elif infrastructure.system_class.lower() == 'potablewatertreatmentplant':
        pe_sys = np.load(os.path.join(RAW_OUTPUT_DIR, 'pe_sys_econloss.npy'))

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Read in SIMULATED HAZARD RESPONSE for <COMPONENT TYPES>

    comptype_resp_df = \
        pd.read_csv(os.path.join(OUTPUT_PATH, 'comptype_response.csv'),
                    index_col=['component_type', 'response'],
                    skipinitialspace=True)
    comptype_resp_df.columns = hazards.hazard_scenario_name

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Nodes not considered in the loss calculations
    # NEED TO MOVE THESE TO A MORE LOGICAL PLACE

    uncosted_comptypes = [
        'CONN_NODE', 'SYSTEM_INPUT', 'SYSTEM_OUTPUT', 'JUNCTION_NODE',
        'JUNCTION', 'Generation Source', 'Grounding'
    ]

    cp_types_in_system = infrastructure.get_component_types()
    cp_types_costed = [
        x for x in cp_types_in_system if x not in uncosted_comptypes
    ]

    comptype_resp_df = comptype_resp_df.drop(uncosted_comptypes,
                                             level='component_type',
                                             axis=0)

    # Get list of only those components that are included in cost calculations:
    cpmap = {
        c: sorted(list(infrastructure.get_components_for_type(c)))
        for c in cp_types_in_system
    }
    comps_costed = [v for x in cp_types_costed for v in cpmap[x]]

    nodes_all = sorted(infrastructure.components.keys())
    comps_uncosted = list(set(nodes_all).difference(comps_costed))

    ctype_failure_mean = comptype_resp_df.xs('num_failures', level='response')
    ctype_failure_mean.columns.names = ['Scenario Name']

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Value of component types relative to system value

    comptype_value_dict = {}
    for ct in sorted(cp_types_costed):
        comp_val = [
            infrastructure.components[comp_id].cost_fraction
            for comp_id in cpmap[ct]
        ]
        comptype_value_dict[ct] = sum(comp_val)

    comptype_value_list = [
        comptype_value_dict[ct] for ct in sorted(comptype_value_dict.keys())
    ]

    component_response = \
        pd.read_csv(os.path.join(OUTPUT_PATH, 'component_response.csv'),
                    index_col=['component_id', 'response'],
                    skiprows=0, skipinitialspace=True)
    # component_response = component_response.drop(
    #     comps_uncosted, level='component_id', axis=0)
    component_meanloss = \
        component_response.query('response == "loss_mean"').\
            reset_index('response').drop('response', axis=1)

    # NOTE: the uncosted component types have already been dropped from
    # comptype_resp_df above, so the drop does not need to be repeated here.

    # TODO : ADD THIS BACK IN!!
    # # Read in the <SYSTEM FRAGILITY MODEL> fitted to simulated data
    # system_fragility_mdl = \
    #     pd.read_csv(os.path.join(OUTPUT_PATH, 'system_model_fragility.csv'),
    #                 index_col=0)
    # system_fragility_mdl.index.name = "Damage States"

    # Define the system as a network, with components as nodes
    # Network setup with igraph
    G = infrastructure._component_graph
    # --------------------------------------------------------------------------
    # *** END : SETUP ***

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Set weighting criteria for edges:
    # This influences the path chosen for restoration
    # Options are:
    #   [1] None
    #   [2] 'MIN_COST'
    #   [3] 'MIN_TIME'
    weight_criteria = 'MIN_COST'

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    col_tp = []
    for h in FOCAL_HAZARD_SCENARIOS:
        col_tp.extend(zip([h] * len(RESTORATION_STREAMS), RESTORATION_STREAMS))
    mcols = pd.MultiIndex.from_tuples(col_tp,
                                      names=['Hazard', 'Restoration Streams'])
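    # Illustrative example of the resulting columns (values made up):
    # FOCAL_HAZARD_SCENARIOS = ['0.300'] with RESTORATION_STREAMS = [2, 4]
    # would give columns [('0.300', 2), ('0.300', 4)].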
    time_to_full_restoration_for_lines_df = \
        pd.DataFrame(index=infrastructure.output_nodes.keys(), columns=mcols)
    time_to_full_restoration_for_lines_df.index.name = 'Output Lines'

    # --------------------------------------------------------------------------
    # *** BEGIN : FOCAL_HAZARD_SCENARIOS FOR LOOP ***
    for sc_haz_str in FOCAL_HAZARD_SCENARIOS:

        # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        # Differentiated setup based on hazard input type - scenario vs array
        # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        sc_haz_str = "{:.3f}".format(float(sc_haz_str))
        if config.HAZARD_INPUT_METHOD == "hazard_array":
            scenario_header = hazards.hazard_scenario_name[
                hazards.hazard_scenario_list.index(sc_haz_str)]
        elif config.HAZARD_INPUT_METHOD == "scenario_file":
            scenario_header = sc_haz_str
        scenario_tag = str(sc_haz_str) \
                       + " " + hazards.intensity_measure_unit \
                       + " " + hazards.intensity_measure_param

        # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        # Extract scenario-specific values from the 'hazard response' dataframe
        # Scenario response: by component type
        # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

        ctype_resp_scenario \
            = comptype_resp_df[scenario_header].unstack(level=-1)
        ctype_resp_scenario = ctype_resp_scenario.sort_index()

        ctype_resp_scenario['loss_per_type']\
            = ctype_resp_scenario['loss_mean']/comptype_value_list

        ctype_resp_scenario['loss_per_type_std']\
            = ctype_resp_scenario['loss_std'] \
              * [len(list(infrastructure.get_components_for_type(ct)))
                 for ct in ctype_resp_scenario.index.values.tolist()]

        ctype_resp_sorted = ctype_resp_scenario.sort_values(by='loss_tot',
                                                            ascending=False)

        ctype_loss_vals_tot \
            = ctype_resp_sorted['loss_tot'].values * 100
        ctype_loss_by_type \
            = ctype_resp_sorted['loss_per_type'].values * 100
        ctype_lossbytype_rank = \
            len(ctype_loss_by_type) - \
            stats.rankdata(ctype_loss_by_type, method='dense').astype(int)
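        # The dense ranks are inverted so that a smaller rank number
        # corresponds to a larger per-type loss.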

        # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        # Economic loss percentage for component types

        # fig_name = 'fig_SC_' + sc_haz_str + '_loss_sys_vs_comptype_s0.png'
        # draw_component_loss_barchart_bidir(ctype_loss_vals_tot,
        #                                 ctype_loss_by_type,
        #                                 ctype_lossbytype_rank,
        #                                 ctype_resp_sorted,
        #                                 scenario_tag,
        #                                 hazards.hazard_type,
        #                                 OUTPUT_PATH,
        #                                 fig_name)

        fig_name = 'fig_SC_' + sc_haz_str + '_loss_sys_vs_comptype_s1.png'
        draw_component_loss_barchart_s1(ctype_resp_sorted, scenario_tag,
                                        hazards.hazard_type, OUTPUT_PATH,
                                        fig_name)

        fig_name = 'fig_SC_' + sc_haz_str + '_loss_sys_vs_comptype_s2.png'
        draw_component_loss_barchart_s2(ctype_resp_sorted, scenario_tag,
                                        hazards.hazard_type, OUTPUT_PATH,
                                        fig_name)

        fig_name = 'fig_SC_' + sc_haz_str + '_loss_sys_vs_comptype_s3.png'
        draw_component_loss_barchart_s3(ctype_resp_sorted, scenario_tag,
                                        hazards.hazard_type, OUTPUT_PATH,
                                        fig_name)

        # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        # FAILURE RATE -- PERCENTAGE of component types

        fig_name = 'fig_SC_' + sc_haz_str + '_comptype_failures.png'
        draw_component_failure_barchart(uncosted_comptypes, ctype_failure_mean,
                                        scenario_header, scenario_tag,
                                        hazards.hazard_type, OUTPUT_PATH,
                                        fig_name)

        # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        # RESTORATION PROGNOSIS for specified scenarios

        component_fullrst_time, ctype_scenario_outcomes = \
            calc_comptype_damage_scenario_given_hazard(infrastructure,
                                                       scenario,
                                                       hazards,
                                                       ctype_resp_sorted,
                                                       component_response,
                                                       cp_types_costed,
                                                       scenario_header)

        # All the nodes that need to be fixed for each output node:
        repair_list_combined = prep_repair_list(
            infrastructure, component_meanloss, component_fullrst_time,
            comps_uncosted, weight_criteria, scenario_header)

        repair_path = copy.deepcopy(repair_list_combined)
        output_node_list = list(infrastructure.output_nodes.keys())

        for num_rst_steams in RESTORATION_STREAMS:
            # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            # SYSTEM RESTORATION for given scenario & restoration setup
            rst_setup_filename = 'restoration_setup_' + sc_haz_str + '_' \
                                 + hazards.intensity_measure_unit + '_' \
                                 + hazards.intensity_measure_param + '.csv'

            rst_setup_df = calc_restoration_setup(
                component_meanloss, output_node_list, comps_uncosted,
                repair_list_combined, num_rst_steams, RESTORATION_OFFSET,
                component_fullrst_time, scenario.output_path, scenario_header,
                rst_setup_filename)

            # Check if nothing to repair, i.e. if repair list is empty:
            comps_to_repair = rst_setup_df.index.values.tolist()
            if not comps_to_repair:
                print("\n*** Scenario: " + scenario_tag)
                print("Nothing to repair. Time to repair is zero.")
                print("Skipping repair visualisation for this scenario. \n")
                break

            fig_rst_gantt_name = \
                'fig_SC_' + sc_haz_str + '_str' + str(num_rst_steams) + \
                '_restoration.png'
            restoration_timeline_array, time_to_full_restoration_for_lines = \
                vis_restoration_process(
                    scenario,
                    infrastructure,
                    rst_setup_df,
                    num_rst_steams,
                    repair_path,
                    fig_rst_gantt_name,
                    scenario_tag,
                    hazards.hazard_type
                    )

            time_to_full_restoration_for_lines_df[(sc_haz_str, num_rst_steams)]\
                = [time_to_full_restoration_for_lines[x]
                   for x in output_node_list]

            # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            # COMPONENT CRITICALITY for given scenario & restoration setup

            fig_name = 'fig_SC_' + sc_haz_str + '_str' +\
                       str(num_rst_steams) + '_component_criticality.png'
            component_criticality(infrastructure, scenario,
                                  ctype_scenario_outcomes, hazards.hazard_type,
                                  scenario_tag, fig_name)

    # --------------------------------------------------------------------------
    # *** END : FOCAL_HAZARD_SCENARIOS FOR LOOP ***

    time_to_full_restoration_for_lines_csv = \
        os.path.join(OUTPUT_PATH, 'line_restoration_prognosis.csv')
    time_to_full_restoration_for_lines_df.to_csv(
        time_to_full_restoration_for_lines_csv, sep=',')

    print("--- --- --- --- --- --- --- --- ---")
    print(Fore.YELLOW + "Scenario loss analysis complete." + Fore.RESET + "\n")
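
The JSON file passed via -d/--dirfile only needs the keys read above. An assumed minimal example of its shape; the paths and the file name are placeholders.

# Assumed shape of the --dirfile JSON; only these keys are used above.
dir_dict_example = {
    "OUTPUT_PATH": "/path/to/previous/run/output",
    "RAW_OUTPUT_DIR": "/path/to/previous/run/output/raw_output",
    "SYS_CONF_FILE_NAME": "system_model.json"
}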
Example #7
def main():
    # ------------------------------------------------------------------------
    # Read in SETUP data
    parser = argparse.ArgumentParser()
    parser.add_argument("-s", "--setup", type=str,
                        help="Setup file for simulation scenario, and \n"
                             "locations of inputs, outputs, and system model.")
    parser.add_argument("-v", "--verbose",  type=str,
                        help="Choose option for logging level from: \n"
                             "DEBUG, INFO, WARNING, ERROR, CRITICAL.")
    parser.add_argument("-d", "--dirfile", type=str,
                        help="JSON file with location of input/output files "
                             "from past simulation that is to be analysed\n.")
    args = parser.parse_args()

    if args.setup is None:
        raise ValueError("Must provide a correct setup argument: "
                         "`-s` or `--setup`,\n"
                         "Setup file for simulation scenario, and \n"
                         "locations of inputs, outputs, and system model.\n")

    if args.dirfile is None or not os.path.exists(args.dirfile):
        raise ValueError("Could not locate the file with directory locations "
                         "of results from pre-run simulations.\n")

    # Define input files, output location, scenario inputs
    with open(args.dirfile, 'r') as dat:
        dir_dict = json.load(dat)

    # Configure simulation model.
    # Read data and control parameters and construct objects.
    config = Configuration(args.setup,
                           run_mode='analysis',
                           output_path=dir_dict["OUTPUT_PATH"])
    scenario = Scenario(config)
    hazards = HazardsContainer(config)
    infrastructure = ingest_model(config)

    if config.SYS_CONF_FILE_NAME != dir_dict["SYS_CONF_FILE_NAME"]:
        raise NameError("Names of the supplied system model files did not "
                        "match. Aborting.\n")

    # --------------------------------------------------------------------------

    OUTPUT_PATH = dir_dict["OUTPUT_PATH"]
    RAW_OUTPUT_DIR = dir_dict["RAW_OUTPUT_DIR"]
    hazard_scenarios = hazards.hazard_scenario_list

    sys_limit_states = infrastructure.get_system_damage_states()
    # one_comp = infrastructure.components.values()[0]
    # sys_limit_states = [one_comp.damage_states[ds].damage_state_name
    #                     for ds in one_comp.damage_states]

    # Test switches
    FIT_PE_DATA = scenario.fit_pe_data
    # SWITCH_FIT_RESTORATION_DATA = scenario.SWITCH_FIT_RESTORATION_DATA
    # RESTORATION_TIME_RANGE = scenario.restoration_time_range

    # ------------------------------------------------------------------------
    # READ in raw output files from prior analysis of system fragility

    economic_loss_array = \
        np.load(os.path.join(
            RAW_OUTPUT_DIR, 'economic_loss_array.npy'))

    calculated_output_array = \
        np.load(os.path.join(
            RAW_OUTPUT_DIR, 'calculated_output_array.npy'))

    exp_damage_ratio = \
        np.load(os.path.join(
            RAW_OUTPUT_DIR, 'exp_damage_ratio.npy'))

    sys_frag = \
        np.load(os.path.join(
            RAW_OUTPUT_DIR, 'sys_frag.npy'))

    # output_array_given_recovery = \
    #     np.load(os.path.join(
    #         RAW_OUTPUT_DIR, 'output_array_given_recovery.npy'))

    # required_time = \
    #     np.load(os.path.join(RAW_OUTPUT_DIR, 'required_time.npy'))

    # --------------------------------------------------------------------------

    if infrastructure.system_class.lower() == 'powerstation':
        pe_sys = np.load(os.path.join(RAW_OUTPUT_DIR, 'pe_sys_econloss.npy'))
    elif infrastructure.system_class.lower() == 'substation':
        pe_sys = np.load(os.path.join(RAW_OUTPUT_DIR, 'pe_sys_cpfailrate.npy'))
    # elif infrastructure.system_class.lower() == 'substation':
    #     pe_sys = np.load(os.path.join(RAW_OUTPUT_DIR, 'pe_sys_econloss.npy'))
    elif infrastructure.system_class.lower() in [
        "potablewatertreatmentplant", "pwtp",
        "wastewatertreatmentplant", "wwtp",
        "watertreatmentplant", "wtp"]:
        pe_sys = np.load(os.path.join(RAW_OUTPUT_DIR, 'pe_sys_econloss.npy'))

    # --------------------------------------------------------------------------
    # Calculate & Plot Fitted Models

    if FIT_PE_DATA:
        fit_prob_exceed_model(hazard_scenarios,
                              pe_sys,
                              sys_limit_states,
                              OUTPUT_PATH,
                              config)
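
The branching on system_class above repeatedly loads one of two raw output files; the same dispatch can be sketched as a lookup table. The class names and file names are taken from the branches above, while infrastructure and RAW_OUTPUT_DIR are assumed to come from the surrounding function.

import os
import numpy as np

# Map each recognised system class to the raw pe_sys file it uses.
PE_SYS_FILE_BY_CLASS = {
    'powerstation': 'pe_sys_econloss.npy',
    'substation': 'pe_sys_cpfailrate.npy',
    'potablewatertreatmentplant': 'pe_sys_econloss.npy',
    'pwtp': 'pe_sys_econloss.npy',
    'wastewatertreatmentplant': 'pe_sys_econloss.npy',
    'wwtp': 'pe_sys_econloss.npy',
    'watertreatmentplant': 'pe_sys_econloss.npy',
    'wtp': 'pe_sys_econloss.npy',
}

sys_class = infrastructure.system_class.lower()
pe_sys_file = PE_SYS_FILE_BY_CLASS.get(sys_class)
if pe_sys_file is None:
    raise ValueError("Unrecognised system class: " + sys_class)
pe_sys = np.load(os.path.join(RAW_OUTPUT_DIR, pe_sys_file))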
Example #8
def main():

    parser = argparse.ArgumentParser()
    parser.add_argument("-s", "--setup", type=str,
                        help="Setup file for simulation scenario, and \n"
                             "locations of inputs, outputs, and system model.")
    parser.add_argument("-v", "--verbose",  type=str,
                        help="Choose option for logging level from: \n"
                             "DEBUG, INFO, WARNING, ERROR, CRITICAL.")
    args = parser.parse_args()

    level = logging.DEBUG

    if args.verbose is not None:
        if args.verbose.upper() == "DEBUG":
            level = logging.DEBUG

        elif args.verbose.upper() == "INFO":
            level = logging.INFO

        elif args.verbose.upper() == "WARNING":
            level = logging.WARNING

        elif args.verbose.upper() == "ERROR":
            level = logging.ERROR

        elif args.verbose.upper() == "CRITICAL":
            level = logging.CRITICAL

    rootLogger.set_log_level(level)

    if args.setup is not None:
        rootLogger.info('Simulation initiated...')

        # ---------------------------------------------------------------------
        # Configure simulation model.
        # Read data and control parameters and construct objects.

        config = Configuration(args.setup)
        scenario = Scenario(config)
        hazards = HazardsContainer(config)
        infrastructure = ingest_model(config)

        # ---------------------------------------------------------------------
        # Run simulation.
        # Get the results of running a simulation
        #
        # response_list = [
        #     {},  # hazard level vs component damage state index
        #     {},  # hazard level vs infrastructure output
        #     {},  # hazard level vs component response
        #     {},  # hazard level vs component type response
        #     [],  # array of infrastructure output for each sample
        #     []]  # array infrastructure econ loss for each sample

        response_list = calculate_response(hazards, scenario, infrastructure)

        # ---------------------------------------------------------------------
        # Post simulation processing.
        # After the simulation has run the results are aggregated, saved
        # and the system fragility is calculated.

        write_system_response(response_list, infrastructure, scenario, hazards)
        economic_loss_array = response_list[5]
        plot_mean_econ_loss(scenario, economic_loss_array, hazards)

        if config.HAZARD_INPUT_METHOD == "hazard_array":
            pe_by_component_class(response_list, infrastructure,
                                  scenario, hazards)

        # ---------------------------------------------------------------------
        # Visualizations
        # Construct visualization for system topology

        sys_topology_view = SystemTopology(infrastructure, scenario)
        sys_topology_view.draw_sys_topology(viewcontext="as-built")
        # ---------------------------------------------------------------------

    else:
        print("No setup file provided. "
              "Use `-s` or `--setup` to specify the simulation setup file.")

    rootLogger.info('End')
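
The if/elif ladder above maps the --verbose string onto a logging level. The same mapping can be sketched more compactly with getattr; the helper name is hypothetical and the DEBUG default matches the code above.

import logging

def resolve_log_level(verbose_arg, default=logging.DEBUG):
    # Map e.g. "info" -> logging.INFO; fall back to the default for
    # missing or unrecognised values.
    if not verbose_arg:
        return default
    level = getattr(logging, verbose_arg.upper(), default)
    return level if isinstance(level, int) else default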