Esempio n. 1
0
def load_args():
    """Parse command line arguments, validate them and prepare the output dir.

    Returns the parsed argparse namespace.
    """
    parser = ArgumentParser()
    parser.add_argument("rrup", help="path to RRUP file", type=Path)
    parser.add_argument(
        "--imcsv",
        action="append",
        nargs=2,
        required=True,
        help="path to IM file and label. can be used more than once to plot multiple sets of data",
    )
    parser.add_argument(
        "--config",
        type=Path,
        help="path to .yaml empirical config file (requires SRF info)",
    )
    parser.add_argument("--srf", help="path to srf info file", type=Path)
    # GMPE distance sampling parameters
    parser.add_argument(
        "--dist_min", type=float, default=0.1, help="GMPE param DistMin, default 0.1 km"
    )
    parser.add_argument(
        "--dist_max",
        type=float,
        default=100.0,
        help="GMPE param DistMax, default 100.0 km",
    )
    parser.add_argument(
        "--n_val", type=int, default=51, help="GMPE param n_val, default 51"
    )
    parser.add_argument("--bars", action="store_true", help="also plot error bars")
    parser.add_argument(
        "--out_dir", type=Path, default=Path.cwd(), help="output folder to place plot"
    )
    parser.add_argument(
        "--run_name",
        default="event-yyyymmdd_location_mMpM_sim-yyyymmddhhmm",
        help="run_name for title and filename",
    )
    parser.add_argument("--comp", help="component", default="geom")

    args = parser.parse_args()
    validate_args(args)
    setup_dir(args.out_dir)
    return args
Esempio n. 2
0
def main():
    """Entry point: resolve IM options from arguments and run the calculation."""
    args = load_args()

    file_type = calc.FILE_TYPE_DICT[args.file_type]
    run_type = calc.META_TYPE_DICT[args.run_type]

    ims = args.im

    # Periods are validated once and shared by the period-based IMs.
    valid_periods = calc.validate_period(args.period, args.extended_period)
    im_options = {name: valid_periods for name in ("pSA", "SDI") if name in ims}
    if "FAS" in ims:
        im_options["FAS"] = calc.validate_fas_frequency(args.fas_frequency)

    # Create output dir (plus the sub-folder unless simple output was requested)
    utils.setup_dir(args.output_path)
    if not args.simple_output:
        utils.setup_dir(os.path.join(args.output_path, calc.OUTPUT_SUBFOLDER))

    # TODO: this may need to be updated to read file if the length of list becomes an issue
    station_names = args.station_names

    if args.advanced_ims is not None:
        # Advanced IMs dictate their own component set and need extra config.
        components = advanced_IM_factory.COMP_DICT.keys()
        advanced_im_config = advanced_IM_factory.advanced_im_config(
            args.advanced_ims, args.advanced_im_config, args.OpenSees_path
        )
    else:
        components = args.components
        advanced_im_config = None

    # Run the calculation across multiple processes.
    calc.compute_measures_multiprocess(
        args.input_path,
        file_type,
        wave_type=None,
        station_names=station_names,
        ims=ims,
        comp=components,
        im_options=im_options,
        output=args.output_path,
        identifier=args.identifier,
        rupture=args.rupture,
        run_type=run_type,
        version=args.version,
        process=args.process,
        simple_output=args.simple_output,
        units=args.units,
        advanced_im_config=advanced_im_config,
    )

    print("Calculations are output to {}".format(args.output_path))
def check_data(s1, s2):
    """Dump both timeseries objects to text files, diff them, then clean up."""
    utils.setup_dir(TXT_DIR_1)
    utils.setup_dir(TXT_DIR_2)
    s1.all2txt(prefix="./{}/".format(TXT_DIR_1))
    s2.all2txt(prefix="./{}/".format(TXT_DIR_2))

    # compare at most the first 200 generated files pairwise
    for fname in os.listdir(TXT_DIR_1)[:200]:
        path_1 = os.path.join(TXT_DIR_1, fname)
        path_2 = os.path.join(TXT_DIR_2, fname)
        out, err = shared.exe("diff {} {}".format(path_1, path_2))

    shutil.rmtree(TXT_DIR_1)
    shutil.rmtree(TXT_DIR_2)
Esempio n. 4
0
def create_temp_dirs(sim_dir, outer_dir_name, inner_dir_name=""):
    """
    creates two nested temp dirs containing files to be tared
    :param sim_dir: path to realization folder
    :param outer_dir_name: name of temporary dir for storing submission/lf related files to be tared
    :param inner_dir_name: name of sub_dir inside the temporary dir for storing submission/lf related files to be tared
    :return: paths to outer_dir and inner dir (inner dir is "" when no inner_dir_name was given)
    """
    outer_dir = os.path.join(sim_dir, outer_dir_name)
    utils.setup_dir(outer_dir)
    inner_dir = ""
    # BUG FIX: was `inner_dir_name is not ""`, an identity comparison with a
    # string literal (CPython SyntaxWarning since 3.8, and not guaranteed to
    # behave like equality); use a plain equality/truthiness check instead.
    if inner_dir_name != "":
        inner_dir = os.path.join(sim_dir, outer_dir_name, inner_dir_name)
        utils.setup_dir(inner_dir)
    return outer_dir, inner_dir
def main():
    """Entry point: parse arguments, prepare the output dir, run the empirical calc."""
    args = load_args()
    setup_dir(args.output)

    # Forward everything positionally, in the order calculate_empirical expects.
    calculate_empirical(
        args.identifier, args.srf_info, args.output, args.config,
        args.stations, args.vs30_file, args.vs30_default, args.im,
        args.rupture_distance, args.max_rupture_distance, args.period,
        args.extended_period, args.components, args.gmm_param_config,
    )
Esempio n. 6
0
def write_xyz(imcsv, stat_file, out_dir, component="geom"):
    """Join IM values with station coordinates and write them as xyz files.

    Writes non_uniform_im.xyz (all stations), real_station_im.xyz (real
    stations only) and im_order.txt (column order) into out_dir.
    """
    utils.setup_dir(out_dir)

    station_df = formats.load_station_file(stat_file)
    im_df = formats.load_im_file_pd(imcsv, comp=component)

    # the two frames must share an index name before they can be merged
    station_df.index.rename("station", inplace=True)

    merged = im_df.merge(station_df, left_index=True, right_index=True, how="inner")
    # keep only physical (non-virtual) stations for the second output
    real_only = merged[
        [not shared.is_virtual_station(s) for s in merged.index.get_level_values(0)]
    ]

    im_names = im_df.columns
    out_columns = ["lon", "lat", *im_names]

    non_uniform_filepath = out_dir / "non_uniform_im.xyz"
    real_station_filepath = out_dir / "real_station_im.xyz"
    merged[out_columns].to_csv(non_uniform_filepath, sep=" ", header=None, index=None)
    real_only[out_columns].to_csv(
        real_station_filepath, sep=" ", header=None, index=None
    )

    # record the IM column order so downstream plotting can map columns back
    with open(out_dir / "im_order.txt", "w") as fp:
        fp.write(" ".join(im_names))

    print("xyz files are output to {}".format(out_dir))
Esempio n. 7
0
def load_args():
    """Parse command line arguments, validate them and create the output folder.

    Returns the parsed argparse namespace.
    """
    parser = ArgumentParser()
    # positional inputs
    parser.add_argument("rrup", help="path to RRUP file", type=os.path.abspath)
    parser.add_argument("sim", help="path to SIMULATED IM file", type=os.path.abspath)
    parser.add_argument("srf", help="path to srf info file", type=os.path.abspath)
    parser.add_argument(
        "--config", type=os.path.abspath, help="path to .yaml empirical config file"
    )
    # GMPE distance sampling parameters
    parser.add_argument(
        "--dist_min", type=float, default=0.1, help="GMPE param DistMin, default 0.1 km"
    )
    parser.add_argument(
        "--dist_max",
        type=float,
        default=100.0,
        help="GMPE param DistMax, default 100.0 km",
    )
    parser.add_argument(
        "--n_val", type=int, default=51, help="GMPE param n_val, default 51"
    )
    parser.add_argument(
        "--out_file",
        type=os.path.abspath,
        default="./epsilons.xyz",
        help="output folder to place plot",
    )
    parser.add_argument("--comp", help="component", default="geom")

    args = parser.parse_args()
    validate_args(args)
    setup_dir(os.path.dirname(args.out_file))
    return args
from IM_calculation.test.test_common_set_up import INPUT, OUTPUT, compare_dicts, set_up

# This is a hack, to allow loading of the test pickle objects
import sys
import IM_calculation.IM as IM

sys.modules["IM"] = IM

PARSER = argparse.ArgumentParser()
BSC_PERIOD = [0.05, 0.1, 5.0, 10.0]
TEST_IMS = ["PGA", "PGV", "Ds575", "pSA"]

FAKE_DIR = (
    "fake_dir"
)  # should be created in set_up module and remove in tear_down module
# Use the constant rather than repeating the "fake_dir" literal, so the two
# can never drift apart.
utils.setup_dir(FAKE_DIR)


@pytest.mark.parametrize(
    "test_period, test_extended, expected_period",
    [
        # base periods only
        (BSC_PERIOD, False, np.array(BSC_PERIOD)),
        # base periods merged with the extended set, deduplicated
        (BSC_PERIOD, True, np.unique(np.append(BSC_PERIOD, constants.EXT_PERIOD))),
    ],
)
def test_validate_period(test_period, test_extended, expected_period):
    """validate_period must return exactly the expected period values."""
    result = calculate_ims.validate_period(test_period, test_extended)
    assert all(np.equal(result, expected_period))
def install_fault(
    fault_name,
    n_rel,
    root_folder,
    version,
    stat_file_path,
    seed=HF_DEFAULT_SEED,
    extended_period=False,
    vm_perturbations=False,
    ignore_vm_perturbations=False,
    vm_qpqs_files=False,
    ignore_vm_qpqs_files=False,
    keep_dup_station=True,
    components=None,
    logger: Logger = get_basic_logger(),
):
    """Install every SRF realisation of a fault into the cybershake run structure.

    Loads the gmsim template defaults for *version*, validates the fault's
    velocity model, then for each SRF realisation installs the simulation,
    creates the management DB entry, generates fd station files, dumps the
    params yamls and smoke-tests the generated HF/BB commands.

    :param fault_name: name of the fault to install
    :param n_rel: expected number of realisations (None to skip the check)
    :param root_folder: cybershake root folder
    :param version: gmsim version (selects the template defaults)
    :param stat_file_path: path to the station .ll file; matching .vs30 and
        .vs30ref files are derived from it by suffix replacement
    :raises RuntimeError: when the realisation count mismatches, the VM fails
        validation, or a Stoch file is missing
    NOTE(review): the `logger` default is evaluated once at import time, so
    all callers that omit it share one logger instance.
    """

    # gmsim template defaults for the requested version
    config_dict = utils.load_yaml(
        os.path.join(
            platform_config[PLATFORM_CONFIG.TEMPLATES_DIR.name],
            "gmsim",
            version,
            ROOT_DEFAULTS_FILE_NAME,
        )
    )
    # Load variables from cybershake config

    v1d_full_path = os.path.join(
        platform_config[PLATFORM_CONFIG.VELOCITY_MODEL_DIR.name],
        "Mod-1D",
        config_dict.get("v_1d_mod"),
    )
    site_v1d_dir = config_dict.get("site_v1d_dir")
    hf_stat_vs_ref = config_dict.get("hf_stat_vs_ref")

    # vs30 / vs30ref files are expected to sit next to the .ll station file
    vs30_file_path = stat_file_path.replace(".ll", ".vs30")
    vs30ref_file_path = stat_file_path.replace(".ll", ".vs30ref")

    # this variable has to be empty
    # TODO: fix this legacy issue, very low priority
    event_name = ""

    # get all srf from source
    srf_dir = simulation_structure.get_srf_dir(root_folder, fault_name)

    # prefer per-realisation SRFs (*_REL*.srf); fall back to any .srf
    list_srf = glob.glob(os.path.join(srf_dir, "*_REL*.srf"))
    if len(list_srf) == 0:
        list_srf = glob.glob(os.path.join(srf_dir, "*.srf"))

    list_srf.sort()
    # sanity check: the SRF count must match the expected realisation count
    if n_rel is not None and len(list_srf) != n_rel:
        message = (
            "Error: fault {} failed. Number of realisations do "
            "not match number of SRF files".format(fault_name)
        )
        logger.log(NOPRINTCRITICAL, message)
        raise RuntimeError(message)

    # Get & validate velocity model directory
    vel_mod_dir = simulation_structure.get_fault_VM_dir(root_folder, fault_name)
    valid_vm, message = validate_vm.validate_vm(vel_mod_dir, srf=list_srf[0])
    if not valid_vm:
        message = "Error: VM {} failed {}".format(fault_name, message)
        logger.log(NOPRINTCRITICAL, message)
        raise RuntimeError(message)
    # Load the variables from vm_params.yaml
    vm_params_path = os.path.join(vel_mod_dir, VM_PARAMS_FILE_NAME)
    vm_params_dict = utils.load_yaml(vm_params_path)
    yes_model_params = (
        False  # statgrid should normally be already generated with Velocity Model
    )

    sim_root_dir = simulation_structure.get_runs_dir(root_folder)
    fault_yaml_path = simulation_structure.get_fault_yaml_path(sim_root_dir, fault_name)
    root_yaml_path = simulation_structure.get_root_yaml_path(sim_root_dir)
    # install each realisation (SRF + matching Stoch file) one by one
    for srf in list_srf:
        logger.info("Installing {}".format(srf))
        # try to match find the stoch with same basename
        realisation_name = os.path.splitext(os.path.basename(srf))[0]
        stoch_file_path = simulation_structure.get_stoch_path(
            root_folder, realisation_name
        )
        sim_params_file = simulation_structure.get_source_params_path(
            root_folder, realisation_name
        )

        if not os.path.isfile(stoch_file_path):
            message = "Error: Corresponding Stoch file is not found: {}".format(
                stoch_file_path
            )
            logger.log(NOPRINTCRITICAL, message)
            raise RuntimeError(message)

        # install pairs one by one to fit the new structure
        sim_dir = simulation_structure.get_sim_dir(root_folder, realisation_name)

        (root_params_dict, fault_params_dict, sim_params_dict) = install_simulation(
            version=version,
            sim_dir=sim_dir,
            rel_name=realisation_name,
            run_dir=sim_root_dir,
            vel_mod_dir=vel_mod_dir,
            srf_file=srf,
            stoch_file=stoch_file_path,
            stat_file_path=stat_file_path,
            vs30_file_path=vs30_file_path,
            vs30ref_file_path=vs30ref_file_path,
            yes_statcords=False,
            fault_yaml_path=fault_yaml_path,
            root_yaml_path=root_yaml_path,
            cybershake_root=root_folder,
            site_v1d_dir=site_v1d_dir,
            hf_stat_vs_ref=hf_stat_vs_ref,
            v1d_full_path=v1d_full_path,
            sim_params_file=sim_params_file,
            seed=seed,
            logger=logger,
            extended_period=extended_period,
            vm_perturbations=vm_perturbations,
            ignore_vm_perturbations=ignore_vm_perturbations,
            vm_qpqs_files=vm_qpqs_files,
            ignore_vm_qpqs_files=ignore_vm_qpqs_files,
            components=components,
        )

        if (
            root_params_dict is None
            or fault_params_dict is None
            or sim_params_dict is None
        ):
            # Something has gone wrong, returning without saving anything
            logger.critical(f"Critical Error some params dictionary are None")
            return

        # NOTE(review): root_params_dict cannot be None here (checked above),
        # so the first half of this condition is redundant.
        if root_params_dict is not None and not isclose(
            vm_params_dict["flo"], root_params_dict["flo"]
        ):
            logger.critical(
                "The parameter 'flo' does not match in the VM params and root params files. "
                "Please ensure you are installing the correct gmsim version"
            )
            return

        # register the realisation in the management DB and prepare its queue dir
        create_mgmt_db.create_mgmt_db(
            [], simulation_structure.get_mgmt_db(root_folder), srf_files=srf
        )
        utils.setup_dir(os.path.join(root_folder, "mgmt_db_queue"))
        root_params_dict["mgmt_db_location"] = root_folder

        # Generate the fd files, create these at the fault level
        fd_statcords, fd_statlist = generate_fd_files(
            simulation_structure.get_fault_dir(root_folder, fault_name),
            vm_params_dict,
            stat_file=stat_file_path,
            logger=logger,
            keep_dup_station=keep_dup_station,
        )

        fault_params_dict[FaultParams.stat_coords.value] = fd_statcords
        fault_params_dict[FaultParams.FD_STATLIST.value] = fd_statlist

        #     root_params_dict['hf_stat_vs_ref'] = cybershake_cfg['hf_stat_vs_ref']
        dump_all_yamls(sim_dir, root_params_dict, fault_params_dict, sim_params_dict)

        # test if the params are accepted by steps HF and BB
        sim_params = utils.load_sim_params(os.path.join(sim_dir, "sim_params.yaml"))
        # check hf

        # temporary change the script name to hf_sim, due to how error message are shown
        main_script_name = sys.argv[0]
        sys.argv[0] = "hf_sim.py"

        command_template, add_args = hf_gen_command_template(
            sim_params, list(HPC)[0].name, seed
        )
        run_command = gen_args_cmd(
            ProcessType.HF.command_template, command_template, add_args
        )
        hf_args_parser(cmd=run_command)

        # check bb
        sys.argv[0] = "bb_sim.py"

        command_template, add_args = bb_gen_command_template(sim_params)
        run_command = gen_args_cmd(
            ProcessType.BB.command_template, command_template, add_args
        )
        bb_args_parser(cmd=run_command)
        # change back, to prevent unexpected error
        sys.argv[0] = main_script_name
Esempio n. 10
0
def setup_module(module):
    utils.setup_dir(os.path.dirname(TEST_DB_FILE))