def collect_simulation_params(args):
    """
    Collects simulation-wide parameters from the first realization.

    Reads the simulation method from the realization's HTML summary,
    the magnitude from its SRC file, and the BBP version / module list
    from its XML workflow description.  All results are stored as
    attributes on args.
    """
    # Get paths to one XML and one SRC file from the first realization.
    # NOTE: glob.glob() already returns paths that include the directory
    # component of the pattern, so the results are used directly --
    # re-joining them with the directory (as the old code did) only
    # worked by accident when the directory was an absolute path.
    first_realization = args.realizations[0]
    xml_dir = os.path.join(args.input_dir, "Xml")
    xml_path = glob.glob("%s/*.xml" % (xml_dir))[0]
    src_dir = os.path.join(args.top_level_indir, first_realization)
    src_path = glob.glob("%s/*.src" % (src_dir))[0]
    html_dir = os.path.join(args.top_level_outdir, first_realization)
    html_file = glob.glob("%s/*.html" % (html_dir))[0]
    # Get simulation method from html file
    args.general_method = get_method_from_html(html_file).lower()
    # Parse SRC and get magnitude
    src_keys = bband_utils.parse_src_file(src_path)
    args.general_magnitude = src_keys["magnitude"]
    # Parse XML file
    workflow_obj = xml_handler.parse_xml(xml_path)
    args.bbp_software_info_version = str(workflow_obj.version)
    modules = [str(item.getName()) for item in workflow_obj.workflow]
    args.bbp_software_info_modules = modules
    # The presence of the WccSiteamp module tells us which site
    # response model was used
    if "WccSiteamp" in modules:
        args.bbp_software_info_site = "GP2014"
    else:
        args.bbp_software_info_site = "None"
    # Placeholder event id
    args.general_eqid = "-999"
def calculate_distances(src_files, site):
    """
    Compute the minimum Rrup, Rjb, and Rx for a station over a set of
    SRC files (one per fault segment).
    """
    # Large sentinel values; each segment can only lower them
    best_rrup = 10000000
    best_rjb = 10000000
    best_rx = 10000000
    # The station location never changes, so build it once
    station_geom = [float(site.lon), float(site.lat), 0.0]
    for a_srcfile in src_files:
        keys = bband_utils.parse_src_file(a_srcfile)
        origin = (keys['lon_top_center'], keys['lat_top_center'])
        dims = (keys['fault_length'], keys['dlen'],
                keys['fault_width'], keys['dwid'],
                keys['depth_to_top'])
        mech = (keys['strike'], keys['dip'], keys['rake'])
        (trace, top_depth, bottom_depth,
         dip_avg, _, _) = putils.FaultTraceGen(origin, dims, mech)
        seg_rjb, seg_rrup, seg_rx = putils.DistanceToSimpleFaultSurface(
            station_geom, trace, top_depth, bottom_depth, dip_avg)
        # Keep the closest distance seen over all segments
        if seg_rjb < best_rjb:
            best_rjb = seg_rjb
        if seg_rrup < best_rrup:
            best_rrup = seg_rrup
        if seg_rx < best_rx:
            best_rx = seg_rx
    return best_rrup, best_rjb, best_rx
def __init__(self, a_srcname=None):
    """
    Configuration for the UCSB stitching tools: locates the UCSB
    binaries and defines station-file names and the component list.
    """
    install = InstallCfg.getInstance()
    # Parse the SRC file, when one was provided
    if a_srcname:
        self.CFGDICT = bband_utils.parse_src_file(a_srcname)
    # Relative names of the UCSB executables
    self.R_UC_DECON_EXE = "deconvBBP"
    self.R_SLL2XY = "statLL2XY"
    self.R_STITCH = "stitchBBP"
    # Absolute paths, all rooted at the UCSB bin directory
    ucsb_bin = install.A_UCSB_BIN_DIR
    self.A_UC_DECON_EXE = os.path.join(ucsb_bin, self.R_UC_DECON_EXE)
    self.A_SLL2XY = os.path.join(ucsb_bin, self.R_SLL2XY)
    self.A_STITCH = os.path.join(ucsb_bin, self.R_STITCH)
    # Names used when the input station file is converted into a UC
    # lat/lon version of the station file
    self.R_UC_STATION_FILE = "uc_stations.ll"
    self.R_UC_VS30_FILE = "stations.vs30"
    self.COMPS = ['000', '090', 'ver']
def run(self):
    """
    Do all steps needed for creating the ratio of maximum to median
    response across orientations (RotD100/RotD50).

    Builds the per-simulation directory tree, parses the SRC file,
    then delegates to the calculate_* / generate_plot methods.
    """
    print("RotD100".center(80, '-'))
    # Initialize
    install = install_cfg.InstallCfg.getInstance()
    sim_id = self.sim_id
    # Station-list basename labels the per-run log and obs directories
    sta_base = os.path.basename(os.path.splitext(self.r_stations)[0])
    self.log = os.path.join(install.A_OUT_LOG_DIR, str(sim_id),
                            "%d.rotd100_%s.log" % (sim_id, sta_base))
    a_statfile = os.path.join(install.A_IN_DATA_DIR, str(sim_id),
                              self.r_stations)
    a_indir = os.path.join(install.A_IN_DATA_DIR, str(sim_id))
    a_tmpdir = os.path.join(install.A_TMP_DATA_DIR, str(sim_id))
    # Observed seismograms live in their own per-station-list subdir
    a_tmpdir_seis = os.path.join(install.A_TMP_DATA_DIR, str(sim_id),
                                 "obs_seis_%s" % (sta_base))
    a_outdir = os.path.join(install.A_OUT_DATA_DIR, str(sim_id))
    a_validation_outdir = os.path.join(a_outdir, "validations",
                                       "baker_rd100")
    #
    # Make sure the tmp and out directories exist
    #
    bband_utils.mkdirs([a_indir, a_tmpdir, a_tmpdir_seis, a_outdir,
                        a_validation_outdir], print_cmd=False)
    # Source file, parse it!
    a_srcfile = os.path.join(a_indir, self.r_srcfile)
    self.src_keys = bband_utils.parse_src_file(a_srcfile)
    # Calculate RotD100/RotD50 for simulated seismograms
    self.calculate_simulated(a_statfile, a_tmpdir, a_outdir,
                             a_validation_outdir)
    # Calculate RotD100/RotD50 for observation seismograms
    self.calculate_observations(a_indir, a_statfile, a_tmpdir_seis,
                                a_validation_outdir)
    # Calculate ratios for simulated and observation data
    self.calculate_ratios(a_statfile, a_validation_outdir)
    # Generate comparison data table
    self.calculate_residuals(a_statfile, a_validation_outdir)
    # Generate bias plot showing the comparison between
    # simulations and observations
    self.generate_plot(a_statfile, a_validation_outdir)
    # All done!
    print("RotD100 Completed".center(80, '-'))
def run_validation(self):
    """
    Do all steps needed for creating the ratio of maximum to median
    response across orientations (RotD100/RotD50).

    Validation-mode twin of run(): same directory setup and the same
    calculate_* / generate_plot pipeline, labeled RotDXX.
    """
    print("RotDXX".center(80, '-'))
    # Initialize
    install = install_cfg.InstallCfg.getInstance()
    sim_id = self.sim_id
    # Station-list basename labels the per-run log and obs directories
    sta_base = os.path.basename(os.path.splitext(self.r_stations)[0])
    self.log = os.path.join(install.A_OUT_LOG_DIR, str(sim_id),
                            "%d.rotd100_%s.log" % (sim_id, sta_base))
    a_statfile = os.path.join(install.A_IN_DATA_DIR, str(sim_id),
                              self.r_stations)
    a_indir = os.path.join(install.A_IN_DATA_DIR, str(sim_id))
    a_tmpdir = os.path.join(install.A_TMP_DATA_DIR, str(sim_id))
    # Observed seismograms live in their own per-station-list subdir
    a_tmpdir_seis = os.path.join(install.A_TMP_DATA_DIR, str(sim_id),
                                 "obs_seis_%s" % (sta_base))
    a_outdir = os.path.join(install.A_OUT_DATA_DIR, str(sim_id))
    a_validation_outdir = os.path.join(a_outdir, "validations",
                                       "baker_rd100")
    #
    # Make sure the tmp and out directories exist
    #
    bband_utils.mkdirs(
        [a_indir, a_tmpdir, a_tmpdir_seis, a_outdir,
         a_validation_outdir], print_cmd=False)
    # Source file, parse it!
    a_srcfile = os.path.join(a_indir, self.r_srcfile)
    self.src_keys = bband_utils.parse_src_file(a_srcfile)
    # Calculate RotD100/RotD50 for simulated seismograms
    self.calculate_simulated(a_statfile, a_tmpdir, a_outdir,
                             a_validation_outdir)
    # Calculate RotD100/RotD50 for observation seismograms
    self.calculate_observations(a_indir, a_statfile, a_tmpdir_seis,
                                a_validation_outdir)
    # Calculate ratios for simulated and observation data
    self.calculate_ratios(a_statfile, a_validation_outdir)
    # Generate comparison data table
    self.calculate_residuals(a_statfile, a_validation_outdir)
    # Generate bias plot showing the comparison between
    # simulations and observations
    self.generate_plot(a_statfile, a_validation_outdir)
    # All done!
    print("RotDXX Completed".center(80, '-'))
def __init__(self, a_srcname=None):
    """
    Initialize default constants for SRF generation; when a SRC file
    name is supplied, parse it into self.CFGDICT.
    """
    # Medium and timestep defaults
    self.VS, self.DT, self.DENS = 3.5, 0.025, 2.7
    # SRF-generation executable name
    self.GENSRF = "gen_srf"
    if a_srcname:
        self.CFGDICT = bband_utils.parse_src_file(a_srcname)
def __init__(self, a_srcname=None):
    """
    Sets default medium parameters and the gen_srf executable name,
    then parses a_srcname (when provided) into self.CFGDICT.
    """
    # Shear-wave velocity, timestep, density, and executable defaults
    for attr, value in (("VS", 3.5), ("DT", 0.025),
                        ("DENS", 2.7), ("GENSRF", "gen_srf")):
        setattr(self, attr, value)
    if a_srcname:
        self.CFGDICT = bband_utils.parse_src_file(a_srcname)
def __init__(self, a_srcname=None):
    """
    Set slip-velocity-function defaults; parse a_srcname when given.
    """
    # SVF type, one of: 'tri', 'rec', 'pliu', 'etinti'
    self.svf_type, self.svf_dt = 'etinti', 0.1
    if a_srcname:
        self.CFGDICT = bband_utils.parse_src_file(a_srcname)
def __init__(self, a_srcfiles):
    """
    Store slip-velocity-function defaults and parse all SRC files
    into self.CFGDICT (one dictionary per file, in input order).
    """
    # SVF type, one of: 'tri', 'rec', 'pliu', 'etinti'
    self.svf_type = 'etinti'
    self.svf_dt = 0.1
    self.num_srcfiles = len(a_srcfiles)
    self.seg_hypocenter = 0
    self.CFGDICT = [bband_utils.parse_src_file(a_srcname)
                    for a_srcname in a_srcfiles]
def __init__(self, a_srcname=None):
    """
    Set up default parameters for ExSim; when a SRC file is given,
    parse it and convert the parameters to ExSim's format.
    """
    # Default ExSim parameters
    defaults = {"MAX_STATIONS": 300,
                "KAPPA": 0.04,
                "STRESS": 150.0,
                "PARAM_FILE": "EXSIM12.params",
                "EMPIRICAL_AMPS": "empirical_amps.txt"}
    for attr, value in defaults.items():
        setattr(self, attr, value)
    if a_srcname:
        self.CFGDICT = bband_utils.parse_src_file(a_srcname)
        self.convert_to_exsim()
def __init__(self, vmodel_name, a_srcname=None):
    """
    UCSB rupture-generator configuration: locates the ffsp_v2 binary
    and pulls region-specific parameters from the named velocity
    model.  Raises IndexError when the velocity model cannot be found
    and KeyError when a required parameter is missing from it.
    """
    install = InstallCfg.getInstance()
    #
    # Name and path to executable
    #
    self.R_UC_FFSP_EXE = "ffsp_v2"
    self.A_UC_FFSP_EXE = os.path.join(install.A_UCSB_BIN_DIR,
                                      self.R_UC_FFSP_EXE)
    self.FFSP_OUTPUT_PREFIX = "FFSP_OUTPUT"
    self.FMAX = 50.0  # Nyquist -- use 50 for 100Hz
    vmodel_obj = velocity_models.get_velocity_model_by_name(vmodel_name)
    if vmodel_obj is None:
        raise IndexError("Cannot find velocity model: %s" % (vmodel_name))
    vmodel_params = vmodel_obj.get_codebase_params('ucsb')
    # GF_DT is mandatory: DT is configured from the velocity model
    if 'GF_DT' not in vmodel_params:
        raise KeyError("%s parameter missing in velocity model %s" %
                       ("GF_DT", vmodel_name))
    self.DT = float(vmodel_params['GF_DT'])
    # Other region-specific parameters, with fall-back defaults
    self.RV_AVG = (float(vmodel_params['RV_AVG'])
                   if 'RV_AVG' in vmodel_params else 2.5)
    self.TP_TR = (float(vmodel_params['TP_TR'])
                  if 'TP_TR' in vmodel_params else 0.1)
    # LF_VELMODEL is mandatory: low-frequency velocity model file
    if 'LF_VELMODEL' not in vmodel_params:
        raise KeyError("%s parameter missing in velocity model %s" %
                       ("LF_VELMODEL", vmodel_name))
    self.A_UC_LF_VELMODEL = os.path.join(vmodel_obj.base_dir,
                                         vmodel_params['LF_VELMODEL'])
    if a_srcname:
        self.CFGDICT = bband_utils.parse_src_file(a_srcname)
        # RV_AVG is optional!  If the SRC file has it, it overrides
        # both the region value and the default
        if "rv_avg" in self.CFGDICT:
            self.RV_AVG = self.CFGDICT["rv_avg"]
def run(self):
    """
    Calculate GMPEs, create bias plot comparisons.

    Parses the SRC file, then loops over the station list calling
    calculate_gmpe() once per station, writing one <station>-gmpe.ri50
    file into the gmpe_data output directory.
    """
    print("Calculate GMPE".center(80, '-'))
    # Initialize basic variables
    install = InstallCfg.getInstance()
    sim_id = self.sim_id
    sta_base = os.path.basename(os.path.splitext(self.r_stations)[0])
    # Input, tmp, and output directories
    a_outdir = os.path.join(install.A_OUT_DATA_DIR, str(sim_id))
    a_outdir_gmpe = os.path.join(a_outdir, "gmpe_data_%s" % (sta_base))
    a_logdir = os.path.join(install.A_OUT_LOG_DIR, str(sim_id))
    self.log = os.path.join(a_logdir, "%d.gmpe_compare.log" % (sim_id))
    #
    # Make sure the output and tmp directories exist
    #
    dirs = [a_outdir_gmpe, a_outdir, a_logdir]
    bband_utils.mkdirs(dirs, print_cmd=False)
    # Source file, parse it!
    a_srcfile = os.path.join(install.A_IN_DATA_DIR, str(sim_id),
                             self.r_src_file)
    self.src_keys = bband_utils.parse_src_file(a_srcfile)
    # Station file
    a_statfile = os.path.join(install.A_IN_DATA_DIR, str(sim_id),
                              self.r_stations)
    slo = StationList(a_statfile)
    site_list = slo.getStationList()
    # Go through each station, and print comparison headers for
    # the first station we process
    for site in site_list:
        stat = site.scode
        print("==> Calculating GMPE for station: %s" % (stat))
        output_file = os.path.join(a_outdir_gmpe, "%s-gmpe.ri50" % (stat))
        self.calculate_gmpe(site, output_file)
    # All done
    print("Calculate GMPE Completed".center(80, '-'))
def __init__(self, a_srcfiles):
    """
    Initialize constants for multi-segment SRF generation and parse
    every SRC file in a_srcfiles into self.CFGDICT.
    """
    # Medium / discretization defaults
    self.VS = 3.5
    self.DT = 0.025
    self.DENS = 2.7
    self.VEL_RUP_FRAC = 0.8
    # Executables for single-segment, per-segment, and merge steps
    self.GENSRF = "gen_srf"
    self.GENSRFSEGMENT = "gen_srf_segment"
    self.SUMSEG = "sum_seg"
    self.num_srcfiles = len(a_srcfiles)
    # One parsed key-dictionary per SRC file, in input order
    self.CFGDICT = [bband_utils.parse_src_file(a_srcfile)
                    for a_srcfile in a_srcfiles]
def run(self):
    """
    Calculate GMPEs, create bias plot comparisons.

    Sets up output/log directories, parses the SRC file, and runs
    calculate_gmpe() for every station in the station list.
    """
    print("Calculate GMPE".center(80, '-'))
    # Initialize basic variables
    install = InstallCfg.getInstance()
    sim_id = self.sim_id
    sta_base = os.path.basename(os.path.splitext(self.r_stations)[0])
    # Input, tmp, and output directories
    a_outdir = os.path.join(install.A_OUT_DATA_DIR, str(sim_id))
    a_outdir_gmpe = os.path.join(a_outdir, "gmpe_data_%s" % (sta_base))
    a_logdir = os.path.join(install.A_OUT_LOG_DIR, str(sim_id))
    self.log = os.path.join(a_logdir, "%d.gmpe_compare.log" % (sim_id))
    #
    # Make sure the output and tmp directories exist
    #
    dirs = [a_outdir_gmpe, a_outdir, a_logdir]
    bband_utils.mkdirs(dirs, print_cmd=False)
    # Source file, parse it!
    a_srcfile = os.path.join(install.A_IN_DATA_DIR, str(sim_id),
                             self.r_src_file)
    self.src_keys = bband_utils.parse_src_file(a_srcfile)
    # Station file
    a_statfile = os.path.join(install.A_IN_DATA_DIR, str(sim_id),
                              self.r_stations)
    slo = StationList(a_statfile)
    site_list = slo.getStationList()
    # Go through each station, and print comparison headers for
    # the first station we process
    for site in site_list:
        stat = site.scode
        print("==> Calculating GMPE for station: %s" % (stat))
        output_file = os.path.join(a_outdir_gmpe, "%s-gmpe.ri50" % (stat))
        self.calculate_gmpe(site, output_file)
    # All done
    print("Calculate GMPE Completed".center(80, '-'))
def __init__(self, i_r_stations, i_r_srcfile, plot_vel, plot_acc, sim_id=0):
    """
    Store plotting options and, when a SRC file name is given, parse
    it into self.src_keys (otherwise src_keys stays None).
    """
    self.r_stations = i_r_stations
    self.plot_vel = plot_vel
    self.plot_acc = plot_acc
    self.sim_id = sim_id
    install = InstallCfg.getInstance()
    a_indir = os.path.join(install.A_IN_DATA_DIR, str(self.sim_id))
    # Default to no SRC information, override below when available
    self.src_keys = None
    if i_r_srcfile is not None and i_r_srcfile != "":
        self.src_keys = bband_utils.parse_src_file(
            os.path.join(a_indir, i_r_srcfile))
def __init__(self, a_srcname=None): """ Sets basic class parameters, then parses a_srcname for more information """ # User defined parms self.SLIP_SIGMA = 0.85 # This is now the default inside genslip-3.3, so don't need to use it # self.RAND_RAKE_RANGE = 60 self.RTDEP = 6.5 self.RTDEP_RANGE = 1.5 self.MEAN_RVFAC = 0.8 self.RANGE_RVFAC = 0.05 self.SHAL_VRUP = 0.6 # Default RISETIME_COEF set for western US simulations, # override in velocity model config file. This parameter used # to be set to 1.6, but was modified by RWG in November 2013 # when the Rupture Generator was updated to version 3.3. The # value was reset to 1.6 for Genslip 5.0.1 self.RISETIME_COEF = 1.6 # self.EXTRA_RTFAC = 0.0 self.RISETIME_FAC = 2 self.RT_SCALEFAC = 1 self.RT_RAND = 0 # As in genslip-3.3, we are using 'Mliu' stype, which is the default # self.STYPE = "ucsb" # Extra parameters in genslip-3.3, updated for genslip-5.0.1 self.SLIP_WATER_LEVEL = -1 self.DEEP_RISETIMEDEP = 17.5 self.DEEP_RISETIMEDEP_RANGE = 2.5 self.DEEP_RISETIME_FAC = 2.0 # Read SRC FILE if a_srcname: self.CFGDICT = bband_utils.parse_src_file(a_srcname)
def __init__(self, a_srcname=None): """ Sets basic class parameters, then parses a_srcname for more information """ # User defined parms self.SLIP_SIGMA = 0.85 # This is now the default inside genslip-3.3, so don't need to use it # self.RAND_RAKE_RANGE = 60 self.RTDEP = 6.5 self.RTDEP_RANGE = 1.5 self.MEAN_RVFAC = 0.8 self.RANGE_RVFAC = 0.05 self.SHAL_VRUP = 0.6 # Default RISETIME_COEF set for western US simulations, # override in velocity model config file. This parameter used # to be set to 1.6, but was modified by RWG in November 2013 # when the Rupture Generator was updated to version 3.3. The # value was reset to 1.6 for Genslip 5.0.1 self.RISETIME_COEF = 1.6 # self.EXTRA_RTFAC = 0.0 self.RISETIME_FAC = 2 self.RT_SCALEFAC = 1 self.RT_RAND = 0 # As in genslip-3.3, we are using 'Mliu' stype, which is the default # self.STYPE = "ucsb" # Extra parameters in genslip-3.3, updated for genslip-5.0.1 self.SLIP_WATER_LEVEL = -1 self.DEEP_RISETIMEDEP = 17.5 self.DEEP_RISETIMEDEP_RANGE = 2.5 self.DEEP_RISETIME_FAC = 2.0 # Read SRC FILE if a_srcname: self.CFGDICT = bband_utils.parse_src_file(a_srcname)
def __init__(self, i_r_stations, i_r_srcfile, plot_vel, plot_acc, sim_id=0):
    """
    Initialize basic class parameters: plotting flags, station list,
    and (optionally) the parsed SRC-file keys.
    """
    self.r_stations = i_r_stations
    self.plot_vel = plot_vel
    self.plot_acc = plot_acc
    self.sim_id = sim_id
    install = InstallCfg.getInstance()
    a_indir = os.path.join(install.A_IN_DATA_DIR, str(self.sim_id))
    # A SRC file is optional; without one there are no src_keys
    if i_r_srcfile is not None and i_r_srcfile != "":
        i_a_srcfile = os.path.join(a_indir, i_r_srcfile)
        self.src_keys = bband_utils.parse_src_file(i_a_srcfile)
    else:
        self.src_keys = None
# Command-line arguments: station list, SRC file, two simulation ids
# to compare, and the output directory
station_list = sys.argv[1]
src_file = sys.argv[2]
sim_id_1 = int(sys.argv[3])
sim_id_2 = int(sys.argv[4])
output_dir = sys.argv[5]
# Create directory paths
install = InstallCfg.getInstance()
config = GPGofCfg()
a_indir = os.path.join(install.A_IN_DATA_DIR, str(sim_id_1))
a_outdir1 = os.path.join(install.A_OUT_DATA_DIR, str(sim_id_1))
a_outdir2 = os.path.join(install.A_OUT_DATA_DIR, str(sim_id_2))
# Src file
a_srcfile = os.path.join(a_indir, src_file)
src_keys = bband_utils.parse_src_file(a_srcfile)
# Station file
a_statfile = os.path.join(a_indir, station_list)
slo = StationList(a_statfile)
site_list = slo.getStationList()
# Capture event_label from the first .bias file in sim 1's output dir
bias_file = glob.glob("%s%s*.bias" % (a_outdir1, os.sep))
if len(bias_file) < 1:
    raise bband_utils.ProcessingError("Cannot find event label!")
bias_file = bias_file[0]
# Let's capture the event label: bias files are named <label>-...
event_label = os.path.basename(bias_file).split("-")[0]
# Emit the residual-table header only for the first station processed
print_header_rd50 = 1
def run(self):
    """
    This function is the main entry point for this module.  It runs
    the gp_gof component: for every station it computes Rrup and runs
    the GP gen_resid_tbl_3comp tool to accumulate a residual table,
    then summarizes and plots the results.
    """
    print("GP GoF".center(80, '-'))
    # Initialize basic variables
    self.install = InstallCfg.getInstance()
    self.config = GPGofCfg()
    install = self.install
    config = self.config
    sim_id = self.sim_id
    sta_base = os.path.basename(os.path.splitext(self.r_stations)[0])
    self.log = os.path.join(install.A_OUT_LOG_DIR, str(sim_id),
                            "%d.gp_gof.log" % (sim_id))
    # Input, tmp, and output directories
    a_outdir = os.path.join(install.A_OUT_DATA_DIR, str(sim_id))
    a_outdir_seis = os.path.join(install.A_OUT_DATA_DIR, str(sim_id),
                                 "obs_seis_%s" % (sta_base))
    a_outdir_gmpe = os.path.join(install.A_OUT_DATA_DIR, str(sim_id),
                                 "gmpe_data_%s" % (sta_base))
    # Source file, parse it!
    a_srcfile = os.path.join(install.A_IN_DATA_DIR, str(sim_id),
                             self.r_srcfile)
    self.src_keys = bband_utils.parse_src_file(a_srcfile)
    # Station file
    a_statfile = os.path.join(install.A_IN_DATA_DIR, str(sim_id),
                              self.r_stations)
    # List of observed seismogram files
    filelist = os.listdir(a_outdir_seis)
    slo = StationList(a_statfile)
    site_list = slo.getStationList()
    # check cutoff value
    if self.max_cutoff is None:
        self.max_cutoff = config.MAX_CDST
    # Emit the residual-table header only for the first station
    print_header_rd50 = 1
    # Remove rd50 resid file so the table is rebuilt from scratch
    rd50_resid_output = os.path.join(a_outdir, "%s-%d.rd50-resid.txt" %
                                     (self.comp_label, sim_id))
    if os.path.exists(rd50_resid_output):
        os.remove(rd50_resid_output)
    for site in site_list:
        slon = float(site.lon)
        slat = float(site.lat)
        stat = site.scode
        # Now process rd50 files
        expected_rd50_file = os.path.join(a_outdir, "%d.%s.rd50" %
                                          (sim_id, stat))
        if not os.path.exists(expected_rd50_file):
            # just skip it
            print("Skipping rotd50/psa5 for station %s..." % (stat))
            continue
        # See if the rd50 file exists for comparison.  If it doesn't
        # exist, skip this station
        rd50_file = None
        if ("%s.rd50" % (stat)) in filelist:
            rd50_file = "%s.rd50" % (stat)
        else:
            # Skip this station
            continue
        # Calculate Rrup
        origin = (self.src_keys['lon_top_center'],
                  self.src_keys['lat_top_center'])
        dims = (self.src_keys['fault_length'], self.src_keys['dlen'],
                self.src_keys['fault_width'], self.src_keys['dwid'],
                self.src_keys['depth_to_top'])
        mech = (self.src_keys['strike'], self.src_keys['dip'],
                self.src_keys['rake'])
        site_geom = [float(site.lon), float(site.lat), 0.0]
        (fault_trace1, up_seis_depth, low_seis_depth,
         ave_dip, dummy1, dummy2) = putils.FaultTraceGen(origin, dims, mech)
        _, rrup, _ = putils.DistanceToSimpleFaultSurface(site_geom,
                                                         fault_trace1,
                                                         up_seis_depth,
                                                         low_seis_depth,
                                                         ave_dip)
        # Create path names and check if their sizes are within bounds
        datafile1 = os.path.join(a_outdir_seis, rd50_file)
        simfile1 = os.path.join(a_outdir, "%d.%s.rd50" % (sim_id, stat))
        outfile = os.path.join(a_outdir, "%s-%d.rd50-resid.txt" %
                               (self.comp_label, self.sim_id))
        bband_utils.check_path_lengths([datafile1, simfile1, outfile],
                                       bband_utils.GP_MAX_FILENAME)
        # Shell out to the GP residual-table generator, appending to
        # outfile and logging stderr to self.log
        cmd = ("%s/gen_resid_tbl_3comp bbp_format=1 " %
               (install.A_GP_BIN_DIR) +
               "datafile1=%s simfile1=%s " % (datafile1, simfile1) +
               "comp1=psa5n comp2=psa5e comp3=rotd50 " +
               "eqname=%s mag=%s stat=%s lon=%.4f lat=%.4f " %
               (self.comp_label, self.mag, stat, slon, slat) +
               "vs30=%d cd=%.2f " % (site.vs30, rrup) +
               "flo=%f fhi=%f " % (site.low_freq_corner,
                                   site.high_freq_corner) +
               "print_header=%d >> %s 2>> %s" %
               (print_header_rd50, outfile, self.log))
        bband_utils.runprog(cmd, abort_on_error=True, print_cmd=False)
        # Only need to print header the first time
        if print_header_rd50 == 1:
            print_header_rd50 = 0
    # Finished per station processing, now summarize and plot the data
    if os.path.exists(rd50_resid_output):
        self.summarize_rotd50(site_list, a_outdir, a_outdir_gmpe)
    print("GP GoF Completed".center(80, '-'))
def create_resid_data_file(comp_label, input_indir, input_obsdir, combined_file, temp_dir): """ This function creates a file containing the combined residuals from the simulation data from all stations """ # Copy header for first file, set logfile copy_header = 1 logfile = os.path.join(temp_dir, "log.txt") # Figure out where out binaries are if "BBP_DIR" in os.environ: install_root = os.path.normpath(os.environ["BBP_DIR"]) else: raise bband_utils.ProcessingError("BBP_DIR is not set!") gp_bin_dir = os.path.join(install_root, "src", "gp", "bin") # Get realizations realizations = sorted(os.listdir(input_indir)) one_realization = realizations[0] basedir = os.path.join(input_indir, one_realization) # Get the station list a_statfile = glob.glob("%s%s*.stl" % (basedir, os.sep)) if len(a_statfile) != 1: raise bband_utils.ProcessingError("Cannot get station list!") a_statfile = a_statfile[0] slo = StationList(a_statfile) site_list = slo.getStationList() # Get source file a_srcfile = glob.glob("%s%s*.src" % (basedir, os.sep)) if len(a_srcfile) == 0: raise bband_utils.ProcessingError("Cannot get src file!") a_srcfile = a_srcfile[0] # Parse it! 
src_keys = bband_utils.parse_src_file(a_srcfile) # Get the obsdir realizations = sorted(os.listdir(input_obsdir)) one_realization = realizations[0] basedir = os.path.join(input_obsdir, one_realization) obs_dir = glob.glob("%s%sobs_seis*" % (basedir, os.sep)) if len(obs_dir) != 1: raise bband_utils.ProcessingError("Cannot get observation dir!") obs_dir = obs_dir[0] # Go through all stations for site in site_list: slon = float(site.lon) slat = float(site.lat) stat = site.scode # Calculate Rrup origin = (src_keys['lon_top_center'], src_keys['lat_top_center']) dims = (src_keys['fault_length'], src_keys['dlen'], src_keys['fault_width'], src_keys['dwid'], src_keys['depth_to_top']) mech = (src_keys['strike'], src_keys['dip'], src_keys['rake']) site_geom = [float(site.lon), float(site.lat), 0.0] (fault_trace1, up_seis_depth, low_seis_depth, ave_dip, dummy1, dummy2) = putils.FaultTraceGen(origin, dims, mech) _, rrup, _ = putils.DistanceToSimpleFaultSurface( site_geom, fault_trace1, up_seis_depth, low_seis_depth, ave_dip) simfile1 = os.path.join(temp_dir, "%s.rd50" % (stat)) datafile1 = os.path.join(obs_dir, "%s.rd50" % (stat)) cmd = ("%s bbp_format=1 " % (os.path.join(gp_bin_dir, "gen_resid_tbl_3comp")) + "datafile1=%s simfile1=%s " % (datafile1, simfile1) + "comp1=psa5n comp2=psa5e comp3=rotd50 " + "eqname=%s mag=0.0 stat=%s lon=%.4f lat=%.4f " % (comp_label, stat, slon, slat) + "vs30=%d cd=%.2f " % (site.vs30, rrup) + "flo=%f fhi=%f " % (site.low_freq_corner, site.high_freq_corner) + "print_header=%d >> %s 2>> %s" % (copy_header, combined_file, logfile)) bband_utils.runprog(cmd, abort_on_error=True) if copy_header == 1: copy_header = 0
def run(self):
    """
    Calculate GMPEs, create bias plot comparisons.

    For each station with an observation file, reads the per-station
    GMPE data, computes residuals per GMPE model, then summarizes
    residuals with resid2uncer_varN and produces a multi-model GOF
    plot.
    """
    print("GMPE Comparison".center(80, '-'))
    # Initialize basic variables
    install = InstallCfg.getInstance()
    sim_id = self.sim_id
    sta_base = os.path.basename(os.path.splitext(self.r_stations)[0])
    # Input, tmp, and output directories
    a_tmpdir = os.path.join(install.A_TMP_DATA_DIR, str(sim_id))
    a_outdir = os.path.join(install.A_OUT_DATA_DIR, str(sim_id))
    a_tmpdir_seis = os.path.join(install.A_TMP_DATA_DIR, str(sim_id),
                                 "obs_seis_%s" % (sta_base))
    a_outdir_gmpe = os.path.join(install.A_OUT_DATA_DIR, str(sim_id),
                                 "gmpe_data_%s" % (sta_base))
    a_logdir = os.path.join(install.A_OUT_LOG_DIR, str(sim_id))
    self.log = os.path.join(a_logdir, "%d.gmpe_compare.log" % (sim_id))
    #
    # Make sure the output and tmp directories exist
    #
    dirs = [a_tmpdir, a_tmpdir_seis, a_outdir_gmpe, a_outdir, a_logdir]
    bband_utils.mkdirs(dirs, print_cmd=False)
    # Source file, parse it!
    a_srcfile = os.path.join(install.A_IN_DATA_DIR, str(sim_id),
                             self.r_src_file)
    self.src_keys = bband_utils.parse_src_file(a_srcfile)
    # Station file
    a_statfile = os.path.join(install.A_IN_DATA_DIR, str(sim_id),
                              self.r_stations)
    slo = StationList(a_statfile)
    site_list = slo.getStationList()
    # Go through each station, and print comparison headers for
    # the first station we process
    print_headers = True
    # gmpe_models is refilled in-place from each station's GMPE file;
    # after the loop it holds the model list from the last station
    gmpe_models = []
    for site in site_list:
        stat = site.scode
        obs_file = os.path.join(a_tmpdir_seis, "%s.rd50" % (stat))
        gmpe_file = os.path.join(a_outdir_gmpe, "%s-gmpe.ri50" % (stat))
        # Skip station if we don't have observation file
        if not os.access(obs_file, os.R_OK):
            continue
        gmpe_data, gmpe_models[:] = self.read_gmpe(gmpe_file)
        obs_periods, obs_data = self.read_rotd50(obs_file)
        # Loop through the NGA methods
        for gmpe_model in gmpe_models:
            resid_file = os.path.join(a_outdir_gmpe,
                                      "%s-%d.resid.txt" %
                                      (gmpe_model.lower(), sim_id))
            period_set = self.calculate_residuals(site, gmpe_model,
                                                  gmpe_data,
                                                  obs_periods, obs_data,
                                                  resid_file,
                                                  print_headers)
        print_headers = False
    # NOTE(review): period_set below is whatever the last
    # calculate_residuals() call returned; it is unbound if no station
    # had an observation file -- confirm callers guarantee at least one
    for gmpe_model in gmpe_models:
        # Now call the resid2uncer_varN program to summarize the
        # residuals and create the files needed for the GOF plot
        resid_file = os.path.join(a_outdir_gmpe,
                                  "%s-%d.resid.txt" %
                                  (gmpe_model.lower(), sim_id))
        fileroot = os.path.join(a_outdir,
                                "%s-GMPE-%d_r%d-all-rd50-%s" %
                                (self.comp_label, sim_id, 0,
                                 gmpe_model.lower()))
        cmd = ("%s/resid2uncer_varN " % (install.A_GP_BIN_DIR) +
               "residfile=%s fileroot=%s " % (resid_file, fileroot) +
               "comp=%s nstat=%d nper=%d " % (gmpe_model.lower(),
                                              len(site_list),
                                              len(period_set)) +
               "min_cdst=%d >> %s 2>&1" % (0, self.log))
        bband_utils.runprog(cmd, abort_on_error=True, print_cmd=False)
    # Plot GOF plot
    gmpe_group = gmpe_config.GMPES[self.gmpe_group_name]
    gmpe_labels = gmpe_group["labels"]
    plotter = PlotGoF()
    plottitle = "Comparison between GMPEs and %s" % (self.comp_label)
    fileroot = "%s-GMPE-%d_r%d-all-rd50-" % (self.comp_label, sim_id, 0)
    dataroot = ["%s%s" % (fileroot, model.lower())
                for model in gmpe_models]
    plotter.multi_plot(plottitle, dataroot, a_outdir, a_outdir,
                       gmpe_labels, len(site_list))
    print("GMPE Comparison Completed".center(80, '-'))
def create_resid_data_file(comp_label, input_indir, input_obsdir,
                           combined_file, temp_dir):
    """
    This function creates a file containing the combined residuals
    from the simulation data from all stations.

    Station list and SRC file are taken from the FIRST realization in
    input_indir; observed seismograms come from the first realization
    in input_obsdir.  Residual rows are appended to combined_file via
    the GP gen_resid_tbl_3comp tool; the header row is written only
    when combined_file does not already exist.
    """
    # Copy header for first file, set logfile
    if os.path.isfile(combined_file):
        # But not, if file already exists
        copy_header = 0
    else:
        copy_header = 1
    logfile = os.path.join(temp_dir, "log.txt")
    # Figure out where out binaries are
    if "BBP_DIR" in os.environ:
        install_root = os.path.normpath(os.environ["BBP_DIR"])
    else:
        raise bband_utils.ProcessingError("BBP_DIR is not set!")
    gp_bin_dir = os.path.join(install_root, "src", "gp", "bin")
    # Get realizations -- only the first one is used below
    realizations = sorted(os.listdir(input_indir))
    one_realization = realizations[0]
    basedir = os.path.join(input_indir, one_realization)
    # Get the station list
    a_statfile = glob.glob("%s%s*.stl" % (basedir, os.sep))
    if len(a_statfile) != 1:
        raise bband_utils.ProcessingError("Cannot get station list!")
    a_statfile = a_statfile[0]
    slo = StationList(a_statfile)
    site_list = slo.getStationList()
    # Get source file
    a_srcfile = glob.glob("%s%s*.src" % (basedir, os.sep))
    if len(a_srcfile) != 1:
        raise bband_utils.ProcessingError("Cannot get src file!")
    a_srcfile = a_srcfile[0]
    # Parse it!
    src_keys = bband_utils.parse_src_file(a_srcfile)
    # Get the obsdir
    # BUG FIX: this was a Python 2 print statement ("print input_obsdir"),
    # a SyntaxError under Python 3 which this code otherwise targets
    print(input_obsdir)
    realizations = sorted(os.listdir(input_obsdir))
    one_realization = realizations[0]
    basedir = os.path.join(input_obsdir, one_realization)
    obs_dir = glob.glob("%s%sobs_seis*" % (basedir, os.sep))
    if len(obs_dir) != 1:
        raise bband_utils.ProcessingError("Cannot get observation dir!")
    obs_dir = obs_dir[0]
    # Go through all stations
    for site in site_list:
        slon = float(site.lon)
        slat = float(site.lat)
        stat = site.scode
        # Calculate Rrup
        origin = (src_keys['lon_top_center'],
                  src_keys['lat_top_center'])
        dims = (src_keys['fault_length'], src_keys['dlen'],
                src_keys['fault_width'], src_keys['dwid'],
                src_keys['depth_to_top'])
        mech = (src_keys['strike'], src_keys['dip'],
                src_keys['rake'])
        site_geom = [float(site.lon), float(site.lat), 0.0]
        (fault_trace1, up_seis_depth, low_seis_depth,
         ave_dip, dummy1, dummy2) = putils.FaultTraceGen(origin, dims, mech)
        _, rrup, _ = putils.DistanceToSimpleFaultSurface(site_geom,
                                                         fault_trace1,
                                                         up_seis_depth,
                                                         low_seis_depth,
                                                         ave_dip)
        simfile1 = os.path.join(temp_dir, "%s.rd50" % (stat))
        datafile1 = os.path.join(obs_dir, "%s.rd50" % (stat))
        # Append this station's residual rows to combined_file
        cmd = ("%s/gen_resid_tbl_3comp bbp_format=1 " % (gp_bin_dir) +
               "datafile1=%s simfile1=%s " % (datafile1, simfile1) +
               "comp1=psa5n comp2=psa5e comp3=rotd50 " +
               "eqname=%s mag=0.0 stat=%s lon=%.4f lat=%.4f " %
               (comp_label, stat, slon, slat) +
               "vs30=%d cd=%.2f " % (site.vs30, rrup) +
               "flo=%f fhi=%f " % (site.low_freq_corner,
                                   site.high_freq_corner) +
               "print_header=%d >> %s 2>> %s" %
               (copy_header, combined_file, logfile))
        bband_utils.runprog(cmd, abort_on_error=True)
        # Only the very first invocation writes the header
        if copy_header == 1:
            copy_header = 0
def run(self):
    """
    Run the AS16 significant-duration validation for all stations.

    Computes Rrup per station from the SRC fault geometry, calls
    calculate_as16, and writes one row per station to the output
    table in the validation directory.
    """
    print("AS2016".center(80, '-'))
    # Load configuration, set sim_id
    install = InstallCfg.getInstance()
    sim_id = self.sim_id
    # Build directory paths
    a_tmpdir = os.path.join(install.A_TMP_DATA_DIR, str(sim_id))
    a_indir = os.path.join(install.A_IN_DATA_DIR, str(sim_id))
    a_outdir = os.path.join(install.A_OUT_DATA_DIR, str(sim_id))
    a_logdir = os.path.join(install.A_OUT_LOG_DIR, str(sim_id))
    a_validation_outdir = os.path.join(a_outdir, "validations",
                                       "stewart_duration_gmpe")
    # Make sure the output and tmp directories exist
    bband_utils.mkdirs([a_tmpdir, a_indir, a_outdir,
                        a_validation_outdir], print_cmd=False)
    # Now the file paths
    self.log = os.path.join(a_logdir, "%d.as16.log" % (sim_id))
    sta_file = os.path.join(a_indir, self.stations)
    a_srcfile = os.path.join(a_indir, self.srcfile)
    # Read SRC file
    src_keys = bband_utils.parse_src_file(a_srcfile)
    # Load information from SRC file
    origin = (src_keys['lon_top_center'], src_keys['lat_top_center'])
    dims = (src_keys['fault_length'], src_keys['dlen'],
            src_keys['fault_width'], src_keys['dwid'],
            src_keys['depth_to_top'])
    mech = (src_keys['strike'], src_keys['dip'], src_keys['rake'])
    # Set region to be unknown -- this has no effect in the AS16
    # method as z1 is not provided and that causes dz1 to be set
    # to zero and override the cj parameter
    cj = -999
    # Figure out what mechanism to use
    # 0 = unknown
    # 1 = normal
    # 2 = reverse
    # 3 = strike-slip
    rake = src_keys['rake']
    if abs(rake) <= 30 or abs(rake) >= 150:
        mechanism = 3
    elif rake > 30 and rake < 150:
        mechanism = 2
    elif rake < -30 and rake > -150:
        mechanism = 1
    else:
        # Only reachable for values outside the above ranges (e.g. NaN)
        print("Warning: unknown mechanism for rake = %f" % (rake))
        mechanism = 0
    # Get station list
    slo = StationList(sta_file)
    site_list = slo.getStationList()
    # Create output file, add header
    out_file = open(os.path.join(a_validation_outdir,
                                 '%d.as16.%s.txt' %
                                 (self.sim_id, self.eventname)), 'w')
    out_file.write("#station, rrup, vs30, sd575, sd595, sd2080,"
                   " tau575, tau595, tau2080, phi575, phi595, phi2080\n")
    # Go through each station
    for site in site_list:
        stat = site.scode
        vs30 = float(site.vs30)
        # Calculate Rrup
        site_geom = [site.lon, site.lat, 0.0]
        (fault_trace1, up_seis_depth, low_seis_depth,
         ave_dip, dummy1, dummy2) = putils.FaultTraceGen(origin, dims, mech)
        _, rrup, _ = putils.DistanceToSimpleFaultSurface(
            site_geom, fault_trace1, up_seis_depth, low_seis_depth,
            ave_dip)
        # z1 passed as -999.0 (unknown); see cj note above
        results = calculate_as16(src_keys['magnitude'], rrup,
                                 mechanism, vs30, -999.0, cj)
        out_file.write("%s, %3.2f, %3.2f" % (stat, rrup, vs30))
        for piece in results:
            out_file.write(", %7.5f" % (piece))
        out_file.write("\n")
    # All done, close output file
    out_file.close()
    # All done!
    print("AS2016 Completed".center(80, '-'))
# Command-line arguments: station list, SRC file, two simulation ids
# to compare, and the output directory
station_list = sys.argv[1]
src_file = sys.argv[2]
sim_id_1 = int(sys.argv[3])
sim_id_2 = int(sys.argv[4])
output_dir = sys.argv[5]
# Create directory paths
install = InstallCfg.getInstance()
config = GPGofCfg()
a_indir = os.path.join(install.A_IN_DATA_DIR, str(sim_id_1))
a_outdir1 = os.path.join(install.A_OUT_DATA_DIR, str(sim_id_1))
a_outdir2 = os.path.join(install.A_OUT_DATA_DIR, str(sim_id_2))
# Src file
a_srcfile = os.path.join(a_indir, src_file)
src_keys = bband_utils.parse_src_file(a_srcfile)
# Station file
a_statfile = os.path.join(a_indir, station_list)
slo = StationList(a_statfile)
site_list = slo.getStationList()
# Capture event_label from the first .bias file in sim 1's output dir
bias_file = glob.glob("%s%s*.bias" % (a_outdir1, os.sep))
if len(bias_file) < 1:
    raise bband_utils.ProcessingError("Cannot find event label!")
bias_file = bias_file[0]
# Let's capture the event label: bias files are named <label>-...
event_label = os.path.basename(bias_file).split("-")[0]
# Emit the residual-table header only for the first station processed
print_header_rd50 = 1
def collect_realization_params(args, realization):
    """
    Collects parameters for one realization

    Reads the SRC file(s), velocity model information, and station
    list for this realization, and stores everything collected in
    args.data[realization].
    """
    indir = os.path.join(args.top_level_indir, realization)
    outdir = os.path.join(args.top_level_outdir, realization)
    src_files = glob.glob("%s/*.src" % (indir))
    stl_file = glob.glob("%s/*.stl" % (indir))[0]
    data = {}

    # Compile data from SRC file(s)
    data["num_src"] = len(src_files)
    # Save info in args too for first realization
    if "num_src" not in args:
        args.num_src = len(src_files)
    for i, src_file in enumerate(src_files, 1):
        src_index = "bbp_src_%d" % (i)
        src_keys = bband_utils.parse_src_file(src_file)
        src_keys["mechanism"] = calculate_mechanism(src_keys["rake"])
        data[src_index] = src_keys

    # Combine SRC information: the "segments_*" entries become
    # comma-separated strings with one value per fault segment (they
    # stay plain values for single-segment sources), the "average_*"
    # entries are averaged over all segments below, and
    # "total_length" adds up the lengths of all segments
    data["segments_length"] = data["bbp_src_1"]["fault_length"]
    data["segments_width"] = data["bbp_src_1"]["fault_width"]
    data["segments_ztor"] = data["bbp_src_1"]["depth_to_top"]
    data["segments_strike"] = data["bbp_src_1"]["strike"]
    data["segments_rake"] = data["bbp_src_1"]["rake"]
    data["segments_dip"] = data["bbp_src_1"]["dip"]
    data["total_length"] = float(data["bbp_src_1"]["fault_length"])
    data["average_strike"] = [float(data["bbp_src_1"]["strike"])]
    data["average_rake"] = [float(data["bbp_src_1"]["rake"])]
    data["average_dip"] = [float(data["bbp_src_1"]["dip"])]
    data["average_width"] = [float(data["bbp_src_1"]["fault_width"])]
    data["average_ztor"] = [float(data["bbp_src_1"]["depth_to_top"])]
    for i in range(2, len(src_files) + 1):
        src_index = "bbp_src_%d" % (i)
        data["segments_length"] = "%s,%s" % (data["segments_length"],
                                             data[src_index]["fault_length"])
        data["segments_width"] = "%s,%s" % (data["segments_width"],
                                            data[src_index]["fault_width"])
        data["segments_ztor"] = "%s,%s" % (data["segments_ztor"],
                                           data[src_index]["depth_to_top"])
        data["segments_strike"] = "%s,%s" % (data["segments_strike"],
                                             data[src_index]["strike"])
        data["segments_rake"] = "%s,%s" % (data["segments_rake"],
                                           data[src_index]["rake"])
        data["segments_dip"] = "%s,%s" % (data["segments_dip"],
                                          data[src_index]["dip"])
        data["total_length"] = (data["total_length"] +
                                float(data[src_index]["fault_length"]))
        # Cast to float for consistency with the initial list values
        data["average_strike"].append(float(data[src_index]["strike"]))
        data["average_rake"].append(float(data[src_index]["rake"]))
        data["average_dip"].append(float(data[src_index]["dip"]))
        data["average_width"].append(float(data[src_index]["fault_width"]))
        data["average_ztor"].append(float(data[src_index]["depth_to_top"]))
    data["average_strike"] = np.average(data["average_strike"])
    data["average_rake"] = np.average(data["average_rake"])
    data["average_dip"] = np.average(data["average_dip"])
    data["average_width"] = np.average(data["average_width"])
    data["average_ztor"] = np.average(data["average_ztor"])
    data["average_mechanism"] = calculate_mechanism(data["average_rake"])

    # Get velocity model data
    html_file = glob.glob("%s/*.html" % (outdir))[0]
    data["vmodel_name"] = get_vmodel_from_html(html_file)
    vel_obj = velocity_models.get_velocity_model_by_name(data["vmodel_name"])
    if vel_obj is None:
        print("ERROR: Cannot find velocity model %s!" %
              (data["vmodel_name"]))
        sys.exit(-1)
    # Each method family reads its own velocity model parameters;
    # "-888" marks values not applicable to the selected method and
    # "-999" marks values unavailable for UCSB
    if args.general_method in ["gp", "sdsu", "song"]:
        vmodel_params = vel_obj.get_codebase_params('gp')
        vmodel_file = vel_obj.get_velocity_model('gp')
        data["gf_name"] = vmodel_params['GF_NAME']
        data["vs_30"] = calculate_vs30(vmodel_file)
        data["gf_dt"] = float(vmodel_params['GF_DT'])
    elif args.general_method in ["ucsb"]:
        vmodel_params = vel_obj.get_codebase_params('ucsb')
        vmodel_file = vel_obj.get_velocity_model('ucsb')
        data["gf_name"] = vmodel_params['GREEN_SOIL']
        data["vs_30"] = "-999"
        data["gf_dt"] = float(vmodel_params['GF_DT'])
    else:
        data["gf_name"] = "-888"
        data["vs_30"] = "-888"
        data["gf_dt"] = "-888"

    # Parse STL file
    slo = StationList(stl_file)
    site_list = slo.getStationList()
    data["station_names"] = [site.scode for site in site_list]

    stations = {}
    for site in site_list:
        stations[site.scode] = {}
        # Fall back to the velocity model Vs30 when the simulation
        # has no site response module or the station lacks a Vs30
        if args.bbp_software_info_site == "None":
            vs_30 = data["vs_30"]
        elif site.vs30 is None:
            vs_30 = data["vs_30"]
        else:
            vs_30 = site.vs30
        collect_station_params(site, stations[site.scode], src_files,
                               args, realization, vs_30)
        collect_rd50_values(stations[site.scode], args)
        collect_rd100_values(stations[site.scode], args)

    # Save data
    data["stations"] = stations

    # Save realization data
    args.data[realization] = data
def load_all_data(comp_label, input_indir, input_obsdir,
                  combined_file, temp_dir, component):
    """
    This function loads all data from each station file
    and creates the structures needed for plotting.

    Returns a dictionary keyed by station code; each entry holds the
    station's Rrup distance ("dist"), per-realization simulated
    values ("r1".."r5" lists), observed values ("r1_obs".."r5_obs"),
    and GMPE values ("r1_gmpe".."r5_gmpe") for the five RZZ2015
    parameters.

    Note: combined_file is accepted for interface compatibility but
    is not used by this function.
    """
    data = {}

    # Get realizations
    realizations = sorted(os.listdir(input_indir))
    one_realization = realizations[0]
    basedir = os.path.join(input_indir, one_realization)

    # Get the GMPE data for the RZZ2015 metrics
    base_outdir = os.path.join(input_obsdir, one_realization,
                               "validations", "rzz2015_gmpe")
    a_rzz2015_gmpe = glob.glob("%s%s%s.rzz2015gmpe.txt" %
                               (base_outdir, os.sep, one_realization))
    a_rzz2015_gmpe = a_rzz2015_gmpe[0]

    # Get the station list
    a_statfile = glob.glob("%s%s*.stl" % (basedir, os.sep))
    if len(a_statfile) != 1:
        raise bband_utils.ProcessingError("Cannot get station list!")
    a_statfile = a_statfile[0]
    slo = StationList(a_statfile)
    site_list = slo.getStationList()

    # Get source file
    a_srcfile = glob.glob("%s%s*.src" % (basedir, os.sep))
    if len(a_srcfile) != 1:
        raise bband_utils.ProcessingError("Cannot get src file!")
    a_srcfile = a_srcfile[0]

    # Parse it!
    src_keys = bband_utils.parse_src_file(a_srcfile)

    # The fault geometry is the same for every station, so compute
    # the fault trace once instead of once per station
    origin = (src_keys['lon_top_center'], src_keys['lat_top_center'])
    dims = (src_keys['fault_length'], src_keys['dlen'],
            src_keys['fault_width'], src_keys['dwid'],
            src_keys['depth_to_top'])
    mech = (src_keys['strike'], src_keys['dip'],
            src_keys['rake'])
    (fault_trace1, up_seis_depth,
     low_seis_depth, ave_dip,
     dummy1, dummy2) = putils.FaultTraceGen(origin, dims, mech)

    # Go through all stations
    for site in site_list:
        stat = site.scode

        # Calculate Rrup
        site_geom = [float(site.lon), float(site.lat), 0.0]
        _, rrup, _ = putils.DistanceToSimpleFaultSurface(site_geom,
                                                         fault_trace1,
                                                         up_seis_depth,
                                                         low_seis_depth,
                                                         ave_dip)

        # Read data for this station
        data_file = os.path.join(temp_dir, "%s.rzz2015" % (stat))
        data[stat] = {}
        data[stat]["dist"] = rrup
        data[stat]["r1"] = []
        data[stat]["r2"] = []
        data[stat]["r3"] = []
        data[stat]["r4"] = []
        data[stat]["r5"] = []
        data[stat]["r1_obs"] = None
        data[stat]["r2_obs"] = None
        data[stat]["r3_obs"] = None
        data[stat]["r4_obs"] = None
        data[stat]["r5_obs"] = None
        data[stat]["r1_gmpe"] = None
        data[stat]["r2_gmpe"] = None
        data[stat]["r3_gmpe"] = None
        data[stat]["r4_gmpe"] = None
        data[stat]["r5_gmpe"] = None
        with open(data_file, 'r') as in_file:
            for line in in_file:
                line = line.strip()
                if line.startswith("#"):
                    # Skip comments
                    continue
                pieces = line.split(",")
                comp = pieces[1].strip()
                # Check if we want this component
                if component != "both" and comp != component:
                    # Skip other components
                    continue
                # We want this data point
                pieces = [float(piece) for piece in pieces[2:]]
                # Get observation values -- only the first matching
                # line contributes to each *_obs value
                if data[stat]["r1_obs"] is None:
                    data[stat]["r1_obs"] = pieces[6]
                if data[stat]["r2_obs"] is None:
                    data[stat]["r2_obs"] = pieces[8]
                if data[stat]["r3_obs"] is None:
                    data[stat]["r3_obs"] = pieces[10]
                if data[stat]["r4_obs"] is None:
                    data[stat]["r4_obs"] = pieces[12]
                if data[stat]["r5_obs"] is None:
                    data[stat]["r5_obs"] = pieces[14]
                # Get simulated data values
                data[stat]["r1"].append(pieces[7])
                data[stat]["r2"].append(pieces[9])
                data[stat]["r3"].append(pieces[11])
                data[stat]["r4"].append(pieces[13])
                data[stat]["r5"].append(pieces[15])

    # Now read the GMPE values for each station
    with open(a_rzz2015_gmpe, 'r') as gmpe_file:
        for line in gmpe_file:
            line = line.strip()
            # Skip comments
            if line.startswith("#"):
                continue
            pieces = line.split(",")
            stat = pieces[0].strip()
            pieces = [float(piece.strip()) for piece in pieces[1:]]
            data[stat]["r1_gmpe"] = pieces[2]
            data[stat]["r2_gmpe"] = pieces[3]
            # r3 is derived as the ratio of the r1/r2 GMPE values
            data[stat]["r3_gmpe"] = pieces[2] / pieces[3]
            data[stat]["r4_gmpe"] = pieces[5]
            data[stat]["r5_gmpe"] = pieces[6]

    # Return all data
    return data
def __init__(self, vmodel_name, a_srcname=None):
    """
    Sets up paths to the UCSB executables and to the files needed
    from the velocity model package.

    Raises IndexError if the velocity model cannot be found and
    KeyError if a required velocity model parameter is missing.
    """
    # Get pointers to all directories
    install = InstallCfg.getInstance()

    # Parse SRC File
    if a_srcname:
        self.CFGDICT = bband_utils.parse_src_file(a_srcname)

    #
    # Name and Path to executables
    self.R_SLL2XY = "statLL2XY"
    self.A_SLL2XY = os.path.join(install.A_UCSB_BIN_DIR, self.R_SLL2XY)
    self.R_SRF2XY = "srfLL2XYKM"
    self.A_SRF2XY = os.path.join(install.A_UCSB_BIN_DIR, self.R_SRF2XY)
    self.R_SYN1D = "syn_1d"
    self.A_SYN1D = os.path.join(install.A_UCSB_BIN_DIR, self.R_SYN1D)
    self.R_CONV = "conv3CompBB"
    self.A_CONV = os.path.join(install.A_UCSB_BIN_DIR, self.R_CONV)
    self.R_STITCH = "stitch"
    self.A_STITCH = os.path.join(install.A_UCSB_BIN_DIR, self.R_STITCH)

    #
    # Define name used when input station file is converted into a
    # UC lat/lon version of the station file
    #
    self.R_UC_STATION_FILE = "uc_stations.ll"
    self.R_UC_VS30_FILE = "stations.vs30"
    self.R_UC_SOURCE_MODEL = "source_model.list"
    self.R_FFSP_FILE = "FFSP_OUTPUT.001"
    self.MAX_STATIONS = 300

    vmodel_obj = velocity_models.get_velocity_model_by_name(vmodel_name)
    if vmodel_obj is None:
        raise IndexError("Cannot find velocity model: %s" % (vmodel_name))

    vmodel_params = vmodel_obj.get_codebase_params('ucsb')

    # Configure needed parameters from the velocity model: each
    # required key becomes an A_UC_<KEY> attribute holding the full
    # path of the corresponding file in the velocity model package
    for key in ["LF_VELMODEL", "HF_VELMODEL",
                "GREENBANK", "GREEN_SOIL",
                "HF_GREENBANK", "HF_GREEN_SOIL",
                "SYN1D_INP_FILE"]:
        if key not in vmodel_params:
            raise KeyError("%s parameter missing in velocity model %s" %
                           (key, vmodel_name))
        setattr(self, "A_UC_%s" % (key),
                os.path.join(vmodel_obj.base_dir, vmodel_params[key]))
def run(self):
    """
    Calculate GMPEs, create bias plot comparisons

    Computes residuals between GMPE predictions and observations for
    each station, summarizes them with the resid2uncer_varN program,
    and creates a multi-model GOF comparison plot.
    """
    print("GMPE Comparison".center(80, '-'))

    # Initialize basic variables
    install = InstallCfg.getInstance()
    sim_id = self.sim_id
    sta_base = os.path.basename(os.path.splitext(self.r_stations)[0])

    # Input, tmp, and output directories
    a_tmpdir = os.path.join(install.A_TMP_DATA_DIR, str(sim_id))
    a_outdir = os.path.join(install.A_OUT_DATA_DIR, str(sim_id))
    a_tmpdir_seis = os.path.join(install.A_TMP_DATA_DIR,
                                 str(sim_id), "obs_seis_%s" % (sta_base))
    a_outdir_gmpe = os.path.join(install.A_OUT_DATA_DIR,
                                 str(sim_id), "gmpe_data_%s" % (sta_base))
    a_logdir = os.path.join(install.A_OUT_LOG_DIR, str(sim_id))
    self.log = os.path.join(a_logdir, "%d.gmpe_compare.log" % (sim_id))
    #
    # Make sure the output and tmp directories exist
    #
    dirs = [a_tmpdir, a_tmpdir_seis, a_outdir_gmpe, a_outdir, a_logdir]
    bband_utils.mkdirs(dirs, print_cmd=False)

    # Source file, parse it!
    a_srcfile = os.path.join(install.A_IN_DATA_DIR,
                             str(sim_id),
                             self.r_src_file)
    self.src_keys = bband_utils.parse_src_file(a_srcfile)

    # Station file
    a_statfile = os.path.join(install.A_IN_DATA_DIR,
                              str(sim_id),
                              self.r_stations)
    slo = StationList(a_statfile)
    site_list = slo.getStationList()

    # Go through each station, and print comparison headers for
    # the first station we process
    print_headers = True
    gmpe_models = []
    for site in site_list:
        stat = site.scode
        obs_file = os.path.join(a_tmpdir_seis, "%s.rd50" % (stat))
        gmpe_file = os.path.join(a_outdir_gmpe, "%s-gmpe.ri50" % (stat))
        # Skip station if we don't have observation file
        if not os.access(obs_file, os.R_OK):
            continue
        # Slice-assignment updates gmpe_models in place so the model
        # list remains available after this loop finishes
        gmpe_data, gmpe_models[:] = self.read_gmpe(gmpe_file)
        obs_periods, obs_data = self.read_rotd50(obs_file)

        # Loop through the NGA methods
        for gmpe_model in gmpe_models:
            resid_file = os.path.join(a_outdir_gmpe,
                                      "%s-%d.resid.txt" %
                                      (gmpe_model.lower(), sim_id))
            # period_set intentionally survives this loop; it is used
            # below when summarizing the residuals
            period_set = self.calculate_residuals(site, gmpe_model,
                                                  gmpe_data, obs_periods,
                                                  obs_data, resid_file,
                                                  print_headers)
            print_headers = False

    # NOTE(review): if no station had a readable obs_file, period_set
    # is never bound and the loop below would raise NameError --
    # presumably upstream guarantees at least one observation file
    for gmpe_model in gmpe_models:
        # Now call the resid2uncer_varN program to summarize the
        # residuals and create the files needed for the GOF plot
        resid_file = os.path.join(a_outdir_gmpe,
                                  "%s-%d.resid.txt" %
                                  (gmpe_model.lower(), sim_id))
        fileroot = os.path.join(a_outdir, "%s-GMPE-%d_r%d-all-rd50-%s" %
                                (self.comp_label, sim_id, 0,
                                 gmpe_model.lower()))
        cmd = ("%s/resid2uncer_varN " % (install.A_GP_BIN_DIR) +
               "residfile=%s fileroot=%s " % (resid_file, fileroot) +
               "comp=%s nstat=%d nper=%d " % (gmpe_model.lower(),
                                              len(site_list),
                                              len(period_set)) +
               "min_cdst=%d >> %s 2>&1" % (0, self.log))
        bband_utils.runprog(cmd, abort_on_error=True, print_cmd=False)

    # Plot GOF plot
    gmpe_group = gmpe_config.GMPES[self.gmpe_group_name]
    gmpe_labels = gmpe_group["labels"]
    plotter = PlotGoF()
    plottitle = "Comparison between GMPEs and %s" % (self.comp_label)
    fileroot = "%s-GMPE-%d_r%d-all-rd50-" % (self.comp_label, sim_id, 0)
    dataroot = ["%s%s" % (fileroot, model.lower()) for model in gmpe_models]
    plotter.multi_plot(plottitle, dataroot, a_outdir, a_outdir,
                       gmpe_labels, len(site_list))

    print("GMPE Comparison Completed".center(80, '-'))
def run(self):
    """
    Runs the GMPEs for the six parameters in Rezaeian (2015)

    For each station, computes the mean values of the six RZZ2015
    parameters and the standard deviation of number_of_samples
    Monte-Carlo realizations, writes a summary line to the output
    file, and generates per-station plots.
    """
    print("RZZ2015 GMPE".center(80, '-'))

    # Load configuration, set sim_id
    install = InstallCfg.getInstance()
    sim_id = self.sim_id

    # Build directory paths
    a_tmpdir = os.path.join(install.A_TMP_DATA_DIR, str(sim_id))
    a_indir = os.path.join(install.A_IN_DATA_DIR, str(sim_id))
    a_outdir = os.path.join(install.A_OUT_DATA_DIR, str(sim_id))
    a_logdir = os.path.join(install.A_OUT_LOG_DIR, str(sim_id))
    a_validation_outdir = os.path.join(a_outdir, "validations",
                                       "rzz2015_gmpe")

    # Make sure the output and tmp directories exist
    bband_utils.mkdirs([a_tmpdir, a_indir, a_outdir, a_validation_outdir],
                       print_cmd=False)

    # Source file, parse it!
    a_srcfile = os.path.join(a_indir, self.srcfile)
    self.src_keys = bband_utils.parse_src_file(a_srcfile)

    # Now the file paths
    self.log = os.path.join(a_logdir, "%d.rzz2015gmpe.log" % (sim_id))
    sta_file = os.path.join(a_indir, self.stations)

    # Get station list
    slo = StationList(sta_file)
    site_list = slo.getStationList()

    # Initialize random seed
    np.random.seed(int(self.src_keys['seed']))

    # The fault geometry is the same for every station, so compute
    # the fault trace once before the station loop
    origin = (self.src_keys['lon_top_center'],
              self.src_keys['lat_top_center'])
    dims = (self.src_keys['fault_length'], self.src_keys['dlen'],
            self.src_keys['fault_width'], self.src_keys['dwid'],
            self.src_keys['depth_to_top'])
    mech = (self.src_keys['strike'], self.src_keys['dip'],
            self.src_keys['rake'])
    (fault_trace1, up_seis_depth,
     low_seis_depth, ave_dip,
     dummy1, dummy2) = putils.FaultTraceGen(origin, dims, mech)

    mag = self.src_keys['magnitude']

    # Fault type is 1 (Reverse) unless the rake indicates a
    # strike-slip event, in which case it is 0 (Strike-slip)
    fault_type = 1
    rake = self.src_keys['rake']
    if ((rake >= -180 and rake < -150) or
        (rake >= -30 and rake <= 30) or
        (rake > 150 and rake <= 180)):
        fault_type = 0

    # The covariance matrix of the total error terms
    # (totalerror = eps + etha, with stdv = sqrt(sigmai^2 + taui^2))
    # and its Cholesky factor depend only on the model constants, so
    # compute them once instead of once per Monte-Carlo sample
    stds = [np.sqrt(sigma**2 + tau**2)
            for sigma, tau in zip([self.sigma1, self.sigma2, self.sigma3,
                                   self.sigma4, self.sigma5, self.sigma6],
                                  [self.tau1, self.tau2, self.tau3,
                                   self.tau4, self.tau5, self.tau6])]
    s_total_error = [[(stds[row]**2 if row == col else
                       stds[row] * stds[col] *
                       self.rho_totalerror[row][col])
                      for col in range(6)]
                     for row in range(6)]
    # Matlab returns upper-triangular while Python returns
    # lower-triangular by default -- no need to transpose later!
    r_total_error = np.linalg.cholesky(s_total_error)

    # Create output file, add header
    out_file = open(os.path.join(a_validation_outdir,
                                 '%d.rzz2015gmpe.txt' %
                                 (self.sim_id)), 'w')
    out_file.write("#station, r_rup, vs_30,"
                   " ai_mean, d595_mean, tmid_mean,"
                   " wmid_mean, wslp_mean, zeta_mean,"
                   " ai_stddev, d595_stddev, tmid_stddev,"
                   " wmid_stddev, wslp_stddev, zeta_stddev\n")

    # Go through each station
    for site in site_list:
        stat = site.scode
        print("==> Processing station: %s" % (stat))

        # Calculate Rrup for this station
        site_geom = [float(site.lon), float(site.lat), 0.0]
        _, rrup, _ = putils.DistanceToSimpleFaultSurface(site_geom,
                                                         fault_trace1,
                                                         up_seis_depth,
                                                         low_seis_depth,
                                                         ave_dip)
        vs30 = site.vs30

        [ai_mean, d595_mean, tmid_mean,
         wmid_mean, wslp_mean,
         zeta_mean] = self.calculate_mean_values(rrup, vs30,
                                                 mag, fault_type)

        # Randomize parameters using standard deviations and correlations
        sta_ai = []
        sta_d595 = []
        sta_tmid = []
        sta_wmid = []
        sta_wslp = []
        sta_zeta = []

        # Simulate number_of_samples realizations of the error
        # term for each parameter
        for _ in range(0, self.number_of_samples):
            # Simulate zero-mean normal correlated error terms using
            # the precomputed Cholesky factor
            y_total_error = np.random.normal(0, 1, 6)
            total_error = np.dot(r_total_error, y_total_error)

            # Generate randomized parameters in the standardnormal
            # space: ui
            u1 = (self.beta1[0] +
                  self.beta1[1] * (mag / 7.0) +
                  self.beta1[2] * fault_type +
                  self.beta1[3] * math.log(rrup / 25.0) +
                  self.beta1[4] * math.log(vs30 / 750.0)) + total_error[0]
            u2 = (self.beta2[0] +
                  self.beta2[1] * mag +
                  self.beta2[2] * fault_type +
                  self.beta2[3] * rrup +
                  self.beta2[4] * vs30) + total_error[1]
            u3 = (self.beta3[0] +
                  self.beta3[1] * mag +
                  self.beta3[2] * fault_type +
                  self.beta3[3] * rrup +
                  self.beta3[4] * vs30) + total_error[2]
            u4 = (self.beta4[0] +
                  self.beta4[1] * mag +
                  self.beta4[2] * fault_type +
                  self.beta4[3] * rrup +
                  self.beta4[4] * vs30) + total_error[3]
            u5 = (self.beta5[0] +
                  self.beta5[1] * mag +
                  self.beta5[2] * fault_type +
                  self.beta5[3] * rrup +
                  self.beta5[4] * vs30) + total_error[4]
            u6 = (self.beta6[0] +
                  self.beta6[1] * mag +
                  self.beta6[2] * fault_type +
                  self.beta6[3] * rrup +
                  self.beta6[4] * vs30) + total_error[5]

            # Transform parameters ui from standardnormal to the
            # physical space: thetai (constraint: tmid < d_5_95, removed)
            theta1 = norm.ppf(norm.cdf(u1), -4.8255, 1.4318)
            theta2 = 5.0 + (45 - 5) * beta.ppf(norm.cdf(u2),
                                               1.1314, 2.4474)
            theta3 = 0.5 + (40 - 0.5) * beta.ppf(norm.cdf(u3),
                                                 1.5792, 3.6405)
            theta4 = gamma.ppf(norm.cdf(u4), 4.0982, scale=1.4330)
            theta5 = self.slpinv(norm.cdf(u5), 17.095, 6.7729,
                                 4.8512, -2, 0.5)
            theta6 = 0.02 + (1 - 0.02) * beta.ppf(norm.cdf(u6),
                                                  1.4250, 5.7208)
            sta_ai.append(math.exp(theta1))
            sta_d595.append(theta2)
            sta_tmid.append(theta3)
            sta_wmid.append(theta4)
            sta_wslp.append(theta5)
            sta_zeta.append(theta6)

        # Write output to gmpe file
        out_file.write(
            "%s, %7.4f, %7.2f, " % (stat, rrup, vs30) +
            "%7.4f, %7.4f, %7.4f, %7.4f, %7.4f, %7.4f, " %
            (ai_mean, d595_mean, tmid_mean,
             wmid_mean, wslp_mean, zeta_mean) +
            "%7.4f, %7.4f, %7.4f, %7.4f, %7.4f, %7.4f\n" %
            (np.std(sta_ai), np.std(sta_d595), np.std(sta_tmid),
             np.std(sta_wmid), np.std(sta_wslp), np.std(sta_zeta)))

        # Generate Plots
        self.plot(stat, a_validation_outdir, rrup, fault_type, vs30, mag,
                  sta_ai, sta_d595, sta_tmid,
                  sta_wmid, sta_wslp, sta_zeta,
                  ai_mean, d595_mean, tmid_mean,
                  wmid_mean, wslp_mean, zeta_mean)

    # Close output file
    out_file.close()

    print("RZZ2015 GMPE Completed".center(80, '-'))
def __init__(self, a_srcfile=None):
    """
    Set up some parameters for HFSim
    """
    # When a SRC file is provided, its parsed keys become the
    # configuration dictionary; otherwise start from an empty one
    self.CFGDICT = bband_utils.parse_src_file(a_srcfile) if a_srcfile else {}

    # Name of the hfsims executable
    self.HFSIM = "hb_high_v6.0.3"

    # Seismic parameters
    #
    # SITEAMP is a boolean (0/1) flag that tells the code to apply
    # impedance amplification factors related to the prescribed
    # velocity model; per Rob Graves (29-Nov-2012) it should be 1
    self.SITEAMP = 1

    # Western US defaults -- a velocity model configuration file may
    # override any of these
    self.DEFAULT_SDROP = 50
    self.DEFAULT_FCFAC = 0.0
    self.DEFAULT_QFEXP = 0.6
    self.DEFAULT_C0 = 57
    self.DEFAULT_C1 = 34
    self.RAYSET = [2, 1, 2]
    self.TLEN = 102.4

    # High-frequency time step; a velocity model can override this
    # value through its HF_DT key
    self.DT = 0.01

    # Per Rob Graves (8-Feb-2013 and 29-Nov-2012): FMAX is a legacy
    # input that the code still requires but ignores whenever
    # KAPPA > 0, which is always the case in our runs
    self.FMAX = 10.0
    self.KAPPA = 0.04

    # Additional default values for the WUS region
    self.DEFAULT_DX = 1.0
    self.DEFAULT_DY = 1.0
    self.RUPV = -1.0
    self.MEAN_RVFAC = 0.8
    self.RANGE_RVFAC = 0.05
    self.SHAL_RVFAC = 0.6
    self.DEFAULT_EXTRA_FCFAC = 0.0
    self.UNITS = -1
    self.DEFAULT_VSMOHO = 999.9
    self.PATH_DUR_MODEL = 11
    self.DEEP_RVFAC = 0.6
    self.RVSIG = 0.1
    self.C_ZERO = 2.0

    # Extra inputs required since hfsims V5.4
    self.C_ALPHA = -99
    self.FA_SIG1 = 0.0

    # Extra input required by hfsims V6.0.3:
    # ISPAR_ADJUST = 1 -> Western US / Japan, 2 -> CEUS
    self.ISPAR_ADJUST = 1
def run(self):
    """
    Run the AS16 validation for all stations

    Parses the SRC and station files for this simulation, computes
    Rrup for each station, calls calculate_as16() for the duration
    predictions, and writes one line per station to the output file
    in the stewart_duration_gmpe validation directory.
    """
    print("AS2016".center(80, '-'))

    # Load configuration, set sim_id
    install = InstallCfg.getInstance()
    sim_id = self.sim_id

    # Build directory paths
    a_tmpdir = os.path.join(install.A_TMP_DATA_DIR, str(sim_id))
    a_indir = os.path.join(install.A_IN_DATA_DIR, str(sim_id))
    a_outdir = os.path.join(install.A_OUT_DATA_DIR, str(sim_id))
    a_logdir = os.path.join(install.A_OUT_LOG_DIR, str(sim_id))
    a_validation_outdir = os.path.join(a_outdir, "validations",
                                       "stewart_duration_gmpe")

    # Make sure the output and tmp directories exist
    bband_utils.mkdirs([a_tmpdir, a_indir, a_outdir, a_validation_outdir],
                       print_cmd=False)

    # Now the file paths
    self.log = os.path.join(a_logdir, "%d.as16.log" % (sim_id))
    sta_file = os.path.join(a_indir, self.stations)
    a_srcfile = os.path.join(a_indir, self.srcfile)

    # Read SRC file
    src_keys = bband_utils.parse_src_file(a_srcfile)

    # Load information from SRC file
    origin = (src_keys['lon_top_center'], src_keys['lat_top_center'])
    dims = (src_keys['fault_length'], src_keys['dlen'],
            src_keys['fault_width'], src_keys['dwid'],
            src_keys['depth_to_top'])
    mech = (src_keys['strike'], src_keys['dip'],
            src_keys['rake'])

    # Set region to be unknown -- this has no effect in the AS16
    # method as z1 is not provided and that causes dz1 to be set
    # to zero and override the cj parameter
    cj = -999

    # Figure out what mechanism to use
    # 0 = unknown
    # 1 = normal
    # 2 = reverse
    # 3 = strike-slip
    rake = src_keys['rake']
    if abs(rake) <= 30 or abs(rake) >= 150:
        mechanism = 3
    elif 30 < rake < 150:
        mechanism = 2
    elif -150 < rake < -30:
        mechanism = 1
    else:
        print("Warning: unknown mechanism for rake = %f" % (rake))
        mechanism = 0

    # Get station list
    slo = StationList(sta_file)
    site_list = slo.getStationList()

    # The fault geometry does not depend on the station, so compute
    # the fault trace once instead of once per station
    (fault_trace1, up_seis_depth,
     low_seis_depth, ave_dip,
     dummy1, dummy2) = putils.FaultTraceGen(origin, dims, mech)

    # Create output file, add header
    out_file = open(os.path.join(a_validation_outdir,
                                 '%d.as16.%s.txt' %
                                 (self.sim_id, self.eventname)), 'w')
    out_file.write("#station, rrup, vs30, sd575, sd595, sd2080,"
                   " tau575, tau595, tau2080, phi575, phi595, phi2080\n")

    # Go through each station
    for site in site_list:
        stat = site.scode
        vs30 = float(site.vs30)

        # Calculate Rrup for this station
        site_geom = [site.lon, site.lat, 0.0]
        _, rrup, _ = putils.DistanceToSimpleFaultSurface(site_geom,
                                                         fault_trace1,
                                                         up_seis_depth,
                                                         low_seis_depth,
                                                         ave_dip)

        results = calculate_as16(src_keys['magnitude'], rrup,
                                 mechanism, vs30, -999.0, cj)

        out_file.write("%s, %3.2f, %3.2f" % (stat, rrup, vs30))
        for piece in results:
            out_file.write(", %7.5f" % (piece))
        out_file.write("\n")

    # All done, close output file
    out_file.close()

    # All done!
    print("AS2016 Completed".center(80, '-'))
def load_all_data(comp_label, input_indir, input_obsdir,
                  combined_file, temp_dir, component):
    """
    This function loads all data from each station file
    and creates the structures needed for plotting.

    Returns a dictionary keyed by station code; each entry holds the
    station's Rrup distance ("dist"), per-realization simulated
    values ("r1".."r5" lists), observed values ("r1_obs".."r5_obs"),
    and GMPE values ("r1_gmpe".."r5_gmpe") for the five RZZ2015
    parameters.

    Note: combined_file is accepted for interface compatibility but
    is not used by this function.
    """
    data = {}

    # Get realizations
    realizations = sorted(os.listdir(input_indir))
    one_realization = realizations[0]
    basedir = os.path.join(input_indir, one_realization)

    # Get the GMPE data for the RZZ2015 metrics
    base_outdir = os.path.join(input_obsdir, one_realization,
                               "validations", "rzz2015_gmpe")
    a_rzz2015_gmpe = glob.glob("%s%s%s.rzz2015gmpe.txt" %
                               (base_outdir, os.sep, one_realization))
    a_rzz2015_gmpe = a_rzz2015_gmpe[0]

    # Get the station list
    a_statfile = glob.glob("%s%s*.stl" % (basedir, os.sep))
    if len(a_statfile) != 1:
        raise bband_utils.ProcessingError("Cannot get station list!")
    a_statfile = a_statfile[0]
    slo = StationList(a_statfile)
    site_list = slo.getStationList()

    # Get source file
    a_srcfile = glob.glob("%s%s*.src" % (basedir, os.sep))
    if len(a_srcfile) != 1:
        raise bband_utils.ProcessingError("Cannot get src file!")
    a_srcfile = a_srcfile[0]

    # Parse it!
    src_keys = bband_utils.parse_src_file(a_srcfile)

    # The fault geometry is the same for every station, so compute
    # the fault trace once instead of once per station
    origin = (src_keys['lon_top_center'], src_keys['lat_top_center'])
    dims = (src_keys['fault_length'], src_keys['dlen'],
            src_keys['fault_width'], src_keys['dwid'],
            src_keys['depth_to_top'])
    mech = (src_keys['strike'], src_keys['dip'],
            src_keys['rake'])
    (fault_trace1, up_seis_depth,
     low_seis_depth, ave_dip,
     dummy1, dummy2) = putils.FaultTraceGen(origin, dims, mech)

    # Go through all stations
    for site in site_list:
        stat = site.scode

        # Calculate Rrup
        site_geom = [float(site.lon), float(site.lat), 0.0]
        _, rrup, _ = putils.DistanceToSimpleFaultSurface(site_geom,
                                                         fault_trace1,
                                                         up_seis_depth,
                                                         low_seis_depth,
                                                         ave_dip)

        # Read data for this station
        data_file = os.path.join(temp_dir, "%s.rzz2015" % (stat))
        data[stat] = {}
        data[stat]["dist"] = rrup
        data[stat]["r1"] = []
        data[stat]["r2"] = []
        data[stat]["r3"] = []
        data[stat]["r4"] = []
        data[stat]["r5"] = []
        data[stat]["r1_obs"] = None
        data[stat]["r2_obs"] = None
        data[stat]["r3_obs"] = None
        data[stat]["r4_obs"] = None
        data[stat]["r5_obs"] = None
        data[stat]["r1_gmpe"] = None
        data[stat]["r2_gmpe"] = None
        data[stat]["r3_gmpe"] = None
        data[stat]["r4_gmpe"] = None
        data[stat]["r5_gmpe"] = None
        with open(data_file, 'r') as in_file:
            for line in in_file:
                line = line.strip()
                if line.startswith("#"):
                    # Skip comments
                    continue
                pieces = line.split(",")
                comp = pieces[1].strip()
                # Check if we want this component
                if component != "both" and comp != component:
                    # Skip other components
                    continue
                # We want this data point
                pieces = [float(piece) for piece in pieces[2:]]
                # Get observation values -- only the first matching
                # line contributes to each *_obs value
                if data[stat]["r1_obs"] is None:
                    data[stat]["r1_obs"] = pieces[6]
                if data[stat]["r2_obs"] is None:
                    data[stat]["r2_obs"] = pieces[8]
                if data[stat]["r3_obs"] is None:
                    data[stat]["r3_obs"] = pieces[10]
                if data[stat]["r4_obs"] is None:
                    data[stat]["r4_obs"] = pieces[12]
                if data[stat]["r5_obs"] is None:
                    data[stat]["r5_obs"] = pieces[14]
                # Get simulated data values
                data[stat]["r1"].append(pieces[7])
                data[stat]["r2"].append(pieces[9])
                data[stat]["r3"].append(pieces[11])
                data[stat]["r4"].append(pieces[13])
                data[stat]["r5"].append(pieces[15])

    # Now read the GMPE values for each station
    with open(a_rzz2015_gmpe, 'r') as gmpe_file:
        for line in gmpe_file:
            line = line.strip()
            # Skip comments
            if line.startswith("#"):
                continue
            pieces = line.split(",")
            stat = pieces[0].strip()
            pieces = [float(piece.strip()) for piece in pieces[1:]]
            data[stat]["r1_gmpe"] = pieces[2]
            data[stat]["r2_gmpe"] = pieces[3]
            # r3 is derived as the ratio of the r1/r2 GMPE values
            data[stat]["r3_gmpe"] = pieces[2] / pieces[3]
            data[stat]["r4_gmpe"] = pieces[5]
            data[stat]["r5_gmpe"] = pieces[6]

    # Return all data
    return data
def __init__(self, a_srcfile=None):
    """
    Set up some parameters for HFSim
    """
    # When a SRC file is provided, its parsed keys become the
    # configuration dictionary; otherwise start from an empty one
    self.CFGDICT = bband_utils.parse_src_file(a_srcfile) if a_srcfile else {}

    # Name of the hfsims executable
    self.HFSIM = "hb_high_v5.4.3"

    # Seismic parameters
    #
    # SITEAMP is a boolean (0/1) flag that tells the code to apply
    # impedance amplification factors related to the prescribed
    # velocity model; per Rob Graves (29-Nov-2012) it should be 1
    self.SITEAMP = 1

    # Western US defaults -- a velocity model configuration file may
    # override any of these
    self.DEFAULT_SDROP = 50
    self.DEFAULT_FCFAC = 0.0
    self.DEFAULT_QFEXP = 0.6
    self.DEFAULT_C0 = 57
    self.DEFAULT_C1 = 34
    self.RAYSET = [2, 1, 2]
    self.TLEN = 102.4

    # High-frequency time step; a velocity model can override this
    # value through its HF_DT key
    self.DT = 0.01

    # Per Rob Graves (8-Feb-2013 and 29-Nov-2012): FMAX is a legacy
    # input that the code still requires but ignores whenever
    # KAPPA > 0, which is always the case in our runs
    self.FMAX = 10.0
    self.KAPPA = 0.04

    # Additional default values for the WUS region
    self.DEFAULT_DX = 2.0
    self.DEFAULT_DY = 2.0
    self.RUPV = -1.0
    self.MEAN_RVFAC = 0.8
    self.RANGE_RVFAC = 0.05
    self.SHAL_RVFAC = 0.7
    self.DEFAULT_EXTRA_FCFAC = 0.0
    self.UNITS = -1
    self.DEFAULT_VSMOHO = 999.9
    self.PATH_DUR_MODEL = 0
    self.DEEP_RVFAC = 1.0
    self.RVSIG = 0.0

    # Extra inputs required by hfsims V5.4
    self.C_ALPHA = 0.1
    self.FA_SIG1 = 0.0