Example #1
def bbp2sdsu_statlist(working_dir, slo, sdsu_stalist,
                      srf_file, xyz_srf_file, extended_fault,
                      tmpfile="station.coords"):
    """
    Takes a BBP station list object and writes an SDSU station list
    """

    # Convert station coordinates to cartesian coordinates
    a_srf_file = os.path.join(working_dir, srf_file)
    a_xyz_srf_file = os.path.join(working_dir, xyz_srf_file)
    out_coords = os.path.join(working_dir, tmpfile)

    gbb = GeoBBSRF()
    gbb.run(slo, out_coords, extended_fault, 'n',
            a_srf_file, a_xyz_srf_file, 'y')
    if not os.path.exists(out_coords):
        raise bband_utils.ProcessingError("Error converting station coordinates"
                                          " to SDSU format, exiting.")
    coords_fp = open(out_coords, 'r')
    coords_data = coords_fp.readlines()
    coords_fp.close()

    # Write station file
    stat_list = slo.getStationList()
    stalist_fp = open(sdsu_stalist, 'w')
    for i in range(0, len(stat_list)):
        pieces = coords_data[i].split()
        stalist_fp.write("%f  %f  %s  -1  -1  -1\n" % (float(pieces[0]),
                                                       float(pieces[1]),
                                                       stat_list[i].scode))
    stalist_fp.flush()
    stalist_fp.close()

    # All done!
    return "%s.param" % out_coords
Example #2
    def read_gmpe(self, input_file):
        """
        Reads the GMPE input_file and returns the periods along with the data
        """
        # Start empty
        gmpe_data = []
        gmpe_models = None

        gmpefile = open(input_file, 'r')
        for line in gmpefile:
            line = line.strip()
            if line.startswith('#period'):
                # This line contains the models we want to use
                gmpe_models = line.split(None, 1)[1]
                gmpe_models = gmpe_models.split()
            if line.startswith('#'):
                # Skip other comments
                continue
            if not line:
                # Skip empty lines
                continue
            values = line.split()
            values = [float(value) for value in values]
            # Extract period and rotd50 value
            period = values[0]
            medians = values[1:]
            gmpe_data.append((period, medians))
        gmpefile.close()

        # Make sure we parsed the line with the model names
        if gmpe_models is None:
            raise bband_utils.ProcessingError("Cannot find GMPE models in %s" %
                                              (input_file))

        # Return the station median data
        return gmpe_data, gmpe_models
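
A minimal sketch of the input format read_gmpe appears to expect: a
'#period' header line naming the models, followed by rows holding a period
and one median per model. The file contents below are invented for
illustration:

import io

sample = io.StringIO(
    "#period model_a model_b\n"
    "0.010 0.245 0.251\n"
    "0.100 0.512 0.498\n")

gmpe_models = None
gmpe_data = []
for line in sample:
    line = line.strip()
    if line.startswith('#period'):
        gmpe_models = line.split(None, 1)[1].split()
    if line.startswith('#') or not line:
        continue
    values = [float(value) for value in line.split()]
    gmpe_data.append((values[0], values[1:]))
print(gmpe_models)   # ['model_a', 'model_b']
print(gmpe_data[0])  # (0.01, [0.245, 0.251])
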
Example #3
def combine_realizations_data(input_dir, temp_dir):
    """
    This function creates a single file averaging the rd50 files for
    each of the stations across all realizations
    """
    # Get realizations
    realizations = sorted(os.listdir(input_dir))
    one_realization = realizations[0]
    basedir = os.path.join(input_dir, one_realization)
    # Figure out what our stations are
    rd50_files = glob.glob("%s%s%s.*.rd50" %
                           (basedir, os.sep, one_realization))
    rd50_files = [os.path.basename(each_file) for each_file in rd50_files]
    stations = [station.split(".")[1] for station in rd50_files]
    # Capture event_label
    bias_file = glob.glob("%s%s*.bias" % (basedir, os.sep))
    if len(bias_file) < 1:
        raise bband_utils.ProcessingError("Cannot find event label!")
    bias_file = bias_file[0]
    # Let's capture the event label
    event_label = os.path.basename(bias_file).split("-")[0]

    # Now walk through all realizations and combine stations data
    for station in stations:
        print("working on station: %s" % (station))
        combine_station_data(station, input_dir, temp_dir)

    return event_label, len(realizations), len(stations)
Example #4
def load_station_data(input_outdir, data, station):
    """
    Load station data into the DATA dictionary
    """
    # Get realizations
    realizations = sorted(os.listdir(input_outdir))
    for realization in realizations:
        basedir = os.path.join(input_outdir, realization,
                               "validations", "anderson_gof")
        data_file = glob.glob("%s%sgof-*-anderson-%s.txt" % (basedir,
                                                             os.sep,
                                                             station))
        if len(data_file) != 1:
            raise bband_utils.ProcessingError("Data for station %s " %
                                              (station) +
                                              "not found for "
                                              "realization %s!" %
                                              (realization))
        data_file = data_file[0]
        stat_data = readfmt(data_file)
        # Add data to the data dict
        for c_idx in range(10):
            for b_idx in range(BMAX):
                if station not in data[c_idx][b_idx]:
                    # Add 3rd level if it doesn't exist - stations
                    data[c_idx][b_idx][station] = []
                data[c_idx][b_idx][station].append(stat_data[b_idx, c_idx])
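
The DATA dictionary above is indexed by criterion (first level, 0..9),
band (second level, 0..BMAX-1), and then station. A sketch of the same
three-level structure built with collections.defaultdict, which makes the
existence check unnecessary (the BMAX value is assumed here just for the
sketch):

from collections import defaultdict

BMAX = 3  # assumed value, for illustration only

data = {c: {b: defaultdict(list) for b in range(BMAX)} for c in range(10)}
# Appending no longer needs the "if station not in ..." guard:
data[0][1]["sta01"].append(0.42)
print(data[0][1]["sta01"])  # [0.42]
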
Example #5
def combine_realizations_data(input_dir, temp_dir):
    """
    This function creates a single file averaging the rd100 files for
    each of the stations across all realizations
    """
    # Get realizations
    realizations = sorted(os.listdir(input_dir))
    one_realization = realizations[0]
    basedir = os.path.join(input_dir, one_realization, "validations",
                           "rzz2015")
    basedir_gmpe = os.path.join(input_dir, one_realization, "validations",
                                "rzz2015_gmpe")
    # Figure out what our stations are
    rzzgmpe_files = glob.glob("%s%s%s.rzz2015gmpe.*.png" %
                              (basedir_gmpe, os.sep, one_realization))
    rzzgmpe_files = [
        os.path.basename(each_file) for each_file in rzzgmpe_files
    ]
    stations = [station.split(".")[2] for station in rzzgmpe_files]
    # Capture event_label
    rzz_file = glob.glob("%s%s%s.rzz2015.*.txt" %
                         (basedir, os.sep, one_realization))
    if len(rzz_file) < 1:
        raise bband_utils.ProcessingError("Cannot find event label!")
    rzz_file = rzz_file[0]
    # Let's capture the event label
    event_label = os.path.basename(rzz_file).split(".")[2]

    # Now walk through all realizations and combine stations data
    for station in stations:
        print("working on station: %s" % (station))
        combine_station_data(station, input_dir, temp_dir)

    return event_label, len(realizations), len(stations)
Example #6
def combine_realization_data(tmpdir, period):
    """
    This function reads the resid files from all realizations and
    returns the combined data set
    """
    data = {}

    realizations = sorted(os.listdir(tmpdir))
    for realization in realizations:
        basedir = os.path.join(tmpdir, realization)
        resid_file = glob.glob("%s%s*-resid-%.3f-rotd50.txt" %
                               (basedir, os.sep, period))
        if len(resid_file) != 1:
            raise bband_utils.ProcessingError("Residuals file not found for "
                                              "realization %s!" % (realization))
        resid_file = resid_file[0]
        input_file = open(resid_file, 'r')
        for line in input_file:
            line = line.strip()
            # Skip comments and empty lines
            if line.startswith("#") or line.startswith("%") or not line:
                continue
            pieces = line.split()
            # Make sure we have enough tokens
            if len(pieces) != 2:
                continue
            # Convert to floats
            pieces = [float(piece) for piece in pieces]
            dist = pieces[0]
            val = pieces[1]
            if dist in data:
                data[dist].append(val)
            else:
                data[dist] = [val]
        input_file.close()
    # Ok, processed all realizations, now combine the data
    sta_dist_data = []
    sta_min_data = []
    sta_max_data = []
    sta_resid_data = []

    for item in data:
        sta_dist_data.append(item)
        sta_min_data.append(numpy.std(data[item]))
        sta_max_data.append(numpy.std(data[item]))
        sta_resid_data.append(numpy.mean(data[item]))
    # Return the data we found
    return sta_dist_data, sta_min_data, sta_max_data, sta_resid_data
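
The final loop collapses each distance bin into a mean residual plus a
symmetric one-standard-deviation band (note that the "min" and "max"
series above are both numpy.std, so they describe spread, not extremes).
A standalone sketch of that aggregation with invented residuals:

import numpy

data = {10.0: [0.2, -0.1, 0.3], 25.0: [0.05, 0.15]}

for dist in sorted(data):
    resid = numpy.mean(data[dist])
    err = numpy.std(data[dist])
    print("%6.1f km: %+.3f +/- %.3f" % (dist, resid, err))
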
Example #7
def read_input_bias_data(input_dir):
    """
    Read the bias data from all realizations
    """
    periods = []
    data = []
    event_label = None

    realizations = sorted(os.listdir(input_dir))
    for realization in realizations:
        basedir = os.path.join(input_dir, realization)
        bias_file = glob.glob("%s%s*-rotd50.bias" % (basedir, os.sep))
        if len(bias_file) != 1:
            raise bband_utils.ProcessingError("Bias file not found for "
                                              "realization %s!" %
                                              (realization))
        bias_file = bias_file[0]
        # Let's capture the event label
        if event_label is None:
            event_label = os.path.basename(bias_file).split("-")[0]

        input_file = open(bias_file, 'r')
        cur_periods = []
        cur_data = []
        for line in input_file:
            line = line.strip()
            # Skip comments and empty lines
            if line.startswith("#") or line.startswith("%") or not line:
                continue
            tokens = [float(token) for token in line.split()]
            cur_periods.append(tokens[0])
            cur_data.append(tokens[1])
        # Close input_file
        input_file.close()
        # Keep list of periods if not already done
        if not periods:
            periods = cur_periods
        # And keep data
        data.append(cur_data)

    bias_data = {}
    bias_data["num_periods"] = len(periods)
    bias_data["periods"] = periods
    bias_data["num_realizations"] = len(realizations)
    bias_data["data"] = data
    bias_data["event_label"] = event_label

    return bias_data
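
The returned dictionary holds one shared period list and one bias row per
realization. A sketch of its shape, with invented numbers and a
hypothetical event label, plus the per-period average a plotting routine
might compute from it:

import numpy

bias_data = {
    "num_periods": 3,
    "periods": [0.1, 1.0, 3.0],
    "num_realizations": 2,
    "data": [[0.02, -0.11, 0.05],   # realization 1
             [0.01, -0.09, 0.07]],  # realization 2
    "event_label": "EVENT",         # hypothetical label
}
mean_bias = numpy.mean(bias_data["data"], axis=0)
print(mean_bias)  # [ 0.015 -0.1    0.06 ]
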
Example #8
def create_resid_data_file(input_dir, combined_file):
    """
    This function creates a file containing the combined residuals
    from the simulation data from all realizations
    """
    copy_header = True
    event_label = None
    num_stations = 0

    # Open output file, write header
    outfile = open(combined_file, 'w')

    realizations = sorted(os.listdir(input_dir))
    for realization in realizations:
        basedir = os.path.join(input_dir, realization)
        resid_file = glob.glob("%s%s*.rd50-resid.txt" % (basedir, os.sep))
        if len(resid_file) != 1:
            raise bband_utils.ProcessingError("Residuals file not found for "
                                              "realization %s!" %
                                              (realization))
        resid_file = resid_file[0]
        # Let's capture the event label
        if event_label is None:
            event_label = os.path.basename(resid_file).split("-")[0]
        input_file = open(resid_file, 'r')
        for line in input_file:
            line = line.strip()
            # Skip comments and empty lines
            if line.startswith("#") or line.startswith("%") or not line:
                continue
            if line.startswith("EQ"):
                # This is the header line, skip if already done
                if not copy_header:
                    continue
                # If not done, set flag
                copy_header = False
            if line.find("rotd50") > 0:
                num_stations = num_stations + 1
            outfile.write("%s\n" % (line))
        input_file.close()

    # All done!
    outfile.close()

    # Return event label
    return event_label, len(realizations), num_stations
Example #9
def combine_station_data(station, input_dir, temp_dir):
    """
    This function combines data for a given station across multiple
    realizations, writing a single output file in temp_dir
    """
    data = {}
    # Get realizations
    realizations = sorted(os.listdir(input_dir))
    for realization in realizations:
        basedir = os.path.join(input_dir, realization)
        data_file = glob.glob("%s%s%s.%s.rd50" % (basedir, os.sep,
                                                  realization,
                                                  station))
        if len(data_file) != 1:
            raise bband_utils.ProcessingError("Data for station %s " %
                                              (station) +
                                              "not found for "
                                              "realization %s!" %
                                              (realization))
        data_file = data_file[0]
        in_data = open(data_file, 'r')
        for line in in_data:
            line = line.strip()
            # Skip comments
            if line.startswith("#"):
                continue
            pieces = line.split()
            pieces = [float(piece) for piece in pieces]
            key = pieces[0]
            pieces = pieces[1:]
            if key not in data:
                # Key is new to dictionary
                empty_set = [[] for _ in pieces]
                data[key] = empty_set
            for idx, value in enumerate(pieces):
                data[key][idx].append(value)
        in_data.close()

    # Now, write the output file
    out_file = open(os.path.join(temp_dir, "%s.rd50" % (station)), 'w')
    keys = sorted(data.keys())
    for key in keys:
        out_file.write("%10.4f" % (key))
        for comp in data[key]:
            out_file.write(" %10.5e" % (numpy.mean(comp)))
        out_file.write("\n")
    out_file.close()
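
Each rd50 row keys on the period in the first column; the remaining
columns are accumulated per period and averaged across realizations. A
minimal standalone sketch of that accumulate-then-average pattern, with
made-up rows from two hypothetical realizations:

import numpy

rows = [
    [(0.1, 0.50, 0.52), (1.0, 0.20, 0.21)],  # realization 1
    [(0.1, 0.54, 0.50), (1.0, 0.18, 0.23)],  # realization 2
]
data = {}
for realization in rows:
    for period, *comps in realization:
        data.setdefault(period, [[] for _ in comps])
        for idx, value in enumerate(comps):
            data[period][idx].append(value)
for period in sorted(data):
    print("%10.4f" % (period) +
          "".join(" %10.5e" % numpy.mean(c) for c in data[period]))
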
Example #10
def summarize_rotd50(tmpdir, outdir, combined_resid_file, comp_label,
                     num_stations, num_realization, codebase):
    """
    This function summarizes all rotd50 data and creates the combined
    rotd50 GOF plot
    """
    config = GPGofCfg()

    # Figure out where our binaries are
    if "BBP_DIR" in os.environ:
        install_root = os.path.normpath(os.environ["BBP_DIR"])
    else:
        raise bband_utils.ProcessingError("BBP_DIR is not set!")
    gp_bin_dir = os.path.join(install_root, "src", "gp", "bin")

    logfile = os.path.join(tmpdir, "log.txt")

    for comp in config.COMPS_PSA5:
        # Build paths and check lengths
        fileroot = os.path.join(
            tmpdir, "%s-%s-combined-rd50-%s" % (codebase, comp_label, comp))
        bband_utils.check_path_lengths([combined_resid_file, fileroot],
                                       bband_utils.GP_MAX_FILENAME)

        cmd = ("%s " % (os.path.join(gp_bin_dir, "resid2uncer_varN")) +
               "residfile=%s fileroot=%s " % (combined_resid_file, fileroot) +
               "comp=%s nstat=%d nper=63 " % (comp, num_stations) +
               " >> %s 2>&1" % (logfile))
        bband_utils.runprog(cmd, abort_on_error=True)

    plottitle = ("Combined GOF Plot for %s\n%d Realizations - %s Method" %
                 (comp_label, num_realization, codebase.upper()))
    fileroot = "%s-%s-combined-rd50" % (codebase, comp_label)
    plotter = PlotGoF()
    plotter.plot(plottitle,
                 fileroot,
                 tmpdir,
                 outdir,
                 cutoff=0,
                 mode="rd50-single",
                 colorset="combined")

    print("Stations used: %s" % (num_stations))
Example #11
    def correct_station(self, station, extension):
        """
        This function applies the user-provided correction factors to
        the station's amplitudes, and outputs the corrected file
        """
        if station not in self.factors:
            raise bband_utils.ParameterError("Unknown station %s!" % (station))

        orig_file = os.path.join(self.proc_dir,
                                 "%s-orig.%s" % (station, extension))
        corr_file = os.path.join(self.proc_dir, "%s.%s" % (station, extension))

        # Make sure input files exist
        if not os.path.exists(orig_file):
            raise bband_utils.ProcessingError("File %s not found!" %
                                              (orig_file))

        # Pick set of correction factors
        factors = self.factors[station]

        # Correct rd50 file
        self.correct_file(factors, orig_file, corr_file)
Example #12
def combine_station_data(station, input_dir, temp_dir):
    """
    This function combines data for a given station across multiple
    realizations, writing a single output file in temp_dir
    """
    data = []
    # Get realizations
    realizations = sorted(os.listdir(input_dir))
    for realization in realizations:
        basedir = os.path.join(input_dir, realization, "validations",
                               "rzz2015")
        data_file = glob.glob("%s%s%s.rzz2015.*.txt" %
                              (basedir, os.sep, realization))
        if len(data_file) != 1:
            raise bband_utils.ProcessingError("Data for station %s " %
                                              (station) + "not found for "
                                              "realization %s!" %
                                              (realization))
        data_file = data_file[0]
        in_data = open(data_file, 'r')
        for line in in_data:
            line = line.strip()
            # Skip comments
            if line.startswith("#"):
                continue
            pieces = line.split(",")
            cur_station = pieces[0]
            # Check if this is the station we want
            if cur_station == station:
                data.append(line)
        in_data.close()

    # Now, write the output file
    out_file = open((os.path.join(temp_dir, "%s.rzz2015" % (station))), 'w')
    for item in data:
        out_file.write("%s\n" % (item))
    out_file.close()
Example #13
def load_all_data(input_indir, input_outdir):
    """
    This function goes through all realizations and loads all data to
    the DATA dictionary
    """
    # First create data dictionary
    data = {}
    # First level is C1..CMAX
    for i in range(10):
        data[i] = {}
        # Second level is B1..BMAX
        for j in range(BMAX):
            data[i][j] = {}

    # Get realizations
    realizations = sorted(os.listdir(input_indir))
    one_realization = realizations[0]
    basedir = os.path.join(input_indir, one_realization)

    # Get the station list
    a_statfile = glob.glob("%s%s*.stl" % (basedir, os.sep))
    if len(a_statfile) != 1:
        raise bband_utils.ProcessingError("Cannot get station list!")
    a_statfile = a_statfile[0]
    slo = StationList(a_statfile)
    site_list = slo.getStationList()

    # Go through all stations
    for site in site_list:
        station = site.scode
        print("working on station: %s" % (station))

        # Read data for this station
        load_station_data(input_outdir, data, station)

    # Return data dictionary
    return data
Example #14
    def correct_file(self, factors, input_file, output_file):
        """
        This function reads input_file and writes output_file after
        applying the correction factors
        """

        # Open files
        ifile = open(input_file, 'r')
        ofile = open(output_file, 'w')

        for line in ifile:
            if line.startswith("#"):
                # Comment line, just write to output
                ofile.write(line)
            else:
                # This line needs correction
                pieces = line.split()
                period = float(pieces[0])
                pieces.pop(0)
                values = [float(value) for value in pieces]
                try:
                    index = self.periods.index(period)
                except ValueError:
                    raise bband_utils.ProcessingError("Period %f not on the " %
                                                      (period) +
                                                      "correction list!")
                # Apply correction value
                values = [value * factors[index] for value in values]
                ostr = " %10.5E" % (period)
                for value in values:
                    ostr = "%s %10.5E" % (ostr, value)
                # Write to output file
                ofile.write("%s\n" % (ostr))

        # All done!
        ifile.close()
        ofile.close()
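
The correction step looks up the row's period in the period list and
multiplies every component value by that period's factor. A compact
standalone sketch (periods, factors, and the input line are invented):

periods = [0.1, 1.0, 3.0]        # assumed correction periods
factors = [1.10, 0.95, 1.02]     # assumed factors for one station

line = "1.0 0.200 0.210 0.205"   # period followed by component values
pieces = line.split()
period = float(pieces[0])
values = [float(value) for value in pieces[1:]]
index = periods.index(period)    # raises ValueError for unknown periods
values = [value * factors[index] for value in values]
print(" %10.5E" % (period) +
      "".join(" %10.5E" % (value) for value in values))
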
Example #15
    def write_xyz_srf(self, srf_file, xyz_srf_file, T3M):
        """
        Reads the SRF file and converts its lat/lon to XYZ format
        using self.latmin and self.lonmin as reference points. The T3M
        matrix is used to shift the coordinates to the new reference
        point calculated in the run function.
        """
        # Open files
        infile = open(srf_file, 'r')
        outfile = open(xyz_srf_file, 'w')

        # Pick up version number from SRF file
        version = float(infile.readline().strip().split()[0])
        # Now go back to the start
        infile.seek(0)

        # Copy lines
        while True:
            line = infile.readline()
            # Cannot mix "for line in infile" with readline();
            # note that readline() returns an empty string at EOF
            if not line:
                break
            outfile.write(line)
            # Until we find the plane line
            if line.find("PLANE") >= 0:
                break

        tokens = line.strip().split()
        # Make sure we have a valid SRF file
        if len(tokens) != 2:
            raise bband_utils.ProcessingError("Invalid SRF file (%s)!" %
                                              (srf_file))
        planes = int(tokens[1])
        # Make sure we have only 1 plane
        if planes > 1:
            raise bband_utils.ProcessingError("Only one plane is supported! "
                                              "Found %d in SRF file!" %
                                              (planes))
        line = infile.readline()
        tokens = line.strip().split()
        if len(tokens) != 6:
            raise bband_utils.ProcessingError("Invalid SRF file (%s)!" %
                                              (srf_file))
        # Convert to XYZ
        lon = float(tokens[0])
        lat = float(tokens[1])
        [x_cart, y_cart] = self.geo2cart(lon, lat, self.min_lon, self.min_lat)
        tmp = T3M * np.mat([x_cart, y_cart, 0, 1]).transpose()
        tokens[0] = str(float(tmp[0]))
        tokens[1] = str(float(tmp[1]))
        outfile.write(" %s\n" % "   ".join(tokens))

        # Continue copying
        while True:
            line = infile.readline()
            # Cannot mix "for line in infile" with readline();
            # note that readline() returns an empty string at EOF
            if not line:
                break
            outfile.write(line)
            if line.find("POINTS") >= 0:
                break
        # Figure out how many cells
        tokens = line.strip().split()
        if len(tokens) != 2:
            raise bband_utils.ProcessingError("Invalid SRF file (%s)!" %
                                              (srf_file))
        n_cels = int(tokens[1])

        # Go through each cell
        for i in range(0, n_cels):
            tokens = infile.readline().strip().split()
            if version == 1.0 and len(tokens) != 8:
                raise bband_utils.ProcessingError("Invalid SRF version 1 "
                                                  "file (%s)!" % (srf_file))
            if version == 2.0 and len(tokens) != 10:
                raise bband_utils.ProcessingError("Invalid SRF version 2 "
                                                  "file (%s)!" % (srf_file))
            lon = float(tokens[0])
            lat = float(tokens[1])
            [x_cart, y_cart] = self.geo2cart(lon, lat, self.min_lon,
                                             self.min_lat)
            tmp = T3M * np.mat([x_cart, y_cart, 0, 1]).transpose()
            tokens[0] = str(float(tmp[0]))
            tokens[1] = str(float(tmp[1]))
            outfile.write(" %s\n" % "   ".join(tokens))
            line = infile.readline()
            tokens = line.strip().split()
            if len(tokens) != 7:
                raise bband_utils.ProcessingError("Invalid SRF file (%s)!" %
                                                  (srf_file))
            nt1 = float(tokens[2])
            nt2 = float(tokens[4])
            nt3 = float(tokens[6])
            # Write line
            outfile.write(line)
            if nt1 > 0:
                for k in range(0, int(math.ceil(nt1 / 6.0))):
                    token = infile.readline()
                    if token == "":
                        raise bband_utils.ProcessingError("Invalid SRF "
                                                          "file (%s)!" %
                                                          (srf_file))
                    outfile.write(token)
            if nt2 > 0:
                for k in range(0, int(math.ceil(nt2 / 6.0))):
                    token = infile.readline()
                    if token == "":
                        raise bband_utils.ProcessingError("Invalid SRF "
                                                          "file (%s)!" %
                                                          (srf_file))
                    outfile.write(token)
            if nt3 > 0:
                for k in range(0, int(math.ceil(nt3 / 6.0))):
                    token = infile.readline()
                    if token == "":
                        raise bband_utils.ProcessingError("Invalid SRF "
                                                          "file (%s)!" %
                                                          (srf_file))
                    outfile.write(token)

        # All done, close input and output files
        infile.close()
        outfile.close()
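
Each lat/lon pair is first mapped to local Cartesian coordinates and then
shifted with the 4x4 homogeneous matrix T3M. A sketch of that single step
with numpy, using a plain translation as a stand-in for the real T3M
(which write_xyz_srf receives from the run function):

import numpy as np

dx, dy = 5.0, -2.0  # stand-in shift; the real values come from run()
T3M = np.mat([[1.0, 0.0, 0.0, dx],
              [0.0, 1.0, 0.0, dy],
              [0.0, 0.0, 1.0, 0.0],
              [0.0, 0.0, 0.0, 1.0]])

x_cart, y_cart = 12.5, 30.0  # would come from geo2cart(lon, lat, ...)
tmp = T3M * np.mat([x_cart, y_cart, 0, 1]).transpose()
print(float(tmp[0]), float(tmp[1]))  # 17.5 28.0
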
Example #16
a_outdir1 = os.path.join(install.A_OUT_DATA_DIR, str(sim_id_1))
a_outdir2 = os.path.join(install.A_OUT_DATA_DIR, str(sim_id_2))

# Src file
a_srcfile = os.path.join(a_indir, src_file)
src_keys = bband_utils.parse_src_file(a_srcfile)

# Station file
a_statfile = os.path.join(a_indir, station_list)
slo = StationList(a_statfile)
site_list = slo.getStationList()

# Capture event_label
bias_file = glob.glob("%s%s*.bias" % (a_outdir1, os.sep))
if len(bias_file) < 1:
    raise bband_utils.ProcessingError("Cannot find event label!")
bias_file = bias_file[0]
# Let's capture the event label
event_label = os.path.basename(bias_file).split("-")[0]

print_header_rd50 = 1

# Go through the stations
for site in site_list:
    stat = site.scode
    slon = float(site.lon)
    slat = float(site.lat)

    # Check files are there
    a_rd50file1 = os.path.join(a_outdir1, "%d.%s.rd50" % (sim_id_1, stat))
    a_rd50file2 = os.path.join(a_outdir2, "%d.%s.rd50" % (sim_id_2, stat))
Example #17
    def load_correction_factors(self):
        """
        This function loads the correction factors from the corr_file
        provided
        """
        try:
            cfile = open(self.corr_file, 'r')
        except IOError:
            raise bband_utils.ParameterError("Cannot read correction file %s" %
                                             (self.corr_file))

        # We are looking for the header first
        headers = None
        # Loop through the lines
        for line in cfile:
            if line.startswith("#StaName"):
                headers = line.split()
                break

        # Make sure we got the header line
        if headers is None:
            cfile.close()
            raise bband_utils.ProcessingError("Cannot find header line in "
                                              "correction file %s" %
                                              (self.corr_file))

        skip_headers = 0
        # Pick up the periods
        while len(headers) > 0:
            try:
                float(headers[0])
            except ValueError:
                # Skip this one, and remove from list
                skip_headers = skip_headers + 1
                headers.pop(0)
            else:
                # Found first period, get out
                break
        # Make sure we have at least 1 period
        if not headers:
            cfile.close()
            raise bband_utils.ProcessingError("Cannot find any periods in "
                                              "correction file %s" %
                                              (self.corr_file))

        # Convert periods to floats
        self.periods = [float(value) for value in headers]

        # Now read the rest of the correction file
        for line in cfile:
            if line.startswith("#"):
                continue
            factors = line.split()
            station = factors[0]
            to_skip = skip_headers
            # Remove everything other than the correction factors
            while to_skip > 0:
                factors.pop(0)
                to_skip = to_skip - 1

            factors = [float(value) for value in factors]
            # Make sure we have the proper number of correction factors
            if len(factors) != len(self.periods):
                cfile.close()
                raise bband_utils.ProcessingError("Station %s has %d periods" %
                                                  (station, len(factors)) +
                                                  ", expecting %s periods" %
                                                  (len(self.periods)))

            self.factors[station] = factors

        # All done
        cfile.close()
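
The header scan above walks the '#StaName' line from the left, popping
non-numeric columns until it hits the first period; skip_headers then
tells each data row how many leading columns to drop. A standalone sketch
with an invented two-period correction file:

header = "#StaName Lon Lat 0.10 1.00".split()

skip_headers = 0
while header:
    try:
        float(header[0])
        break                    # found the first period column
    except ValueError:
        skip_headers += 1
        header.pop(0)
periods = [float(value) for value in header]

row = "sta01 -118.25 34.05 1.10 0.95".split()
station = row[0]
factors = [float(value) for value in row[skip_headers:]]
print(station, periods, factors)  # sta01 [0.1, 1.0] [1.1, 0.95]
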
Example #18
    def convert_to_bbp(self, in_hor_file, in_ver_file, out_acc_file):
        """
        This function converts the in_file Irikura seismogram to an
        acceleration BBP file
        """
        header_lines = 0
        freq = None
        fac1_hor = None
        fac2_hor = None
        fac1_ver = None
        fac2_ver = None

        # Read horizontal acc file
        irikura_file = open(in_hor_file, 'r')
        for line in irikura_file:
            line = line.strip()
            header_lines = header_lines + 1
            if line.startswith("Sampling Freq"):
                pieces = line.split()
                token = pieces[2]
                freq = float(token[0:token.find("Hz")])
                continue
            if line.startswith("Scale Factor"):
                pieces = line.split()
                token = pieces[2]
                fac2_hor = float(token.split("/")[1])
                fac1_hor = float(token.split("/")[0][0:token.find("(gal)")])
                continue
            if line.startswith("Memo"):
                break
        irikura_file.close()

        # Read vertical acc file
        irikura_file = open(in_ver_file, 'r')
        for line in irikura_file:
            line = line.strip()
            if line.startswith("Scale Factor"):
                pieces = line.split()
                token = pieces[2]
                fac2_ver = float(token.split("/")[1])
                fac1_ver = float(token.split("/")[0][0:token.find("(gal)")])
                continue
            if line.startswith("Memo"):
                break
        irikura_file.close()

        if (freq is None or fac1_hor is None or fac2_hor is None
                or fac1_ver is None or fac2_ver is None):
            # Not able to parse it properly, exit!
            raise bband_utils.ProcessingError(
                "Could not parse files %s and %s" % (in_hor_file, in_ver_file))

        time_step = 1.0 / freq
        time_curr = 0.0
        skip_lines = 0
        irikura_hor_file = open(in_hor_file, 'r')
        irikura_ver_file = open(in_ver_file, 'r')
        bbp_file = open(out_acc_file, 'w')
        # Add header to BBP file
        bbp_file.write("#    time(sec)      N-S(cm/s/s)      "
                       "E-W(cm/s/s)      U-D(cm/s/s)\n")
        for line1, line2 in zip(irikura_hor_file, irikura_ver_file):
            # Skip header lines
            if skip_lines < header_lines:
                skip_lines = skip_lines + 1
                continue
            line1 = line1.strip()
            line2 = line2.strip()
            pieces1 = line1.split()
            pieces1 = [float(val) for val in pieces1]
            pieces2 = line2.split()
            pieces2 = [float(val) for val in pieces2]
            # Convert each value to gal (cm/s/s)
            pieces1 = [val * fac1_hor / fac2_hor for val in pieces1]
            pieces2 = [val * fac1_ver / fac2_ver for val in pieces2]
            # Write values to BBP file, repeating as needed for the
            # 3-component file
            for piece_h, piece_v in zip(pieces1, pieces2):
                bbp_file.write("%15.6e%15.6e%15.6e%15.6e\n" %
                               (time_curr, piece_h, piece_h, piece_v))
                # Don't forget to increment time
                time_curr = time_curr + time_step
        bbp_file.close()
        irikura_hor_file.close()
        irikura_ver_file.close()
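
The scale factor token appears to have the form "<fac1>(gal)/<fac2>", and
each raw sample is converted to gal (cm/s/s) as val * fac1 / fac2. A
sketch of just that parsing step, with an invented token:

token = "2000.0(gal)/8388608"  # invented example of the assumed format

fac2 = float(token.split("/")[1])
fac1 = float(token.split("/")[0][0:token.find("(gal)")])
print(fac1, fac2)                # 2000.0 8388608.0

raw_sample = 1048576
print(raw_sample * fac1 / fac2)  # 250.0 (cm/s/s)
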
Example #19
def bbp2peer(in_bbp_file, out_peer_n_file, out_peer_e_file, out_peer_z_file):
    """
    Convert a BBP file into three PEER files for use by RotD50 and
    other programs that read PEER-format seismograms
    """
    num_header_lines = 0
    bbp = open(in_bbp_file, "r")
    lines = bbp.readlines()
    bbp.close()
    for line in lines:
        elems = line.split()
        if elems and elems[0] == "#":
            num_header_lines = num_header_lines + 1
        else:
            break

    #print "Counted %d header lines" % (num_header_lines)

    cur_line = 0
    header_lines = []
    dt_vals = []
    n_vals = []
    e_vals = []
    z_vals = []
    for line in lines:
        if cur_line <= (num_header_lines - 2):
            cur_line = cur_line + 1
            header_lines.append(line)
        elif cur_line == (num_header_lines - 1):
            # Last line before start gives dt and npts
            cur_line = cur_line + 1
            # Print line
            elems = line.split()
            try:
                npts = int(elems[1])
                dt = float(elems[2])
            except (IndexError, ValueError) as err:
                # Ok, it doesn't seem we are re-converting to PEER
                # format a previously PEER-BBP converted file
                # Let's try to figure things out...
                npts = len(lines) - num_header_lines
                try:
                    time_1 = float(lines[num_header_lines].split()[0])
                    time_2 = float(lines[num_header_lines + 1].split()[0])
                except ValueError:
                    print("Cannot figure out npts and dt from this bbp file!")
                    sys.exit(-1)
                dt = time_2 - time_1
            #print("Reformating BBP file with %d header lines." %
            #      (num_header_lines))
            #print("Reformating BBP file with dt: %f " % (dt))
            #print("Reformating BBP file with npts: %d" % (npts))
        else:
            elems = line.split()
            if len(elems) != 4:
                raise bband_utils.ProcessingError("Unexpected BBP time series "
                                                  "line format found. "
                                                  "Error in conversion.")
            else:
                dt_vals.append(dt)
                n_vals.append(float(elems[1]) / bband_utils.G2CMSS)
                e_vals.append(float(elems[2]) / bband_utils.G2CMSS)
                z_vals.append(float(elems[3]) / bband_utils.G2CMSS)

    # Prepare to write the PEER output files (five values per line)
    n_file = open(out_peer_n_file, "w")
    e_file = open(out_peer_e_file, "w")
    z_file = open(out_peer_z_file, "w")

    #n_file.write("Created by: bbp2peer v12.8.0\n")
    #e_file.write("Created by: bbp2peer v12.8.0\n")
    #z_file.write("Created by: bbp2peer v12.8.0\n")

    # Adjust header lines, so we always have enough
    while len(header_lines) <= (PEER_HEADER_LINES - 2):
        header_lines.append("\n")

    for line in header_lines[0:(PEER_HEADER_LINES - 2)]:
        n_file.write(line)
        e_file.write(line)
        z_file.write(line)

    n_file.write("Acceleration in g\n")
    n_file.write("  %d   %1.6f   NPTS, DT\n" % (npts, dt))
    e_file.write("Acceleration in g\n")
    e_file.write("  %d   %1.6f   NPTS, DT\n" % (npts, dt))
    z_file.write("Acceleration in g\n")
    z_file.write("  %d   %1.6f   NPTS, DT\n" % (npts, dt))

    cur_line = 0
    for index, elem in enumerate(dt_vals):
        n_file.write("% 12.7E " % (n_vals[index]))
        e_file.write("% 12.7E " % (e_vals[index]))
        z_file.write("% 12.7E " % (z_vals[index]))
        #print "%f"%(dt_vals[index])
        #print "%e"%(n_vals[index])
        #print "%e"%(e_vals[index])
        #print "%e"%(z_vals[index])
        if (index % 5) == 4:
            n_file.write("\n")
            e_file.write("\n")
            z_file.write("\n")
        #else:
        #    n_file.write("\t")
        #    e_file.write("\t")
        #    z_file.write("\t")

    # Add newline at the end of last line to avoid issue when rotd50.f
    # reads the file (only when compiled with gfortran 4.3.3 on HPCC)
    n_file.write("\n")
    e_file.write("\n")
    z_file.write("\n")
    # Close all files
    n_file.close()
    e_file.close()
    z_file.close()
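
The PEER body above is written five acceleration values per line, with a
newline after every fifth sample and one final newline for the benefit of
downstream Fortran readers. A sketch of just that layout step, with dummy
values in g:

vals = [0.001 * i for i in range(12)]  # dummy accelerations in g

out = ""
for index, value in enumerate(vals):
    out += "% 12.7E " % (value)
    if (index % 5) == 4:
        out += "\n"
out += "\n"
print(out)
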
Example #20
def create_resid_data_file(comp_label, input_indir, input_obsdir,
                           combined_file, temp_dir):
    """
    This function creates a file containing the combined residuals
    from the simulation data from all stations
    """
    # Copy header for first file, set logfile
    copy_header = 1
    logfile = os.path.join(temp_dir, "log.txt")

    # Figure out where our binaries are
    if "BBP_DIR" in os.environ:
        install_root = os.path.normpath(os.environ["BBP_DIR"])
    else:
        raise bband_utils.ProcessingError("BBP_DIR is not set!")
    gp_bin_dir = os.path.join(install_root, "src", "gp", "bin")

    # Get realizations
    realizations = sorted(os.listdir(input_indir))
    one_realization = realizations[0]
    basedir = os.path.join(input_indir, one_realization)

    # Get the station list
    a_statfile = glob.glob("%s%s*.stl" % (basedir, os.sep))
    if len(a_statfile) != 1:
        raise bband_utils.ProcessingError("Cannot get station list!")
    a_statfile = a_statfile[0]
    slo = StationList(a_statfile)
    site_list = slo.getStationList()

    # Get source file
    a_srcfile = glob.glob("%s%s*.src" % (basedir, os.sep))
    if len(a_srcfile) == 0:
        raise bband_utils.ProcessingError("Cannot get src file!")
    a_srcfile = a_srcfile[0]

    # Parse it!
    src_keys = bband_utils.parse_src_file(a_srcfile)

    # Get the obsdir
    realizations = sorted(os.listdir(input_obsdir))
    one_realization = realizations[0]
    basedir = os.path.join(input_obsdir, one_realization)
    obs_dir = glob.glob("%s%sobs_seis*" % (basedir, os.sep))
    if len(obs_dir) != 1:
        raise bband_utils.ProcessingError("Cannot get observation dir!")
    obs_dir = obs_dir[0]

    # Go through all stations
    for site in site_list:
        slon = float(site.lon)
        slat = float(site.lat)
        stat = site.scode

        # Calculate Rrup
        origin = (src_keys['lon_top_center'], src_keys['lat_top_center'])
        dims = (src_keys['fault_length'], src_keys['dlen'],
                src_keys['fault_width'], src_keys['dwid'],
                src_keys['depth_to_top'])
        mech = (src_keys['strike'], src_keys['dip'], src_keys['rake'])

        site_geom = [float(site.lon), float(site.lat), 0.0]
        (fault_trace1, up_seis_depth, low_seis_depth, ave_dip, dummy1,
         dummy2) = putils.FaultTraceGen(origin, dims, mech)
        _, rrup, _ = putils.DistanceToSimpleFaultSurface(
            site_geom, fault_trace1, up_seis_depth, low_seis_depth, ave_dip)

        simfile1 = os.path.join(temp_dir, "%s.rd50" % (stat))
        datafile1 = os.path.join(obs_dir, "%s.rd50" % (stat))

        cmd = ("%s bbp_format=1 " %
               (os.path.join(gp_bin_dir, "gen_resid_tbl_3comp")) +
               "datafile1=%s simfile1=%s " % (datafile1, simfile1) +
               "comp1=psa5n comp2=psa5e comp3=rotd50 " +
               "eqname=%s mag=0.0 stat=%s lon=%.4f lat=%.4f " %
               (comp_label, stat, slon, slat) + "vs30=%d cd=%.2f " %
               (site.vs30, rrup) + "flo=%f fhi=%f " %
               (site.low_freq_corner, site.high_freq_corner) +
               "print_header=%d >> %s 2>> %s" %
               (copy_header, combined_file, logfile))
        bband_utils.runprog(cmd, abort_on_error=True)

        if copy_header == 1:
            copy_header = 0
Example #21
def exsim2bbp(in_exsim_n, in_exsim_e, in_exsim_z, out_bbp_file):
    """
    Converts three EXSIM acc-format files into a 3-component BBP file.
    Assumes all three files are for the same site.
    """

    n_vals = []
    e_vals = []
    z_vals = []
    header_lines = []
    dt_vals = []

    pfn = open(in_exsim_n, "r")
    lines_n = pfn.readlines()
    pfn.close()
    pfe = open(in_exsim_e, "r")
    lines_e = pfe.readlines()
    pfe.close()
    pfz = open(in_exsim_z, "r")
    lines_z = pfz.readlines()
    pfz.close()

    #
    # Test for empty input file and return error if found
    if len(lines_n) < 1:
        raise bband_utils.ProcessingError("Input file %s is empty!" %
                                          (in_exsim_n))
    elif len(lines_e) != len(lines_n):
        raise bband_utils.ProcessingError("N and E EXSIM files do not have "
                                          "the same number of lines!")
    elif len(lines_z) != len(lines_n):
        raise bband_utils.ProcessingError("N and Z EXSIM files do not have "
                                          "the same number of lines!")
    else:
        print("Input EXSIM-format seismogram files with len: %d" %
              (len(lines_n)))

    pts = 0
    dt = 0.0
    start_line = 0
    start_samples = False

    # Scan the header once, looking for the start of the samples
    for line in lines_n:
        start_line = start_line + 1
        elems = line.split()
        if len(elems) == 2 and elems[1] == "samples":
            pts = int(elems[0])
        elif len(elems) >= 2 and elems[0] == "dt:":
            dt = float(elems[1])
        elif len(elems) >= 2 and elems[0] == "time(s)":
            start_samples = True
            break
        else:
            header_lines.append(line)

    if not start_samples:
        raise bband_utils.ProcessingError("No samples found in EXSIM "
                                          "file: %s" % (in_exsim_n))
    else:
        print("starting line: %d" % (start_line))
        print("Reading Seismogram with NPTS: %d and DT: %f" % (pts, dt))

    cur_line = 0
    for line in lines_n:
        cur_line = cur_line + 1
        if cur_line > start_line:
            vals = line.split()
            n_vals.append(float(vals[1]))

    cur_line = 0
    for line in lines_e:
        cur_line = cur_line + 1
        if cur_line > start_line:
            vals = line.split()
            e_vals.append(float(vals[1]))

    cur_line = 0
    for line in lines_z:
        cur_line = cur_line + 1
        if cur_line > start_line:
            vals = line.split()
            z_vals.append(float(vals[1]))

    #
    # Populate the dt values for bbp format
    # Use number of pts in time series as counter
    #
    dt_val = dt
    pts_count = len(n_vals)
    #
    # Define the first sample at time 0.0
    cur_dt = 0.0
    for x in range(pts_count):
        dt_vals.append(cur_dt)
        cur_dt = cur_dt + dt_val
    #
    #
    if len(dt_vals) == pts_count:
        print("Read format consistent time series with %d samples." %
              (len(dt_vals)))
    else:
        print("Inconsistent time series: %d %d" % (len(dt_vals), pts_count))

    #
    # Print bbp file
    #
    bbp_file = open(out_bbp_file, "w")
    # Print header
    bbp_file.write(
        "#    time(sec)      N-S(cm/s/s)      E-W(cm/s/s)      U-D(cm/s/s)\n")
    #for line in header_lines:
    #    bbp_file.write("# %s" % (line))
    # bbp_file.write("# Column 1: Time (s)\n")
    # bbp_file.write("# Column 2: North-south acceleration (cm/s/s) (+ is northward)\n")
    # bbp_file.write("# Column 3: East-west acceleration (cm/s/s) (+ is eastward)\n")
    # bbp_file.write("# Column 4: Up-down acceleration (cm/s/s) (+ is upward)\n#\n")
    # bbp_file.write("# NPTS  DT  \n")
    # bbp_file.write("# %d %s\n" % (pts_count, dt))
    for x in range(pts_count):
        bbp_file.write("%7e   % 8e   % 8e   % 8e\n" %
                       (dt_vals[x], n_vals[x], e_vals[x], z_vals[x]))

    bbp_file.close()
Example #22
def peer2bbp(in_peer_n_file, in_peer_e_file, in_peer_z_file, out_bbp_file):
    """
    This function converts the 3 input peer files (N/E/Z) to a
    3-component bbp file
    """
    n_vals = []
    e_vals = []
    z_vals = []
    header_lines = []
    dt_vals = []
    start_line = 0
    start_samples = False

    # Read input files
    pfn = open(in_peer_n_file, "r")
    lines_n = pfn.readlines()
    pfn.close()
    pfe = open(in_peer_e_file, "r")
    lines_e = pfe.readlines()
    pfe.close()
    pfz = open(in_peer_z_file, "r")
    lines_z = pfz.readlines()
    pfz.close()

    #
    # Test for empty input file and return error if found
    if len(lines_n) < 1:
        raise bband_utils.ProcessingError("Input file %s is empty!" %
                                          (in_peer_n_file))
    elif len(lines_e) != len(lines_n):
        raise bband_utils.ProcessingError("N and E PEER files do not have "
                                          "the same number of lines!")
    elif len(lines_z) != len(lines_n):
        raise bband_utils.ProcessingError("N and Z PEER files do not have "
                                          "the same number of lines!")
    else:
        pass
        #print("Input PEER-format seismogram files with len: %d" %
        #      (len(lines_n)))

    # Scan the header once, looking for the start of the samples
    for line in lines_n:
        start_line = start_line + 1
        elems = line.split()
        if len(elems) > 0:
            if elems[0] == "ACCELERATION" or elems[0] == "Acceleration":
                start_samples = True
                break
        header_lines.append(line)

    if not start_samples:
        raise bband_utils.ProcessingError("No samples found in peer file: %s" %
                                          (in_peer_n_file))
    else:
        #print "starting line: %d"%(start_line)
        cur_line = 0
        pts = 0
        dt = 0.0
        for line in lines_n:
            cur_line = cur_line + 1
            if cur_line == start_line + 1:
                pts_dt = line.split()
                pts = float(pts_dt[0])
                dt = float(pts_dt[1])
                #print("Reading Seismogram with NPTS: %d and DT: %f" %
                #      (pts, dt))
            elif cur_line > start_line + 1:
                vals = line.split()
                for x in vals:
                    n_vals.append(float(x) * bband_utils.G2CMSS)
            else:
                # This block will skip the header lines use
                # ACCELERATION TAG as starting key
                pass

    cur_line = 0
    for line in lines_e:
        cur_line = cur_line + 1
        if cur_line > start_line + 1:
            vals = line.split()
            for y in vals:
                e_vals.append(float(y) * bband_utils.G2CMSS)
        else:
            # Skip header lines
            pass

    cur_line = 0
    for line in lines_z:
        cur_line = cur_line + 1
        if cur_line > start_line + 1:
            vals = line.split()
            for z in vals:
                z_vals.append(float(z) * bband_utils.G2CMSS)
        else:
            # Skip header lines
            pass
    #
    # Populate the dt values for bbp format
    # Use number of pts in time series as counter
    #
    dt_val = float(dt)
    pts_count = len(n_vals)
    #
    # Define the first sample at time 0.0
    cur_dt = 0.0
    for x in range(pts_count):
        dt_vals.append(cur_dt)
        cur_dt = cur_dt + dt_val
    #
    #
    if len(dt_vals) == pts_count:
        pass
        #print("Read format consistent time series with %d samples." %
        #      (len(dt_vals)))
    else:
        print("Inconsistent time series: %d %d" % (len(dt_vals), pts_count))

    #
    # Print files in bbp
    #
    # Open file
    bbp_file = open(out_bbp_file, "w")
    # Write header
    bbp_file.write(
        "#    time(sec)      N-S(cm/s/s)      E-W(cm/s/s)      U-D(cm/s/s)\n")
    #    for line in header_lines:
    #        bbp_file.write("# %s" % (line))
    #    bbp_file.write("# Column 1: Time (s)\n")
    #    bbp_file.write("# Column 2: North-south acceleration (cm/s/s) (+ is northward)\n")
    #    bbp_file.write("# Column 3: East-west acceleration (cm/s/s) (+ is eastward)\n")
    #    bbp_file.write("# Column 4: Up-down acceleration (cm/s/s) (+ is upward)\n#\n")
    #    bbp_file.write("# NPTS  DT  \n")
    #    bbp_file.write("# %d %s\n" % (pts_count, dt))
    # Write the data
    for x in range(pts_count):
        bbp_file.write("%7e   % 8e   % 8e   % 8e\n" %
                       (dt_vals[x], n_vals[x], e_vals[x], z_vals[x]))
    # Lastly, close the file
    bbp_file.close()
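
PEER amplitudes are in g while BBP files store cm/s/s, so every sample is
scaled by bband_utils.G2CMSS on the way in. A sketch of that conversion
with the constant spelled out (assuming G2CMSS is one g expressed in
cm/s/s, i.e. standard gravity):

G2CMSS = 980.665  # assumed value of bband_utils.G2CMSS

peer_samples = [0.01, -0.02, 0.015]  # accelerations in g
dt = 0.005
for i, val in enumerate(peer_samples):
    print("%7e   % 8e" % (i * dt, val * G2CMSS))
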
Example #23
def plot_combined_map_gof(indir, tmpdir, outdir, codebase):
    """
    This function reads data from the residuals files from multiple
    realizations and plots a map gof plot with a number of periods.
    """
    # Capture number of realizations and event label
    num_realizations = len(os.listdir(tmpdir))
    basedir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
    resid_file = glob.glob("%s%s*-resid-map*rotd50.txt" % (basedir, os.sep))[0]
    event_label = os.path.basename(resid_file).split("-")[0]

    # Get one trace file
    basedir = os.path.join(indir, os.listdir(indir)[0])
    trace_file = glob.glob("%s%s*.trace" % (basedir, os.sep))[0]

    # Now get the SRC (or SRF file) in order to get the hypocenter
    # location. Note that this function will look for the hypocenter
    # location from the first realization. If the simulation was
    # created using randomized hypocenter locations, the plot will
    # only display the location of the hypocenter from the first
    # realization.
    src_file = glob.glob("%s%s*.src" % (basedir, os.sep))
    if not src_file:
        srf_file = glob.glob("%s%s*.srf" % (basedir, os.sep))
        if not srf_file:
            raise bband_utils.ProcessingError("Cannot find SRC/SRF file!")
        source_file = srf_file[0]
    else:
        source_file = src_file[0]

    # Get hypo_lon, hypo_lat from src/srf file
    hypo_lon, hypo_lat = fault_utils.calculate_epicenter(source_file)

    # Collect all the data from the residuals file
    all_sta_x_data = []
    all_sta_y_data = []
    all_sta_resid_data = []
    for period in DIST_PERIODS:
        (sta_x_data, sta_y_data,
         sta_resid_data) = combine_realization_data(tmpdir, period)
        all_sta_x_data.append(sta_x_data)
        all_sta_y_data.append(sta_y_data)
        all_sta_resid_data.append(sta_resid_data)

    # Get plot boundaries
    (north, south, east,
     west) = set_boundaries_from_lon_lat(all_sta_x_data[0], all_sta_y_data[0])

    # Get directory names
    install = InstallCfg.getInstance()
    # Prepare to plot map GOF
    plotregion = [west, east, south, north]
    topo = os.path.join(install.A_PLOT_DATA_DIR, 'calTopo18.bf')
    coastal = os.path.join(install.A_PLOT_DATA_DIR, 'gshhs_h.txt')
    border = os.path.join(install.A_PLOT_DATA_DIR, 'wdb_borders_h.txt')

    # Now create the map GOF
    outfile = os.path.join(
        outdir, "gof-map-combined-%s-%s-rotd50.png" % (codebase, event_label))

    create_combined_map_gof(all_sta_x_data,
                            all_sta_y_data,
                            all_sta_resid_data,
                            plotregion,
                            topo,
                            coastal,
                            border,
                            trace_file,
                            event_label,
                            num_realizations,
                            codebase,
                            outfile,
                            hypo_lat=hypo_lat,
                            hypo_lon=hypo_lon)
Example #24
def load_all_data(comp_label, input_indir, input_obsdir, combined_file,
                  temp_dir, component):
    """
    This function loads all data from each station file
    and creates the structures needed for plotting.
    """
    data = {}

    # Get realizations
    realizations = sorted(os.listdir(input_indir))
    one_realization = realizations[0]
    basedir = os.path.join(input_indir, one_realization)

    # Get the GMPE data for the RZZ2015 metrics
    base_outdir = os.path.join(input_obsdir, one_realization, "validations",
                               "rzz2015_gmpe")
    a_rzz2015_gmpe = glob.glob("%s%s%s.rzz2015gmpe.txt" %
                               (base_outdir, os.sep, one_realization))
    if len(a_rzz2015_gmpe) != 1:
        raise bband_utils.ProcessingError("Cannot get RZZ2015 GMPE file!")
    a_rzz2015_gmpe = a_rzz2015_gmpe[0]
    # Get the station list
    a_statfile = glob.glob("%s%s*.stl" % (basedir, os.sep))
    if len(a_statfile) != 1:
        raise bband_utils.ProcessingError("Cannot get station list!")
    a_statfile = a_statfile[0]
    slo = StationList(a_statfile)
    site_list = slo.getStationList()

    # Get source file
    a_srcfile = glob.glob("%s%s*.src" % (basedir, os.sep))
    if len(a_srcfile) != 1:
        raise bband_utils.ProcessingError("Cannot get src file!")
    a_srcfile = a_srcfile[0]

    # Parse it!
    src_keys = bband_utils.parse_src_file(a_srcfile)

    # Go through all stations
    for site in site_list:
        slon = float(site.lon)
        slat = float(site.lat)
        stat = site.scode

        # Calculate Rrup
        origin = (src_keys['lon_top_center'], src_keys['lat_top_center'])
        dims = (src_keys['fault_length'], src_keys['dlen'],
                src_keys['fault_width'], src_keys['dwid'],
                src_keys['depth_to_top'])
        mech = (src_keys['strike'], src_keys['dip'], src_keys['rake'])

        site_geom = [float(site.lon), float(site.lat), 0.0]
        (fault_trace1, up_seis_depth, low_seis_depth, ave_dip, dummy1,
         dummy2) = putils.FaultTraceGen(origin, dims, mech)
        _, rrup, _ = putils.DistanceToSimpleFaultSurface(
            site_geom, fault_trace1, up_seis_depth, low_seis_depth, ave_dip)

        # Read data for this station
        data_file = os.path.join(temp_dir, "%s.rzz2015" % (stat))

        data[stat] = {}
        data[stat]["dist"] = rrup
        data[stat]["r1"] = []
        data[stat]["r2"] = []
        data[stat]["r3"] = []
        data[stat]["r4"] = []
        data[stat]["r5"] = []
        data[stat]["r1_obs"] = None
        data[stat]["r2_obs"] = None
        data[stat]["r3_obs"] = None
        data[stat]["r4_obs"] = None
        data[stat]["r5_obs"] = None
        data[stat]["r1_gmpe"] = None
        data[stat]["r2_gmpe"] = None
        data[stat]["r3_gmpe"] = None
        data[stat]["r4_gmpe"] = None
        data[stat]["r5_gmpe"] = None

        in_file = open(data_file, 'r')
        for line in in_file:
            line = line.strip()
            if line.startswith("#"):
                # Skip comments
                continue
            pieces = line.split(",")
            comp = pieces[1].strip()
            # Check if we want this component
            if component != "both":
                if comp != component:
                    # Skip
                    continue
            # We want this data point
            pieces = pieces[2:]
            pieces = [float(piece) for piece in pieces]
            # Get observation values
            if data[stat]["r1_obs"] is None:
                data[stat]["r1_obs"] = pieces[6]
            if data[stat]["r2_obs"] is None:
                data[stat]["r2_obs"] = pieces[8]
            if data[stat]["r3_obs"] is None:
                data[stat]["r3_obs"] = pieces[10]
            if data[stat]["r4_obs"] is None:
                data[stat]["r4_obs"] = pieces[12]
            if data[stat]["r5_obs"] is None:
                data[stat]["r5_obs"] = pieces[14]
            # Get simulated data values
            data[stat]["r1"].append(pieces[7])
            data[stat]["r2"].append(pieces[9])
            data[stat]["r3"].append(pieces[11])
            data[stat]["r4"].append(pieces[13])
            data[stat]["r5"].append(pieces[15])
        in_file.close()

    gmpe_file = open(a_rzz2015_gmpe, 'r')
    for line in gmpe_file:
        line = line.strip()
        # Skip comments
        if line.startswith("#"):
            continue
        pieces = line.split(",")
        stat = pieces[0].strip()
        pieces = pieces[1:]
        pieces = [float(piece.strip()) for piece in pieces]
        data[stat]["r1_gmpe"] = pieces[2]
        data[stat]["r2_gmpe"] = pieces[3]
        data[stat]["r3_gmpe"] = pieces[2] / pieces[3]
        data[stat]["r4_gmpe"] = pieces[5]
        data[stat]["r5_gmpe"] = pieces[6]
    gmpe_file.close()

    # Return all data
    return data
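
The returned dictionary maps each station code to its rupture distance, the per-realization lists of simulated R1-R5 parameters, and the single observed and GMPE-derived reference values. A minimal sketch of consuming that structure (the summarize_station helper and its numpy median reduction are illustrative assumptions, not part of the original module):

import numpy as np

def summarize_station(data, stat):
    # Print the median across realizations for each simulated
    # parameter next to the observed and GMPE reference values
    entry = data[stat]
    print("%s (Rrup = %.1f km)" % (stat, entry["dist"]))
    for key in ["r1", "r2", "r3", "r4", "r5"]:
        sim = np.median(entry[key]) if entry[key] else float("nan")
        print("  %s: sim=%.3f obs=%s gmpe=%s" %
              (key, sim, entry["%s_obs" % key], entry["%s_gmpe" % key]))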
Example #25
    def read_srf(self, srffile):
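        """
        Reads a single-plane SRF file and extracts fault geometry,
        hypocenter, moment magnitude, and focal mechanism
        """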
        if os.path.exists(srffile):
            srff = open(srffile, 'r')
        else:
            raise bband_utils.ParameterError("Missing SRF file (%s)!" %
                                             (srffile))

        # Read SRF version, then check the number of planes
        version = float(srff.readline().strip().split()[0])
        tokens = srff.readline().strip().split()
        # Make sure we have a valid SRF file
        if len(tokens) != 2:
            raise bband_utils.ProcessingError("Invalid SRF file (%s)!" %
                                              (srffile))
        planes = int(tokens[1])
        # Make sure we have only 1 plane
        if planes > 1:
            raise bband_utils.ProcessingError("Only one plane is supported!" +
                                              " Found %d planes in SRF file!" %
                                              (planes))

        # Read Fault Data
        tokens = srff.readline().strip().split()
        if len(tokens) != 6:
            raise bband_utils.ProcessingError("Invalid SRF file (%s)!" %
                                              (srffile))
        self.f_len = float(tokens[4])
        self.f_width = float(tokens[5])
        tokens = srff.readline().strip().split()
        if len(tokens) != 5:
            raise bband_utils.ProcessingError("Invalid SRF file (%s)!" %
                                              (srffile))
        self.f_strike = float(tokens[0])
        self.f_dip = float(tokens[1])
        self.f_depth = float(tokens[2])

        tokens = srff.readline().strip().split()
        if len(tokens) != 2:
            raise bband_utils.ProcessingError("Invalid SRF file (%s)!" %
                                              (srffile))
        n_cells = int(tokens[1])
        # print ("F_len: %f, F_width: %f, F_Strike: %f" %
        #        (self.f_len, self.f_width, self.f_strike))
        # print ("F_dip: %f, F_depth: %f, n_cells: %d" %
        #        (self.f_dip, self.f_depth, n_cells))
        M0 = 0.0
        lon = []
        lat = []
        dep = []
        tinit = []
        rake = []
        for i in range(0, n_cells):
            tokens = srff.readline().strip().split()
            if version == 1.0 and len(tokens) != 8:
                raise bband_utils.ProcessingError("Invalid SRF version 1 "
                                                  "file (%s)!" % (srffile))
            if version == 2.0 and len(tokens) != 10:
                raise bband_utils.ProcessingError("Invalid SRF version 2 "
                                                  "file (%s)!" % (srffile))
            lon.append(float(tokens[0]))
            lat.append(float(tokens[1]))
            dep.append(float(tokens[2]))
            area = float(tokens[5])
            tinit.append(float(tokens[6]))
            tokens = srff.readline().strip().split()
            if len(tokens) != 7:
                raise bband_utils.ProcessingError("Invalid SRF file (%s)!" %
                                                  (srffile))
            rake.append(float(tokens[0]))
            slip1 = float(tokens[1])
            slip2 = float(tokens[3])
            slip3 = float(tokens[5])
            nt1 = float(tokens[2])
            nt2 = float(tokens[4])
            nt3 = float(tokens[6])
            # Skip the slip time-function values (six values per line)
            for npts in (nt1, nt2, nt3):
                for k in range(0, int(math.ceil(npts / 6.0))):
                    token = srff.readline()
                    if token == "":
                        raise bband_utils.ProcessingError("Invalid SRF "
                                                          "file (%s)!" %
                                                          (srffile))

            # Accumulate seismic moment M0 = mu * area * slip; with the SRF
            # convention of area in cm**2 and slip in cm, the rigidity
            # mu = 3x10**11 dyne/cm**2 yields M0 in dyne-cm
            M0 = M0 + (area * (math.sqrt(slip1**2 + slip2**2 + slip3**2)) *
                       3) * (10**11)
        srff.close()

        np_tinit = np.array(tinit)
        [tinit_index] = np.nonzero(np_tinit == np.min(np_tinit))

        # Find hypocenter
        hyp = []
        hyp.append(lon[tinit_index[0]])
        hyp.append(lat[tinit_index[0]])
        hyp.append(dep[tinit_index[0]])
        self.hyp = hyp

        # Find mw
        mw = (math.log10(M0)) / 1.5 - 10.73
        self.mw = mw

        # Find rake (i.e. mechanism)
        rake_ave = np.mean(np.array(rake))
        self.rake = rake_ave
        # Classify by average rake: reverse (rs), strike-slip (ss),
        # or normal (ns) faulting
        if rake_ave > 45 and rake_ave < 135:
            mecha = 'rs'
        elif rake_ave >= 135 and rake_ave < 225:
            mecha = 'ss'
        elif rake_ave >= 225 and rake_ave < 315:
            mecha = 'ns'
        else:
            mecha = 'ss'
        self.mecha = mecha
        # print "mw: %f, rake_ave: %f, mecha:%s"%(mw, rake_ave, mecha)

        if self.extended == 'y':
            # Find fault corners
            np_dep = np.array(dep)
            # print np.min(np_dep), len(np_dep)
            [i] = np.nonzero(np_dep == np.min(np_dep))

            if len(i) < 1:
                raise bband_utils.ProcessingError("Invalid SRF file: %s\n"
                                                  "len(i)=%d Failed to "
                                                  "calculate extended "
                                                  "fault data!" %
                                                  (srffile, len(i)))
            np_lon = np.array(lon)
            [j] = np.nonzero(np_lon[i] == np.min(np_lon[i]))

            if len(j) < 1:
                raise bband_utils.ProcessingError("Invalid SRF file: %s\n"
                                                  "len(j)=%d Failed to "
                                                  "calculate extended "
                                                  "fault data!" %
                                                  (srffile, len(j)))
            np_lat = np.array(lat)
            [k] = np.nonzero(np_lat[j] == np.min(np_lat[j]))

            if len(k) == 1:
                lat_index = k[0]
                self.corn = [lon[lat_index], lat[lat_index]]
                # print "Corner:", self.corn
            else:
                raise bband_utils.ProcessingError("Invalid SRF file: %s\n"
                                                  "len(k)=%d Failed to "
                                                  "calculate extended "
                                                  "fault data!" %
                                                  (srffile, len(k)))

            self.fault_maxlon = np.max(np_lon)
            self.fault_minlon = np.min(np_lon)
            self.fault_maxlat = np.max(np_lat)
            self.fault_minlat = np.min(np_lat)
            #print ("fault_maxlon: %f, fault_minlon: %f, fault_maxlat: %f, " %
            #       (self.fault_maxlon,self.fault_minlon,self.fault_maxlat) +
            #       "fault_minlat: %f" % (self.fault_minlat))
            # par.mw = mw; par.mecha = mecha;
        return 0
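
The magnitude step in read_srf is the Hanks and Kanamori (1979) moment-magnitude relation with M0 in dyne-cm; the factor of 3x10**11 dyne/cm**2 in the M0 sum is the assumed crustal rigidity. A standalone sketch of the same arithmetic (the moment_to_mw name is illustrative):

import math

def moment_to_mw(m0_dyne_cm):
    # Hanks & Kanamori: Mw = log10(M0) / 1.5 - 10.73, with M0 in dyne-cm
    return math.log10(m0_dyne_cm) / 1.5 - 10.73

# An Mw ~7.0 event corresponds to M0 of roughly 3.5e26 dyne-cm
print(moment_to_mw(3.55e26))  # ~6.97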