Example #1
    def parse_spectra(self):
        """
        Parses the response spectra - 5 % damping is assumed
        """
        sm_record = OrderedDict([("X", {
            "Scalar": {},
            "Spectra": {
                "Response": {}
            }
        }), ("Y", {
            "Scalar": {},
            "Spectra": {
                "Response": {}
            }
        }), ("V", {
            "Scalar": {},
            "Spectra": {
                "Response": {}
            }
        })])
        target_names = list(sm_record)
        for iloc, ifile in enumerate(self.input_files):
            if not os.path.exists(ifile):
                continue
            metadata = _get_metadata_from_file(ifile)
            data = np.genfromtxt(ifile, skip_header=64)

            units = metadata["UNITS"]
            if "s^2" in units:
                units = units.replace("s^2", "s/s")
            # Peak ground acceleration converted to cm/s/s
            pga = convert_accel_units(
                _to_float(metadata["PGA_" + metadata["UNITS"].upper()]), units)
            periods = data[:, 0]
            s_a = convert_accel_units(data[:, 1], units)

            sm_record[target_names[iloc]]["Spectra"]["Response"] = {
                "Periods": periods,
                "Number Periods": len(periods),
                "Acceleration": {
                    "Units": "cm/s/s"
                },
                "Velocity": None,
                "Displacement": None,
                "PSA": None,
                "PSV": None
            }
            sm_record[target_names[iloc]]["Spectra"]["Response"]\
                     ["Acceleration"]["damping_05"] = s_a
            # If the displacement file exists - get the data from that directly
            sd_file = ifile.replace("SA.ASC", "SD.ASC")
            if os.path.exists(sd_file):
                # SD data
                sd_data = np.genfromtxt(sd_file, skip_header=64)
                # Units should be cm
                sm_record[target_names[iloc]]["Spectra"]["Response"]\
                    ["Displacement"] = {"damping_05": sd_data[:, 1],
                                        "Units": "cm"}
        return sm_record
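Every example in this listing funnels accelerations through convert_accel_units. The library's own implementation is not shown here; below is a minimal sketch of the conversion it appears to perform, assuming the unit strings {"g", "m/s/s", "cm/s/s"} given in the docstrings of Examples #7 and #16 and a cm/s/s default target (Example #17 passes an explicit 'g' target as a third argument). The function name and constant are hypothetical.

import numpy as np

# Hypothetical re-implementation for illustration only; the library's
# convert_accel_units may differ in naming and supported units.
G_TO_CM_S2 = 981.0  # standard gravity, approximated in cm/s/s

def convert_accel_units_sketch(values, from_units, to_units="cm/s/s"):
    """Convert accelerations between 'g', 'm/s/s' and 'cm/s/s'."""
    scale = {"g": G_TO_CM_S2, "m/s/s": 100.0, "cm/s/s": 1.0}
    if from_units not in scale or to_units not in scale:
        raise ValueError("Unrecognised units: %s -> %s"
                         % (from_units, to_units))
    return (np.asarray(values, dtype=float)
            * scale[from_units] / scale[to_units])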
Example #2
    def build_time_series_hdf5(self, record, sm_data, record_dir):
        """
        Constructs the hdf5 file for storing the strong motion record
        :param record:
            Strong motion record as instance of :class:`GroundMotionRecord`
        :param dict sm_data:
            Data dictionary for the strong motion record
        :param str record_dir:
            Directory in which to save the record
        """
        output_file = os.path.join(record_dir, record.id + ".hdf5")
        fle = h5py.File(output_file, "w-")
        grp = fle.create_group("Time Series")
        for key in sm_data.keys():
            if not sm_data[key]["Original"]:
                continue
            grp_comp = grp.create_group(key)
            grp_orig = grp_comp.create_group("Original Record")
            for attribute in self.TS_ATTRIBUTE_LIST:
                if attribute in sm_data[key]["Original"].keys():
                    grp_orig.attrs[attribute] =\
                        sm_data[key]["Original"][attribute]
            ts_dset = grp_orig.create_dataset(
                "Acceleration", (sm_data[key]["Original"]["Number Steps"], ),
                dtype="f")
            ts_dset.attrs["Units"] = "cm/s/s"
            time_step = sm_data[key]["Original"]["Time-step"]
            ts_dset.attrs["Time-step"] = time_step
            number_steps = sm_data[key]["Original"]["Number Steps"]
            ts_dset.attrs["Number Steps"] = number_steps
            ts_dset.attrs["PGA"] = utils.convert_accel_units(
                sm_data[key]["Original"]["PGA"],
                sm_data[key]["Original"]["Units"])
            # Store acceleration as cm/s/s
            ts_dset[:] = utils.convert_accel_units(
                sm_data[key]["Original"]["Acceleration"],
                sm_data[key]["Original"]["Units"])
            # Get velocity and displacement
            vel, dis = utils.get_velocity_displacement(time_step, ts_dset[:],
                                                       "cm/s/s")
            # Build velocity data set
            v_dset = grp_orig.create_dataset("Velocity", (number_steps, ),
                                             dtype="f")
            v_dset.attrs["Units"] = "cm/s"
            v_dset.attrs["Time-step"] = time_step
            v_dset.attrs["Number Steps"] = number_steps
            v_dset[:] = vel
            # Build displacement data set
            d_dset = grp_orig.create_dataset("Displacement", (number_steps, ),
                                             dtype="f")
            d_dset.attrs["Units"] = "cm"
            d_dset.attrs["Time-step"] = time_step
            d_dset.attrs["Number Steps"] = number_steps
            d_dset[:] = dis

        # Get the velocity and displacement time series and build scalar IMS

        return fle, output_file
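The group and dataset names written above fix the HDF5 layout, so a stored record can be read back with plain h5py calls. A usage sketch (the file name and the 'X' component key are hypothetical):

import h5py

with h5py.File("record_0001.hdf5", "r") as fle:
    acc = fle["Time Series/X/Original Record/Acceleration"]
    print(acc.attrs["Units"])      # "cm/s/s"
    print(acc.attrs["Time-step"])  # sampling interval in seconds
    data = acc[:]                  # acceleration history as a numpy array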
Example #3
    def parse_row(cls, rowdict, sa_colnames):
        '''This method is intended to be overridden by subclasses (by default
        it is a no-op) to perform any further operation on the given csv row
        `rowdict` before writing it to the GM database file. Please note that:

        1. This method should process `rowdict` in place; the returned value
           is ignored. Any exception raised here is handled in the caller
           method.
        2. `rowdict` keys might not be the same as the csv
           field names (first csv row). See the `mappings` class attribute.
        3. The values of `rowdict` are all strings and will be cast
           later according to the column type. However, if a cast is needed
           here for some custom operation, in order to convert strings to
           floats or timestamps (floats denoting date-times) you can use the
           static methods `timestamp` and `float`. Both methods also accept
           lists or tuples to convert arrays, and silently coerce unparsable
           values to nan (note that nan represents a missing value for any
           numeric or timestamp column).
        4. The `rowdict` keys 'event_id', 'station_id' and 'record_id' are
           reserved and their values will be overridden anyway.

        :param rowdict: a row of the csv flatfile, as Python dict

        :param sa_colnames: a list of strings of the column
            names denoting the SA values. The list is sorted ascending
            according to the relative numeric period defined in
            :meth:`get_sa_periods`
        '''
        # replace non lower case keys with their lower case counterpart:
        for key in cls._non_lcase_fieldnames:
            rowdict[key.lower()] = rowdict.pop(key)

        # assign values (sa, event time, pga):
        tofloat = cls.float
        sa_ = tofloat([rowdict[p] for p in sa_colnames])
        sa_unit = rowdict[cls._acc_unit_col] if cls._acc_unit_col else 'g'
        sa_ = convert_accel_units(sa_, sa_unit)
        rowdict['sa'] = sa_

        # assign event time:
        evtime_fieldnames = cls._evtime_fieldnames
        dtime = ""
        if len(evtime_fieldnames) == 6:
            # use zfill to account for '934' formatted as '0934' for years,
            # and '5' formatted as '05' for all other fields:
            dtime = "{}-{}-{}T{}:{}:{}".\
                format(*(rowdict[c].zfill(4 if i == 0 else 2)
                       for i, c in enumerate(evtime_fieldnames)))
        else:
            dtime = rowdict[evtime_fieldnames[0]]
        rowdict['event_time'] = cls.timestamp(dtime)

        # assign pga:
        pga_col, pga_unit = cls._pga_col, cls._pga_unit
        if not pga_unit:
            # read the unit from the row (as for SA above), defaulting to 'g'
            pga_unit = rowdict[cls._acc_unit_col] if cls._acc_unit_col else 'g'
        rowdict['pga'] = \
            convert_accel_units(tofloat(rowdict[pga_col]), pga_unit)
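The six-field branch of the event-time assembly pads each value before formatting. A standalone illustration of that zfill logic, with hypothetical field values:

fields = ["934", "5", "7", "3", "15", "9"]  # year, month, day, hour, min, sec
dtime = "{}-{}-{}T{}:{}:{}".format(
    *(f.zfill(4 if i == 0 else 2) for i, f in enumerate(fields)))
print(dtime)  # 0934-05-07T03:15:09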
Example #5
    def build_time_series_hdf5(self, record, sm_data, record_dir):
        """
        Constructs the hdf5 file for storing the strong motion record
        :param record:
            Strong motion record as instance of :class:`GroundMotionRecord`
        :param dict sm_data:
            Data dictionary for the strong motion record
        :param str record_dir:
            Directory in which to save the record
        """
        output_file = os.path.join(record_dir, record.id + ".hdf5")
        fle = h5py.File(output_file, "w-")
        grp = fle.create_group("Time Series")
        for key in sm_data.keys():
            if not sm_data[key]["Original"]:
                continue
            grp_comp = grp.create_group(key)
            grp_orig = grp_comp.create_group("Original Record")
            for attribute in self.TS_ATTRIBUTE_LIST:
                if attribute in sm_data[key]["Original"].keys():
                    grp_orig.attrs[attribute] = sm_data[key]["Original"][attribute]
            ts_dset = grp_orig.create_dataset("Acceleration", (sm_data[key]["Original"]["Number Steps"],), dtype="f")
            ts_dset.attrs["Units"] = "cm/s/s"
            time_step = sm_data[key]["Original"]["Time-step"]
            ts_dset.attrs["Time-step"] = time_step
            number_steps = sm_data[key]["Original"]["Number Steps"]
            ts_dset.attrs["Number Steps"] = number_steps
            ts_dset.attrs["PGA"] = utils.convert_accel_units(
                sm_data[key]["Original"]["PGA"], sm_data[key]["Original"]["Units"]
            )
            # Store acceleration as cm/s/s
            ts_dset[:] = utils.convert_accel_units(
                sm_data[key]["Original"]["Acceleration"], sm_data[key]["Original"]["Units"]
            )
            # Get velocity and displacement
            vel, dis = utils.get_velocity_displacement(time_step, ts_dset[:], "cm/s/s")
            # Build velocity data set
            v_dset = grp_orig.create_dataset("Velocity", (number_steps,), dtype="f")
            v_dset.attrs["Units"] = "cm/s"
            v_dset.attrs["Time-step"] = time_step
            v_dset.attrs["Number Steps"] = number_steps
            v_dset[:] = vel
            # Build displacement data set
            d_dset = grp_orig.create_dataset("Displacement", (number_steps,), dtype="f")
            d_dset.attrs["Units"] = "cm"
            d_dset.attrs["Time-step"] = time_step
            d_dset.attrs["Number Steps"] = number_steps
            d_dset[:] = dis

        # Get the velocity and displacement time series and build scalar IMS

        return fle, output_file
Example #6
    def _build_spectra_hdf5_from_row(self, output_file, row, periods,
                                     scalar_fields, spectra_fields, component,
                                     damping, units):
        """
        Builds the HDF5 file of scalar and response spectra intensity
        measures from a row of the spreadsheet
        """
        fle = h5py.File(output_file, "w-")
        ts_grp = fle.create_group("Time Series")
        ims_grp = fle.create_group("IMS")
        h_grp = ims_grp.create_group("H")
        scalar_grp = h_grp.create_group("Scalar")
        # Create scalar values
        for f_attr, imt in scalar_fields:
            dset = scalar_grp.create_dataset(imt, (1,), dtype="f")
            dset.attrs["Component"] = component
            input_units = re.search(r'\((.*?)\)', f_attr).group(1)
            if imt == "PGA":
                # Convert acceleration from reported units to cm/s/s
                dset.attrs["Units"] = "cm/s/s"
                dset[:] = utils.convert_accel_units(get_float(row[f_attr]),
                                                    input_units)
            else:
                # For other values take directly from the spreadsheet.
                # Units should be given in parentheses in the field name
                dset.attrs["Units"] = input_units
                dset[:] = get_float(row[f_attr])

        spectra_grp = h_grp.create_group("Spectra")
        rsp_grp = spectra_grp.create_group("Response")
        # Set up the periods dataset
        per_dset = rsp_grp.create_dataset("Periods", (len(periods),),
                                          dtype="f")
        per_dset.attrs["High Period"] = np.max(periods)
        per_dset.attrs["Low Period"] = np.min(periods)
        per_dset.attrs["Number Periods"] = len(periods)
        per_dset[:] = periods
        # Get the response spectra
        spectra = np.array([get_float(row[f_attr])
                            for f_attr in spectra_fields])
        acc_grp = rsp_grp.create_group("Acceleration")
        comp_grp = acc_grp.create_group(component)
        spectra_dset = comp_grp.create_dataset("damping_{:s}".format(damping),
                                               (len(spectra),),
                                               dtype="f")
        spectra_dset.attrs["Units"] = "cm/s/s"
        spectra_dset[:] = utils.convert_accel_units(spectra, units)
        fle.close()
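The scalar field names are assumed to carry their units in parentheses, which is what the regular expression above extracts. A standalone check with a hypothetical field name:

import re

f_attr = "PGA (cm/s^2)"  # hypothetical spreadsheet field name
input_units = re.search(r'\((.*?)\)', f_attr).group(1)
print(input_units)  # cm/s^2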
Example #7
    def __init__(self,
                 acceleration,
                 time_step,
                 periods,
                 damping=0.05,
                 units="cm/s/s"):
        '''
        Setup the response spectrum calculator
        :param numpy.ndarray acceleration:
            Acceleration time history
        :param float time_step:
            Time step (s) of the acceleration time history
        :param numpy.ndarray periods:
            Spectral periods (s) for calculation
        :param float damping:
            Fractional coefficient of damping
        :param str units:
            Units of the acceleration time history {"g", "m/s/s", "cm/s/s"}
        '''
        self.periods = periods
        self.num_per = len(periods)
        self.acceleration = convert_accel_units(acceleration, units)
        self.damping = damping
        self.d_t = time_step
        self.velocity, self.displacement = get_velocity_displacement(
            self.d_t, self.acceleration)
        self.num_steps = len(self.acceleration)
        self.omega = (2. * np.pi) / self.periods
        self.response_spectrum = None
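The constructor pre-computes the natural circular frequency for each spectral period as omega = 2*pi/T. A quick numeric check of that relation:

import numpy as np

periods = np.array([0.1, 0.5, 1.0, 2.0])  # spectral periods in seconds
omega = (2. * np.pi) / periods
print(omega)  # approximately [62.83 12.57  6.28  3.14] rad/s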
Example #8
    def _parse_time_history(self, ifile):
        """
        Parses the time history
        """
        # Build the metadata dictionary again
        metadata = _get_metadata_from_file(ifile)
        self.number_steps = _to_int(metadata["NDATA"])
        self.time_step = _to_float(metadata["SAMPLING_INTERVAL_S"])
        self.units = metadata["UNITS"]
        # Get acceleration data
        accel = np.genfromtxt(ifile, skip_header=64)
        if "DIS" in ifile:
            pga = None
            pgd = np.fabs(_to_float(
                metadata["PGD_" + metadata["UNITS"].upper()]))
        else:
            pga = np.fabs(_to_float(
                metadata["PGA_" + metadata["UNITS"].upper()]))
            pgd = None
            if "s^2" in self.units:
                self.units = self.units.replace("s^2", "s/s")

        output = {
            # Although the data will be converted to cm/s/s internally we can
            # do it here too
            "Acceleration": convert_accel_units(accel, self.units),
            "Time": get_time_vector(self.time_step, self.number_steps),
            "Time-step": self.time_step,
            "Number Steps": self.number_steps,
            "Units": self.units,
            "PGA": pga,
            "PGD": pgd
        }
        return output
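Several of these parsers rely on get_time_vector to build the time axis. Its implementation is not shown in this listing; a plausible one-line stand-in, assuming it returns number_steps samples spaced time_step seconds apart and starting at zero:

import numpy as np

def get_time_vector_sketch(time_step, number_steps):
    # Hypothetical stand-in for the library's get_time_vector
    return np.arange(number_steps, dtype=float) * time_step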
Example #9
    def _parse_time_history(self, ifile):
        """
        Parses the time history
        """
        # Build the metadata dictionary again
        metadata = _get_metadata_from_file(ifile)
        self.number_steps = _to_int(metadata["NDATA"])
        self.time_step = _to_float(metadata["SAMPLING_INTERVAL_S"])
        self.units = metadata["UNITS"]
        # Get acceleration data
        accel = np.genfromtxt(ifile, skip_header=64)
        if "DIS" in ifile:
            pga = None
            pgd = np.fabs(
                _to_float(metadata["PGD_" + metadata["UNITS"].upper()]))
        else:
            pga = np.fabs(
                _to_float(metadata["PGA_" + metadata["UNITS"].upper()]))
            pgd = None
            if "s^2" in self.units:
                self.units = self.units.replace("s^2", "s/s")

        output = {
            # Although the data will be converted to cm/s/s internally we can
            # do it here too
            "Acceleration": convert_accel_units(accel, self.units),
            "Time": get_time_vector(self.time_step, self.number_steps),
            "Time-step": self.time_step,
            "Number Steps": self.number_steps,
            "Units": self.units,
            "PGA": pga,
            "PGD": pgd
        }
        return output
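The metadata reports accelerations with unit strings such as 'cm/s^2', which the parsers normalise to the 'cm/s/s' spelling before conversion, presumably because convert_accel_units keys on the latter form. The substitution in isolation:

units = "cm/s^2"  # hypothetical metadata value
if "s^2" in units:
    units = units.replace("s^2", "s/s")
print(units)  # cm/s/s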
Example #10
    def _build_spectra_hdf5_from_row(self, output_file, row, periods,
                                     scalar_fields, spectra_fields, component,
                                     damping, units):
        """
   
        """
        fle = h5py.File(output_file, "w-")
        ts_grp = fle.create_group("Time Series")
        ims_grp = fle.create_group("IMS")
        h_grp = ims_grp.create_group("H")
        scalar_grp = h_grp.create_group("Scalar")
        # Create Scalar values
        for f_attr, imt in scalar_fields:
            dset = scalar_grp.create_dataset(imt, (1, ), dtype="f")
            dset.attrs["Component"] = component
            input_units = re.search(r'\((.*?)\)', f_attr).group(1)
            if imt == "PGA":
                # Convert acceleration from reported units to cm/s/s
                dset.attrs["Units"] = "cm/s/s"
                dset[:] = utils.convert_accel_units(get_float(row[f_attr]),
                                                    input_units)
            else:
                # For other values take directly from the spreadsheet.
                # Units should be given in parentheses in the field name
                dset.attrs["Units"] = input_units
                dset[:] = get_float(row[f_attr])

        spectra_grp = h_grp.create_group("Spectra")
        rsp_grp = spectra_grp.create_group("Response")
        # Setup periods dataset
        per_dset = rsp_grp.create_dataset("Periods", (len(periods), ),
                                          dtype="f")
        per_dset.attrs["High Period"] = np.max(periods)
        per_dset.attrs["Low Period"] = np.min(periods)
        per_dset.attrs["Number Periods"] = len(periods)
        per_dset[:] = periods
        # Get response spectra
        spectra = np.array(
            [get_float(row[f_attr]) for f_attr in spectra_fields])
        acc_grp = rsp_grp.create_group("Acceleration")
        comp_grp = acc_grp.create_group(component)
        spectra_dset = comp_grp.create_dataset("damping_{:s}".format(damping),
                                               (len(spectra), ),
                                               dtype="f")
        spectra_dset.attrs["Units"] = "cm/s/s"
        spectra_dset[:] = utils.convert_accel_units(spectra, units)
        fle.close()
Example #11
    def parse_spectra(self):
        """
        Parses the response spectra - 5% damping is assumed
        """
        sm_record = OrderedDict([
            ("X", {"Scalar": {}, "Spectra": {"Response": {}}}),
            ("Y", {"Scalar": {}, "Spectra": {"Response": {}}}),
            ("V", {"Scalar": {}, "Spectra": {"Response": {}}})])
        target_names = list(sm_record)
        for iloc, ifile in enumerate(self.input_files):
            if not os.path.exists(ifile):
                continue
            metadata = _get_metadata_from_file(ifile)
            data = np.genfromtxt(ifile, skip_header=64)

            units = metadata["UNITS"]
            if "s^2" in units:
                units = units.replace("s^2", "s/s")
            # Peak ground acceleration converted to cm/s/s
            pga = convert_accel_units(
                _to_float(metadata["PGA_" + metadata["UNITS"].upper()]),
                units)
            periods = data[:, 0]
            s_a = convert_accel_units(data[:, 1], units)

            sm_record[target_names[iloc]]["Spectra"]["Response"] = {
                "Periods": periods,
                "Number Periods": len(periods),
                "Acceleration": {"Units": "cm/s/s"},
                "Velocity": None,
                "Displacement": None,
                "PSA": None,
                "PSV": None}
            sm_record[target_names[iloc]]["Spectra"]["Response"]\
                     ["Acceleration"]["damping_05"] = s_a
            # If the displacement file exists - get the data from that directly
            sd_file = ifile.replace("SA.ASC", "SD.ASC")
            if os.path.exists(sd_file):
                # SD data
                sd_data = np.genfromtxt(sd_file, skip_header=64)
                # Units should be cm
                sm_record[target_names[iloc]]["Spectra"]["Response"]\
                    ["Displacement"] = {"damping_05": sd_data[:, 1],
                                        "Units": "cm"}
        return sm_record
Example #12
def plot_time_series(acceleration,
                     time_step,
                     velocity=[],
                     displacement=[],
                     units="cm/s/s",
                     figure_size=(8, 6),
                     filename=None,
                     filetype="png",
                     dpi=300,
                     linewidth=1.5):
    """
    Creates a plot of acceleration, velocity and displacement for a specific
    ground motion record
    """
    acceleration = convert_accel_units(acceleration, units)
    accel_time = get_time_vector(time_step, len(acceleration))
    if not len(velocity) or not len(displacement):
        # Derive any series not supplied from the acceleration history
        vel, dspl = get_velocity_displacement(time_step, acceleration)
        if not len(velocity):
            velocity = vel
        if not len(displacement):
            displacement = dspl
    vel_time = get_time_vector(time_step, len(velocity))
    disp_time = get_time_vector(time_step, len(displacement))
    fig = plt.figure(figsize=figure_size)
    fig.set_tight_layout(True)
    ax = plt.subplot(3, 1, 1)
    # Acceleration
    ax.plot(accel_time, acceleration, 'k-', linewidth=linewidth)
    ax.set_xlabel("Time (s)", fontsize=12)
    ax.set_ylabel("Acceleration (cm/s/s)", fontsize=12)
    end_time = np.max(np.array([accel_time[-1], vel_time[-1], disp_time[-1]]))
    pga = np.max(np.fabs(acceleration))
    ax.set_xlim(0, end_time)
    ax.set_ylim(-1.1 * pga, 1.1 * pga)
    ax.grid()
    # Velocity
    ax = plt.subplot(3, 1, 2)
    ax.plot(vel_time, velocity, 'b-', linewidth=linewidth)
    ax.set_xlabel("Time (s)", fontsize=12)
    ax.set_ylabel("Velocity (cm/s)", fontsize=12)
    pgv = np.max(np.fabs(velocity))
    ax.set_xlim(0, end_time)
    ax.set_ylim(-1.1 * pgv, 1.1 * pgv)
    ax.grid()
    # Displacement
    ax = plt.subplot(3, 1, 3)
    ax.plot(disp_time, displacement, 'r-', linewidth=linewidth)
    ax.set_xlabel("Time (s)", fontsize=12)
    ax.set_ylabel("Displacement (cm)", fontsize=12)
    pgd = np.max(np.fabs(displacement))
    ax.set_xlim(0, end_time)
    ax.set_ylim(-1.1 * pgd, 1.1 * pgd)
    ax.grid()
    _save_image(filename, filetype, dpi)
    plt.show()
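A usage sketch of the plotting helper above, with a synthetic 10 s sinusoidal record given in g (the call assumes plot_time_series and its helper functions are importable):

import numpy as np

dt = 0.01  # sampling interval in seconds
accel_g = 0.1 * np.sin(2. * np.pi * np.arange(0., 10., dt))  # 1 Hz sine in g
plot_time_series(accel_g, dt, units="g", filename="record.png")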
Example #13
    def _parse_time_history(self, ifile):
        """
        Parses the time history from the file and returns a dictionary of
        time-series properties
        """
        output = {}
        accel = np.genfromtxt(ifile, skip_header=1)
        output["Acceleration"] = convert_accel_units(accel, self.units)
        nvals, time_step = (getline(ifile, 1).rstrip("\n")).split()
        output["Time-step"] = float(time_step)
        output["Number Steps"] = int(nvals)
        output["Units"] = "cm/s/s"
        output["PGA"] = np.max(np.fabs(output["Acceleration"]))
        return output
Example #14
    def _parse_time_history(self, ifile, units="cm/s/s"):
        """
        Parses the time history from the file and returns a dictionary of
        time-series properties
        """
        output = {}
        accel = np.genfromtxt(ifile, skip_header=1)
        # Convert from the input units given in the argument
        output["Acceleration"] = convert_accel_units(accel, units)
        nvals, time_step = (getline(ifile, 1).rstrip("\n")).split()
        output["Time-step"] = float(time_step)
        output["Number Steps"] = int(nvals)
        output["Units"] = units
        output["PGA"] = np.max(np.fabs(output["Acceleration"]))
        return output
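Both variants above read the sample count and time step from the first line of the ASCII file, which is therefore assumed to look like "<number of samples> <time step>". The same read in isolation (the file name is hypothetical):

from linecache import getline

nvals, time_step = getline("record.asc", 1).rstrip("\n").split()
print(int(nvals), float(time_step))  # e.g. 2000 0.005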
Example #15
def plot_time_series(acceleration, time_step, velocity=[], displacement=[],
        units="cm/s/s", figure_size=(8, 6), filename=None, filetype="png",
        dpi=300, linewidth=1.5):
    """
    Creates a plot of acceleration, velocity and displacement for a specific
    ground motion record
    """
    acceleration = convert_accel_units(acceleration, units)
    accel_time = get_time_vector(time_step, len(acceleration))
    if not len(velocity) or not len(displacement):
        # Derive any series not supplied from the acceleration history
        vel, dspl = get_velocity_displacement(time_step, acceleration)
        if not len(velocity):
            velocity = vel
        if not len(displacement):
            displacement = dspl
    vel_time = get_time_vector(time_step, len(velocity))
    disp_time = get_time_vector(time_step, len(displacement))
    fig = plt.figure(figsize=figure_size)
    fig.set_tight_layout(True)
    ax = plt.subplot(3, 1, 1)
    # Acceleration
    ax.plot(accel_time, acceleration, 'k-', linewidth=linewidth)
    ax.set_xlabel("Time (s)", fontsize=12)
    ax.set_ylabel("Acceleration (cm/s/s)", fontsize=12)
    end_time = np.max(np.array([accel_time[-1], vel_time[-1], disp_time[-1]]))
    pga = np.max(np.fabs(acceleration))
    ax.set_xlim(0, end_time)
    ax.set_ylim(-1.1 * pga, 1.1 * pga)
    ax.grid()
    # Velocity
    ax = plt.subplot(3, 1, 2)
    ax.plot(vel_time, velocity, 'b-', linewidth=linewidth)
    ax.set_xlabel("Time (s)", fontsize=12)
    ax.set_ylabel("Velocity (cm/s)", fontsize=12)
    pgv = np.max(np.fabs(velocity))
    ax.set_xlim(0, end_time)
    ax.set_ylim(-1.1 * pgv, 1.1 * pgv)
    ax.grid()
    # Displacement
    ax = plt.subplot(3, 1, 3)
    ax.plot(disp_time, displacement, 'r-', linewidth=linewidth)
    ax.set_xlabel("Time (s)", fontsize=12)
    ax.set_ylabel("Displacement (cm)", fontsize=12)
    pgd = np.max(np.fabs(displacement))
    ax.set_xlim(0, end_time)
    ax.set_ylim(-1.1 * pgd, 1.1 * pgd)
    ax.grid()
    _save_image(filename, filetype, dpi)
    plt.show()
Example #16
    def __init__(self, acceleration, time_step, periods, damping=0.05,
            units="cm/s/s"):
        '''
        Setup the response spectrum calculator
        :param numpy.ndarray acceleration:
            Acceleration time history
        :param float time_step:
            Time step (s) of the acceleration time history
        :param numpy.ndarray periods:
            Spectral periods (s) for calculation
        :param float damping:
            Fractional coefficient of damping
        :param str units:
            Units of the acceleration time history {"g", "m/s/s", "cm/s/s"}
        '''
        self.periods = periods
        self.num_per = len(periods)
        self.acceleration = convert_accel_units(acceleration, units)
        self.damping = damping
        self.d_t = time_step
        self.velocity, self.displacement = get_velocity_displacement(
            self.d_t, self.acceleration)
        self.num_steps = len(self.acceleration)
        self.omega = (2. * np.pi) / self.periods
        self.response_spectrum = None
Example #17
    def get_residuals(self,
                      ctx_database,
                      nodal_plane_index=1,
                      component="Geometric",
                      normalise=True):
        """
        Calculate the residuals for a set of ground motion records

        :param ctx_database: a :class:`context_db.ContextDB`, i.e. a database of
            records capable of returning dicts of earthquake-based Contexts and
            observed IMTs.
            See e.g., :class:`smtk.sm_database.GroundMotionDatabase` for an
            example
        """

        contexts = ctx_database.get_contexts(nodal_plane_index, self.imts,
                                             component)

        # For efficiency, collect outside the loop the IMTs that need
        # acceleration units conversion from cm/s/s to g; the conversion
        # itself is done inside the loop:
        accel_imts = tuple(
            imtx for imtx in self.imts if imtx == "PGA" or "SA(" in imtx)

        # In either case, contexts is a list of dictionaries
        self.contexts = []
        for context in contexts:

            # convert all IMTS with acceleration units, which are supposed to
            # be in cm/s/s, to g:
            for a_imt in accel_imts:
                context['Observations'][a_imt] = \
                    convert_accel_units(context['Observations'][a_imt],
                                        'cm/s/s', 'g')

            # Get the expected ground motions
            context = self.get_expected_motions(context)
            context = self.calculate_residuals(context, normalise)
            for gmpe in self.residuals.keys():
                for imtx in self.residuals[gmpe].keys():
                    if not context["Residual"][gmpe][imtx]:
                        continue
                    for res_type in self.residuals[gmpe][imtx].keys():
                        if res_type == "Inter event":
                            inter_ev = \
                                context["Residual"][gmpe][imtx][res_type]
                            if np.all(
                                    np.fabs(inter_ev - inter_ev[0]) < 1.0E-12):
                                # Single inter-event residual
                                self.residuals[gmpe][imtx][res_type].append(
                                    inter_ev[0])
                                # Append indices
                                self.unique_indices[gmpe][imtx].append(
                                    np.array([0]))
                            else:
                                # Inter event residuals per-site e.g. Chiou
                                # & Youngs (2008; 2014) case
                                self.residuals[gmpe][imtx][res_type].extend(
                                    inter_ev)
                                self.unique_indices[gmpe][imtx].append(
                                    np.arange(len(inter_ev)))
                        else:
                            self.residuals[gmpe][imtx][res_type].extend(
                                context["Residual"][gmpe][imtx][res_type])
                        self.modelled[gmpe][imtx][res_type].extend(
                            context["Expected"][gmpe][imtx][res_type])

                    self.modelled[gmpe][imtx]["Mean"].extend(
                        context["Expected"][gmpe][imtx]["Mean"])

            self.contexts.append(context)

        for gmpe in self.residuals.keys():
            for imtx in self.residuals[gmpe].keys():
                if not self.residuals[gmpe][imtx]:
                    continue
                for res_type in self.residuals[gmpe][imtx].keys():
                    self.residuals[gmpe][imtx][res_type] = np.array(
                        self.residuals[gmpe][imtx][res_type])
                    self.modelled[gmpe][imtx][res_type] = np.array(
                        self.modelled[gmpe][imtx][res_type])
                self.modelled[gmpe][imtx]["Mean"] = np.array(
                    self.modelled[gmpe][imtx]["Mean"])
Example #18
    def _parse_time_history(self, ifile, component2parse):
        """
        Parses the time history and returns the time history of the specified
        component. All 3 components are provided in every ASA file. Note that
        components are defined with various names, and are not always
        given in the same order
        """

        # The components are defined using the following names
        comp_names = {'X': ['ENE', 'N90E', 'N90E;', 'N90W', 'N90W;',
                            'S90E', 'S90W', 'E--W', 'S9OE'],
                      'Y': ['ENN', 'N00E', 'N00E;', 'NOOE;', 'N00W',
                            'NOOW;', 'S00E', 'S00W', 'N--S', 'NOOE'],
                      'V': ['ENZ', 'V', 'V;+', '+V', 'Z', 'VERT']}

        # Read component names, which are given on line 107
        with open(ifile, "r", encoding='iso-8859-1') as o:
            r = o.readlines()
        components = list(r[107].split())

        # Check if any component names are repeated
        if any(components.count(x) > 1 for x in components):
            raise ValueError(
                "Some components %s in record %s have the same name"
                % (components, ifile))
        # Check if more than 3 components are given
        if len(components) > 3:
            raise ValueError(
                "More than 3 components %s in record %s"
                % (components, ifile))

        # Get acceleration data from the column matching the requested
        # component
        column = None
        for name in comp_names[component2parse]:
            if name in components:
                column = components.index(name)
                try:
                    accel = np.genfromtxt(ifile, skip_header=109,
                                          usecols=column, delimiter='',
                                          encoding='iso-8859-1')
                except Exception:
                    raise ValueError(
                        "Check %s has 3 equal length time-series columns"
                        % ifile)
                break
        if column is None:
            raise ValueError(
                "None of the components %s were found to be\n"
                "the %s component of file %s" %
                (components, component2parse, ifile))

        # Build the metadata dictionary again
        metadata = _get_metadata_from_file(ifile)

        # Get units (used to convert the acceleration below)
        units_provided = metadata["UNIDADES DE LOS DATOS"]
        self.units = units_provided[units_provided.find("(") + 1:
                                    units_provided.find(")")]

        # Get time step, naming is not consistent so allow for variation
        for i in metadata:
            if 'INTERVALO DE MUESTREO, C1' in i:
                self.time_step = get_float(metadata[i].split("/")[1])

        # Get number of time steps, use len(accel) because
        # sometimes "NUM. TOTAL DE MUESTRAS, C1-C6" is wrong
        self.number_steps = len(accel)

        output = {
            "Acceleration": convert_accel_units(accel, self.units),
            "Time": get_time_vector(self.time_step, self.number_steps),
            "Time-step": self.time_step,
            "Number Steps": self.number_steps,
            "Units": self.units,
            "PGA": max(abs(accel)),
            "PGD": None
        }

        return output
Example #19
    def _parse_time_history(self, ifile, component2parse):
        """
        Parses the time history and returns the time history of the specified
        component. All 3 components are provided in every ASA file. Note that
        components are defined with various names, and are not always
        given in the same order
        """

        # The components are defined using the following names
        comp_names = {
            'X':
            ['N90E', 'N90E;', 'N90W', 'N90W;', 'S90E', 'S90W', 'E--W', 'S9OE'],
            'Y':
            ['N00E', 'N00E;', 'N00W', 'N00W;', 'S00E', 'S00W', 'N--S', 'NOOE'],
            'V': ['V', 'V;+', '+V', 'Z', 'VERT']
        }

        # Read component names, which are given on line 107
        with open(ifile, "r") as o:
            r = o.readlines()
        components = list(r[107].split())

        # Check if any component names are repeated
        if any(components.count(x) > 1 for x in components):
            raise ValueError(
                "Some components %s in record %s have the same name" %
                (components, ifile))
        # Check if more than 3 components are given
        if len(components) > 3:
            raise ValueError("More than 3 components %s in record %s" %
                             (components, ifile))

        # Get acceleration data from the column matching the requested
        # component
        column = None
        for name in comp_names[component2parse]:
            if name in components:
                column = components.index(name)
                try:
                    accel = np.genfromtxt(ifile, skip_header=109,
                                          usecols=column, delimiter='')
                except Exception:
                    raise ValueError(
                        "Check %s has 3 equal length time-series columns" %
                        ifile)
                break
        if column is None:
            raise ValueError(
                "None of the components %s were found to be\n"
                "the %s component of file %s" %
                (components, component2parse, ifile))

        # Build the metadata dictionary again
        metadata = _get_metadata_from_file(ifile)

        # Get units (used to convert the acceleration below)
        units_provided = metadata["UNIDADES DE LOS DATOS"]
        self.units = units_provided[units_provided.find("(") +
                                    1:units_provided.find(")")]

        # Get time step, naming is not consistent so allow for variation
        for i in metadata:
            if 'INTERVALO DE MUESTREO, C1' in i:
                self.time_step = get_float(metadata[i].split("/")[1])

        # Get number of time steps, use len(accel) because
        # sometimes "NUM. TOTAL DE MUESTRAS, C1-C6" is wrong
        self.number_steps = len(accel)

        output = {
            "Acceleration": convert_accel_units(accel, self.units),
            "Time": get_time_vector(self.time_step, self.number_steps),
            "Time-step": self.time_step,
            "Number Steps": self.number_steps,
            "Units": self.units,
            "PGA": max(abs(accel)),
            "PGD": None
        }

        return output
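The unit string in these ASA files is taken from between the parentheses of the 'UNIDADES DE LOS DATOS' metadata value. The slicing in isolation, with a hypothetical value:

units_provided = "ACELERACION (cm/s^2)"  # hypothetical metadata value
units = units_provided[units_provided.find("(") + 1:units_provided.find(")")]
print(units)  # cm/s^2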