Example #1
class QSeisConfigFull(QSeisConfig):

    time_start = Float.T(default=0.0)
    time_reduction_velocity = Float.T(default=0.0)
    time_window = Float.T(default=900.0)

    source_depth = Float.T(default=10.0)
    source_mech = QSeisSourceMech.T(optional=True,
                                    default=QSeisSourceMechMT.D())

    receiver_depth = Float.T(default=0.0)
    receiver_distances = List.T(Float.T())
    nsamples = Int.T(default=256)

    gf_sw_source_types = Tuple.T(6, Int.T(), default=(1, 1, 1, 1, 0, 0))

    gf_filenames = Tuple.T(6, String.T(), default=qseis_greenf_names)

    seismogram_filename = String.T(default='seis')

    receiver_azimuths = List.T(Float.T())

    earthmodel_1d = gf.meta.Earthmodel1D.T(optional=True)
    earthmodel_receiver_1d = gf.meta.Earthmodel1D.T(optional=True)

    @staticmethod
    def example():
        conf = QSeisConfigFull()
        conf.receiver_distances = [2000.]
        conf.receiver_azimuths = [0.]
        conf.time_start = -10.0
        conf.time_reduction_velocity = 15.0
        conf.earthmodel_1d = cake.load_model().extract(depth_max='cmb')
        conf.earthmodel_receiver_1d = None
        conf.sw_flat_earth_transform = 1
        return conf

    def get_output_filenames(self, rundir):
        return [
            pjoin(rundir, self.seismogram_filename + '.t' + c)
            for c in qseis_components
        ]

    def get_output_filenames_gf(self, rundir):
        return [
            pjoin(rundir, fn + '.t' + c) for fn in self.gf_filenames
            for c in qseis_components
        ]

    def string_for_config(self):
        def aggregate(items):
            return len(items), '\n'.join(
                [''] + [x.string_for_config() for x in items])

        assert len(self.receiver_distances) > 0
        assert len(self.receiver_distances) == len(self.receiver_azimuths)
        assert self.earthmodel_1d is not None

        d = self.__dict__.copy()

        # fixing these switches here to reduce the amount of wrapper code
        d['sw_distance_unit'] = 1  # always give distances in [km]
        d['sw_t_reduce'] = 1  # time reduction always as velocity [km/s]
        d['sw_equidistant'] = 0  # always give all distances and azimuths
        d['sw_irregular_azimuths'] = 1

        d['n_distances'] = len(self.receiver_distances)
        d['str_distances'] = str_float_vals(self.receiver_distances)
        d['str_azimuths'] = str_float_vals(self.receiver_azimuths)

        model_str, nlines = cake_model_to_config(self.earthmodel_1d)
        d['n_model_lines'] = nlines
        d['model_lines'] = model_str

        if self.earthmodel_receiver_1d:
            model_str, nlines = cake_model_to_config(
                self.earthmodel_receiver_1d)
        else:
            model_str = "# no receiver side model"
            nlines = 0

        d['n_model_receiver_lines'] = nlines
        d['model_receiver_lines'] = model_str

        d['str_slowness_window'] = str_float_vals(self.slowness_window)
        d['n_depth_ranges'], d['str_depth_ranges'] = \
            aggregate(self.propagation_filters)

        if self.wavelet_type == 0:  # user wavelet
            d['str_w_samples'] = '\n' \
                + '%i\n' % len(self.user_wavelet_samples) \
                + str_float_vals(self.user_wavelet_samples)
        else:
            d['str_w_samples'] = ''

        if self.receiver_filter:
            d['str_receiver_filter'] = self.receiver_filter.string_for_config(
                self.qseis_version)
        else:
            if self.qseis_version == '2006a':
                d['str_receiver_filter'] = '(1.0,0.0)\n0\n#\n0'
            else:
                d['str_receiver_filter'] = '1.0\n0\n#\n0'

        d['str_gf_sw_source_types'] = str_int_vals(self.gf_sw_source_types)
        d['str_gf_filenames'] = str_str_vals(self.gf_filenames)

        if self.source_mech:
            d['str_source'] = '%s \'%s\'' % (
                self.source_mech.string_for_config(), self.seismogram_filename)
        else:
            d['str_source'] = '0'

        template = '''# autogenerated QSEIS input by qseis.py
#
# This is the input file of FORTRAN77 program "qseis06" for calculation of
# synthetic seismograms based on a layered halfspace earth model.
#
# by
# Rongjiang  Wang <*****@*****.**>
# GeoForschungsZentrum Potsdam
# Telegrafenberg, D-14473 Potsdam, Germany
#
# Last modified: Potsdam, Nov., 2006
#
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
# If not specified, SI Unit System is used overall!
#
# Coordinate systems:
# cylindrical (z,r,t) with z = downward,
#                          r = from source outward,
#                          t = azimuth angle from north to east;
# cartesian (x,y,z) with   x = north,
#                          y = east,
#                          z = downward;
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
#
#	SOURCE PARAMETERS
#	=================
# 1. source depth [km]
#------------------------------------------------------------------------------
 %(source_depth)e                    |dble: source_depth;
#------------------------------------------------------------------------------
#
#	RECEIVER PARAMETERS
#	===================
# 1. receiver depth [km]
# 2. switch for distance sampling role (1/0 = equidistant/irregular); switch
#    for unit used (1/0 = km/deg)
# 3. number of distance samples
# 4. if equidistant, then start and end trace distance (> 0); else distance
#    list (please order the receiver distances from small to large)
# 5. (reduced) time begin [sec] & length of time window [sec], number of time
#    samples (<= 2*nfmax in qsglobal.h)
# 6. switch for unit of the following time reduction parameter: 1 = velocity
#    [km/sec], 0 = slowness [sec/deg]; time reduction parameter
#------------------------------------------------------------------------------
 %(receiver_depth)e                         |dble: receiver_depth;
 %(sw_equidistant)i  %(sw_distance_unit)i   |int: sw_equidistant, sw_d_unit;
 %(n_distances)i                            |int: no_distances;
 %(str_distances)s                          |dble: d_1,d_n; or d_1,d_2, ...(no comments in between!);
 %(time_start)e %(time_window)e %(nsamples)i  |dble: t_start,t_window; int: no_t_samples;
 %(sw_t_reduce)i %(time_reduction_velocity)e  |int: sw_t_reduce; dble: t_reduce;
#------------------------------------------------------------------------------
#
#	WAVENUMBER INTEGRATION PARAMETERS
#	=================================
# 1. select slowness integration algorithm (0 = suggested for full wave-field
#    modelling; 1 or 2 = suggested when using a slowness window with narrow
#    taper range - a technique for suppressing space-domain aliasing);
# 2. 4 parameters for low and high slowness (Note 1) cut-offs [s/km] with
#    tapering: 0 < slw1 < slw2 defining cosine taper at the lower end, and 0 <
#    slw3 < slw4 defining the cosine taper at the higher end. default values
#    will be used in case of inconsistent input of the cut-offs (possibly with
#    much more computational effort);
# 3. parameter for sampling rate of the wavenumber integration (1 = sampled
#    with the spatial Nyquist frequency, 2 = sampled with twice higher than
#    the Nyquist, and so on: the larger this parameter, the smaller the space-
#    domain aliasing effect, but also the more computation effort);
# 4. the factor for suppressing time domain aliasing (> 0 and <= 1) (Note 2).
#------------------------------------------------------------------------------
 %(sw_algorithm)i                    |int: sw_algorithm;
 %(str_slowness_window)s             |dble: slw(1-4);
 %(wavenumber_sampling)e             |dble: sample_rate;
 %(aliasing_suppression_factor)e     |dble: supp_factor;
#------------------------------------------------------------------------------
#
#	        OPTIONS FOR PARTIAL SOLUTIONS
#       (only applied to the source-site structure)
#	    ===========================================
#
# 1. switch for filtering free surface effects (0 = with free surface, i.e.,
#    do not select this filter; 1 = without free surface; 2 = without free
#    surface but with correction on amplitude and wave form. Note switch 2
#    can only be used for receivers at the surface)
# 2. switch for filtering waves with a shallow penetration depth (concerning
#    their whole trace from source to receiver), penetration depth limit [km]
#
#    if this option is selected, waves whose travel path never exceeds the
#    given depth limit will be filtered ("seismic muting"). the condition for
#    selecting this filter is that the given shallow path depth limit should
#    be larger than both source and receiver depth.
#
# 3. number of depth ranges where the following selected up/down-going P or
#    SV waves should be filtered
# 4. the 1. depth range: upper and lower depth [km], switch for filtering P
#    or SV wave in this depth range:
#
#    switch no:              1      2        3       4         other
#    filtered phase:         P(up)  P(down)  SV(up)  SV(down)  Error
#
# 5. the 2. ...
#
#    The partial solution options are useful tools to increase the numerical
#    significance of desired wave phases. Especially when the desired phases
#    are smaller than the undesired phases, these options should be selected
#    and carefully combined.
#------------------------------------------------------------------------------
 %(filter_surface_effects)i                  |int: isurf;
 %(filter_shallow_paths)i %(filter_shallow_paths_depth)e  |int: sw_path_filter; dble:shallow_depth_limit;
 %(n_depth_ranges)i %(str_depth_ranges)s
#------------------------------------------------------------------------------
#
#	SOURCE TIME FUNCTION (WAVELET) PARAMETERS (Note 3)
#	==================================================
# 1. wavelet duration [unit = time sample rather than sec!], that is about
#    equal to the half-amplitude cut-off period of the wavelet (> 0. if <= 0,
#    then default value = 2 time samples will be used), and switch for the
#    wavelet form (0 = user's own wavelet; 1 = default wavelet: normalized
#    square half-sinusoid for simulating a physical delta impulse; 2 = tapered
#    Heaviside wavelet, i.e. integral of wavelet 1)
# 2. IF user's own wavelet is selected, then number of the wavelet time samples
#    (<= 1024), and followed by
# 3. equidistant wavelet time samples
# 4  ...(continue) (! no comment lines allowed between the time sample list!)
#    IF default, delete line 2, 3, 4 ... or comment them out!
#------------------------------------------------------------------------------
 %(wavelet_duration_samples)e %(wavelet_type)i%(str_w_samples)s
#------------------------------------------------------------------------------
#
#	 FILTER PARAMETERS OF RECEIVERS (SEISMOMETERS OR HYDROPHONES)
#	 ============================================================
# 1. constant coefficient (normalization factor)
# 2. number of roots (<= nrootmax in qsglobal.h)
# 3. list of the root positions in the complex format (Re,Im). If no roots,
#    comment out this line
# 4. number of poles (<= npolemax in qsglobal.h)
# 5. list of the pole positions in the complex format (Re,Im). If no poles,
#    comment out this line
#------------------------------------------------------------------------------
 %(str_receiver_filter)s
#------------------------------------------------------------------------------
#
#	OUTPUT FILES FOR GREEN'S FUNCTIONS (Note 4)
#	===========================================
# 1. selections of source types (yes/no = 1/0)
# 2. file names of Green's functions (please give the names without extensions,
#    which will be appended by the program automatically: *.tz, *.tr, *.tt
#    and *.tv are for the vertical, radial, tangential, and volume change (for
#    hydrophones) components, respectively)
#------------------------------------------------------------------------------
#  explosion   strike-slip dip-slip   clvd       single_f_v  single_f_h
#------------------------------------------------------------------------------
 %(str_gf_sw_source_types)s
 %(str_gf_filenames)s
#------------------------------------------------------------------------------
#	OUTPUT FILES FOR AN ARBITRARY POINT DISLOCATION SOURCE
#               (for applications to earthquakes)
#	======================================================
# 1. selection (0 = not selected; 1 or 2 = selected), if (selection = 1), then
#    the 6 moment tensor elements [N*m]: Mxx, Myy, Mzz, Mxy, Myz, Mzx (x is
#    northward, y is eastward and z is downward); else if (selection = 2), then
#    Mis [N*m] = isotropic moment part = (MT+MN+MP)/3, Mcl = CLVD moment part
#    = (2/3)(MT+MP-2*MN), Mdc = double-couple moment part = MT-MN, Strike [deg],
#    Dip [deg] and Rake [deg].
#
#    Note: to use this option, the Green's functions above should be computed
#          (selection = 1) if they do not exist already.
#
#                 north(x)
#                  /
#                 /\ strike
#                *----------------------->  east(y)
#                |\                       \
#                |-\                       \
#                |  \     fault plane       \
#                |90 \                       \
#                |-dip\                       \
#                |     \                       \
#                |      \                       \
#           downward(z)  \-----------------------\\
#
# 2. switch for azimuth distribution of the stations (0 = uniform azimuth,
#    else = irregular azimuth angles)
# 3. list of the azimuth angles [deg] for all stations given above (if the
#    uniform azimuth is selected, then only one azimuth angle is required)
#
#------------------------------------------------------------------------------
#     Mis        Mcl        Mdc        Strike     Dip        Rake      File
#------------------------------------------------------------------------------
#  2   0.00       1.00       6.0E+19    120.0      30.0       25.0      'seis'
#------------------------------------------------------------------------------
#     Mxx        Myy        Mzz        Mxy        Myz        Mzx       File
#------------------------------------------------------------------------------
%(str_source)s
%(sw_irregular_azimuths)i
%(str_azimuths)s
#------------------------------------------------------------------------------
#
#	GLOBAL MODEL PARAMETERS (Note 5)
#	================================
# 1. switch for flat-earth-transform
# 2. gradient resolution [%%] of vp, vs, and ro (density), if <= 0, then default
#    values (depending on wave length at cut-off frequency) will be used
#------------------------------------------------------------------------------
 %(sw_flat_earth_transform)i     |int: sw_flat_earth_transform;
 %(gradient_resolution_vp)e %(gradient_resolution_vs)e %(gradient_resolution_density)e   |dble: vp_res, vs_res, ro_res;
#------------------------------------------------------------------------------
#
#	                LAYERED EARTH MODEL
#       (SHALLOW SOURCE + UNIFORM DEEP SOURCE/RECEIVER STRUCTURE)
#	=========================================================
# 1. number of data lines of the layered model (source site)
#------------------------------------------------------------------------------
 %(n_model_lines)i                   |int: no_model_lines;
#------------------------------------------------------------------------------
#
#	MULTILAYERED MODEL PARAMETERS (source site)
#	===========================================
# no  depth[km]  vp[km/s]  vs[km/s]  ro[g/cm^3] qp      qs
#------------------------------------------------------------------------------
%(model_lines)s
#------------------------------------------------------------------------------
#
#	          LAYERED EARTH MODEL
#       (ONLY THE SHALLOW RECEIVER STRUCTURE)
#       =====================================
# 1. number of data lines of the layered model
#
#    Note: if the number = 0, then the receiver site is the same as the
#          source site, else different receiver-site structure is considered.
#          please be sure that the lowest interface of the receiver-site
#          structure given below can be found within the source-site
#          structure, too.
#
#------------------------------------------------------------------------------
 %(n_model_receiver_lines)i                               |int: no_model_lines;
#------------------------------------------------------------------------------
#
#	MULTILAYERED MODEL PARAMETERS (shallow receiver-site structure)
#	===============================================================
# no  depth[km]    vp[km/s]    vs[km/s]   ro[g/cm^3]   qp      qs
#------------------------------------------------------------------------------
%(model_receiver_lines)s
#---------------------------------end of all inputs----------------------------


Note 1:

The slowness is defined by inverse value of apparent wave velocity = sin(i)/v
with i = incident angle and v = true wave velocity.

Note 2:

The suppression of the time domain aliasing is achieved by using the complex
frequency technique. The suppression factor should be a value between 0 and 1.
If this factor is set to 0.1, for example, the aliasing phase at the reduced
time begin is suppressed to 10%%.

Note 3:

The default basic wavelet function (option 1) is (2/tau)*sin^2(pi*t/tau),
for 0 < t < tau, simulating a physical delta impulse. Its half-amplitude cut-off
frequency is 1/tau. To avoid high-frequency noise, tau should not be smaller
than 4-5 time samples.

Note 4:

  Double-Couple   m11/ m22/ m33/ m12/ m23/ m31  Azimuth_Factor_(tz,tr,tv)/(tt)
  ============================================================================
  explosion       1.0/ 1.0/ 1.0/ -- / -- / --       1.0         /   0.0
  strike-slip     -- / -- / -- / 1.0/ -- / --       sin(2*azi)  /   cos(2*azi)
                  1.0/-1.0/ -- / -- / -- / --       cos(2*azi)  /  -sin(2*azi)
  dip-slip        -- / -- / -- / -- / -- / 1.0      cos(azi)    /   sin(azi)
                  -- / -- / -- / -- / 1.0/ --       sin(azi)    /  -cos(azi)
  clvd           -0.5/-0.5/ 1.0/ -- / -- / --       1.0         /   0.0
  ============================================================================
  Single-Force    fx / fy / fz                  Azimuth_Factor_(tz,tr,tv)/(tt)
  ============================================================================
  fz              -- / -- / 1.0                        1.0      /   0.0
  fx              1.0/ -- / --                         cos(azi) /   sin(azi)
  fy              -- / 1.0/ --                         sin(azi) /  -cos(azi)
  ============================================================================

Note 5:

Layers with a constant gradient will be discretized with a number of homogeneous
sublayers. The gradient resolutions are then used to determine the maximum
allowed thickness of the sublayers. If the resolutions of Vp, Vs and Rho
(density) require different thicknesses, the smallest is first chosen. If this
is even smaller than 1%% of the characteristic wavelength, then the latter is
taken finally for the sublayer thickness.
'''  # noqa

        return (template % d).encode('ascii')
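
A minimal usage sketch (assumptions: as in Pyrocko, the class is importable from pyrocko.fomosto.qseis; the output filename is illustrative):

from pyrocko.fomosto.qseis import QSeisConfigFull

conf = QSeisConfigFull.example()
input_bytes = conf.string_for_config()  # ASCII-encoded QSEIS input

# 'qseis.input' is a made-up filename
with open('qseis.input', 'wb') as f:
    f.write(input_bytes)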
Example #2
class Parameter(Object):
    name__ = String.T()
    unit = Unicode.T(optional=True)
    scale_factor = Float.T(default=1., optional=True)
    scale_unit = Unicode.T(optional=True)
    label = Unicode.T(optional=True)
    optional = Bool.T(default=True, optional=True)

    def __init__(self, *args, **kwargs):
        if len(args) >= 1:
            kwargs['name'] = args[0]
        if len(args) >= 2:
            kwargs['unit'] = newstr(args[1])

        self.groups = [None]
        self._name = None

        Object.__init__(self, **kwargs)

    def get_label(self, with_unit=True):
        lbl = [self.label or self.name]
        if with_unit:
            unit = self.get_unit_label()
            if unit:
                lbl.append('[%s]' % unit)

        return ' '.join(lbl)

    def set_groups(self, groups):
        if not isinstance(groups, list):
            raise AttributeError('Groups must be a list of strings.')
        self.groups = groups

    def _get_name(self):
        if None not in self.groups:
            return '%s.%s' % ('.'.join(self.groups), self._name)
        return self._name

    def _set_name(self, value):
        self._name = value

    name = property(_get_name, _set_name)

    @property
    def name_nogroups(self):
        return self._name

    def get_value_label(self, value, format='%(value)g%(unit)s'):
        value = self.scaled(value)
        unit = self.get_unit_suffix()
        return format % dict(value=value, unit=unit)

    def get_unit_label(self):
        if self.scale_unit is not None:
            return self.scale_unit
        elif self.unit:
            return self.unit
        else:
            return None

    def get_unit_suffix(self):
        unit = self.get_unit_label()
        if not unit:
            return ''
        else:
            return ' %s' % unit

    def scaled(self, x):
        if isinstance(x, tuple):
            return tuple(v / self.scale_factor for v in x)
        elif isinstance(x, list):
            return [v / self.scale_factor for v in x]
        else:
            return x / self.scale_factor

    def inv_scaled(self, x):
        if isinstance(x, tuple):
            return tuple(v * self.scale_factor for v in x)
        elif isinstance(x, list):
            return [v * self.scale_factor for v in x]
        else:
            return x * self.scale_factor
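
A short sketch of the scaling behaviour (a hypothetical depth parameter stored in meters but displayed in kilometers; name, unit and scale values are illustrative):

p = Parameter('depth', 'm', scale_factor=1000., scale_unit='km')

p.get_label()      # -> 'depth [km]'
p.scaled(2500.)    # -> 2.5     (internal m -> displayed km)
p.inv_scaled(2.5)  # -> 2500.0  (displayed km -> internal m)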
Example #3
class GNSSCampaignMisfitTarget(gf.GNSSCampaignTarget, MisfitTarget):
    """Handles and carries out operations related to the objective functions.

    The objective function is here the weighted misfit between observed
    and predicted surface displacements.
    """
    campaign_name = String.T()
    misfit_config = GNSSCampaignMisfitConfig.T()

    can_bootstrap_weights = True
    can_bootstrap_residuals = True

    plot_misfits_cumulative = False

    def __init__(self, **kwargs):
        gf.GNSSCampaignTarget.__init__(self, **kwargs)
        MisfitTarget.__init__(self, **kwargs)
        self._obs_data = None
        self._sigma = None
        self._weights = None
        self._station_component_mask = None

    @property
    def id(self):
        return self.campaign_name

    def string_id(self):
        return self.campaign_name

    def misfits_string_ids(self):
        return [
            '%s.%s' % (self.path, station.code)
            for station in self.campaign.stations
        ]

    @property
    def station_names(self):
        return ['%s' % (station.code) for station in self.campaign.stations]

    @property
    def nmisfits(self):
        return self.lats.size

    @property
    def nstations(self):
        return self.nmisfits

    def set_dataset(self, ds):
        MisfitTarget.set_dataset(self, ds)

    @property
    def campaign(self):
        return self._ds.get_gnss_campaign(self.campaign_name)

    @property
    def obs_data(self):
        if self._obs_data is None:
            self._obs_data = num.concatenate(
                [s.get_displacement_data() for s in self.campaign.stations])
        return self._obs_data

    @property
    def obs_sigma(self):
        if self._sigma is None:
            self._sigma = num.array([
                [s.north.sigma for s in self.campaign.stations],
                [s.east.sigma for s in self.campaign.stations],
                [s.up.sigma for s in self.campaign.stations]])\
              .ravel(order='F')
        return self._sigma

    @property
    def weights(self):
        """Weights are the inverse of the data error variance-covariance.

        The single component variances, and if provided the component
        covariances, are used to build a data variance matrix or
        variance-covariance matrix. Correlations between stations are
        not implemented.
        """
        if self._weights is None:
            covar = self.campaign.get_covariance_matrix()

            if not num.any(covar.diagonal()):
                logger.warning('GNSS Stations have an empty covariance matrix.'
                               ' Weights will be all equal.')
                num.fill_diagonal(covar, 1.)
            self._weights = num.asmatrix(covar).I
        return self._weights

    @property
    def station_component_mask(self):
        if self._station_component_mask is None:
            self._station_component_mask = self.campaign.get_component_mask()
        return self._station_component_mask

    @property
    def station_weights(self):
        weights = num.diag(self.weights)

        return num.mean([weights[0::3], weights[1::3], weights[2::3]], axis=0)

    def post_process(self, engine, source, statics):
        """Applies the objective function.

        Returns the weighted misfits; in full result mode the observed
        and synthetic data are attached as well.
        """
        obs = self.obs_data
        weights = self.weights
        nstations = self.campaign.nstations
        misfit_value = num.zeros(num.shape(self.station_component_mask))
        misfit_norm = num.zeros(num.shape(self.station_component_mask))

        # All data is ordered in vectors as
        # S1_n, S1_e, S1_u, ..., Sn_n, Sn_e, Sn_u. Hence (.ravel(order='F'))
        syn = num.array([
              statics['displacement.n'],
              statics['displacement.e'],
              -statics['displacement.d']])\
            .ravel(order='F')

        syn = syn[self.station_component_mask]

        res = num.abs(obs - syn)

        misfit_value0 = res * weights
        misfit_norm0 = obs * weights

        i_truecomponents = num.where(self.station_component_mask)
        misfit_value[i_truecomponents] = misfit_value0
        misfit_norm[i_truecomponents] = misfit_norm0
        misfit_value = num.sum(misfit_value.reshape((nstations, 3)), axis=1)
        misfit_norm = num.sum(misfit_norm.reshape((nstations, 3)), axis=1)
        misfit_value = misfit_value.reshape(nstations, 1)
        misfit_norm = misfit_norm.reshape(nstations, 1)

        mf = num.hstack((misfit_value, misfit_norm))
        result = GNSSCampaignMisfitResult(misfits=mf)

        if self._result_mode == 'full':
            result.statics_syn = statics
            result.statics_obs = obs

        return result

    def get_combined_weight(self):
        """Applies the manual weight given in the configuration."""
        if self._combined_weight is None:
            self._combined_weight = num.full(self.nmisfits, self.manual_weight)

        return self._combined_weight

    def prepare_modelling(self, engine, source, targets):
        return [self]

    def init_bootstrap_residuals(self, nbootstraps, rstate=None):
        logger.info('GNSS campaign %s, bootstrapping residuals'
                    ' from measurement uncertainties ...' % self.campaign.name)
        if rstate is None:
            rstate = num.random.RandomState()
        campaign = self.campaign
        bootstraps = num.empty((nbootstraps, campaign.nstations))
        sigmas = num.array([])
        for s in campaign.stations:
            if s.north:
                sigmas = num.hstack((sigmas, s.north.sigma))
            if s.east:
                sigmas = num.hstack((sigmas, s.east.sigma))
            if s.up:
                sigmas = num.hstack((sigmas, s.up.sigma))

        sigmas = num.abs(sigmas)

        if not num.all(sigmas):
            logger.warning('Bootstrapping GNSS stations is meaningless:'
                           ' all station sigmas are 0.0!')

        for ibs in range(nbootstraps):
            syn_noise = num.zeros(num.shape(self.station_component_mask))
            i_truecomponents = num.where(self.station_component_mask)
            syn_noise0 = rstate.normal(scale=sigmas.ravel())
            syn_noise[i_truecomponents] = syn_noise0
            syn_noise = syn_noise.reshape(campaign.nstations, 3) \
                .sum(axis=1)

            bootstraps[ibs, :] = syn_noise

        self.set_bootstrap_residuals(bootstraps)

    def finalize_modelling(self, engine, source, modelling_targets,
                           modelling_results):

        return modelling_results[0]

    @classmethod
    def get_plot_classes(cls):
        from . import plot
        plots = super(GNSSCampaignMisfitTarget, cls).get_plot_classes()
        plots.extend(plot.get_plot_classes())
        return plots
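
The weighting scheme described in the `weights` docstring can be illustrated standalone with plain numpy (the sigma values below are made up):

import numpy as num

# per-component standard deviations for one station: N, E, U [m]
sigmas = num.array([0.002, 0.003, 0.005])

# diagonal variance matrix; correlations between stations are not
# implemented, so off-diagonal terms stay zero
covar = num.diag(sigmas**2)

# the weights are the inverse of the variance-covariance matrix
weights = num.linalg.inv(covar)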
Example #4
class Response(Object):
    resource_id = String.T(optional=True, xmlstyle='attribute')
    instrument_sensitivity = Sensitivity.T(optional=True,
                                           xmltagname='InstrumentSensitivity')
    instrument_polynomial = Polynomial.T(optional=True,
                                         xmltagname='InstrumentPolynomial')
    stage_list = List.T(ResponseStage.T(xmltagname='Stage'))

    def get_pyrocko_response(self, nslc, fake_input_units=None):
        responses = []
        for stage in self.stage_list:
            responses.extend(stage.get_pyrocko_response(nslc))

        if not self.stage_list and self.instrument_sensitivity:
            responses.append(
                trace.PoleZeroResponse(
                    constant=self.instrument_sensitivity.value))

        if fake_input_units is not None:
            if not self.instrument_sensitivity or \
                    self.instrument_sensitivity.input_units is None:

                raise NoResponseInformation('no input units given')

            input_units = self.instrument_sensitivity.input_units.name

            try:
                conresp = conversion[
                    fake_input_units.upper(), input_units.upper()]

            except KeyError:
                raise NoResponseInformation(
                    'cannot convert between units: %s, %s'
                    % (fake_input_units, input_units))

            if conresp is not None:
                responses.append(conresp)

        return trace.MultiplyResponse(responses)

    @classmethod
    def from_pyrocko_pz_response(cls, presponse, input_unit, output_unit,
                                 normalization_frequency=1.0):

        norm_factor = 1.0/float(abs(
            presponse.evaluate(num.array([normalization_frequency]))[0]
            / presponse.constant))

        pzs = PolesZeros(
            pz_transfer_function_type='LAPLACE (RADIANS/SECOND)',
            normalization_factor=norm_factor,
            normalization_frequency=Frequency(normalization_frequency),
            zero_list=[PoleZero(real=FloatNoUnit(z.real),
                                imaginary=FloatNoUnit(z.imag))
                       for z in presponse.zeros],
            pole_list=[PoleZero(real=FloatNoUnit(z.real),
                                imaginary=FloatNoUnit(z.imag))
                       for z in presponse.poles])

        pzs.validate()

        stage = ResponseStage(
            number=1,
            poles_zeros_list=[pzs],
            stage_gain=Gain(float(abs(presponse.constant))/norm_factor))

        resp = Response(
            instrument_sensitivity=Sensitivity(
                value=stage.stage_gain.value,
                input_units=Units(input_unit),
                output_units=Units(output_unit)),

            stage_list=[stage])

        return resp
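
A hedged usage sketch for the converter above (pole, zero and gain values are illustrative; trace.PoleZeroResponse is Pyrocko's pole-zero response type):

from pyrocko import trace

presponse = trace.PoleZeroResponse(
    zeros=[0j, 0j],
    poles=[-4.443 + 4.443j, -4.443 - 4.443j],
    constant=60077000.0)

resp = Response.from_pyrocko_pz_response(
    presponse, input_unit='M/S', output_unit='COUNTS',
    normalization_frequency=1.0)
resp.validate()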
Example #5
class Event(Object):
    '''Seismic event representation

    :param lat: latitude of hypocenter (default 0.0)
    :param lon: longitude of hypocenter (default 0.0)
    :param time: origin time as float in seconds after '1970-01-01 00:00:00'
    :param name: event identifier as string (optional)
    :param depth: source depth (optional)
    :param magnitude: magnitude of event (optional)
    :param region: source region (optional)
    :param catalog: name of catalog that lists this event (optional)
    :param moment_tensor: moment tensor as
        :py:class:`moment_tensor.MomentTensor` instance (optional)
    :param duration: source duration as float (optional)
    '''

    lat = Float.T(default=0.0)
    lon = Float.T(default=0.0)
    time = Timestamp.T(default=util.str_to_time('1970-01-01 00:00:00'))
    name = String.T(default='', optional=True)
    depth = Float.T(optional=True)
    magnitude = Float.T(optional=True)
    magnitude_type = String.T(optional=True)
    region = Unicode.T(optional=True)
    catalog = String.T(optional=True)
    moment_tensor = moment_tensor.MomentTensor.T(optional=True)
    duration = Float.T(optional=True)

    def __init__(self,
                 lat=0.,
                 lon=0.,
                 time=0.,
                 name='',
                 depth=None,
                 magnitude=None,
                 magnitude_type=None,
                 region=None,
                 load=None,
                 loadf=None,
                 catalog=None,
                 moment_tensor=None,
                 duration=None):

        vals = None
        if load is not None:
            vals = Event.oldload(load)
        elif loadf is not None:
            vals = Event.oldloadf(loadf)

        if vals:
            lat, lon, time, name, depth, magnitude, magnitude_type, region, \
                catalog, moment_tensor, duration = vals

        Object.__init__(self,
                        lat=lat,
                        lon=lon,
                        time=time,
                        name=name,
                        depth=depth,
                        magnitude=magnitude,
                        magnitude_type=magnitude_type,
                        region=region,
                        catalog=catalog,
                        moment_tensor=moment_tensor,
                        duration=duration)

    def time_as_string(self):
        return util.time_to_str(self.time)

    def set_name(self, name):
        self.name = name

    def olddump(self, filename):
        with open(filename, 'w') as file:
            self.olddumpf(file)

    def olddumpf(self, file):
        file.write('name = %s\n' % self.name)
        file.write('time = %s\n' % util.time_to_str(self.time))
        if self.lat is not None:
            file.write('latitude = %.12g\n' % self.lat)
        if self.lon is not None:
            file.write('longitude = %.12g\n' % self.lon)
        if self.magnitude is not None:
            file.write('magnitude = %g\n' % self.magnitude)
            file.write('moment = %g\n' %
                       moment_tensor.magnitude_to_moment(self.magnitude))
        if self.magnitude_type is not None:
            file.write('magnitude_type = %s\n' % self.magnitude_type)
        if self.depth is not None:
            file.write('depth = %.10g\n' % self.depth)
        if self.region is not None:
            file.write('region = %s\n' % self.region)
        if self.catalog is not None:
            file.write('catalog = %s\n' % self.catalog)
        if self.moment_tensor is not None:
            m = self.moment_tensor.m()
            sdr1, sdr2 = self.moment_tensor.both_strike_dip_rake()
            file.write(
                ('mnn = %g\nmee = %g\nmdd = %g\nmne = %g\nmnd = %g\nmed = %g\n'
                 'strike1 = %g\ndip1 = %g\nrake1 = %g\n'
                 'strike2 = %g\ndip2 = %g\nrake2 = %g\n') %
                ((m[0, 0], m[1, 1], m[2, 2], m[0, 1], m[0, 2], m[1, 2]) +
                 sdr1 + sdr2))

        if self.duration is not None:
            file.write('duration = %g\n' % self.duration)

    @staticmethod
    def unique(events,
               deltat=10.,
               group_cmp=(lambda a, b: cmp(a.catalog, b.catalog))):
        # list.sort() takes a key function in Python 3, not a comparison
        # function, so wrap the comparator with functools.cmp_to_key
        from functools import cmp_to_key

        groups = Event.grouped(events, deltat)

        events = []
        for group in groups:
            if group:
                group.sort(key=cmp_to_key(group_cmp))
                events.append(group[-1])

        return events

    @staticmethod
    def grouped(events, deltat=10.):
        events = list(events)
        groups = []
        for ia, a in enumerate(events):
            groups.append([])
            haveit = False
            for ib, b in enumerate(events[:ia]):
                if abs(b.time - a.time) < deltat:
                    groups[ib].append(a)
                    haveit = True
                    break

            if not haveit:
                groups[ia].append(a)

        groups = [g for g in groups if g]
        groups.sort(key=lambda g: sum(e.time for e in g) / len(g))
        return groups

    @staticmethod
    def dump_catalog(events, filename=None, stream=None):
        if filename is not None:
            file = open(filename, 'w')
        else:
            file = stream
        try:
            for ev in events:
                ev.olddumpf(file)
                file.write('--------------------------------------------\n')

        finally:
            if filename is not None:
                file.close()

    @staticmethod
    def oldload(filename):
        with open(filename, 'r') as file:
            return Event.oldloadf(file)

    @staticmethod
    def oldloadf(file):
        d = {}
        try:
            for line in file:
                if line.lstrip().startswith('#'):
                    continue

                toks = line.split(' = ', 1)
                if len(toks) == 2:
                    k, v = toks[0].strip(), toks[1].strip()
                    if k in ('name', 'region', 'catalog', 'magnitude_type'):
                        d[k] = v
                    if k in (('latitude longitude magnitude depth duration '
                              'moment mnn mee mdd mne mnd med strike1 dip1 '
                              'rake1 strike2 dip2 rake2').split()):
                        d[k] = float(v)
                    if k == 'time':
                        d[k] = util.str_to_time(v)

                if line.startswith('---'):
                    d['have_separator'] = True
                    break

        except Exception as e:
            raise FileParseError(e)

        if not d:
            raise EOF()

        if 'have_separator' in d and len(d) == 1:
            raise EmptyEvent()

        mt = None
        m6 = [d[x] for x in 'mnn mee mdd mne mnd med'.split() if x in d]
        if len(m6) == 6:
            mt = moment_tensor.MomentTensor(m=moment_tensor.symmat6(*m6))
        else:
            sdr = [d[x] for x in 'strike1 dip1 rake1'.split() if x in d]
            if len(sdr) == 3:
                moment = 1.0
                if 'moment' in d:
                    moment = d['moment']
                elif 'magnitude' in d:
                    moment = moment_tensor.magnitude_to_moment(d['magnitude'])

                mt = moment_tensor.MomentTensor(strike=sdr[0],
                                                dip=sdr[1],
                                                rake=sdr[2],
                                                scalar_moment=moment)

        return (d.get('latitude', 0.0), d.get('longitude', 0.0),
                d.get('time', 0.0), d.get('name', ''), d.get('depth', None),
                d.get('magnitude', None), d.get('magnitude_type',
                                                None), d.get('region', None),
                d.get('catalog', None), mt, d.get('duration', None))

    @staticmethod
    def load_catalog(filename):

        # use a context manager so the file is also closed when the
        # generator is abandoned early
        with open(filename, 'r') as file:
            try:
                while True:
                    try:
                        ev = Event(loadf=file)
                        yield ev
                    except EmptyEvent:
                        pass

            except EOF:
                pass

    def get_hash(self):
        e = self
        if isinstance(e.time, util.hpfloat):
            stime = util.time_to_str(e.time, format='%Y-%m-%d %H:%M:%S.6FRAC')
        else:
            stime = util.time_to_str(e.time, format='%Y-%m-%d %H:%M:%S.3FRAC')

        s = float_or_none_to_str

        return ehash(', '.join(
            (stime, s(e.lat), s(e.lon), s(e.depth), s(e.magnitude),
             str(e.catalog), str(e.name), str(e.region))))

    def human_str(self):
        s = [
            'Latitude [deg]: %g' % self.lat,
            'Longitude [deg]: %g' % self.lon,
            'Time [UTC]: %s' % util.time_to_str(self.time)
        ]

        if self.name:
            s.append('Name: %s' % self.name)

        if self.depth is not None:
            s.append('Depth [km]: %g' % (self.depth / 1000.))

        if self.magnitude is not None:
            s.append('Magnitude [%s]: %3.1f' %
                     (self.magnitude_type or 'M?', self.magnitude))

        if self.region:
            s.append('Region: %s' % self.region)

        if self.catalog:
            s.append('Catalog: %s' % self.catalog)

        if self.moment_tensor:
            s.append(str(self.moment_tensor))

        return '\n'.join(s)
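
A small round-trip sketch for the legacy text format handled by olddumpf()/oldloadf() (all values invented):

import io

ev = Event(lat=52.5, lon=13.4, depth=10000., magnitude=4.5, name='test')

buf = io.StringIO()
ev.olddumpf(buf)

buf.seek(0)
vals = Event.oldloadf(buf)  # (lat, lon, time, name, depth, magnitude, ...)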
Example #6
class Frequency(FloatWithUnit):
    unit = String.T(default='HERTZ', optional=True, xmlstyle='attribute')
Example #7
class Longitude(FloatWithUnit):
    '''Type for longitude coordinate.'''

    unit = String.T(default='DEGREES', optional=True, xmlstyle='attribute')
    # fixed unit
    datum = String.T(default='WGS84', optional=True, xmlstyle='attribute')
Example #8
class SeismosizerData(DataGenerator):
    fn_sources = String.T(
            help='filename containing pyrocko.gf.seismosizer.Source instances')
    fn_targets = String.T(
            help='filename containing pyrocko.gf.seismosizer.Target instances',
            optional=True)
    fn_stations = String.T(
            help='filename containing pyrocko.model.Station instances. '
                 'Will be converted to Target instances',
            optional=True)

    store_id = String.T(optional=True)
    center_sources = Bool.T(
            default=False,
            help='Transform the center of sources to the center of stations')

    engine = LocalEngine.T()
    onset_phase = String.T(default='first(p|P)')

    def setup(self):
        self.sources = guts.load(filename=self.fn_sources)
        self.targets = []

        if self.fn_targets:
            self.targets.extend(guts.load(filename=self.fn_targets))

        if self.fn_stations:
            stats = load_stations(self.fn_stations)
            self.targets.extend(self.cast_stations_to_targets(stats))

        if self.store_id:
            for t in self.targets:
                t.store_id = self.store_id

        if self.center_sources:
            self.move_sources_to_station_center()

        self.config.channels = [t.codes for t in self.targets]
        store_ids = [t.store_id for t in self.targets]
        store_id = set(store_ids)
        assert len(store_id) == 1, \
            'More than one store used. Not implemented yet'

        self.store = self.engine.get_store(store_id.pop())

        self.sources = filter_oob(self.sources, self.targets, self.store.config)

        dt = self.config.deltat_want or self.store.config.deltat
        self.n_samples = int((self.config.sample_length + self.config.tpad) / dt)

    def move_sources_to_station_center(self):
        '''Transform the center of sources to the center of stations.'''
        lat, lon = orthodrome.geographic_midpoint_locations(self.targets)
        for s in self.sources:
            s.lat = lat
            s.lon = lon

    def cast_stations_to_targets(self, stations):
        targets = []
        channels = 'ENZ'
        for s in stations:
            targets.extend(
                [Target(codes=(s.network, s.station, s.location, c),
                    lat=s.lat, lon=s.lon, elevation=s.elevation,) for c in
                    channels])

        return targets

    def extract_labels(self, source):
        if not self.labeled:
            return UNLABELED
        return (source.north_shift, source.east_shift, source.depth)

    def iter_examples_and_labels(self):
        ensure_list(self.sources)
        ensure_list(self.targets)

        response = self.engine.process(
            sources=self.sources,
            targets=self.targets)

        for isource, source in enumerate(response.request.sources):
            traces = [x.trace.pyrocko_trace()
                      for x in response.results_list[isource]]

            for tr in traces:
                self.preprocess(tr)

            arrivals = [self.store.t(self.onset_phase,
                                     (source.depth, source.distance_to(t)))
                        for t in self.targets]
            tref = min([a for a in arrivals if a is not None])
            chunk = self.get_raw_data_chunk(self.tensor_shape)

            self.fit_data_into_chunk(traces, chunk=chunk, tref=tref+source.time)

            label = self.extract_labels(source)

            yield chunk, label
Example #9
class TestResult(Object):
    package = String.T()
    branch = String.T(optional=True)
    box = String.T()
    py_version = String.T(optional=True)
    prerequisite_versions = Dict.T(String.T(),
                                   String.T(),
                                   optional=True,
                                   default={})
    log = String.T(optional=True, yamlstyle='|')
    result = String.T(optional=True)
    errors = List.T(String.T(), optional=True, default=[], yamlstyle='block')
    fails = List.T(String.T(), optional=True, default=[], yamlstyle='block')
    skips = List.T(String.T(), optional=True, default=[], yamlstyle='block')
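
Since TestResult is a guts Object, it serializes to YAML out of the box; a minimal sketch (field values invented):

tr = TestResult(package='pyrocko', box='debian-9', result='passed')
print(tr.dump())  # YAML representation of the object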
Example #10
class DataGeneratorBase(Object):
    '''This is the base class for all generators.

    It provides the functionality shared by all subclasses to dump
    examples to, and load them from, TFRecordDatasets.
    '''
    fn_tfrecord = String.T(optional=True)
    noise = Noise.T(optional=True, help='Add noise to feature')

    station_dropout_rate = Float.T(default=0.,
        help='Rate at which to mask all channels of a station')

    station_dropout_distribution = Bool.T(default=True,
        help='If *true*, station dropout will be drawn from a uniform '
        'distribution limited by *station_dropout_rate*.')

    nmax = Int.T(optional=True)
    labeled = Bool.T(default=True)
    blacklist = List.T(optional=True, help='List of indices to ignore.')

    random_seed = Int.T(default=0)

    def __init__(self, *args, **kwargs):
        self.config = kwargs.pop('config', None)
        super().__init__(**kwargs)
        self.blacklist = set() if not self.blacklist else set(self.blacklist)
        self.n_classes = self.config.n_classes
        self.evolution = 0

    def normalize_label(self, label):
        if self.labeled:
            return self.config.normalize_label(label)
        return label

    def set_config(self, pinky_config):
        self.config = pinky_config
        self.setup()

    def setup(self):
        ...

    def reset(self):
        self.evolution = 0

    @property
    def tensor_shape(self):
        return self.config.tensor_shape

    @property
    def n_samples(self):
        return self.config._n_samples

    @n_samples.setter
    def n_samples(self, v):
        self.config._n_samples = v

    @property
    @lru_cache(maxsize=1)
    def nsl_to_indices(self):
        ''' Returns a dictionary which maps nsl codes to indexing arrays.'''
        indices = OrderedDict()
        for nslc, index in self.nslc_to_index.items():
            key = nslc[:3]
            _v = indices.get(key, [])
            _v.append(index)
            indices[key] = _v

        for k in indices.keys():
            indices[k] = num.array(indices[k])

        return indices

    @property
    @lru_cache(maxsize=1)
    def nsl_indices(self):
        ''' Returns a list of index arrays of the channels belonging to
        each station.'''
        return [v for v in self.nsl_to_indices.values()]

    @property
    def nslc_to_index(self):
        ''' Returns a dictionary which maps nslc codes to trace indices.'''
        d = OrderedDict()
        idx = 0
        for nslc in self.config.channels:
            if not util.match_nslc(self.config.blacklist, nslc):
                d[nslc] = idx
                idx += 1
        return d

    def reject_blacklisted(self, tr):
        '''Returns `False` if the nslc codes of `tr` match any of the
        blacklisting patterns, otherwise `True`.'''
        return not util.match_nslc(self.config.blacklist, tr.nslc_id)

    def filter_iter(self, iterator):
        '''Apply *blacklist*ing by example indices

        :param iterator: producing iterator
        '''
        for i, item in enumerate(iterator):
            if i not in self.blacklist:
                yield i, item

    @property
    def generate_output_types(self):
        '''Return data types of features and labels'''
        return tf.float32, tf.float32

    def unpack_examples(self, record_iterator):
        '''Parse examples stored in TFRecordData to `tf.train.Example`'''
        for string_record in record_iterator:
            example = tf.train.Example()
            example.ParseFromString(string_record)
            chunk = example.features.feature['data'].bytes_list.value[0]
            label = example.features.feature['label'].bytes_list.value[0]

            # num.fromstring is deprecated; num.frombuffer returns a
            # read-only view, so copy `chunk` to allow in-place processing
            chunk = num.frombuffer(chunk, dtype=num.float32).copy()
            chunk = chunk.reshape((self.config.n_channels, -1))

            label = num.frombuffer(label, dtype=num.float32)
            yield chunk, label

    @property
    def tstart_data(self):
        return None

    def iter_chunked(self, tinc):
        # if data has been written to tf records:
        return self.iter_examples_and_labels()

    def iter_examples_and_labels(self):
        '''Subclass this method!

        Yields: feature, label

        Chunks that are all NAN will be skipped.
        '''
        record_iterator = tf.python_io.tf_record_iterator(
            path=self.fn_tfrecord)

        for chunk, label in self.unpack_examples(record_iterator):
            if all_NAN(chunk):
                logger.debug('all NAN. skipping...')
                continue

            yield chunk, label

    def generate_chunked(self, tinc=1):
        '''Takes the output of `iter_examples_and_labels` and applies post
        processing (see: `process_chunk`).
        '''
        for i, (chunk, label) in self.filter_iter(self.iter_chunked(tinc)):
            yield self.process_chunk(chunk), self.normalize_label(label)

    def generate(self, return_gaps=False):
        '''Takes the output of `iter_examples_and_labels` and applies post
        processing (see: `process_chunk`).
        '''
        self.evolution += 1
        num.random.seed(self.random_seed + self.evolution)
        for i, (chunk, label) in self.filter_iter(
                self.iter_examples_and_labels()):
            yield self.process_chunk(chunk, return_gaps=return_gaps), \
                self.normalize_label(label)

    def extract_labels(self):
        '''Overwrite this method!'''
        if not self.labeled:
            return UNLABELED

    def iter_labels(self):
        '''Iterate through labels.'''
        for i, (_, label) in self.filter_iter(
                self.iter_examples_and_labels()):
            yield label

    @property
    def text_labels(self):
        '''Returns a list of strings to identify the labels.

        Overwrite this method for more meaningful identifiers.'''
        return ['%i' % (i) for i, d in
                self.filter_iter(self.iter_examples_and_labels())]

    def gaps(self):
        '''Returns a list containing the gaps of each example'''
        gaps = []
        for (_, gap), _ in self.generate(return_gaps=True):
            gaps.append(gap)

        return gaps

    def snrs(self, split_factor):
        snrs = []
        for chunk, _ in self.generate():
            snrs.append(snr(chunk, split_factor))
        return snrs

    @property
    def output_shapes(self):
        return (self.config.output_shapes)

    def get_dataset(self):
        return tf.data.Dataset.from_generator(
            self.generate,
            self.generate_output_types,
            output_shapes=self.output_shapes)

    def get_chunked_dataset(self, tinc=1.):
        gen = partial(self.generate_chunked, tinc=tinc)
        return tf.data.Dataset.from_generator(
            gen,
            self.generate_output_types,
            output_shapes=self.output_shapes)

    def get_raw_data_chunk(self, shape):
        '''Return an array of size (Nchannels x Nsamples_max) filled with
        NANs.'''
        empty_array = num.empty(shape, dtype=num.float32)
        empty_array.fill(num.nan)
        return empty_array

    def pack_examples(self):
        '''Serialize Examples to strings.'''
        for ydata, label in self.iter_examples_and_labels():
            yield tf.train.Example(
                features=tf.train.Features(
                    feature={
                        'data': _BytesFeature(ydata.tobytes()),
                        'label': _BytesFeature(num.array(
                            label, dtype=num.float32).tobytes()),
                    }))

    def mask(self, chunk, rate):
        '''For data augmentation: Mask traces in chunks with NaNs.
        NaNs will be filled by the imputation method provided by the config
        file.

        :param rate: probability with which traces are NaN-ed
        '''
        indices = self.nsl_indices
        a = num.random.random(len(indices))
        i = num.where(a < rate)[0]
        for ii in i:
            chunk[indices[ii], :] = num.nan

    def random_trim(self, chunk, margin):
        '''For data augmentation: Randomly trim examples in time domain with
        *margin* seconds.'''
        sample_margin = int(margin / self.config.effective_deltat)
        nstart = num.random.randint(low=0, high=sample_margin)

        _, n_samples = self.config.tensor_shape
        nstop = nstart + n_samples
        chunk[:, :nstart] = 0.
        chunk[:, nstop:] = 0.

    def process_chunk(self, chunk, return_gaps=False):
        '''Performs preprocessing of data chunks.'''

        if self.config.t_translation_max:
            self.random_trim(chunk, self.config.t_translation_max)

        # add noise
        if self.noise:
            self.noise(chunk)

        # apply normalization
        self.config.normalization(chunk)

        # apply station dropout
        if self.station_dropout_rate:
            if self.station_dropout_distribution:
                self.mask(chunk, num.random.uniform(
                    high=self.station_dropout_rate))
            else:
                self.mask(chunk, self.station_dropout_rate)

        # record gaps before imputation so they can be returned even when
        # no imputation method is configured
        gaps = num.isnan(chunk)

        # fill gaps
        if self.config.imputation:
            chunk[gaps] = self.config.imputation(chunk)

        if not return_gaps:
            return chunk
        else:
            return chunk, gaps

    def write(self, directory):
        '''Write example data to a TFRecordDataset at `directory`.'''
        logger.debug('writing TFRecordDataset: %s' % directory)
        with tf.python_io.TFRecordWriter(directory) as writer:
            for ex in self.pack_examples():
                writer.write(ex.SerializeToString())

    def cleanup(self):
        '''Remove the TFRecord file if it exists.'''
        delete_if_exists(self.fn_tfrecord)
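
The station-dropout idea in mask() can be demonstrated in isolation (shapes, indices and the dropout rate below are invented):

import numpy as num

rate = 0.2
# index arrays of the channels belonging to each of two stations
station_indices = [num.array([0, 1, 2]), num.array([3, 4, 5])]
chunk = num.random.random((6, 100)).astype(num.float32)

# NaN out all channels of randomly selected stations
for i in num.where(num.random.random(len(station_indices)) < rate)[0]:
    chunk[station_indices[i], :] = num.nan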
Example #11
class PileData(DataGenerator):
    '''Data generator for locally saved data.'''
    fn_stations = String.T()
    data_paths = List.T(String.T())
    data_format = String.T(default='detect')
    fn_markers = String.T()
    fn_events = String.T(optional=True)
    sort_markers = Bool.T(
            default=False,
            help='Sorting markers speeds up data io. Shuffled markers '
                 'improve generalization')
    align_phase = String.T(default='P')

    tstart = String.T(optional=True)
    tstop = String.T(optional=True)

    def setup(self):
        self.data_pile = pile.make_pile(
            self.data_paths, fileformat=self.data_format)

        if self.data_pile.is_empty():
            sys.exit('Data pile is empty!')

        self.deltat_want = self.config.deltat_want or \
                min(self.data_pile.deltats.keys())

        self.n_samples = int(
                (self.config.sample_length + self.config.tpad) / self.deltat_want)

        logger.debug('loading marker file %s' % self.fn_markers)

        # loads just plain markers:
        markers = marker.load_markers(self.fn_markers)

        if self.fn_events:
            markers.extend(
                [marker.EventMarker(e)
                 for e in load_events(self.fn_events)])

        if self.sort_markers:
            logger.info('sorting markers!')
            markers.sort(key=lambda x: x.tmin)
        marker.associate_phases_to_events(markers)

        markers_by_nsl = {}
        for m in markers:
            if not m.match_nsl(self.config.reference_target.codes[:3]):
                continue

            if m.get_phasename().upper() != self.align_phase:
                continue

            markers_by_nsl.setdefault(m.one_nslc()[:3], []).append(m)

        assert len(markers_by_nsl) == 1

        self.markers = list(markers_by_nsl.values())[0]

        if not self.labeled:
            dummy_event = Event(lat=0., lon=0., depth=0.)
            for m in self.markers:
                if not m.get_event():
                    m.set_event(dummy_event)

        # filter markers that do not have an event assigned:
        self.markers = [m for m in self.markers if m.get_event() is not None]

        if not len(self.markers):
            raise Exception('No markers left in dataset')

        self.config.channels = list(self.data_pile.nslc_ids.keys())
        self.config.channels.sort()

    def check_inputs(self):
        if len(self.data_pile.deltats) > 1:
            logger.warning(
                'Different sampling rates in dataset. Preprocessing will '
                'be slow.')

    def extract_labels(self, marker):
        if not self.labeled:
            return UNLABELED

        source = marker.get_event()
        n, e = orthodrome.latlon_to_ne(
            self.config.reference_target.lat, self.config.reference_target.lon,
            source.lat, source.lon)
        return (n, e, source.depth)

    @property
    def tstart_data(self):
        '''Start time of the data returned by the generator.'''
        return util.stt(self.tstart) if self.tstart else self.data_pile.tmin

    def iter_chunked(self, tinc):
        nslc_to_index = self.nslc_to_index

        tpad = self.config.effective_tpad

        tstart = util.stt(self.tstart) if self.tstart else None
        tstop = util.stt(self.tstop) if self.tstop else None

        logger.debug('start chunked iteration')
        for trs in self.data_pile.chopper(
                tinc=tinc, tmin=tstart, tmax=tstop, tpad=tpad,
                keep_current_files_open=True, want_incomplete=False,
                trace_selector=self.reject_blacklisted):

            chunk = self.get_raw_data_chunk(self.tensor_shape)

            if not trs:
                yield chunk, UNLABELED
                continue

            for tr in trs:
                self.preprocess(tr)

            indices = [nslc_to_index[tr.nslc_id] for tr in trs]
            self.fit_data_into_chunk(
                trs, chunk=chunk, indices=indices, tref=trs[0].tmin)

            if all_NAN(chunk):
                logger.debug('all NAN. skipping...')
                continue

            yield chunk, UNLABELED

    def iter_labels(self):
        for m in self.markers:
            yield self.extract_labels(m)

    def iter_examples_and_labels(self):
        tr_len = self.n_samples * self.deltat_want
        nslc_to_index = self.nslc_to_index

        tpad = self.config.effective_tpad

        for i_m, m in enumerate(self.markers):
            logger.debug(
                'processing marker %s / %s' % (i_m, len(self.markers)))

            for trs in self.data_pile.chopper(
                    tmin=m.tmin-tpad, tmax=m.tmax+tr_len+tpad,
                    keep_current_files_open=True,
                    want_incomplete=False,
                    trace_selector=self.reject_blacklisted):

                for tr in trs:
                    self.preprocess(tr)

                indices = [nslc_to_index[tr.nslc_id] for tr in trs]
                chunk = self.get_raw_data_chunk(self.tensor_shape)
                self.fit_data_into_chunk(
                    trs, chunk=chunk, indices=indices, tref=m.tmin)

                if all_NAN(chunk):
                    logger.debug('all NAN. skipping...')
                    continue

                label = self.extract_labels(m)
                yield chunk, label
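
PileData builds on pyrocko's pile API for all waveform access. A minimal, hedged usage sketch of the windowed iteration it relies on (paths and window lengths are placeholders):

from pyrocko import pile

p = pile.make_pile(['data/'], fileformat='detect')
for trs in p.chopper(tinc=60., tpad=5., want_incomplete=False):
    # each *trs* is a list of traces covering one window; iter_chunked
    # would preprocess them and pack them into a chunk at this point
    pass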
Example #12
class FitsWaveformEnsemblePlot(PlotConfig):
    ''' Plot showing all waveform fits for the ensemble of solutions'''

    name = 'fits_waveform_ensemble'
    size_cm = Tuple.T(2,
                      Float.T(),
                      default=(9., 5.),
                      help='width and length of the figure in cm')
    nx = Int.T(default=1, help='horizontal number of subplots on every page')
    ny = Int.T(default=1, help='vertical number of subplots on every page')
    misfit_cutoff = Float.T(
        optional=True, help='Plot fits for models up to this misfit value')
    color_parameter = String.T(
        default='misfit',
        help='Choice of value to color, options: dist and misfit')
    font_size = Float.T(default=8, help='Font size of all fonts, except title')
    font_size_title = Float.T(default=10, help='Font size of title')

    def make(self, environ):
        cm = environ.get_plot_collection_manager()
        mpl_init(fontsize=self.font_size)
        environ.setup_modelling()
        ds = environ.get_dataset()
        history = environ.get_history(subset='harvest')
        cm.create_group_mpl(self,
                            self.draw_figures(ds, history),
                            title=u'Waveform fits for the ensemble',
                            section='fits',
                            feather_icon='activity',
                            description=u'''
Plot showing waveform (attribute) fits for the ensemble of solutions.

Waveform fits for every nth model in the ensemble of bootstrap solutions.
Depending on the target configuration, different types of comparisons are
possible: (i) time domain waveform differences, (ii) amplitude spectra, (iii)
envelopes, (iv) cross correlation functions. Each waveform plot gives a number
of details:

1) Target information (left side, from top to bottom) gives station name with
component, distance to source, azimuth of station with respect to source,
target weight, target misfit and starting time of the waveform relative to the
origin time.

2) The background gray area shows the applied taper function.

3) The waveforms shown are: the restituted and filtered observed trace without
tapering (light gray) and the same trace with tapering and processing (dark
gray), the synthetic trace (light red) and the filtered, tapered and (if
enabled) shifted and processed synthetic trace (colored). The colors of the
synthetic traces indicate how well the corresponding models fit in the global
weighting scheme (when all bootstrap weights are equal), from better fit (red)
to worse fit (blue). The amplitudes of the traces are scaled according to the
target weight (small weight, small amplitude) and normed relative to the
maximum amplitude of the targets of the corresponding normalisation family.

4) The bottom panel shows, depending on the type of comparison, sample-wise
residuals for time domain comparisons (red filled), spectra of observed and
synthetic traces for amplitude spectrum comparisons, or cross correlation
traces.''')

    def draw_figures(self, ds, history):

        color_parameter = self.color_parameter
        misfit_cutoff = self.misfit_cutoff
        fontsize = self.font_size
        fontsize_title = self.font_size_title

        nxmax = self.nx
        nymax = self.ny

        problem = history.problem

        for target in problem.targets:
            target.set_dataset(ds)

        target_index = {}
        i = 0
        for target in problem.targets:
            target_index[target] = i, i + target.nmisfits
            i += target.nmisfits

        gms = problem.combine_misfits(history.misfits)
        isort = num.argsort(gms)[::-1]
        gms = gms[isort]
        models = history.models[isort, :]

        if misfit_cutoff is not None:
            ibest = gms < misfit_cutoff
            gms = gms[ibest]
            models = models[ibest]

        gms = gms[::10]
        models = models[::10]

        nmodels = models.shape[0]
        if color_parameter == 'dist':
            mx = num.mean(models, axis=0)
            cov = num.cov(models.T)
            mdists = core.mahalanobis_distance(models, mx, cov)
            icolor = meta.ordersort(mdists)

        elif color_parameter == 'misfit':
            iorder = num.arange(nmodels)
            icolor = iorder

        elif color_parameter in problem.parameter_names:
            ind = problem.name_to_index(color_parameter)
            icolor = problem.extract(models, ind)
        else:
            raise ValueError(
                'invalid color_parameter: %s' % color_parameter)

        target_to_results = defaultdict(list)
        all_syn_trs = []

        dtraces = []
        for imodel in range(nmodels):
            model = models[imodel, :]

            source = problem.get_source(model)
            results = problem.evaluate(model)

            dtraces.append([])

            for target, result in zip(problem.targets, results):
                if isinstance(result, gf.SeismosizerError):
                    dtraces[-1].append(None)
                    continue

                if not isinstance(target, WaveformMisfitTarget):
                    dtraces[-1].append(None)
                    continue

                itarget, itarget_end = target_index[target]
                assert itarget_end == itarget + 1

                w = target.get_combined_weight()

                if target.misfit_config.domain == 'cc_max_norm':
                    tref = (
                        result.filtered_obs.tmin + result.filtered_obs.tmax) \
                        * 0.5

                    for tr_filt, tr_proc, tshift in ((result.filtered_obs,
                                                      result.processed_obs,
                                                      0.),
                                                     (result.filtered_syn,
                                                      result.processed_syn,
                                                      result.tshift)):

                        norm = num.sum(num.abs(tr_proc.ydata)) \
                            / tr_proc.data_len()
                        tr_filt.ydata /= norm
                        tr_proc.ydata /= norm

                        tr_filt.shift(tshift)
                        tr_proc.shift(tshift)

                    ctr = result.cc
                    ctr.shift(tref)

                    dtrace = ctr

                else:
                    for tr in (result.filtered_obs, result.filtered_syn,
                               result.processed_obs, result.processed_syn):

                        tr.ydata *= w

                    if result.tshift is not None and result.tshift != 0.0:
                        # result.filtered_syn.shift(result.tshift)
                        result.processed_syn.shift(result.tshift)

                    dtrace = make_norm_trace(result.processed_syn,
                                             result.processed_obs,
                                             problem.norm_exponent)

                target_to_results[target].append(result)

                dtrace.meta = dict(
                    normalisation_family=target.normalisation_family,
                    path=target.path)

                dtraces[-1].append(dtrace)

                result.processed_syn.meta = dict(
                    normalisation_family=target.normalisation_family,
                    path=target.path)

                all_syn_trs.append(result.processed_syn)

        if not all_syn_trs:
            logger.warning('No traces to show!')
            return

        def skey(tr):
            return tr.meta['normalisation_family'], tr.meta['path']

        trace_minmaxs = trace.minmax(all_syn_trs, skey)

        dtraces_all = []
        for dtraces_group in dtraces:
            dtraces_all.extend(dtraces_group)

        dminmaxs = trace.minmax(
            [dtrace_ for dtrace_ in dtraces_all if dtrace_ is not None], skey)

        for tr in dtraces_all:
            if tr:
                dmin, dmax = dminmaxs[skey(tr)]
                tr.ydata /= max(abs(dmin), abs(dmax))

        cg_to_targets = meta.gather(problem.waveform_targets,
                                    lambda t: (t.path, t.codes[3]),
                                    filter=lambda t: t in target_to_results)

        cgs = sorted(cg_to_targets.keys())

        from matplotlib import colors
        cmap = cm.ScalarMappable(norm=colors.Normalize(vmin=num.min(icolor),
                                                       vmax=num.max(icolor)),
                                 cmap=plt.get_cmap('coolwarm'))

        imodel_to_color = []
        for imodel in range(nmodels):
            imodel_to_color.append(cmap.to_rgba(icolor[imodel]))

        for cg in cgs:
            targets = cg_to_targets[cg]

            frame_to_target, nx, ny, nxx, nyy = layout(source, targets, nxmax,
                                                       nymax)

            figures = {}
            for iy in range(ny):
                for ix in range(nx):
                    if (iy, ix) not in frame_to_target:
                        continue

                    ixx = ix // nxmax
                    iyy = iy // nymax
                    if (iyy, ixx) not in figures:
                        title = '_'.join(x for x in cg if x)
                        item = PlotItem(name='fig_%s_%i_%i' %
                                        (title, ixx, iyy))
                        item.attributes['targets'] = []
                        figures[iyy,
                                ixx] = (item,
                                        plt.figure(figsize=self.size_inch))

                        figures[iyy, ixx][1].subplots_adjust(left=0.03,
                                                             right=1.0 - 0.03,
                                                             bottom=0.03,
                                                             top=1.0 - 0.06,
                                                             wspace=0.2,
                                                             hspace=0.2)

                    item, fig = figures[iyy, ixx]

                    target = frame_to_target[iy, ix]

                    item.attributes['targets'].append(target.string_id())

                    amin, amax = trace_minmaxs[target.normalisation_family,
                                               target.path]
                    absmax = max(abs(amin), abs(amax))

                    ny_this = nymax  # min(ny, nymax)
                    nx_this = nxmax  # min(nx, nxmax)
                    i_this = (iy % ny_this) * nx_this + (ix % nx_this) + 1

                    axes2 = fig.add_subplot(ny_this, nx_this, i_this)

                    space = 0.5
                    space_factor = 1.0 + space
                    axes2.set_axis_off()
                    axes2.set_ylim(-1.05 * space_factor, 1.05)

                    axes = axes2.twinx()
                    axes.set_axis_off()

                    if target.misfit_config.domain == 'cc_max_norm':
                        axes.set_ylim(-10. * space_factor, 10.)
                    else:
                        axes.set_ylim(-absmax * 1.33 * space_factor,
                                      absmax * 1.33)

                    itarget, itarget_end = target_index[target]
                    assert itarget_end == itarget + 1

                    for imodel, result in enumerate(target_to_results[target]):

                        syn_color = imodel_to_color[imodel]

                        dtrace = dtraces[imodel][itarget]

                        tap_color_annot = (0.35, 0.35, 0.25)
                        tap_color_edge = (0.85, 0.85, 0.80)
                        tap_color_fill = (0.95, 0.95, 0.90)

                        plot_taper(axes2,
                                   result.processed_obs.get_xdata(),
                                   result.taper,
                                   fc=tap_color_fill,
                                   ec=tap_color_edge,
                                   alpha=0.2)

                        obs_color = mpl_color('aluminium5')
                        obs_color_light = light(obs_color, 0.5)

                        plot_dtrace(axes2,
                                    dtrace,
                                    space,
                                    0.,
                                    1.,
                                    fc='none',
                                    ec=syn_color)

                        if imodel == 0:
                            plot_trace(axes,
                                       result.filtered_obs,
                                       color=obs_color_light,
                                       lw=0.75)

                        plot_trace(axes,
                                   result.processed_syn,
                                   color=syn_color,
                                   lw=1.0,
                                   alpha=0.3)

                        plot_trace(axes,
                                   result.processed_obs,
                                   color=obs_color,
                                   lw=0.75,
                                   alpha=0.3)

                        if imodel != 0:
                            continue
                        xdata = result.filtered_obs.get_xdata()
                        axes.set_xlim(xdata[0], xdata[-1])

                        tmarks = [
                            result.processed_obs.tmin,
                            result.processed_obs.tmax
                        ]

                        for tmark in tmarks:
                            axes2.plot([tmark, tmark], [-0.9, 0.1],
                                       color=tap_color_annot)

                        dur = tmarks[1] - tmarks[0]
                        for tmark, text, ha in [
                            (tmarks[0], '$\\,$ ' +
                             meta.str_duration(tmarks[0] - source.time),
                             'left'),
                            (tmarks[1], '$\\Delta$ ' + meta.str_duration(dur),
                             'right')
                        ]:

                            axes2.annotate(
                                text,
                                xy=(tmark, -0.9),
                                xycoords='data',
                                xytext=(fontsize * 0.4 * [-1, 1][ha == 'left'],
                                        fontsize * 0.2),
                                textcoords='offset points',
                                ha=ha,
                                va='bottom',
                                color=tap_color_annot,
                                fontsize=fontsize)

                        axes2.set_xlim(tmarks[0] - dur * 0.1,
                                       tmarks[1] + dur * 0.1)

                    scale_string = None

                    if target.misfit_config.domain == 'cc_max_norm':
                        scale_string = 'Syn/obs scales differ!'

                    infos = []
                    if scale_string:
                        infos.append(scale_string)

                    if self.nx == 1 and self.ny == 1:
                        infos.append(target.string_id())
                    else:
                        infos.append('.'.join(x for x in target.codes if x))
                    dist = source.distance_to(target)
                    azi = source.azibazi_to(target)[0]
                    infos.append(meta.str_dist(dist))
                    infos.append(u'%.0f\u00B0' % azi)
                    axes2.annotate('\n'.join(infos),
                                   xy=(0., 1.),
                                   xycoords='axes fraction',
                                   xytext=(2., 2.),
                                   textcoords='offset points',
                                   ha='left',
                                   va='top',
                                   fontsize=fontsize,
                                   fontstyle='normal')

                    if (self.nx == 1 and self.ny == 1):
                        yield item, fig
                        del figures[iyy, ixx]

            if not (self.nx == 1 and self.ny == 1):
                for (iyy, ixx), (_, fig) in figures.items():
                    title = '.'.join(x for x in cg if x)
                    if len(figures) > 1:
                        title += ' (%i/%i, %i/%i)' % (iyy + 1, nyy, ixx + 1,
                                                      nxx)

                    fig.suptitle(title, fontsize=fontsize_title)

            for item, fig in figures.values():
                yield item, fig
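
The per-model coloring above reduces to normalising one scalar per model (misfit order, Mahalanobis distance or a source parameter) through a colormap. A self-contained sketch of that mapping:

import numpy as num
import matplotlib.pyplot as plt
from matplotlib import cm, colors

icolor = num.linspace(0., 1., 20)   # one scalar per model
cmap = cm.ScalarMappable(
    norm=colors.Normalize(vmin=icolor.min(), vmax=icolor.max()),
    cmap=plt.get_cmap('coolwarm'))
model_colors = [cmap.to_rgba(v) for v in icolor]   # one RGBA per model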
Example #13
class DirContextEntry(Object):
    path = String.T()
    tstart = Timestamp.T()
    ifile = Int.T()
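
DirContextEntry is a plain guts Object: attributes are validated on construction and the instance serialises to YAML. A minimal usage sketch with placeholder values:

# assumes the DirContextEntry class defined above is importable
entry = DirContextEntry(path='/data/day_001', tstart=0.0, ifile=0)
print(entry.dump())   # YAML serialisation provided by guts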
Example #14
class SatelliteTargetDisplacement(PlotConfig):
    ''' Maps showing surface displacements from satellite and modelled data '''

    name = 'satellite'
    dpi = Int.T(default=250)
    size_cm = Tuple.T(2, Float.T(), default=(22., 12.))
    colormap = String.T(default='RdBu',
                        help='Colormap for the surface displacements')
    relative_coordinates = Bool.T(
        default=False,
        help='Show relative coordinates, initial location centered at 0N, 0E')

    def make(self, environ):
        cm = environ.get_plot_collection_manager()
        history = environ.get_history(subset='harvest')
        optimiser = environ.get_optimiser()
        ds = environ.get_dataset()

        environ.setup_modelling()

        cm.create_group_mpl(self,
                            self.draw_static_fits(ds, history, optimiser),
                            title=u'InSAR Displacements',
                            section='fits',
                            feather_icon='navigation',
                            description=u'''
Maps showing subsampled surface displacements as observed, modelled and the
residual (observed minus modelled).

The displacement values predicted by the orbit-ambiguity ramps are added to the
modelled displacements (middle panels). The color shows the LOS displacement
values associated with, and the extent of, every quadtree box. The light grey
dots show the focal point of pixels combined in the quadtree box. This point
corresponds to the position of the modelled data point.

The large dark grey dot shows the reference source position. The grey filled
box shows the surface projection of the modelled source, with the thick-lined
edge marking the upper fault edge. Complete data extent is shown.
''')

    def draw_static_fits(self, ds, history, optimiser, closeup=False):
        from pyrocko.orthodrome import latlon_to_ne_numpy
        problem = history.problem

        sat_targets = problem.satellite_targets
        for target in sat_targets:
            target.set_dataset(ds)

        source = history.get_best_source()
        best_model = history.get_best_model()
        results = problem.evaluate(best_model, targets=sat_targets)

        def initAxes(ax, scene, title, last_axes=False):
            ax.set_title(title)
            ax.tick_params(length=2)

            if scene.frame.isMeter():
                ax.set_xlabel('Easting [km]')
                scale_x = {'scale': 1. / km}
                scale_y = {'scale': 1. / km}
                if not self.relative_coordinates:
                    import utm
                    utm_E, utm_N, utm_zone, utm_zone_letter =\
                        utm.from_latlon(source.effective_lat,
                                        source.effective_lon)
                    scale_x['offset'] = utm_E
                    scale_y['offset'] = utm_N

                    if last_axes:
                        ax.text(0.975,
                                0.025,
                                'UTM Zone %d%s' % (utm_zone, utm_zone_letter),
                                va='bottom',
                                ha='right',
                                fontsize=8,
                                alpha=.7,
                                transform=ax.transAxes)
                ax.set_aspect('equal')

            elif scene.frame.isDegree():
                ax.set_xlabel('Lon [°]')
                scale_x = {'scale': 1.}
                scale_y = {'scale': 1.}
                if not self.relative_coordinates:
                    scale_x['offset'] = source.effective_lon
                    scale_y['offset'] = source.effective_lat
                ax.set_aspect(1. / num.cos(source.effective_lat * d2r))

            scale_axes(ax.get_xaxis(), **scale_x)
            scale_axes(ax.get_yaxis(), **scale_y)

        def drawSource(ax, scene):
            if scene.frame.isMeter():
                fn, fe = source.outline(cs='xy').T
                fn -= fn.mean()
                fe -= fe.mean()
            elif scene.frame.isDegree():
                fn, fe = source.outline(cs='latlon').T
                fn -= source.effective_lat
                fe -= source.effective_lon

            # source is centered
            ax.scatter(0., 0., color='black', s=3, alpha=.5, marker='o')
            ax.fill(fe,
                    fn,
                    edgecolor=(0., 0., 0.),
                    facecolor=(.5, .5, .5),
                    alpha=0.7)
            ax.plot(fe[0:2], fn[0:2], 'k', linewidth=1.3)

        def mapDisplacementGrid(displacements, scene):
            arr = num.full_like(scene.displacement, fill_value=num.nan)
            qt = scene.quadtree

            for syn_v, leaf in zip(displacements, qt.leaves):
                arr[leaf._slice_rows, leaf._slice_cols] = syn_v

            arr[scene.displacement_mask] = num.nan
            return arr

        def drawLeaves(ax, scene, offset_e=0., offset_n=0.):
            rects = scene.quadtree.getMPLRectangles()
            for r in rects:
                r.set_edgecolor((.4, .4, .4))
                r.set_linewidth(.5)
                r.set_facecolor('none')
                r.set_x(r.get_x() - offset_e)
                r.set_y(r.get_y() - offset_n)
                # add the artist eagerly; the previous
                # map(ax.add_artist, rects) was a no-op under Python 3
                ax.add_artist(r)

            ax.scatter(scene.quadtree.leaf_coordinates[:, 0] - offset_e,
                       scene.quadtree.leaf_coordinates[:, 1] - offset_n,
                       s=.25,
                       c='black',
                       alpha=.1)

        def addArrow(ax, scene):
            phi = num.nanmean(scene.phi)
            los_dx = num.cos(phi + num.pi) * .0625
            los_dy = num.sin(phi + num.pi) * .0625

            az_dx = num.cos(phi - num.pi / 2) * .125
            az_dy = num.sin(phi - num.pi / 2) * .125

            anchor_x = .9 if los_dx < 0 else .1
            anchor_y = .85 if los_dx < 0 else .975

            az_arrow = patches.FancyArrow(x=anchor_x - az_dx,
                                          y=anchor_y - az_dy,
                                          dx=az_dx,
                                          dy=az_dy,
                                          head_width=.025,
                                          alpha=.5,
                                          fc='k',
                                          head_starts_at_zero=False,
                                          length_includes_head=True,
                                          transform=ax.transAxes)

            los_arrow = patches.FancyArrow(x=anchor_x - az_dx / 2,
                                           y=anchor_y - az_dy / 2,
                                           dx=los_dx,
                                           dy=los_dy,
                                           head_width=.02,
                                           alpha=.5,
                                           fc='k',
                                           head_starts_at_zero=False,
                                           length_includes_head=True,
                                           transform=ax.transAxes)

            ax.add_artist(az_arrow)
            ax.add_artist(los_arrow)

        urE, urN, llE, llN = (0., 0., 0., 0.)
        for target in sat_targets:

            if target.scene.frame.isMeter():
                off_n, off_e = map(
                    float,
                    latlon_to_ne_numpy(target.scene.frame.llLat,
                                       target.scene.frame.llLon,
                                       source.effective_lat,
                                       source.effective_lon))
            elif target.scene.frame.isDegree():
                off_n = source.effective_lat - target.scene.frame.llLat
                off_e = source.effective_lon - target.scene.frame.llLon

            turE, turN, tllE, tllN = zip(
                *[(leaf.gridE.max() - off_e, leaf.gridN.max() - off_n,
                   leaf.gridE.min() - off_e, leaf.gridN.min() - off_n)
                  for leaf in target.scene.quadtree.leaves])

            turE, turN = map(max, (turE, turN))
            tllE, tllN = map(min, (tllE, tllN))
            urE, urN = map(max, ((turE, urE), (urN, turN)))
            llE, llN = map(min, ((tllE, llE), (llN, tllN)))

        def generate_plot(sat_target, result, ifig):

            scene = sat_target.scene

            fig = plt.figure()
            fig.set_size_inches(*self.size_inch)
            gs = gridspec.GridSpec(2,
                                   3,
                                   wspace=.15,
                                   hspace=.2,
                                   left=.1,
                                   right=.975,
                                   top=.95,
                                   height_ratios=[12, 1])

            item = PlotItem(name='fig_%i' % ifig,
                            attributes={'targets': [sat_target.path]},
                            title=u'Satellite Surface Displacements - %s' %
                            scene.meta.scene_title,
                            description=u'''
Surface displacements derived from satellite data.
(Left) the input data, (center) the modelled
data and (right) the model residual.
''')

            stat_obs = result.statics_obs
            stat_syn = result.statics_syn['displacement.los']
            res = stat_obs - stat_syn

            if scene.frame.isMeter():
                offset_n, offset_e = map(
                    float,
                    latlon_to_ne_numpy(scene.frame.llLat, scene.frame.llLon,
                                       source.effective_lat,
                                       source.effective_lon))
            elif scene.frame.isDegree():
                offset_n = source.effective_lat - scene.frame.llLat
                offset_e = source.effective_lon - scene.frame.llLon

            im_extent = (scene.frame.E.min() - offset_e, scene.frame.E.max() -
                         offset_e, scene.frame.N.min() - offset_n,
                         scene.frame.N.max() - offset_n)

            abs_displ = num.abs([
                stat_obs.min(),
                stat_obs.max(),
                stat_syn.min(),
                stat_syn.max(),
                res.min(),
                res.max()
            ]).max()

            cmw = cm.ScalarMappable(cmap=self.colormap)
            cmw.set_clim(vmin=-abs_displ, vmax=abs_displ)
            cmw.set_array(stat_obs)

            axes = [
                fig.add_subplot(gs[0, 0]),
                fig.add_subplot(gs[0, 1]),
                fig.add_subplot(gs[0, 2])
            ]

            ax = axes[0]
            ax.imshow(mapDisplacementGrid(stat_obs, scene),
                      extent=im_extent,
                      cmap=self.colormap,
                      vmin=-abs_displ,
                      vmax=abs_displ,
                      origin='lower')
            drawLeaves(ax, scene, offset_e, offset_n)
            drawSource(ax, scene)
            addArrow(ax, scene)
            initAxes(ax, scene, 'Observed')

            ax.text(.025,
                    .025,
                    'Scene ID: %s' % scene.meta.scene_id,
                    fontsize=8,
                    alpha=.7,
                    va='bottom',
                    transform=ax.transAxes)
            if scene.frame.isDegree():
                ax.set_ylabel('Lat [°]')
            elif scene.frame.isMeter():
                ax.set_ylabel('Northing [km]')

            ax = axes[1]
            ax.imshow(mapDisplacementGrid(stat_syn, scene),
                      extent=im_extent,
                      cmap=self.colormap,
                      vmin=-abs_displ,
                      vmax=abs_displ,
                      origin='lower')
            drawLeaves(ax, scene, offset_e, offset_n)
            drawSource(ax, scene)
            addArrow(ax, scene)
            initAxes(ax, scene, 'Model')
            ax.get_yaxis().set_visible(False)

            ax = axes[2]
            ax.imshow(mapDisplacementGrid(res, scene),
                      extent=im_extent,
                      cmap=self.colormap,
                      vmin=-abs_displ,
                      vmax=abs_displ,
                      origin='lower')
            drawLeaves(ax, scene, offset_e, offset_n)
            drawSource(ax, scene)
            addArrow(ax, scene)
            initAxes(ax, scene, 'Residual', last_axes=True)
            ax.get_yaxis().set_visible(False)

            for ax in axes:
                ax.set_xlim(llE, urE)
                ax.set_ylim(llN, urN)

            if closeup:
                if scene.frame.isMeter():
                    fn, fe = source.outline(cs='xy').T
                elif scene.frame.isDegree():
                    fn, fe = source.outline(cs='latlon').T
                    fn -= source.effective_lat
                    fe -= source.effective_lon

                if fn.size > 1:
                    off_n = (fn[0] + fn[1]) / 2
                    off_e = (fe[0] + fe[1]) / 2
                else:
                    off_n = fn[0]
                    off_e = fe[0]

                fault_size = 2 * num.sqrt(
                    max(abs(fn - off_n))**2 + max(abs(fe - off_e))**2)
                fault_size *= self.map_scale
                if fault_size == 0.0:
                    extent = (scene.frame.N[-1] + scene.frame.E[-1]) / 2
                    fault_size = extent * .25

                for ax in axes:
                    ax.set_xlim(-fault_size / 2 + off_e,
                                fault_size / 2 + off_e)
                    ax.set_ylim(-fault_size / 2 + off_n,
                                fault_size / 2 + off_n)

            cax = fig.add_subplot(gs[1, :])
            cbar = fig.colorbar(cmw,
                                cax=cax,
                                orientation='horizontal',
                                use_gridspec=True)

            cbar.set_label('LOS Displacement [m]')

            return (item, fig)

        for ifig, (sat_target, result) in enumerate(zip(sat_targets, results)):
            yield generate_plot(sat_target, result, ifig)
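
mapDisplacementGrid above paints one modelled value per quadtree leaf into the full-resolution scene grid. A standalone sketch of that idea with hard-coded slices (real leaves carry their own row/column slices):

import numpy as num

grid = num.full((8, 8), num.nan)
leaves = [((slice(0, 4), slice(0, 4)), 0.01),
          ((slice(0, 4), slice(4, 8)), -0.02),
          ((slice(4, 8), slice(0, 8)), 0.005)]
for (rows, cols), value in leaves:
    grid[rows, cols] = value   # constant LOS displacement per leaf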
Example #15
class Dip(FloatWithUnit):
    '''Instrument dip in degrees down from horizontal. Together,
    azimuth and dip describe the direction of the sensitive axis of
    the instrument.'''

    unit = String.T(default='DEGREES', optional=True, xmlstyle='attribute')
Example #16
class ExternalReference(Object):
    '''This type contains a URI and description for external data that
    users may want to reference in StationXML.'''

    uri = String.T(xmltagname='URI')
    description = Unicode.T(xmltagname='Description')
Example #17
class Distance(FloatWithUnit):
    '''Extension of FloatWithUnit for distances, elevations, and depths.'''

    unit = String.T(default='METERS', optional=True, xmlstyle='attribute')
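
These FloatWithUnit subclasses wrap a value plus an optional unit serialised as an XML attribute. A minimal usage sketch, mirroring calls like Distance(value=s.elevation) in FDSNStationXML.from_pyrocko_stations further below:

d = Distance(value=123.4)    # unit defaults to 'METERS'
print(d.value, d.unit)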
Example #18
class FloatWithUnit(FloatNoUnit):
    unit = String.T(optional=True, xmlstyle='attribute')
Example #19
class SampleRate(FloatWithUnit):
    '''Sample rate in samples per second.'''

    unit = String.T(default='SAMPLES/S', optional=True, xmlstyle='attribute')
Example #20
class ClockDrift(FloatWithUnit):
    unit = String.T(default='SECONDS/SAMPLE', optional=True,
                    xmlstyle='attribute')  # fixed
Example #21
class Operator(Object):
    agency_list = List.T(Unicode.T(xmltagname='Agency'))
    contact_list = List.T(Person.T(xmltagname='Contact'))
    web_site = String.T(optional=True, xmltagname='WebSite')
Example #22
class Second(FloatWithUnit):
    '''A time value in seconds.'''

    unit = String.T(default='SECONDS', optional=True, xmlstyle='attribute')
Example #23
class FDSNStationXML(Object):
    '''Top-level type for Station XML. Required fields are Source
    (network ID of the institution sending the message) and one or
    more Network containers or one or more Station containers.'''

    schema_version = Float.T(default=1.0, xmlstyle='attribute')
    source = String.T(xmltagname='Source')
    sender = String.T(optional=True, xmltagname='Sender')
    module = String.T(optional=True, xmltagname='Module')
    module_uri = String.T(optional=True, xmltagname='ModuleURI')
    created = Timestamp.T(xmltagname='Created')
    network_list = List.T(Network.T(xmltagname='Network'))

    xmltagname = 'FDSNStationXML'
    guessable_xmlns = [guts_xmlns]

    def get_pyrocko_stations(self, nslcs=None, nsls=None,
                             time=None, timespan=None,
                             inconsistencies='warn'):

        assert inconsistencies in ('raise', 'warn')

        if nslcs is not None:
            nslcs = set(nslcs)

        if nsls is not None:
            nsls = set(nsls)

        tt = ()
        if time is not None:
            tt = (time,)
        elif timespan is not None:
            tt = timespan

        pstations = []
        for network in self.network_list:
            if not network.spans(*tt):
                continue

            for station in network.station_list:
                if not station.spans(*tt):
                    continue

                if station.channel_list:
                    loc_to_channels = {}
                    for channel in station.channel_list:
                        if not channel.spans(*tt):
                            continue

                        loc = channel.location_code.strip()
                        if loc not in loc_to_channels:
                            loc_to_channels[loc] = []

                        loc_to_channels[loc].append(channel)

                    for loc in sorted(loc_to_channels.keys()):
                        channels = loc_to_channels[loc]
                        if nslcs is not None:
                            channels = [channel for channel in channels
                                        if (network.code, station.code, loc,
                                            channel.code) in nslcs]

                        if not channels:
                            continue

                        nsl = network.code, station.code, loc
                        if nsls is not None and nsl not in nsls:
                            continue

                        pstations.append(
                            pyrocko_station_from_channels(
                                nsl,
                                channels,
                                inconsistencies=inconsistencies))
                else:
                    pstations.append(pyrocko.model.Station(
                        network.code, station.code, '*',
                        lat=station.latitude.value,
                        lon=station.longitude.value,
                        elevation=value_or_none(station.elevation),
                        name=station.description or ''))

        return pstations

    @classmethod
    def from_pyrocko_stations(
            cls, pyrocko_stations, add_flat_responses_from=None):

        '''Generate :py:class:`FDSNStationXML` from a list of
        :py:class:`pyrocko.model.Station` instances.

        :param pyrocko_stations: list of :py:class:`pyrocko.model.Station`
            instances.
        :param add_flat_responses_from: unit, ``'M'``, ``'M/S'`` or
            ``'M/S**2'``
        '''
        from collections import defaultdict
        network_dict = defaultdict(list)

        if add_flat_responses_from:
            assert add_flat_responses_from in ('M', 'M/S', 'M/S**2')
            extra = dict(
                response=Response(
                    instrument_sensitivity=Sensitivity(
                        value=1.0,
                        frequency=1.0,
                        input_units=Units(name=add_flat_responses_from))))
        else:
            extra = {}

        for s in pyrocko_stations:
            network, station, location = s.nsl()
            channel_list = []
            for c in s.channels:
                channel_list.append(
                    Channel(
                        location_code=location,
                        code=c.name,
                        latitude=Latitude(value=s.lat),
                        longitude=Longitude(value=s.lon),
                        elevation=Distance(value=s.elevation),
                        depth=Distance(value=s.depth),
                        azimuth=Azimuth(value=c.azimuth),
                        dip=Dip(value=c.dip),
                        **extra
                    )
                )

            network_dict[network].append(
                Station(
                    code=station,
                    latitude=Latitude(value=s.lat),
                    longitude=Longitude(value=s.lon),
                    elevation=Distance(value=s.elevation),
                    channel_list=channel_list)
            )

        timestamp = time.time()
        network_list = []
        for k, station_list in network_dict.items():

            network_list.append(
                Network(
                    code=k, station_list=station_list,
                    total_number_stations=len(station_list)))

        sxml = FDSNStationXML(
            source='from pyrocko stations list', created=timestamp,
            network_list=network_list)

        sxml.validate()
        return sxml

    def iter_network_stations(
            self, net=None, sta=None, time=None, timespan=None):

        tt = ()
        if time is not None:
            tt = (time,)
        elif timespan is not None:
            tt = timespan

        for network in self.network_list:
            if not network.spans(*tt) or (
                    net is not None and network.code != net):
                continue

            for station in network.station_list:
                if not station.spans(*tt) or (
                        sta is not None and station.code != sta):
                    continue

                yield (network, station)

    def iter_network_station_channels(
            self, net=None, sta=None, loc=None, cha=None,
            time=None, timespan=None):

        if loc is not None:
            loc = loc.strip()

        tt = ()
        if time is not None:
            tt = (time,)
        elif timespan is not None:
            tt = timespan

        for network in self.network_list:
            if not network.spans(*tt) or (
                    net is not None and network.code != net):
                continue

            for station in network.station_list:
                if not station.spans(*tt) or (
                        sta is not None and station.code != sta):
                    continue

                if station.channel_list:
                    for channel in station.channel_list:
                        if (not channel.spans(*tt) or
                                (cha is not None and channel.code != cha) or
                                (loc is not None and
                                 channel.location_code.strip() != loc)):
                            continue

                        yield (network, station, channel)

    def get_channel_groups(self, net=None, sta=None, loc=None, cha=None,
                           time=None, timespan=None):

        groups = {}
        for network, station, channel in self.iter_network_station_channels(
                net, sta, loc, cha, time=time, timespan=timespan):

            net = network.code
            sta = station.code
            cha = channel.code
            loc = channel.location_code.strip()
            if len(cha) == 3:
                bic = cha[:2]  # band and instrument code according to SEED
            elif len(cha) == 1:
                bic = ''
            else:
                bic = cha

            if channel.response and \
                    channel.response.instrument_sensitivity and \
                    channel.response.instrument_sensitivity.input_units:

                unit = channel.response.instrument_sensitivity.input_units.name
            else:
                unit = None

            bic = (bic, unit)

            k = net, sta, loc
            if k not in groups:
                groups[k] = {}

            if bic not in groups[k]:
                groups[k][bic] = []

            groups[k][bic].append(channel)

        for nsl, bic_to_channels in groups.items():
            bad_bics = []
            for bic, channels in bic_to_channels.items():
                sample_rates = []
                for channel in channels:
                    sample_rates.append(channel.sample_rate.value)

                if not same(sample_rates):
                    scs = ','.join(channel.code for channel in channels)
                    srs = ', '.join('%e' % x for x in sample_rates)
                    err = 'ignoring channels with inconsistent sampling ' + \
                          'rates (%s.%s.%s.%s: %s)' % (nsl + (scs, srs))

                    logger.warning(err)
                    bad_bics.append(bic)

            for bic in bad_bics:
                del bic_to_channels[bic]

        return groups

    def choose_channels(
            self,
            target_sample_rate=None,
            priority_band_code=['H', 'B', 'M', 'L', 'V', 'E', 'S'],
            priority_units=['M/S', 'M/S**2'],
            priority_instrument_code=['H', 'L'],
            time=None,
            timespan=None):

        nslcs = {}
        for nsl, bic_to_channels in self.get_channel_groups(
                time=time, timespan=timespan).items():

            useful_bics = []
            for bic, channels in bic_to_channels.items():
                rate = channels[0].sample_rate.value

                if target_sample_rate is not None and \
                        rate < target_sample_rate*0.99999:
                    continue

                if len(bic[0]) == 2:
                    if bic[0][0] not in priority_band_code:
                        continue

                    if bic[0][1] not in priority_instrument_code:
                        continue

                unit = bic[1]

                prio_unit = len(priority_units)
                try:
                    prio_unit = priority_units.index(unit)
                except ValueError:
                    pass

                prio_inst = len(priority_instrument_code)
                prio_band = len(priority_band_code)
                if len(channels[0].code) == 3:
                    try:
                        prio_inst = priority_instrument_code.index(
                            channels[0].code[1])
                    except ValueError:
                        pass

                    try:
                        prio_band = priority_band_code.index(
                            channels[0].code[0])
                    except ValueError:
                        pass

                if target_sample_rate is None:
                    rate = -rate

                useful_bics.append((-len(channels), prio_band, rate, prio_unit,
                                    prio_inst, bic))

            useful_bics.sort()

            for _, _, rate, _, _, bic in useful_bics:
                channels = sorted(
                    bic_to_channels[bic],
                    key=lambda channel: channel.code)

                if channels:
                    for channel in channels:
                        nslcs[nsl + (channel.code,)] = channel

                    break

        return nslcs

    def get_pyrocko_response(
            self, nslc, time=None, timespan=None, fake_input_units=None):

        net, sta, loc, cha = nslc
        resps = []
        for _, _, channel in self.iter_network_station_channels(
                net, sta, loc, cha, time=time, timespan=timespan):
            resp = channel.response
            if resp:
                resps.append(resp.get_pyrocko_response(
                    nslc, fake_input_units=fake_input_units))

        if not resps:
            raise NoResponseInformation('%s.%s.%s.%s' % nslc)
        elif len(resps) > 1:
            raise MultipleResponseInformation('%s.%s.%s.%s' % nslc)

        return resps[0]

    @property
    def n_code_list(self):
        return sorted(set(x.code for x in self.network_list))

    @property
    def ns_code_list(self):
        nss = set()
        for network in self.network_list:
            for station in network.station_list:
                nss.add((network.code, station.code))

        return sorted(nss)

    @property
    def nsl_code_list(self):
        nsls = set()
        for network in self.network_list:
            for station in network.station_list:
                for channel in station.channel_list:
                    nsls.add(
                        (network.code, station.code, channel.location_code))

        return sorted(nsls)

    @property
    def nslc_code_list(self):
        nslcs = set()
        for network in self.network_list:
            for station in network.station_list:
                for channel in station.channel_list:
                    nslcs.add(
                        (network.code, station.code, channel.location_code,
                            channel.code))

        return sorted(nslcs)

    def summary(self):
        lst = [
            'number of n codes: %i' % len(self.n_code_list),
            'number of ns codes: %i' % len(self.ns_code_list),
            'number of nsl codes: %i' % len(self.nsl_code_list),
            'number of nslc codes: %i' % len(self.nslc_code_list)
        ]

        return '\n'.join(lst)
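
A hedged usage sketch of the container above: build it from pyrocko station objects, then pick channels. The station file name is a placeholder:

from pyrocko import model

stations = model.load_stations('stations.txt')
sx = FDSNStationXML.from_pyrocko_stations(
    stations, add_flat_responses_from='M/S')
print(sx.summary())
# maps (net, sta, loc, cha) -> Channel, preferring broadband sensors
nslc_to_channel = sx.choose_channels(target_sample_rate=20.)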
Example #24
class Voltage(FloatWithUnit):
    unit = String.T(default='VOLTS', optional=True, xmlstyle='attribute')
Example #25
class Station(Location):
    network = String.T()
    station = String.T()
    location = String.T()
    name = String.T(default='')
    channels = List.T(Channel.T())

    def __init__(self,
                 network='',
                 station='',
                 location='',
                 lat=0.0,
                 lon=0.0,
                 elevation=0.0,
                 depth=0.0,
                 name='',
                 channels=None):

        Location.__init__(self,
                          network=network,
                          station=station,
                          location=location,
                          lat=float(lat),
                          lon=float(lon),
                          elevation=elevation and float(elevation) or 0.0,
                          depth=depth and float(depth) or 0.0,
                          name=name or '',
                          channels=channels or [])

        self.dist_deg = None
        self.dist_m = None
        self.azimuth = None
        self.backazimuth = None

    def copy(self):
        return copy.deepcopy(self)

    def set_event_relative_data(self, event, distance_3d=False):
        surface_dist = orthodrome.distance_accurate50m(event, self)
        if distance_3d:
            dd = event.depth - self.depth
            self.dist_m = math.sqrt(dd**2 + surface_dist**2)
        else:
            self.dist_m = surface_dist

        self.dist_deg = surface_dist / orthodrome.earthradius_equator * \
            orthodrome.r2d
        self.azimuth = orthodrome.azimuth(event, self)
        self.backazimuth = orthodrome.azimuth(self, event)

    def set_channels_by_name(self, *args):
        self.set_channels([])
        for name in args:
            self.add_channel(Channel(name))

    def set_channels(self, channels):
        self.channels = []
        for ch in channels:
            self.add_channel(ch)

    def get_channels(self):
        return list(self.channels)

    def get_channel_names(self):
        return set(ch.name for ch in self.channels)

    def remove_channel_by_name(self, name):
        todel = [ch for ch in self.channels if ch.name == name]
        for ch in todel:
            self.channels.remove(ch)

    def add_channel(self, channel):
        self.remove_channel_by_name(channel.name)
        self.channels.append(channel)
        self.channels.sort(key=lambda ch: ch.name)

    def get_channel(self, name):
        for ch in self.channels:
            if ch.name == name:
                return ch

        return None

    def rotation_ne_to_rt(self, in_channel_names, out_channel_names):

        angle = wrap(self.backazimuth + 180., -180., 180.)
        in_channels = [self.get_channel(name) for name in in_channel_names]
        out_channels = [
            Channel(out_channel_names[0], angle, 0., 1.),
            Channel(out_channel_names[1],
                    wrap(self.backazimuth + 270., -180., 180.), 0., 1.)
        ]
        return angle, in_channels, out_channels

    def _projection_to(self,
                       to,
                       in_channel_names,
                       out_channel_names,
                       use_gains=False):

        in_channels = [self.get_channel(name) for name in in_channel_names]

        # create orthogonal vectors for missing components so that
        # projections do not break when some components are absent

        vecs = []
        for ch in in_channels:
            if ch is None:
                vecs.append(None)
            else:
                vec = getattr(ch, to)
                if use_gains:
                    vec /= ch.gain
                vecs.append(vec)

        fill_orthogonal(vecs)
        if not are_orthogonal(vecs):
            raise ChannelsNotOrthogonal(
                'components are not orthogonal: station %s.%s.%s, '
                'channels %s, %s, %s' % (self.nsl() + tuple(in_channel_names)))

        m = num.hstack([vec2[:, num.newaxis] for vec2 in vecs])

        m = num.where(num.abs(m) < num.max(num.abs(m)) * 1e-16, 0., m)

        if to == 'ned':
            out_channels = [
                Channel(out_channel_names[0], 0., 0., 1.),
                Channel(out_channel_names[1], 90., 0., 1.),
                Channel(out_channel_names[2], 0., 90., 1.)
            ]

        elif to == 'enu':
            out_channels = [
                Channel(out_channel_names[0], 90., 0., 1.),
                Channel(out_channel_names[1], 0., 0., 1.),
                Channel(out_channel_names[2], 0., -90., 1.)
            ]

        return m, in_channels, out_channels

    def guess_channel_groups(self):
        cg = {}
        for channel in self.get_channels():
            if len(channel.name) >= 1:
                kind = channel.name[:-1]
                if kind not in cg:
                    cg[kind] = []
                cg[kind].append(channel.name[-1])

        def allin(a, b):
            return all(x in b for x in a)

        out_groups = []
        for kind, components in cg.items():
            for sys in ('ENZ', '12Z', 'XYZ', 'RTZ'):
                if allin(sys, components):
                    out_groups.append(tuple([kind + c for c in sys]))

        return out_groups

    def guess_projections_to_enu(self, out_channels=('E', 'N', 'U'), **kwargs):
        proj = []
        for cg in self.guess_channel_groups():
            try:
                proj.append(
                    self.projection_to_enu(cg,
                                           out_channels=out_channels,
                                           **kwargs))

            except ChannelsNotOrthogonal as e:
                logger.warning(str(e))

        return proj

    def guess_projections_to_rtu(self,
                                 out_channels=('R', 'T', 'U'),
                                 backazimuth=None,
                                 **kwargs):

        if backazimuth is None:
            backazimuth = self.backazimuth
        out_channels_ = [
            Channel(out_channels[0], wrap(backazimuth + 180., -180., 180.), 0.,
                    1.),
            Channel(out_channels[1], wrap(backazimuth + 270., -180., 180.), 0.,
                    1.),
            Channel(out_channels[2], 0., -90., 1.)
        ]

        proj = []
        for (m, in_channels, _) in self.guess_projections_to_enu(**kwargs):
            phi = (backazimuth + 180.) * d2r
            r = num.array([[math.sin(phi), math.cos(phi), 0.0],
                           [math.cos(phi), -math.sin(phi), 0.0],
                           [0.0, 0.0, 1.0]])
            proj.append((num.dot(r, m), in_channels, out_channels_))

        return proj

    def projection_to_enu(self,
                          in_channels,
                          out_channels=('E', 'N', 'U'),
                          **kwargs):

        return self._projection_to('enu', in_channels, out_channels, **kwargs)

    def projection_to_ned(self,
                          in_channels,
                          out_channels=('N', 'E', 'D'),
                          **kwargs):

        return self._projection_to('ned', in_channels, out_channels, **kwargs)

    def projection_from_enu(self,
                            in_channels=('E', 'N', 'U'),
                            out_channels=('X', 'Y', 'Z'),
                            **kwargs):

        m, out_channels, in_channels = self._projection_to(
            'enu', out_channels, in_channels, **kwargs)

        return num.linalg.inv(m), in_channels, out_channels

    def projection_from_ned(self,
                            in_channels=('N', 'E', 'D'),
                            out_channels=('X', 'Y', 'Z'),
                            **kwargs):

        m, out_channels, in_channels = self._projection_to(
            'ned', out_channels, in_channels, **kwargs)

        return num.linalg.inv(m), in_channels, out_channels

    def nsl_string(self):
        return '.'.join((self.network, self.station, self.location))

    def nsl(self):
        return self.network, self.station, self.location

    def oldstr(self):
        nsl = '%s.%s.%s' % (self.network, self.station, self.location)
        s = '%-15s  %14.5f %14.5f %14.1f %14.1f %s' % (
            nsl, self.lat, self.lon, self.elevation, self.depth, self.name)
        return s
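A minimal usage sketch for the projection helpers above, assuming the
pyrocko ``Station``/``Channel`` API (the ``add_channel`` call and the
constructor signatures are assumptions about that API, not part of this
example):

from pyrocko.model import Channel, Station

sta = Station(network='GE', station='APE', location='',
              lat=37.07, lon=25.53)

# Channel(name, azimuth, dip, gain): azimuth clockwise from north,
# dip positive downwards, matching the out_channels built above.
for name, azimuth, dip in [('BHE', 90., 0.),    # east, horizontal
                           ('BHN', 0., 0.),     # north, horizontal
                           ('BHZ', 0., -90.)]:  # vertical, pointing up
    sta.add_channel(Channel(name, azimuth, dip, 1.))

# One projection matrix per guessed channel group; for channels already
# oriented E/N/Z the matrix is (numerically) the identity.
for m, in_channels, out_channels in sta.guess_projections_to_enu():
    print([c.name for c in in_channels], '->',
          [c.name for c in out_channels])
    print(m)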
Example #26
class Angle(FloatWithUnit):
    unit = String.T(default='DEGREES', optional=True, xmlstyle='attribute')
Example #27
class PsGrnConfigFull(PsGrnConfig):

    earthmodel_1d = gf.meta.Earthmodel1D.T(optional=True)
    psgrn_outdir = String.T(default='psgrn_green/')

    sampling_interval = Float.T(default=1.0)  # 1.0 for equidistant

    sw_source_regime = Int.T(default=1)  # 1-continental, 0-ocean
    sw_gravity = Int.T(default=0)

    accuracy_wavenumber_integration = Float.T(default=0.025)

    displ_filenames = Tuple.T(3, String.T(), default=psgrn_displ_names)
    stress_filenames = Tuple.T(6, String.T(), default=psgrn_stress_names)
    tilt_filenames = Tuple.T(3, String.T(), default=psgrn_tilt_names)
    gravity_filenames = Tuple.T(2, String.T(), default=psgrn_gravity_names)

    @staticmethod
    def example():
        conf = PsGrnConfigFull()
        conf.earthmodel_1d = cake.load_model().extract(depth_max=100 * km)
        conf.psgrn_outdir = 'TEST_psgrn_functions/'
        return conf

    def string_for_config(self):

        assert self.earthmodel_1d is not None

        d = self.__dict__.copy()

        model_str, nlines = cake_model_to_config(self.earthmodel_1d)
        d['n_model_lines'] = nlines
        d['model_lines'] = model_str

        d['str_psgrn_outdir'] = "'%s'" % './'

        d['str_displ_filenames'] = str_str_vals(self.displ_filenames)
        d['str_stress_filenames'] = str_str_vals(self.stress_filenames)
        d['str_tilt_filenames'] = str_str_vals(self.tilt_filenames)
        d['str_gravity_filenames'] = str_str_vals(self.gravity_filenames)

        d['str_distance_grid'] = self.distance_grid.string_for_config()
        d['str_depth_grid'] = self.depth_grid.string_for_config()

        template = '''# autogenerated PSGRN input by psgrn.py
#=============================================================================
# This is input file of FORTRAN77 program "psgrn08a" for computing responses
# (Green's functions) of a multi-layered viscoelastic halfspace to point
# dislocation sources buried at different depths. All results will be stored in
# the given directory and provide the necessary data base for the program
# "pscmp07a" for computing time-dependent deformation, geoid and gravity changes
# induced by an earthquake with extended fault planes via linear superposition.
# For more details, please read the accompanying READ.ME file.
#
# written by Rongjiang Wang
# GeoForschungsZentrum Potsdam
# e-mail: [email protected]
# phone +49 331 2881209
# fax +49 331 2881204
#
# Last modified: Potsdam, Jan, 2008
#
#################################################################
##                                                             ##
## Cylindrical coordinates (Z positive downwards!) are used.   ##
##                                                             ##
## If not specified otherwise, SI Unit System is used overall! ##
##                                                             ##
#################################################################
#
#------------------------------------------------------------------------------
#
#	PARAMETERS FOR SOURCE-OBSERVATION CONFIGURATIONS
#	================================================
# 1. the uniform depth of the observation points [km], switch for oceanic (0)
#    or continental(1) earthquakes;
# 2. number of (horizontal) observation distances (> 1 and <= nrmax defined in
#    psgglob.h), start and end distances [km], ratio (>= 1.0) between max. and
#    min. sampling interval (1.0 for equidistant sampling);
# 3. number of equidistant source depths (>= 1 and <= nzsmax defined in
#    psgglob.h), start and end source depths [km];
#
#    r1,r2 = minimum and maximum horizontal source-observation
#            distances (r2 > r1).
#    zs1,zs2 = minimum and maximum source depths (zs2 >= zs1 > 0).
#
#    Note that the same sampling rates dr_min and dzs will be used later by the
#    program "pscmp07a" for discretizing the finite source planes to a 2D grid
#    of point sources.
#------------------------------------------------------------------------------
        %(observation_depth)e  %(sw_source_regime)i
 %(str_distance_grid)s  %(sampling_interval)e
 %(str_depth_grid)s
#------------------------------------------------------------------------------
#
#	PARAMETERS FOR TIME SAMPLING
#	============================
# 1. number of time samples (<= ntmax def. in psgglob.h) and time window [days].
#
#    Note that nt (> 0) should be power of 2 (the fft-rule). If nt = 1, the
#    coseismic (t = 0) changes will be computed; If nt = 2, the coseismic
#    (t = 0) and steady-state (t -> infinity) changes will be computed;
#    Otherwise, time series for the given time samples will be computed.
#
#------------------------------------------------------------------------------
 %(n_snapshots)i    %(max_time)f
#------------------------------------------------------------------------------
#
#	PARAMETERS FOR WAVENUMBER INTEGRATION
#	=====================================
# 1. relative accuracy of the wave-number integration (suggested: 0.1 - 0.01)
# 2. factor (> 0 and < 1) for including influence of earth's gravity on the
#    deformation field (e.g. 0/1 = without / with 100percent gravity effect).
#------------------------------------------------------------------------------
 %(accuracy_wavenumber_integration)e
 %(sw_gravity)i
#------------------------------------------------------------------------------
#
#	PARAMETERS FOR OUTPUT FILES
#	===========================
#
# 1. output directory
# 2. file names for 3 displacement components (uz, ur, ut)
# 3. file names for 6 stress components (szz, srr, stt, szr, srt, stz)
# 4. file names for radial and tangential tilt components (as measured by a
#    borehole tiltmeter), rigid rotation of horizontal plane, geoid and gravity
#    changes (tr, tt, rot, gd, gr)
#
#    Note that all file or directory names should not be longer than 80
#    characters. Directory and subdirectory names must be separated and ended
#    by / (unix) or \ (dos)! All file names should be given without extensions
#    that will be appended automatically by ".ep" for the explosion (inflation)
#    source, ".ss" for the strike-slip source, ".ds" for the dip-slip source,
#    and ".cl" for the compensated linear vector dipole source)
#
#------------------------------------------------------------------------------
 %(str_psgrn_outdir)s
 %(str_displ_filenames)s
 %(str_stress_filenames)s
 %(str_tilt_filenames)s %(str_gravity_filenames)s
#------------------------------------------------------------------------------
#
#	GLOBAL MODEL PARAMETERS
#	=======================
# 1. number of data lines of the layered model (<= lmax as defined in psgglob.h)
#
#    The surface and the upper boundary of the half-space as well as the
#    interfaces at which the viscoelastic parameters are continuous, are all
#    defined by a single data line; All other interfaces, at which the
#    viscoelastic parameters are discontinuous, are all defined by two data
#    lines (upper-side and lower-side values). This input format could also be
#    used for a graphic plot of the layered model. Layers which have different
#    parameter values at top and bottom, will be treated as layers with a
#    constant gradient, and will be discretised to a number of homogeneous
#    sublayers. Errors due to the discretisation are limited within about
#    5percent (changeable, see psgglob.h).
#
# 2....	parameters of the multilayered model
#
#    Burgers rheology (a Kelvin-Voigt body and a Maxwell body in series
#    connection) for relaxation of shear modulus is implemented. No relaxation
#    of compressional modulus is considered.
#
#    eta1  = transient viscosity (dashpot of the Kelvin-Voigt body; <= 0 means
#            infinity value)
#    eta2  = steady-state viscosity (dashpot of the Maxwell body; <= 0 means
#            infinity value)
#    alpha = ratio between the effective and the unrelaxed shear modulus
#            = mu1/(mu1+mu2) (> 0 and <= 1)
#
#    Special cases:
#        (1) Elastic: eta1 and eta2 <= 0 (i.e. infinity); alpha meaningless
#        (2) Maxwell body: eta1 <= 0 (i.e. eta1 = infinity)
#                          or alpha = 1 (i.e. mu1 = infinity)
#        (3) Standard-Linear-Solid: eta2 <= 0 (i.e. infinity)
#------------------------------------------------------------------------------
 %(n_model_lines)i                               |int: no_model_lines;
#------------------------------------------------------------------------------
# no  depth[km]  vp[km/s]  vs[km/s]  rho[kg/m^3] eta1[Pa*s] eta2[Pa*s] alpha
#------------------------------------------------------------------------------
%(model_lines)s
#=======================end of input===========================================
'''  # noqa
        return template % d
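The returned string is produced by plain ``%(key)s`` dictionary
substitution; a self-contained sketch of the same mechanism (the values
below are made up for illustration):

template = '''# autogenerated input
 %(observation_depth)e  %(sw_source_regime)i
 %(n_model_lines)i
%(model_lines)s
'''

d = dict(
    observation_depth=0.0,
    sw_source_regime=1,
    n_model_lines=2,
    model_lines=' 1   0.0  5.8  3.4  2700.0\n 2  35.0  8.0  4.5  3300.0')

print(template % d)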
Example #28
class Azimuth(FloatWithUnit):
    '''Instrument azimuth, degrees clockwise from North.'''

    unit = String.T(default='DEGREES', optional=True, xmlstyle='attribute')
Example #29
class Problem(Object):
    '''
    Base class for objective function setup.

    Defines the *problem* to be solved by the optimiser.
    '''
    name = String.T()
    ranges = Dict.T(String.T(), gf.Range.T())
    dependants = List.T(Parameter.T())
    norm_exponent = Int.T(default=2)
    base_source = gf.Source.T(optional=True)
    targets = List.T(MisfitTarget.T())
    target_groups = List.T(TargetGroup.T())
    grond_version = String.T(optional=True)
    nthreads = Int.T(default=1)

    def __init__(self, **kwargs):
        Object.__init__(self, **kwargs)

        if self.grond_version is None:
            self.grond_version = __version__

        self._target_weights = None
        self._engine = None
        self._family_mask = None

        if hasattr(self, 'problem_waveform_parameters') and self.has_waveforms:
            self.problem_parameters =\
                self.problem_parameters + self.problem_waveform_parameters

        self.check()

    @classmethod
    def get_plot_classes(cls):
        from . import plot
        return plot.get_plot_classes()

    def check(self):
        paths = set()
        for grp in self.target_groups:
            if grp.path == 'all':
                continue
            if grp.path in paths:
                raise ValueError('Path %s defined more than once! In %s' %
                                 (grp.path, grp.__class__.__name__))
            paths.add(grp.path)
        logger.debug('TargetGroup check OK.')

    def copy(self):
        o = copy.copy(self)
        o._target_weights = None
        return o

    def set_target_parameter_values(self, x):
        nprob = len(self.problem_parameters)
        for target in self.targets:
            target.set_parameter_values(x[nprob:nprob + target.nparameters])
            nprob += target.nparameters

    def get_parameter_dict(self, model, group=None):
        params = []
        for ip, p in enumerate(self.parameters):
            if group in p.groups or group is None:
                params.append((p.name, model[ip]))
        return ADict(params)

    def get_parameter_array(self, d):
        arr = num.zeros(self.nparameters, dtype=float)
        for ip, p in enumerate(self.parameters):
            if p.name in d.keys():
                arr[ip] = d[p.name]
        return arr

    def dump_problem_info(self, dirname):
        fn = op.join(dirname, 'problem.yaml')
        util.ensuredirs(fn)
        guts.dump(self, filename=fn)

    def dump_problem_data(self,
                          dirname,
                          x,
                          misfits,
                          bootstraps=None,
                          sampler_context=None):

        fn = op.join(dirname, 'models')
        if not isinstance(x, num.ndarray):
            x = num.array(x)
        with open(fn, 'ab') as f:
            x.astype('<f8').tofile(f)

        fn = op.join(dirname, 'misfits')
        with open(fn, 'ab') as f:
            misfits.astype('<f8').tofile(f)

        if bootstraps is not None:
            fn = op.join(dirname, 'bootstraps')
            with open(fn, 'ab') as f:
                bootstraps.astype('<f8').tofile(f)

        if sampler_context is not None:
            fn = op.join(dirname, 'choices')
            with open(fn, 'ab') as f:
                num.array(sampler_context, dtype='<i8').tofile(f)

    def name_to_index(self, name):
        pnames = [p.name for p in self.combined]
        return pnames.index(name)

    @property
    def parameters(self):
        target_parameters = []
        for target in self.targets:
            target_parameters.extend(target.target_parameters)
        return self.problem_parameters + target_parameters

    @property
    def parameter_names(self):
        return [p.name for p in self.combined]

    @property
    def dependant_names(self):
        return [p.name for p in self.dependants]

    @property
    def nparameters(self):
        return len(self.parameters)

    @property
    def ntargets(self):
        return len(self.targets)

    @property
    def nwaveform_targets(self):
        return len(self.waveform_targets)

    @property
    def nsatellite_targets(self):
        return len(self.satellite_targets)

    @property
    def ngnss_targets(self):
        return len(self.gnss_targets)

    @property
    def nmisfits(self):
        nmisfits = 0
        for target in self.targets:
            nmisfits += target.nmisfits
        return nmisfits

    @property
    def ndependants(self):
        return len(self.dependants)

    @property
    def ncombined(self):
        return len(self.parameters) + len(self.dependants)

    @property
    def combined(self):
        return self.parameters + self.dependants

    @property
    def satellite_targets(self):
        return [
            t for t in self.targets if isinstance(t, SatelliteMisfitTarget)
        ]

    @property
    def gnss_targets(self):
        return [
            t for t in self.targets if isinstance(t, GNSSCampaignMisfitTarget)
        ]

    @property
    def waveform_targets(self):
        return [t for t in self.targets if isinstance(t, WaveformMisfitTarget)]

    @property
    def has_satellite(self):
        if self.satellite_targets:
            return True
        return False

    @property
    def has_waveforms(self):
        if self.waveform_targets:
            return True
        return False

    def set_engine(self, engine):
        self._engine = engine

    def get_engine(self):
        return self._engine

    def get_gf_store(self, target):
        if self.get_engine() is None:
            raise GrondError('Cannot get GF Store, modelling is not set up!')
        return self.get_engine().get_store(target.store_id)

    def random_uniform(self, xbounds, rstate):
        x = rstate.uniform(0., 1., self.nparameters)
        x *= (xbounds[:, 1] - xbounds[:, 0])
        x += xbounds[:, 0]
        return x

    def preconstrain(self, x):
        return x

    def extract(self, xs, i):
        if xs.ndim == 1:
            return self.extract(xs[num.newaxis, :], i)[0]

        if i < self.nparameters:
            return xs[:, i]
        else:
            return self.make_dependant(
                xs, self.dependants[i - self.nparameters].name)

    def get_target_weights(self):
        if self._target_weights is None:
            self._target_weights = num.concatenate(
                [target.get_combined_weight() for target in self.targets])

        return self._target_weights

    def get_target_residuals(self):
        pass

    def inter_family_weights(self, ns):
        exp, root = self.get_norm_functions()

        family, nfamilies = self.get_family_mask()

        ws = num.zeros(self.nmisfits)
        for ifamily in range(nfamilies):
            mask = family == ifamily
            ws[mask] = 1.0 / root(num.nansum(exp(ns[mask])))

        return ws

    def inter_family_weights2(self, ns):
        '''
        :param ns: 2D array with normalization factors ``ns[imodel, itarget]``
        :returns: 2D array ``weights[imodel, itarget]``
        '''

        exp, root = self.get_norm_functions()

        family, nfamilies = self.get_family_mask()
        ws = num.zeros(ns.shape)
        for ifamily in range(nfamilies):
            mask = family == ifamily
            ws[:, mask] = (
                1.0 / root(num.nansum(exp(ns[:, mask]), axis=1)))[:,
                                                                  num.newaxis]
        return ws

    def get_reference_model(self, expand=False):
        if expand:
            src_params = self.pack(self.base_source)
            ref = num.zeros(self.nparameters)
            ref[:src_params.size] = src_params
        else:
            ref = self.pack(self.base_source)
        return ref

    def get_parameter_bounds(self):
        out = []
        for p in self.problem_parameters:
            r = self.ranges[p.name]
            out.append((r.start, r.stop))

        for target in self.targets:
            for p in target.target_parameters:
                r = target.target_ranges[p.name_nogroups]
                out.append((r.start, r.stop))

        return num.array(out, dtype=float)

    def get_dependant_bounds(self):
        return num.zeros((0, 2))

    def get_combined_bounds(self):
        return num.vstack(
            (self.get_parameter_bounds(), self.get_dependant_bounds()))

    def raise_invalid_norm_exponent(self):
        raise GrondError('Invalid norm exponent: %f' % self.norm_exponent)

    def get_norm_functions(self):
        if self.norm_exponent == 2:

            def sqr(x):
                return x**2

            return sqr, num.sqrt

        elif self.norm_exponent == 1:

            def noop(x):
                return x

            return noop, num.abs

        else:
            self.raise_invalid_norm_exponent()

    def combine_misfits(self,
                        misfits,
                        extra_weights=None,
                        extra_residuals=None,
                        get_contributions=False):
        '''
        Combine misfit contributions (residuals) to global or bootstrap
        misfits.

        :param misfits: 3D array where ``misfits[imodel, iresidual, 0]`` are
            the misfit contributions (residuals) and
            ``misfits[imodel, iresidual, 1]`` are the normalisation
            contributions. It is also possible to give the misfit and
            normalisation contributions for a single model as
            ``misfits[iresidual, 0]`` and ``misfits[iresidual, 1]``, in which
            case the first dimension (imodel) of the result will be stripped
            off.

        :param extra_weights: if given, 2D array of extra weights to be applied
            to the contributions, indexed as
            ``extra_weights[ibootstrap, iresidual]``.

        :param extra_residuals: if given, 2D array of perturbations to be added
            to the residuals, indexed as
            ``extra_residuals[ibootstrap, iresidual]``.

        :param get_contributions: get the weighted and perturbed contributions
            (don't do the sum).

        :returns: if no *extra_weights* or *extra_residuals* are given, a 1D
            array indexed as ``misfits[imodel]`` containing the global misfit
            for each model is returned, otherwise a 2D array
            ``misfits[imodel, ibootstrap]`` with the misfit for every model and
            weighting/residual set is returned.
        '''

        exp, root = self.get_norm_functions()

        if misfits.ndim == 2:
            misfits = misfits[num.newaxis, :, :]
            return self.combine_misfits(misfits, extra_weights,
                                        extra_residuals,
                                        get_contributions)[0, ...]

        assert misfits.ndim == 3
        assert extra_weights is None or extra_weights.ndim == 2
        assert extra_residuals is None or extra_residuals.ndim == 2

        if extra_weights is not None or extra_residuals is not None:
            if extra_weights is not None:
                w = extra_weights[num.newaxis, :, :] \
                    * self.get_target_weights()[num.newaxis, num.newaxis, :] \
                    * self.inter_family_weights2(
                        misfits[:, :, 1])[:, num.newaxis, :]
            else:
                w = 1.0

            if extra_residuals is not None:
                r = extra_residuals[num.newaxis, :, :]
            else:
                r = 0.0

            if get_contributions:
                return exp(w*(misfits[:, num.newaxis, :, 0]+r)) \
                    / num.nansum(
                        exp(w*misfits[:, num.newaxis, :, 1]),
                        axis=2)[:, :, num.newaxis]

            res = root(
                num.nansum(exp(w * (misfits[:, num.newaxis, :, 0] + r)),
                           axis=2) /
                num.nansum(exp(w * (misfits[:, num.newaxis, :, 1])), axis=2))
            assert res[res < 0].size == 0
            return res
        else:
            w = self.get_target_weights()[num.newaxis, :] \
                * self.inter_family_weights2(misfits[:, :, 1])

            if get_contributions:
                return exp(w*misfits[:, :, 0]) \
                    / num.nansum(
                        exp(w*misfits[:, :, 1]),
                        axis=1)[:, num.newaxis]

            return root(
                num.nansum(exp(w * misfits[:, :, 0]), axis=1) /
                num.nansum(exp(w * misfits[:, :, 1]), axis=1))

    def make_family_mask(self):
        family_names = set()
        families = num.zeros(self.nmisfits, dtype=int)

        idx = 0
        for itarget, target in enumerate(self.targets):
            family_names.add(target.normalisation_family)
            families[idx:idx + target.nmisfits] = len(family_names) - 1
            idx += target.nmisfits

        return families, len(family_names)
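    # Illustrative note (assumption): three targets with normalisation
    # families ('time_domain', 'time_domain', 'insar'), one misfit each,
    # would yield families = [0, 0, 1] and nfamilies = 2.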

    def get_family_mask(self):
        if self._family_mask is None:
            self._family_mask = self.make_family_mask()

        return self._family_mask

    def evaluate(self, x, mask=None, result_mode='full', targets=None):
        source = self.get_source(x)
        engine = self.get_engine()

        self.set_target_parameter_values(x)

        if mask is not None and targets is not None:
            raise ValueError('Mask cannot be defined with targets set.')
        targets = targets if targets is not None else self.targets

        for target in targets:
            target.set_result_mode(result_mode)

        modelling_targets = []
        t2m_map = {}
        for itarget, target in enumerate(targets):
            t2m_map[target] = target.prepare_modelling(engine, source, targets)
            if mask is None or mask[itarget]:
                modelling_targets.extend(t2m_map[target])

        u2m_map = {}
        for imtarget, mtarget in enumerate(modelling_targets):
            if mtarget not in u2m_map:
                u2m_map[mtarget] = []

            u2m_map[mtarget].append(imtarget)

        modelling_targets_unique = list(u2m_map.keys())

        resp = engine.process(source,
                              modelling_targets_unique,
                              nthreads=self.nthreads)
        modelling_results_unique = list(resp.results_list[0])

        modelling_results = [None] * len(modelling_targets)

        for mtarget, mresult in zip(modelling_targets_unique,
                                    modelling_results_unique):

            for itarget in u2m_map[mtarget]:
                modelling_results[itarget] = mresult

        imt = 0
        results = []
        for itarget, target in enumerate(targets):
            nmt_this = len(t2m_map[target])
            if mask is None or mask[itarget]:
                result = target.finalize_modelling(
                    engine, source, t2m_map[target],
                    modelling_results[imt:imt + nmt_this])

                imt += nmt_this
            else:
                result = gf.SeismosizerError(
                    'target was excluded from modelling')

            results.append(result)

        return results

    def misfits(self, x, mask=None):
        results = self.evaluate(x, mask=mask, result_mode='sparse')
        misfits = num.full((self.nmisfits, 2), num.nan)

        imisfit = 0
        for target, result in zip(self.targets, results):
            if isinstance(result, MisfitResult):
                misfits[imisfit:imisfit + target.nmisfits, :] = result.misfits

            imisfit += target.nmisfits

        return misfits

    def forward(self, x):
        source = self.get_source(x)
        engine = self.get_engine()

        plain_targets = []
        for target in self.targets:
            plain_targets.extend(target.get_plain_targets(engine, source))

        resp = engine.process(source, plain_targets)

        results = []
        for target, result in zip(plain_targets, resp.results_list[0]):
            if isinstance(result, gf.SeismosizerError):
                logger.debug('%s.%s.%s.%s: %s' % (target.codes +
                                                  (str(result), )))
            else:
                results.append(result)

        return results

    def get_random_model(self, ntries_limit=100):
        xbounds = self.get_parameter_bounds()

        for _ in range(ntries_limit):
            x = self.random_uniform(xbounds, rstate=g_rstate)
            try:
                return self.preconstrain(x)

            except Forbidden:
                pass

        raise GrondError(
            'Could not find any suitable candidate sample within %i tries' %
            (ntries_limit))
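A worked, standalone sketch of the norm-2 combination rule implemented in
combine_misfits() above; the weight vector here stands in for the product
of target and inter-family weights, which is a simplification made for
illustration:

import numpy as num

# misfits[imodel, iresidual, 0] = misfit contributions m_i
# misfits[imodel, iresidual, 1] = normalisation contributions n_i
misfits = num.array([
    [[0.1, 1.0], [0.3, 1.0]],
    [[0.2, 1.0], [0.1, 1.0]]])

w = num.array([1.0, 2.0])  # stand-in combined weights per residual

# Global misfit per model: sqrt(sum((w*m)**2) / sum((w*n)**2)).
global_misfit = num.sqrt(
    num.nansum((w * misfits[:, :, 0])**2, axis=1) /
    num.nansum((w * misfits[:, :, 1])**2, axis=1))

print(global_misfit)  # one value per model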
Example #30
    def testOptionalDefault(self):

        from pyrocko.guts_array import Array, array_equal
        import numpy as num
        assert_ae = num.testing.assert_almost_equal

        def array_equal_noneaware(a, b):
            if a is None:
                return b is None
            elif b is None:
                return a is None
            else:
                return array_equal(a, b)

        data = [
            ('a', Int.T(),
                [None, 0, 1, 2],
                ['aerr', 0, 1, 2]),
            ('b', Int.T(optional=True),
                [None, 0, 1, 2],
                [None, 0, 1, 2]),
            ('c', Int.T(default=1),
                [None, 0, 1, 2],
                [1, 0, 1, 2]),
            ('d', Int.T(default=1, optional=True),
                [None, 0, 1, 2],
                [1, 0, 1, 2]),
            ('e', List.T(Int.T()),
                [None, [], [1], [2]],
                [[], [], [1], [2]]),
            ('f', List.T(Int.T(), optional=True),
                [None, [], [1], [2]],
                [None, [], [1], [2]]),
            ('g', List.T(Int.T(), default=[1]), [
                None, [], [1], [2]],
                [[1], [], [1], [2]]),
            ('h', List.T(Int.T(), default=[1], optional=True),
                [None, [], [1], [2]],
                [[1], [], [1], [2]]),
            ('i', Tuple.T(2, Int.T()),
                [None, (1, 2)],
                ['err', (1, 2)]),
            ('j', Tuple.T(2, Int.T(), optional=True),
                [None, (1, 2)],
                [None, (1, 2)]),
            ('k', Tuple.T(2, Int.T(), default=(1, 2)),
                [None, (1, 2), (3, 4)],
                [(1, 2), (1, 2), (3, 4)]),
            ('l', Tuple.T(2, Int.T(), default=(1, 2), optional=True),
                [None, (1, 2), (3, 4)],
                [(1, 2), (1, 2), (3, 4)]),
            ('i2', Tuple.T(None, Int.T()),
                [None, (1, 2)],
                [(), (1, 2)]),
            ('j2', Tuple.T(None, Int.T(), optional=True),
                [None, (), (3, 4)],
                [None, (), (3, 4)]),
            ('k2', Tuple.T(None, Int.T(), default=(1,)),
                [None, (), (3, 4)],
                [(1,), (), (3, 4)]),
            ('l2', Tuple.T(None, Int.T(), default=(1,), optional=True),
                [None, (), (3, 4)],
                [(1,), (), (3, 4)]),
            ('m', Array.T(shape=(None,), dtype=int, serialize_as='list'),
                [num.arange(0), num.arange(2)],
                [num.arange(0), num.arange(2)]),
            ('n', Array.T(shape=(None,), dtype=int, serialize_as='list',
                          optional=True),
                [None, num.arange(0), num.arange(2)],
                [None, num.arange(0), num.arange(2)]),
            ('o', Array.T(shape=(None,), dtype=int, serialize_as='list',
                          default=num.arange(2)),
                [None, num.arange(0), num.arange(2), num.arange(3)],
                [num.arange(2), num.arange(0), num.arange(2), num.arange(3)]),
            ('p', Array.T(shape=(None,), dtype=int, serialize_as='list',
                          default=num.arange(2), optional=True),
                [None, num.arange(0), num.arange(2), num.arange(3)],
                [num.arange(2), num.arange(0), num.arange(2), num.arange(3)]),
            ('q', Dict.T(String.T(), Int.T()),
                [None, {}, {'a': 1}],
                [{}, {}, {'a': 1}]),
            ('r', Dict.T(String.T(), Int.T(), optional=True),
                [None, {}, {'a': 1}],
                [None, {}, {'a': 1}]),
            ('s', Dict.T(String.T(), Int.T(), default={'a': 1}),
                [None, {}, {'a': 1}],
                [{'a': 1}, {}, {'a': 1}]),
            ('t', Dict.T(String.T(), Int.T(), default={'a': 1}, optional=True),
                [None, {}, {'a': 1}],
                [{'a': 1}, {}, {'a': 1}]),
        ]

        for k, t, vals, exp in data:
            last = [None]

            class A(Object):
                def __init__(self, **kwargs):
                    last[0] = len(kwargs)
                    Object.__init__(self, **kwargs)

                v = t

            A.T.class_signature()

            for v, e in zip(vals, exp):
                if isinstance(e, str) and e == 'aerr':
                    with self.assertRaises(ArgumentError):
                        if v is not None:
                            a1 = A(v=v)
                        else:
                            a1 = A()

                    continue
                else:
                    if v is not None:
                        a1 = A(v=v)
                    else:
                        a1 = A()

                if isinstance(e, str) and e == 'err':
                    with self.assertRaises(ValidationError):
                        a1.validate()
                else:
                    a1.validate()
                    a2 = load_string(dump(a1))
                    if isinstance(e, num.ndarray):
                        assert last[0] == int(
                            not (array_equal_noneaware(t.default(), a1.v)
                                 and t.optional))
                        assert_ae(a1.v, e)
                        assert_ae(a2.v, e)
                    else:
                        assert last[0] == int(
                            not (t.default() == a1.v and t.optional))
                        self.assertEqual(a1.v, e)
                        self.assertEqual(a2.v, e)
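The table above boils down to a few rules; a minimal sketch of the
default/optional semantics, assuming pyrocko.guts as imported in the test:

from pyrocko.guts import Int, Object

class A(Object):
    a = Int.T()                          # required: omitting it raises
                                         # ArgumentError on construction
    b = Int.T(optional=True)             # omitted -> stays None
    c = Int.T(default=1)                 # omitted -> set to the default
    d = Int.T(default=1, optional=True)  # omitted -> set to the default

x = A(a=5)
x.validate()
print(x.a, x.b, x.c, x.d)  # 5 None 1 1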