class EnhancedSacPzResponse(Object):
    '''
    SAC poles-and-zeros response bundled with station metadata and an
    optional validity time window (``tmin``/``tmax``; ``None`` means open).
    '''
    codes = Tuple.T(4, String.T())
    tmin = Timestamp.T(optional=True)
    tmax = Timestamp.T(optional=True)
    lat = Float.T()
    lon = Float.T()
    elevation = Float.T()
    depth = Float.T()
    dip = Float.T()
    azimuth = Float.T()
    input_unit = String.T()
    output_unit = String.T()
    response = trace.PoleZeroResponse.T()

    def spans(self, *args):
        '''
        Check whether this response is valid at a time or overlaps a span.

        Called with no argument: always ``True``. With one argument ``t``:
        ``True`` if ``t`` lies inside the validity window. With two
        arguments ``(tmin, tmax)``: ``True`` if the window overlaps the
        given span. Open window bounds (``None``) match anything.
        '''
        nargs = len(args)

        if nargs == 0:
            return True

        if nargs == 1:
            t = args[0]
            starts_before = self.tmin is None or self.tmin <= t
            ends_after = self.tmax is None or t <= self.tmax
            return starts_before and ends_after

        if nargs == 2:
            overlaps_end = self.tmin is None or args[1] >= self.tmin
            overlaps_start = self.tmax is None or self.tmax >= args[0]
            return overlaps_end and overlaps_start
class B(A):
    '''B description'''

    # Scalar property; defaults to 0.
    b = Float.T(default=0, help='the b property')
    # List of integers; no default given, so it starts empty.
    c = List.T(Int.T(), help='the c')
class MTQTSource(gf.SourceWithMagnitude):
    """
    A moment tensor point source.

    Notes
    -----
    Following Q-T parameterization after Tape & Tape 2015
    """

    discretized_source_class = meta.DiscretizedMTSource

    u = Float.T(
        default=0.,
        help='Lune co-latitude transformed to grid.'
             'Defined: 0 <= u <=3/4pi')

    v = Float.T(
        default=0.,
        help='Lune co-longitude transformed to grid.'
             'Definded: -1/3 <= v <= 1/3')

    kappa = Float.T(
        default=0.,
        help='Strike angle equivalent of moment tensor plane.'
             'Defined: 0 <= kappa <= 2pi')

    sigma = Float.T(
        default=0.,
        help='Rake angle equivalent of moment tensor slip angle.'
             'Defined: -pi/2 <= sigma <= pi/2')

    h = Float.T(
        default=0.,
        help='Dip angle equivalent of moment tensor plane.'
             'Defined: 0 <= h <= 1')

    def __init__(self, **kwargs):
        # Tabulate u(beta) on a dense grid; beta(u) has no closed form, so
        # the `beta` property below inverts it by linear interpolation.
        n = 1000
        self._beta_mapping = num.linspace(0, pi, n)
        self._u_mapping = \
            (3. / 4. * self._beta_mapping) - \
            (1. / 2. * num.sin(2. * self._beta_mapping)) + \
            (1. / 16. * num.sin(4. * self._beta_mapping))

        # Fixed matrix used to map the lune direction vector to the three
        # moment tensor eigenvalues (scaled by 1/sqrt(6) in `lune_lambda`).
        self.lambda_factor_matrix = num.array(
            [[sqrt3, -1., sqrt2],
             [0., 2., sqrt2],
             [-sqrt3, -1., sqrt2]], dtype='float64')

        # Elementary rotation-matrix factories, keyed by axis name.
        # NOTE(review): self.R is dropped in __getstate__ and rebuilt in
        # __setstate__ — presumably it is not picklable; confirm.
        self.R = get_rotation_matrix()
        self.roty_pi4 = self.R['y'](-pi4)
        self.rotx_pi = self.R['x'](pi)

        # Scratch matrix reused by the `lune_lambda_matrix` property.
        self._lune_lambda_matrix = num.zeros((3, 3), dtype='float64')

        Source.__init__(self, **kwargs)

    @property
    def gamma(self):
        """
        Lune co-longitude, dependent on v.
        """
        return (1. / 3.) * num.arcsin(3. * self.v)

    @property
    def beta(self):
        """
        Lune co-latitude, dependent on u (inverted by interpolation
        against the table built in __init__).
        """
        return num.interp(self.u, self._u_mapping, self._beta_mapping)

    # NOTE(review): unlike `gamma` and `beta`, `delta` is NOT a @property —
    # callers must invoke it as a method. Confirm this asymmetry is intended.
    def delta(self):
        """
        From Tape & Tape 2012, delta measures departure of MT being DC
        Delta = Gamma = 0 yields pure DC
        """
        return (pi / 2.) - self.beta

    @property
    def theta(self):
        # Dip angle derived from the h parameter.
        return num.arccos(self.h)

    @property
    def rot_theta(self):
        return self.R['x'](self.theta)

    @property
    def rot_kappa(self):
        return self.R['z'](-self.kappa)

    @property
    def rot_sigma(self):
        return self.R['z'](self.sigma)

    @property
    def lune_lambda(self):
        """
        Moment tensor eigenvalues from the lune coordinates (gamma, beta).
        """
        sin_beta = num.sin(self.beta)
        cos_beta = num.cos(self.beta)
        sin_gamma = num.sin(self.gamma)
        cos_gamma = num.cos(self.gamma)
        vec = num.array([sin_beta * cos_gamma, sin_beta * sin_gamma, cos_beta])
        return 1. / sqrt6 * self.lambda_factor_matrix.dot(vec)

    @property
    def lune_lambda_matrix(self):
        # Diagonal eigenvalue matrix; reuses the scratch array from __init__.
        num.fill_diagonal(self._lune_lambda_matrix, self.lune_lambda)
        return self._lune_lambda_matrix

    @property
    def rot_V(self):
        # Combined orientation rotation: strike, dip, rake.
        return self.rot_kappa.dot(self.rot_theta).dot(self.rot_sigma)

    @property
    def rot_U(self):
        return self.rot_V.dot(self.roty_pi4)

    @property
    def m9_nwu(self):
        """
        MT orientation is in NWU
        """
        # Similarity transform of the eigenvalue matrix by rot_U.
        return self.rot_U.dot(self.lune_lambda_matrix).dot(
            num.linalg.inv(self.rot_U))

    @property
    def m9(self):
        """
        Pyrocko MT in NED
        """
        # Convert NWU -> NED by conjugating with a pi rotation about x.
        return self.rotx_pi.dot(self.m9_nwu).dot(self.rotx_pi.T)

    @property
    def m6(self):
        # Six-component (Voigt-like) form of the full 3x3 tensor.
        return mtm.to6(self.m9)

    @property
    def m6_astuple(self):
        return tuple(self.m6.ravel().tolist())

    def base_key(self):
        # Extend the base cache key with the tensor components so distinct
        # mechanisms never share cached Green's function sums.
        return Source.base_key(self) + self.m6_astuple

    def discretize_basesource(self, store, target=None):
        # Discretize the source time function on the store's sampling and
        # scale the unit tensor by the scalar moment from the magnitude.
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        m0 = mtm.magnitude_to_moment(self.magnitude)
        m6s = self.m6 * m0
        return meta.DiscretizedMTSource(
            m6s=m6s[num.newaxis, :] * amplitudes[:, num.newaxis],
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self):
        return mtm.MomentTensor(m=mtm.symmat6(*self.m6_astuple) * self.moment)

    def pyrocko_event(self, **kwargs):
        # NOTE(review): pyrocko_moment_tensor() is computed twice here (once
        # as `mt`, once inline) — harmless but redundant.
        mt = self.pyrocko_moment_tensor()
        return Source.pyrocko_event(
            self,
            moment_tensor=self.pyrocko_moment_tensor(),
            magnitude=float(mt.moment_magnitude()),
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        # NOTE(review): this sets `m6`, which is not a declared parameter of
        # this class (it defines u, v, kappa, sigma, h) — presumably handled
        # or ignored by the base class constructor; confirm.
        d = {}
        mt = ev.moment_tensor
        if mt:
            d.update(m6=list(map(float, mt.m6())))

        d.update(kwargs)
        return super(MTQTSource, cls).from_pyrocko_event(ev, **d)

    def __getstate__(self):
        # Drop the rotation-factory object for pickling; it is restored in
        # __setstate__.
        state = self.__dict__.copy()
        state['R'] = None
        return state

    def __setstate__(self, state):
        self.__dict__.update(state)
        self.R = get_rotation_matrix()
class FDSNStationXML(Object):
    '''Top-level type for Station XML. Required field are Source (network ID
    of the institution sending the message) and one or more Network
    containers or one or more Station containers.'''

    schema_version = Float.T(default=1.0, xmlstyle='attribute')
    source = String.T(xmltagname='Source')
    sender = String.T(optional=True, xmltagname='Sender')
    module = String.T(optional=True, xmltagname='Module')
    module_uri = String.T(optional=True, xmltagname='ModuleURI')
    created = Timestamp.T(xmltagname='Created')
    network_list = List.T(Network.T(xmltagname='Network'))

    xmltagname = 'FDSNStationXML'

    def get_pyrocko_stations(self, nslcs=None, nsls=None, time=None,
                             timespan=None, inconsistencies='warn'):
        '''
        Convert the station metadata to a list of
        :py:class:`pyrocko.model.Station` objects.

        :param nslcs: if given, restrict to these (net, sta, loc, cha) codes
        :param nsls: if given, restrict to these (net, sta, loc) codes
        :param time: single instant the epochs must span
        :param timespan: ``(tmin, tmax)`` the epochs must overlap
        :param inconsistencies: ``'raise'`` or ``'warn'`` on conflicting
            channel metadata within one station
        '''
        assert inconsistencies in ('raise', 'warn')

        if nslcs is not None:
            nslcs = set(nslcs)

        if nsls is not None:
            nsls = set(nsls)

        tt = ()
        if time is not None:
            tt = (time,)
        elif timespan is not None:
            tt = timespan

        pstations = []
        for network in self.network_list:
            if not network.spans(*tt):
                continue

            for station in network.station_list:
                if not station.spans(*tt):
                    continue

                if station.channel_list:
                    # Group channels per location code, then emit one
                    # pyrocko station per (net, sta, loc).
                    loc_to_channels = {}
                    for channel in station.channel_list:
                        if not channel.spans(*tt):
                            continue

                        loc = channel.location_code.strip()
                        loc_to_channels.setdefault(loc, []).append(channel)

                    for loc in sorted(loc_to_channels.keys()):
                        channels = loc_to_channels[loc]
                        if nslcs is not None:
                            channels = [
                                channel for channel in channels
                                if (network.code, station.code, loc,
                                    channel.code) in nslcs]

                        if not channels:
                            continue

                        nsl = network.code, station.code, loc
                        if nsls is not None and nsl not in nsls:
                            continue

                        pstations.append(
                            pyrocko_station_from_channels(
                                nsl, channels,
                                inconsistencies=inconsistencies))
                else:
                    # Station without channels: emit a wildcard-location
                    # station from the station-level coordinates.
                    pstations.append(model.Station(
                        network.code, station.code, '*',
                        lat=station.latitude.value,
                        lon=station.longitude.value,
                        elevation=value_or_none(station.elevation),
                        name=station.description or ''))

        return pstations

    @classmethod
    def from_pyrocko_stations(cls, pyrocko_stations):
        '''
        Generate :py:class:`FDSNStationXML` from list of
        :py:class:`pyrocko.model.Station` instances.

        :param pyrocko_stations: list of
            :py:class:`pyrocko.model.Station` instances.
        '''
        from collections import defaultdict
        network_dict = defaultdict(list)
        for s in pyrocko_stations:
            network, station, location = s.nsl()
            channel_list = []
            for c in s.channels:
                channel_list.append(
                    Channel(
                        location_code=location,
                        code=c.name,
                        latitude=Latitude(value=s.lat),
                        longitude=Longitude(value=s.lon),
                        elevation=Distance(value=s.elevation),
                        depth=Distance(value=s.depth),
                        azimuth=Azimuth(value=c.azimuth),
                        dip=Dip(value=c.dip)))

            network_dict[network].append(
                Station(
                    code=station,
                    latitude=Latitude(value=s.lat),
                    longitude=Longitude(value=s.lon),
                    elevation=Distance(value=s.elevation),
                    channel_list=channel_list))

        timestamp = time.time()
        network_list = []
        for k, station_list in network_dict.items():
            network_list.append(
                Network(
                    code=k,
                    station_list=station_list,
                    total_number_stations=len(station_list)))

        sxml = FDSNStationXML(
            source='from pyrocko stations list',
            created=timestamp,
            network_list=network_list)

        sxml.validate()
        return sxml

    def iter_network_stations(
            self, net=None, sta=None, time=None, timespan=None):
        '''
        Yield ``(network, station)`` pairs matching the given code and
        time constraints.
        '''
        tt = ()
        if time is not None:
            tt = (time,)
        elif timespan is not None:
            tt = timespan

        for network in self.network_list:
            if not network.spans(*tt) or (
                    net is not None and network.code != net):
                continue

            for station in network.station_list:
                if not station.spans(*tt) or (
                        sta is not None and station.code != sta):
                    continue

                yield (network, station)

    def iter_network_station_channels(
            self, net=None, sta=None, loc=None, cha=None,
            time=None, timespan=None):
        '''
        Yield ``(network, station, channel)`` triples matching the given
        code and time constraints.
        '''
        if loc is not None:
            loc = loc.strip()

        tt = ()
        if time is not None:
            tt = (time,)
        elif timespan is not None:
            tt = timespan

        for network in self.network_list:
            if not network.spans(*tt) or (
                    net is not None and network.code != net):
                continue

            for station in network.station_list:
                if not station.spans(*tt) or (
                        sta is not None and station.code != sta):
                    continue

                if station.channel_list:
                    for channel in station.channel_list:
                        if (not channel.spans(*tt) or
                                (cha is not None and channel.code != cha) or
                                (loc is not None and
                                 channel.location_code.strip() != loc)):
                            continue

                        yield (network, station, channel)

    def get_channel_groups(self, net=None, sta=None, loc=None, cha=None,
                           time=None, timespan=None):
        '''
        Group channels by ``(net, sta, loc)`` and by (band+instrument code,
        input unit); groups with inconsistent sampling rates are dropped
        with a warning.
        '''
        groups = {}
        for network, station, channel in self.iter_network_station_channels(
                net, sta, loc, cha, time=time, timespan=timespan):

            net = network.code
            sta = station.code
            cha = channel.code
            loc = channel.location_code.strip()
            if len(cha) == 3:
                bic = cha[:2]  # band and instrument code according to SEED
            elif len(cha) == 1:
                bic = ''
            else:
                bic = cha

            if channel.response and \
                    channel.response.instrument_sensitivity and \
                    channel.response.instrument_sensitivity.input_units:

                unit = \
                    channel.response.instrument_sensitivity.input_units.name
            else:
                unit = None

            bic = (bic, unit)

            k = net, sta, loc
            if k not in groups:
                groups[k] = {}

            if bic not in groups[k]:
                groups[k][bic] = []

            groups[k][bic].append(channel)

        # Weed out groups whose channels disagree on sampling rate.
        # (was .iteritems(): Python 2 only — the rest of this class is
        # Python 3)
        for nsl, bic_to_channels in groups.items():
            bad_bics = []
            for bic, channels in bic_to_channels.items():
                sample_rates = []
                for channel in channels:
                    sample_rates.append(channel.sample_rate.value)

                if not same(sample_rates):
                    scs = ','.join(channel.code for channel in channels)
                    srs = ', '.join('%e' % x for x in sample_rates)
                    err = 'ignoring channels with inconsistent sampling ' + \
                        'rates (%s.%s.%s.%s: %s)' % (nsl + (scs, srs))

                    logger.warning(err)
                    bad_bics.append(bic)

            for bic in bad_bics:
                del bic_to_channels[bic]

        return groups

    def choose_channels(
            self,
            target_sample_rate=None,
            priority_band_code=['H', 'B', 'M', 'L', 'V', 'E', 'S'],
            priority_units=['M/S', 'M/S**2'],
            priority_instrument_code=['H', 'L'],
            time=None,
            timespan=None):
        '''
        Pick, per ``(net, sta, loc)``, the most suitable channel group by
        band code, unit and instrument-code priority and sampling rate, and
        return a dict mapping nslc tuples to channel objects.
        '''
        nslcs = {}
        for nsl, bic_to_channels in self.get_channel_groups(
                time=time, timespan=timespan).items():

            useful_bics = []
            for bic, channels in bic_to_channels.items():
                rate = channels[0].sample_rate.value

                if target_sample_rate is not None and \
                        rate < target_sample_rate*0.99999:
                    continue

                if len(bic[0]) == 2:
                    if bic[0][0] not in priority_band_code:
                        continue

                    if bic[0][1] not in priority_instrument_code:
                        continue

                unit = bic[1]

                # Lower priority index == preferred; unknown values sort
                # last via the list length fallback.
                prio_unit = len(priority_units)
                try:
                    prio_unit = priority_units.index(unit)
                except ValueError:
                    pass

                prio_inst = len(priority_instrument_code)
                prio_band = len(priority_band_code)
                if len(channels[0].code) == 3:
                    try:
                        prio_inst = priority_instrument_code.index(
                            channels[0].code[1])
                    except ValueError:
                        pass

                    try:
                        prio_band = priority_band_code.index(
                            channels[0].code[0])
                    except ValueError:
                        pass

                if target_sample_rate is None:
                    # No target: prefer the highest rate (negate for sort).
                    rate = -rate

                useful_bics.append((-len(channels), prio_band, rate,
                                    prio_unit, prio_inst, bic))

            useful_bics.sort()

            for _, _, rate, _, _, bic in useful_bics:
                channels = sorted(bic_to_channels[bic])
                if channels:
                    for channel in channels:
                        nslcs[nsl + (channel.code,)] = channel

                    break

        return nslcs

    def get_pyrocko_response(self, nslc, time=None, timespan=None,
                             fake_input_units=None):
        '''
        Return the single pyrocko response for ``nslc``.

        :raises NoResponseInformation: if no response matches
        :raises MultipleResponseInformation: if more than one matches
        '''
        net, sta, loc, cha = nslc
        resps = []
        for _, _, channel in self.iter_network_station_channels(
                net, sta, loc, cha, time=time, timespan=timespan):
            resp = channel.response
            if resp:
                resps.append(resp.get_pyrocko_response(
                    nslc, fake_input_units=fake_input_units))

        if not resps:
            raise NoResponseInformation('%s.%s.%s.%s' % nslc)
        elif len(resps) > 1:
            raise MultipleResponseInformation('%s.%s.%s.%s' % nslc)

        return resps[0]

    @property
    def n_code_list(self):
        # Sorted unique network codes.
        return sorted(set(x.code for x in self.network_list))

    @property
    def ns_code_list(self):
        # Sorted unique (network, station) code pairs.
        nss = set()
        for network in self.network_list:
            for station in network.station_list:
                nss.add((network.code, station.code))

        return sorted(nss)

    @property
    def nsl_code_list(self):
        # Sorted unique (network, station, location) code triples.
        nsls = set()
        for network in self.network_list:
            for station in network.station_list:
                for channel in station.channel_list:
                    nsls.add(
                        (network.code, station.code, channel.location_code))

        return sorted(nsls)

    @property
    def nslc_code_list(self):
        # Sorted unique (network, station, location, channel) code tuples.
        nslcs = set()
        for network in self.network_list:
            for station in network.station_list:
                for channel in station.channel_list:
                    nslcs.add((network.code, station.code,
                               channel.location_code, channel.code))

        return sorted(nslcs)

    def summary(self):
        '''
        Return a short multi-line text summary of the code counts.
        '''
        entries = [
            'number of n codes: %i' % len(self.n_code_list),
            'number of ns codes: %i' % len(self.ns_code_list),
            'number of nsl codes: %i' % len(self.nsl_code_list),
            'number of nslc codes: %i' % len(self.nslc_code_list)
        ]
        return '\n'.join(entries)
class Map(Object):
    '''
    GMT-based map with topography, coastlines, cities, labels and
    tectonic-plate overlays. Geometry and GMT state are set up lazily on
    first access of :py:attr:`gmt` and friends.
    '''
    lat = Float.T(optional=True)
    lon = Float.T(optional=True)
    radius = Float.T(optional=True)
    width = Float.T(default=20.)
    height = Float.T(default=14.)
    margins = List.T(Float.T())
    illuminate = Bool.T(default=True)
    skip_feature_factor = Float.T(default=0.02)
    show_grid = Bool.T(default=False)
    show_topo = Bool.T(default=True)
    show_topo_scale = Bool.T(default=False)
    show_center_mark = Bool.T(default=False)
    show_rivers = Bool.T(default=True)
    show_plates = Bool.T(default=False)
    illuminate_factor_land = Float.T(default=0.5)
    illuminate_factor_ocean = Float.T(default=0.25)
    color_wet = Tuple.T(3, Int.T(), default=(216, 242, 254))
    color_dry = Tuple.T(3, Int.T(), default=(172, 208, 165))
    topo_resolution_min = Float.T(
        default=40.,
        help='minimum resolution of topography [dpi]')
    topo_resolution_max = Float.T(
        default=200.,
        help='maximum resolution of topography [dpi]')
    replace_topo_color_only = FloatTile.T(
        optional=True,
        help='replace topo color while keeping topographic shading')
    topo_cpt_wet = String.T(default='light_sea')
    topo_cpt_dry = String.T(default='light_land')
    axes_layout = String.T(optional=True)
    custom_cities = List.T(City.T())
    gmt_config = Dict.T(String.T(), String.T())
    comment = String.T(optional=True)

    def __init__(self, gmtversion='newest', **kwargs):
        Object.__init__(self, **kwargs)
        self._gmt = None
        self._scaler = None
        self._widget = None
        # fix: _layout and _pxyr were never initialized, so the `layout`
        # and `pxyr` properties raised AttributeError before _setup().
        self._layout = None
        self._pxyr = None
        self._corners = None
        self._wesn = None
        self._minarea = None
        self._coastline_resolution = None
        self._rivers = None
        self._dems = None
        self._have_topo_land = None
        self._have_topo_ocean = None
        self._jxyr = None
        self._prep_topo_have = None
        self._labels = []
        self._area_labels = []
        self._gmtversion = gmtversion

    def save(self, outpath, resolution=75., oversample=2., size=None,
             width=None, height=None):
        '''
        Save the image.

        Save the image to ``outpath``. The format is determined by the
        filename extension. Formats are handled as follows: ``'.eps'`` and
        ``'.ps'`` produce EPS and PS, respectively, directly with GMT. If
        the file name ends with ``'.pdf'``, GMT output is fed through
        ``gmtpy-epstopdf`` to create a PDF file. For any other filename
        extension, output is first converted to PDF with
        ``gmtpy-epstopdf``, then with ``pdftocairo`` to PNG with a
        resolution oversampled by the factor ``oversample`` and finally
        the PNG is downsampled and converted to the target format with
        ``convert``. The resolution of rasterized target image can be
        controlled either by ``resolution`` in DPI or by specifying
        ``width`` or ``height`` or ``size``, where the latter fits the
        image into a square with given side length.
        '''
        gmt = self.gmt
        self.draw_labels()
        self.draw_axes()
        if self.show_topo and self.show_topo_scale:
            self._draw_topo_scale()

        gmt.save(outpath, resolution=resolution, oversample=oversample,
                 size=size, width=width, height=height)

    @property
    def scaler(self):
        if self._scaler is None:
            self._setup_geometry()

        return self._scaler

    @property
    def wesn(self):
        if self._wesn is None:
            self._setup_geometry()

        return self._wesn

    @property
    def widget(self):
        if self._widget is None:
            self._setup()

        return self._widget

    @property
    def layout(self):
        if self._layout is None:
            self._setup()

        return self._layout

    @property
    def jxyr(self):
        if self._jxyr is None:
            self._setup()

        return self._jxyr

    @property
    def pxyr(self):
        if self._pxyr is None:
            self._setup()

        return self._pxyr

    @property
    def gmt(self):
        if self._gmt is None:
            self._setup()

        if self._have_topo_ocean is None:
            self._draw_background()

        return self._gmt

    def _setup(self):
        if not self._widget:
            self._setup_geometry()

        self._setup_lod()
        self._setup_gmt()

    def _setup_geometry(self):
        # Derive the geographic region from center, radius and the page
        # aspect ratio (after subtracting margins).
        wpage, hpage = self.width, self.height
        ml, mr, mt, mb = self._expand_margins()
        wpage -= ml + mr
        hpage -= mt + mb

        wreg = self.radius * 2.0
        hreg = self.radius * 2.0
        if wpage >= hpage:
            wreg *= wpage/hpage
        else:
            hreg *= hpage/wpage

        self._wreg = wreg
        self._hreg = hreg

        self._corners = corners(self.lon, self.lat, wreg, hreg)
        west, east, south, north = extent(self.lon, self.lat, wreg, hreg, 10)

        x, y, z = ((west, east), (south, north), (-6000., 4500.))

        xax = gmtpy.Ax(mode='min-max', approx_ticks=4.)
        yax = gmtpy.Ax(mode='min-max', approx_ticks=4.)
        zax = gmtpy.Ax(mode='min-max', inc=1000., label='Height',
                       scaled_unit='km', scaled_unit_factor=0.001)

        scaler = gmtpy.ScaleGuru(data_tuples=[(x, y, z)], axes=(xax, yax, zax))

        par = scaler.get_params()

        west = par['xmin']
        east = par['xmax']
        south = par['ymin']
        north = par['ymax']

        self._wesn = west, east, south, north
        self._scaler = scaler

    def _setup_lod(self):
        # Choose coastline resolution, river drawing and DEM datasets
        # appropriate for the map's radius.
        w, e, s, n = self._wesn
        if self.radius > 1500.*km:
            coastline_resolution = 'i'
            rivers = False
        else:
            coastline_resolution = 'f'
            rivers = True

        self._minarea = (self.skip_feature_factor * self.radius/km)**2

        self._coastline_resolution = coastline_resolution
        self._rivers = rivers

        self._prep_topo_have = {}
        self._dems = {}

        cm2inch = gmtpy.cm/gmtpy.inch

        dmin = 2.0 * self.radius * m2d / (
            self.topo_resolution_max * (self.height * cm2inch))
        dmax = 2.0 * self.radius * m2d / (
            self.topo_resolution_min * (self.height * cm2inch))

        for k in ['ocean', 'land']:
            self._dems[k] = topo.select_dem_names(k, dmin, dmax, self._wesn)
            if self._dems[k]:
                logger.debug('using topography dataset %s for %s'
                             % (','.join(self._dems[k]), k))

    def _expand_margins(self):
        '''
        Normalize ``self.margins`` to ``(left, right, top, bottom)`` [cm].

        Accepts 1, 2 or 4 values; any other count (including 0) falls back
        to 2 cm on all sides.
        '''
        nmargins = len(self.margins)
        if nmargins == 1:
            ml = mr = mt = mb = self.margins[0]
        elif nmargins == 2:
            ml = mr = self.margins[0]
            mt = mb = self.margins[1]
        elif nmargins == 4:
            ml, mr, mt, mb = self.margins
        else:
            # fix: a 3-element margins list previously fell through every
            # branch and raised UnboundLocalError; treat it like 0 or >4.
            ml = mr = mt = mb = 2.0

        return ml, mr, mt, mb

    def _setup_gmt(self):
        w, h = self.width, self.height
        scaler = self._scaler

        if gmtpy.is_gmt5(self._gmtversion):
            gmtconf = dict(
                MAP_TICK_PEN_PRIMARY='1.25p',
                MAP_TICK_PEN_SECONDARY='1.25p',
                MAP_TICK_LENGTH_PRIMARY='0.2c',
                MAP_TICK_LENGTH_SECONDARY='0.6c',
                FONT_ANNOT_PRIMARY='12p,1,black',
                FONT_LABEL='12p,1,black',
                PS_CHAR_ENCODING='ISOLatin1+',
                MAP_FRAME_TYPE='fancy',
                FORMAT_GEO_MAP='D',
                PS_MEDIA='Custom_%ix%i' % (
                    w*gmtpy.cm,
                    h*gmtpy.cm),
                PS_PAGE_ORIENTATION='portrait',
                MAP_GRID_PEN_PRIMARY='thinnest,0/50/0',
                MAP_ANNOT_OBLIQUE='6')
        else:
            gmtconf = dict(
                TICK_PEN='1.25p',
                TICK_LENGTH='0.2c',
                ANNOT_FONT_PRIMARY='1',
                ANNOT_FONT_SIZE_PRIMARY='12p',
                LABEL_FONT='1',
                LABEL_FONT_SIZE='12p',
                CHAR_ENCODING='ISOLatin1+',
                BASEMAP_TYPE='fancy',
                PLOT_DEGREE_FORMAT='D',
                PAPER_MEDIA='Custom_%ix%i' % (
                    w*gmtpy.cm,
                    h*gmtpy.cm),
                GRID_PEN_PRIMARY='thinnest/0/50/0',
                DOTS_PR_INCH='1200',
                OBLIQUE_ANNOTATION='6')

        gmtconf.update(
            (k.upper(), v) for (k, v) in self.gmt_config.items())

        gmt = gmtpy.GMT(config=gmtconf, version=self._gmtversion)

        layout = gmt.default_layout()
        layout.set_fixed_margins(*[x*cm for x in self._expand_margins()])

        widget = layout.get_widget()
        widget['P'] = widget['J']
        widget['J'] = ('-JA%g/%g' % (self.lon, self.lat)) + '/%(width)gp'
        scaler['R'] = '-R%g/%g/%g/%gr' % self._corners

        # aspect = gmtpy.aspect_for_projection(
        #     gmt.installation['version'], *(widget.J() + scaler.R()))

        aspect = self._map_aspect(jr=widget.J() + scaler.R())
        widget.set_aspect(aspect)

        self._gmt = gmt
        self._layout = layout
        self._widget = widget
        self._jxyr = self._widget.JXY() + self._scaler.R()
        self._pxyr = self._widget.PXY() + [
            '-R%g/%g/%g/%g' % (0, widget.width(), 0, widget.height())]
        self._have_drawn_axes = False
        self._have_drawn_labels = False

    def _draw_background(self):
        self._have_topo_land = False
        self._have_topo_ocean = False
        if self.show_topo:
            self._have_topo = self._draw_topo()

        self._draw_basefeatures()

    def _get_topo_tile(self, k):
        # Try the configured DEMs in order until one yields data.
        t = None
        demname = None
        for dem in self._dems[k]:
            t = topo.get(dem, self._wesn)
            demname = dem
            if t is not None:
                break

        if not t:
            raise NoTopo()

        return t, demname

    def _prep_topo(self, k):
        # Prepare (and cache) the topo grid and illumination arguments for
        # surface kind ``k`` ('ocean' or 'land').
        gmt = self._gmt
        t, demname = self._get_topo_tile(k)

        if demname not in self._prep_topo_have:

            grdfile = gmt.tempfilename()

            is_flat = num.all(t.data[0] == t.data)

            gmtpy.savegrd(
                t.x(), t.y(), t.data, filename=grdfile, naming='lonlat')

            if self.illuminate and not is_flat:
                if k == 'ocean':
                    factor = self.illuminate_factor_ocean
                else:
                    factor = self.illuminate_factor_land

                ilumfn = gmt.tempfilename()
                gmt.grdgradient(
                    grdfile,
                    N='e%g' % factor,
                    A=-45,
                    G=ilumfn,
                    out_discard=True)

                ilumargs = ['-I%s' % ilumfn]
            else:
                ilumargs = []

            if self.replace_topo_color_only:
                t2 = self.replace_topo_color_only
                grdfile2 = gmt.tempfilename()

                gmtpy.savegrd(
                    t2.x(), t2.y(), t2.data, filename=grdfile2,
                    naming='lonlat')

                if gmt.is_gmt5():
                    gmt.grdsample(
                        grdfile2,
                        G=grdfile,
                        n='l',
                        I='%g/%g' % (t.dx, t.dy),  # noqa
                        R=grdfile,
                        out_discard=True)
                else:
                    gmt.grdsample(
                        grdfile2,
                        G=grdfile,
                        Q='l',
                        I='%g/%g' % (t.dx, t.dy),  # noqa
                        R=grdfile,
                        out_discard=True)

                gmt.grdmath(
                    grdfile, '0.0', 'AND', '=', grdfile2,
                    out_discard=True)

                grdfile = grdfile2

            self._prep_topo_have[demname] = grdfile, ilumargs

        return self._prep_topo_have[demname]

    def _draw_topo(self):
        # Clip to ocean, draw ocean topo, then clip to land and draw land
        # topo; track which of the two succeeded.
        widget = self._widget
        scaler = self._scaler
        gmt = self._gmt
        cres = self._coastline_resolution
        minarea = self._minarea

        JXY = widget.JXY()
        R = scaler.R()

        try:
            grdfile, ilumargs = self._prep_topo('ocean')
            gmt.pscoast(D=cres, S='c', A=minarea, *(JXY+R))
            gmt.grdimage(grdfile, C=topo.cpt(self.topo_cpt_wet),
                         *(ilumargs+JXY+R))
            gmt.pscoast(Q=True, *(JXY+R))
            self._have_topo_ocean = True
        except NoTopo:
            self._have_topo_ocean = False

        try:
            grdfile, ilumargs = self._prep_topo('land')
            gmt.pscoast(D=cres, G='c', A=minarea, *(JXY+R))
            gmt.grdimage(grdfile, C=topo.cpt(self.topo_cpt_dry),
                         *(ilumargs+JXY+R))
            gmt.pscoast(Q=True, *(JXY+R))
            self._have_topo_land = True
        except NoTopo:
            self._have_topo_land = False

    def _draw_topo_scale(self, label='Elevation [km]'):
        # Combine wet and dry palettes into one colorbar below the map.
        dry = read_cpt(topo.cpt(self.topo_cpt_dry))
        wet = read_cpt(topo.cpt(self.topo_cpt_wet))
        combi = cpt_merge_wet_dry(wet, dry)
        for level in combi.levels:
            level.vmin /= km
            level.vmax /= km

        topo_cpt = self.gmt.tempfilename() + '.cpt'
        write_cpt(combi, topo_cpt)

        (w, h), (xo, yo) = self.widget.get_size()
        self.gmt.psscale(
            D='%gp/%gp/%gp/%gph' % (xo + 0.5*w, yo - 2.0*gmtpy.cm, w,
                                    0.5*gmtpy.cm),
            C=topo_cpt,
            B='1:%s:' % label)

    def _draw_basefeatures(self):
        gmt = self._gmt
        cres = self._coastline_resolution
        rivers = self._rivers
        minarea = self._minarea

        color_wet = self.color_wet
        color_dry = self.color_dry

        if self.show_rivers and rivers:
            rivers = ['-Ir/0.25p,%s' % gmtpy.color(self.color_wet)]
        else:
            rivers = []

        # Only fill land/sea where no topography was drawn.
        fill = {}
        if not self._have_topo_land:
            fill['G'] = color_dry

        if not self._have_topo_ocean:
            fill['S'] = color_wet

        gmt.pscoast(
            D=cres,
            W='thinnest,%s' % gmtpy.color(darken(gmtpy.color_tup(color_dry))),
            A=minarea,
            *(rivers+self._jxyr), **fill)

        if self.show_plates:
            self.draw_plates()

    def _draw_axes(self):
        gmt = self._gmt
        scaler = self._scaler
        widget = self._widget

        if self.axes_layout is None:
            if self.lat > 0.0:
                axes_layout = 'WSen'
            else:
                axes_layout = 'WseN'
        else:
            axes_layout = self.axes_layout

        scale_km = gmtpy.nice_value(self.radius/5.) / 1000.

        if self.show_center_mark:
            gmt.psxy(
                in_rows=[[self.lon, self.lat]],
                S='c20p', W='2p,black',
                *self._jxyr)

        if self.show_grid:
            btmpl = ('%(xinc)gg%(xinc)g:%(xlabel)s:/'
                     '%(yinc)gg%(yinc)g:%(ylabel)s:')
        else:
            btmpl = '%(xinc)g:%(xlabel)s:/%(yinc)g:%(ylabel)s:'

        gmt.psbasemap(
            B=(btmpl % scaler.get_params())+axes_layout,
            L=('x%gp/%gp/%g/%g/%gk' % (
                6./7*widget.width(),
                widget.height()/7.,
                self.lon,
                self.lat,
                scale_km)),
            *self._jxyr)

        if self.comment:
            # Place the comment text in the bottom-right page corner.
            font_size = self.gmt.label_font_size()

            if gmt.is_gmt5():
                row = [
                    1, 0,
                    '%gp,%s,%s' % (font_size, 0, 'black'), 'BR',
                    self.comment]

                farg = ['-F+f+j']
            else:
                row = [1, 0, font_size, 0, 0, 'BR', self.comment]
                farg = []

            gmt.pstext(
                in_rows=[row],
                N=True,
                R=(0, 1, 0, 1),
                D='%gp/%gp' % (-font_size*0.2, font_size*0.3),
                *(widget.PXY() + farg))

    def draw_axes(self):
        if not self._have_drawn_axes:
            self._draw_axes()
            self._have_drawn_axes = True

    def _have_coastlines(self):
        gmt = self._gmt
        cres = self._coastline_resolution
        minarea = self._minarea

        checkfile = gmt.tempfilename()

        gmt.pscoast(
            M=True,
            D=cres,
            W='thinnest,black',
            A=minarea,
            out_filename=checkfile,
            *self._jxyr)

        points = []
        with open(checkfile, 'r') as f:
            for line in f:
                ls = line.strip()
                if ls.startswith('#') or ls.startswith('>') or ls == '':
                    continue

                plon, plat = [float(x) for x in ls.split()]
                points.append((plat, plon))

        # fix: num.float was removed in NumPy >= 1.24; use builtin float.
        points = num.array(points, dtype=float)
        return num.any(points_in_region(points, self._wesn))

    def have_coastlines(self):
        self.gmt  # force lazy setup before querying
        return self._have_coastlines()

    def project(self, lats, lons, jr=None):
        '''
        Project geographic coordinates to plot coordinates [points].

        Accepts scalars or sequences; scalars in give scalars out.
        '''
        onepoint = False
        if isinstance(lats, float) and isinstance(lons, float):
            lats = [lats]
            lons = [lons]
            onepoint = True

        if jr is not None:
            j, r = jr
            gmt = gmtpy.GMT(version=self._gmtversion)
        else:
            j, _, _, r = self.jxyr
            gmt = self.gmt

        f = BytesIO()
        gmt.mapproject(j, r, in_columns=(lons, lats), out_stream=f, D='p')
        f.seek(0)
        data = num.loadtxt(f, ndmin=2)
        xs, ys = data.T
        if onepoint:
            xs = xs[0]
            ys = ys[0]

        return xs, ys

    def _map_box(self, jr=None):
        # Projected width/height of the map region [points].
        ll_lon, ll_lat, ur_lon, ur_lat = self._corners

        xs_corner, ys_corner = self.project(
            (ll_lat, ur_lat), (ll_lon, ur_lon), jr=jr)

        w = xs_corner[1] - xs_corner[0]
        h = ys_corner[1] - ys_corner[0]

        return w, h

    def _map_aspect(self, jr=None):
        w, h = self._map_box(jr=jr)
        return h/w

    def _draw_labels(self):
        # Greedy label placement: try four anchor positions per label,
        # penalize overlaps, then drop labels that cannot be placed.
        points_taken = []
        regions_taken = []

        def no_points_in_rect(xs, ys, xmin, ymin, xmax, ymax):
            xx = not num.any(la(la(xmin < xs, xs < xmax),
                                la(ymin < ys, ys < ymax)))
            return xx

        def roverlaps(a, b):
            return (a[0] < b[2] and b[0] < a[2] and
                    a[1] < b[3] and b[1] < a[3])

        w, h = self._map_box()

        label_font_size = self.gmt.label_font_size()

        if self._labels:
            n = len(self._labels)

            lons, lats, texts, sx, sy, colors, fonts, font_sizes, styles = \
                list(zip(*self._labels))

            font_sizes = [
                (font_size or label_font_size) for font_size in font_sizes]

            # fix: num.float was removed in NumPy >= 1.24.
            sx = num.array(sx, dtype=float)
            sy = num.array(sy, dtype=float)

            xs, ys = self.project(lats, lons)

            points_taken.append((xs, ys))

            dxs = num.zeros(n)
            dys = num.zeros(n)

            for i in range(n):
                dx, dy = gmtpy.text_box(
                    texts[i], font=fonts[i], font_size=font_sizes[i],
                    **styles[i])

                dxs[i] = dx
                dys[i] = dy

            la = num.logical_and
            anchors_ok = (
                la(xs + sx + dxs < w, ys + sy + dys < h),
                la(xs - sx - dxs > 0., ys - sy - dys > 0.),
                la(xs + sx + dxs < w, ys - sy - dys > 0.),
                la(xs - sx - dxs > 0., ys + sy + dys < h),
            )

            arects = [
                (xs, ys, xs + sx + dxs, ys + sy + dys),
                (xs - sx - dxs, ys - sy - dys, xs, ys),
                (xs, ys - sy - dys, xs + sx + dxs, ys),
                (xs - sx - dxs, ys, xs, ys + sy + dys)]

            for i in range(n):
                for ianch in range(4):
                    anchors_ok[ianch][i] &= no_points_in_rect(
                        xs, ys, *[xxx[i] for xxx in arects[ianch]])

            anchor_choices = []
            anchor_take = []
            for i in range(n):
                choices = [ianch for ianch in range(4)
                           if anchors_ok[ianch][i]]
                anchor_choices.append(choices)
                if choices:
                    anchor_take.append(choices[0])
                else:
                    anchor_take.append(None)

            def cost(anchor_take):
                # Number of pairwise label-rectangle overlaps.
                noverlaps = 0
                for i in range(n):
                    for j in range(n):
                        if i != j:
                            i_take = anchor_take[i]
                            j_take = anchor_take[j]
                            if i_take is None or j_take is None:
                                continue

                            r_i = [xxx[i] for xxx in arects[i_take]]
                            r_j = [xxx[j] for xxx in arects[j_take]]
                            if roverlaps(r_i, r_j):
                                noverlaps += 1

                return noverlaps

            cur_cost = cost(anchor_take)
            imax = 30
            while cur_cost != 0 and imax > 0:
                for i in range(n):
                    for t in anchor_choices[i]:
                        anchor_take_new = list(anchor_take)
                        anchor_take_new[i] = t
                        new_cost = cost(anchor_take_new)
                        if new_cost < cur_cost:
                            anchor_take = anchor_take_new
                            cur_cost = new_cost

                imax -= 1

            while cur_cost != 0:
                # Still overlapping: drop labels one by one.
                for i in range(n):
                    anchor_take_new = list(anchor_take)
                    anchor_take_new[i] = None
                    new_cost = cost(anchor_take_new)
                    if new_cost < cur_cost:
                        anchor_take = anchor_take_new
                        cur_cost = new_cost
                        break

            anchor_strs = ['BL', 'TR', 'TL', 'BR']

            for i in range(n):
                ianchor = anchor_take[i]
                color = colors[i]
                if color is None:
                    color = 'black'

                if ianchor is not None:
                    regions_taken.append([xxx[i] for xxx in arects[ianchor]])

                    anchor = anchor_strs[ianchor]

                    yoff = [-sy[i], sy[i]][anchor[0] == 'B']
                    xoff = [-sx[i], sx[i]][anchor[1] == 'L']
                    if self.gmt.is_gmt5():
                        row = (
                            lons[i], lats[i],
                            '%i,%s,%s' % (font_sizes[i], fonts[i], color),
                            anchor,
                            texts[i])

                        farg = ['-F+f+j']
                    else:
                        row = (
                            lons[i], lats[i],
                            font_sizes[i], 0, fonts[i], anchor,
                            texts[i])
                        farg = ['-G%s' % color]

                    self.gmt.pstext(
                        in_rows=[row],
                        D='%gp/%gp' % (xoff, yoff),
                        *(self.jxyr + farg),
                        **styles[i])

        if self._area_labels:
            for lons, lats, text, color, font, font_size, style in \
                    self._area_labels:

                if font_size is None:
                    font_size = label_font_size

                if color is None:
                    color = 'black'

                if self.gmt.is_gmt5():
                    farg = ['-F+f+j']
                else:
                    farg = ['-G%s' % color]

                xs, ys = self.project(lats, lons)
                dx, dy = gmtpy.text_box(
                    text, font=font, font_size=font_size, **style)

                rects = [xs-0.5*dx, ys-0.5*dy, xs+0.5*dx, ys+0.5*dy]

                # fix: num.bool was removed in NumPy >= 1.24.
                locs_ok = num.ones(xs.size, dtype=bool)

                for iloc in range(xs.size):
                    rcandi = [xxx[iloc] for xxx in rects]

                    locs_ok[iloc] = True
                    locs_ok[iloc] &= (
                        0 < rcandi[0] and rcandi[2] < w and
                        0 < rcandi[1] and rcandi[3] < h)

                    overlap = False
                    for r in regions_taken:
                        if roverlaps(r, rcandi):
                            overlap = True
                            break

                    locs_ok[iloc] &= not overlap

                    for xs_taken, ys_taken in points_taken:
                        locs_ok[iloc] &= no_points_in_rect(
                            xs_taken, ys_taken, *rcandi)

                        if not locs_ok[iloc]:
                            break

                rows = []
                for iloc, (lon, lat) in enumerate(zip(lons, lats)):
                    if not locs_ok[iloc]:
                        continue

                    if self.gmt.is_gmt5():
                        row = (
                            lon, lat,
                            '%i,%s,%s' % (font_size, font, color),
                            'MC',
                            text)
                    else:
                        row = (
                            lon, lat,
                            font_size, 0, font, 'MC',
                            text)

                    rows.append(row)

                    regions_taken.append([xxx[iloc] for xxx in rects])
                    break

                self.gmt.pstext(
                    in_rows=rows,
                    *(self.jxyr + farg),
                    **style)

    def draw_labels(self):
        self.gmt  # force lazy setup
        if not self._have_drawn_labels:
            self._draw_labels()
            self._have_drawn_labels = True

    def add_label(
            self, lat, lon, text,
            offset_x=5., offset_y=5.,
            color=None,
            font='1',
            font_size=None,
            style={}):
        '''
        Queue a point label; it is drawn on :py:meth:`draw_labels`.
        '''
        if 'G' in style:
            style = style.copy()
            color = style.pop('G')

        self._labels.append(
            (lon, lat, text, offset_x, offset_y, color, font, font_size,
             style))

    def add_area_label(
            self, lat, lon, text,
            color=None,
            font='3',
            font_size=None,
            style={}):
        '''
        Queue an area label (placed at the best of the candidate points).
        '''
        self._area_labels.append(
            (lon, lat, text, color, font, font_size, style))

    def cities_in_region(self):
        from pyrocko.dataset import geonames
        cities = geonames.get_cities_region(region=self.wesn, minpop=0)
        cities.extend(self.custom_cities)
        cities.sort(key=lambda x: x.population)
        return cities

    def draw_cities(self,
                    exact=None,
                    include=[],
                    exclude=[],
                    nmax_soft=10,
                    psxy_style=dict(S='s5p', G='black')):
        '''
        Draw city markers, raising the population threshold until at most
        roughly ``nmax_soft`` cities remain (unless ``exact`` is given).
        '''
        cities = self.cities_in_region()
        if exact is not None:
            cities = [c for c in cities if c.name in exact]
            minpop = None
        else:
            cities = [c for c in cities if c.name not in exclude]
            minpop = 10**3
            for minpop_new in [1e3, 3e3, 1e4, 3e4, 1e5, 3e5, 1e6, 3e6, 1e7]:
                cities_new = [
                    c for c in cities
                    if c.population > minpop_new or c.name in include]

                if len(cities_new) == 0 or (
                        len(cities_new) < 3 and len(cities) < nmax_soft*2):
                    break

                cities = cities_new
                minpop = minpop_new
                if len(cities) <= nmax_soft:
                    break

        if cities:
            lats = [c.lat for c in cities]
            lons = [c.lon for c in cities]

            self.gmt.psxy(
                in_columns=(lons, lats),
                *self.jxyr, **psxy_style)

            for c in cities:
                try:
                    # Fall back to the ASCII name if the name cannot be
                    # represented in the PostScript encoding.
                    text = c.name.encode('iso-8859-1').decode('iso-8859-1')
                except UnicodeEncodeError:
                    text = c.asciiname

                self.add_label(c.lat, c.lon, text)

        self._cities_minpop = minpop

    def add_stations(self, stations, psxy_style=dict(S='t8p', G='black')):
        lats = [s.lat for s in stations]
        lons = [s.lon for s in stations]

        self.gmt.psxy(
            in_columns=(lons, lats),
            *self.jxyr, **psxy_style)

        for station in stations:
            self.add_label(station.lat, station.lon, '.'.join(
                x for x in (station.network, station.station) if x))

    def add_kite_scene(self, scene):
        # NOTE(review): this only builds and returns the tile — it does not
        # draw anything; confirm the caller is expected to use the result.
        tile = FloatTile(
            scene.frame.llLon,
            scene.frame.llLat,
            scene.frame.dLon,
            scene.frame.dLat,
            scene.displacement)

        return tile

    def add_gnss_campaign(self, campaign, psxy_style=dict(), labels=True):
        '''
        Draw GNSS displacement vectors with ``psvelo``; station codes are
        included when ``labels`` is true.
        '''
        offsets = num.array([math.sqrt(s.east.shift**2 + s.north.shift**2)
                             for s in campaign.stations])
        scale = 1./offsets.max()

        # fix: the original dict literal had the key 'h' twice.
        default_psxy_style = {
            'h': 2,
            'W': '0.5p,black',
            't': '30',
            'G': 'black',
            'L': True,
            'S': 'e%d/0.95/8' % scale,
        }
        default_psxy_style.update(psxy_style)

        # fix: the branches were swapped — the station code column was
        # omitted exactly when labels were requested.
        if labels:
            rows = [(s.lon, s.lat,
                     s.east.shift, s.north.shift,
                     s.east.sigma, s.north.sigma, 0,
                     s.code)
                    for s in campaign.stations]
        else:
            rows = [(s.lon, s.lat,
                     s.east.shift, s.north.shift,
                     s.east.sigma, s.north.sigma, 0)
                    for s in campaign.stations]

        kwargs = dict(default_psxy_style)

        # fix: kwargs.update(self.jxyr) tried to update a dict from a list
        # of GMT option strings (raises ValueError); pass them positionally
        # as everywhere else in this class.
        self.gmt.psvelo(in_rows=rows, *self.jxyr, **kwargs)

    def draw_plates(self):
        from pyrocko.dataset import tectonics

        # Sample a regular grid over the map to find which plates are
        # visible and where to place their labels.
        neast = 20
        nnorth = max(1, int(round(num.round(self._hreg/self._wreg * neast))))
        norths = num.linspace(-self._hreg*0.5, self._hreg*0.5, nnorth)
        easts = num.linspace(-self._wreg*0.5, self._wreg*0.5, neast)
        norths2 = num.repeat(norths, neast)
        easts2 = num.tile(easts, nnorth)
        lats, lons = od.ne_to_latlon(
            self.lat, self.lon, norths2, easts2)

        bird = tectonics.PeterBird2003()
        plates = bird.get_plates()

        color_plates = gmtpy.color('aluminium5')
        color_velocities = gmtpy.color('skyblue1')
        color_velocities_lab = gmtpy.color(darken(gmtpy.color_tup('skyblue1')))

        points = num.vstack((lats, lons)).T
        used = []
        for plate in plates:
            mask = plate.contains_points(points)
            if num.any(mask):
                used.append((plate, mask))

        if len(used) > 1:

            candi_fixed = {}

            label_data = []
            for plate, mask in used:

                mean_north = num.mean(norths2[mask])
                mean_east = num.mean(easts2[mask])
                iorder = num.argsort(num.sqrt(
                    (norths2[mask] - mean_north)**2 +
                    (easts2[mask] - mean_east)**2))

                lat_candis = lats[mask][iorder]
                lon_candis = lons[mask][iorder]

                candi_fixed[plate.name] = lat_candis.size

                label_data.append((
                    lat_candis, lon_candis, plate, color_plates))

            boundaries = bird.get_boundaries()

            size = 2

            psxy_kwargs = []

            for boundary in boundaries:
                if num.any(points_in_region(boundary.points, self._wesn)):
                    for typ, part in boundary.split_types(
                            [['SUB'],
                             ['OSR', 'OTF', 'OCB', 'CTF', 'CCB', 'CRB']]):

                        lats, lons = part.T

                        kwargs = {}
                        if typ[0] == 'SUB':
                            if boundary.kind == '\\':
                                kwargs['S'] = 'f%g/%gp+t+r' % (
                                    0.45*size, 3.*size)
                            elif boundary.kind == '/':
                                kwargs['S'] = 'f%g/%gp+t+l' % (
                                    0.45*size, 3.*size)

                            kwargs['G'] = color_plates

                        kwargs['in_columns'] = (lons, lats)
                        # fix: a stray trailing comma made 'W' a 1-tuple.
                        kwargs['W'] = '%gp,%s' % (size, color_plates)

                        psxy_kwargs.append(kwargs)

                        if boundary.kind == '\\':
                            if boundary.name2 in candi_fixed:
                                candi_fixed[boundary.name2] += neast*nnorth

                        elif boundary.kind == '/':
                            if boundary.name1 in candi_fixed:
                                candi_fixed[boundary.name1] += neast*nnorth

            candi_fixed = sorted(
                list(candi_fixed.keys()), key=lambda name: -candi_fixed[name])
            candi_fixed.append(None)

            gsrm = tectonics.GSRM1()

            # fix: fixed_plate_name could be referenced unbound below when
            # no candidate plate is known to GSRM.
            fixed_plate_name = None
            for name in candi_fixed:
                if name not in gsrm.plate_names() \
                        and name not in gsrm.plate_alt_names():

                    continue

                lats, lons, vnorth, veast, vnorth_err, veast_err, corr = \
                    gsrm.get_velocities(name, region=self._wesn)

                fixed_plate_name = name

                self.gmt.psvelo(
                    in_columns=(
                        lons, lats, veast, vnorth, veast_err, vnorth_err,
                        corr),
                    W='0.25p,%s' % color_velocities,
                    A='9p+e+g%s' % color_velocities,
                    S='e0.2p/0.95/10',
                    *self.jxyr)

                for _ in range(len(lons) // 50 + 1):
                    ii = random.randint(0, len(lons)-1)
                    v = math.sqrt(vnorth[ii]**2 + veast[ii]**2)
                    self.add_label(
                        lats[ii], lons[ii], '%.0f' % v,
                        font_size=0.7*self.gmt.label_font_size(),
                        style=dict(
                            G=color_velocities_lab))

                break

            for (lat_candis, lon_candis, plate, color) in label_data:
                full_name = bird.full_name(plate.name)
                if plate.name == fixed_plate_name:
                    full_name = '@_' + full_name + '@_'

                self.add_area_label(
                    lat_candis, lon_candis, full_name,
                    color=color,
                    font='3')

            for kwargs in psxy_kwargs:
                self.gmt.psxy(*self.jxyr, **kwargs)
class QSeisConfigFull(QSeisConfig):
    '''Complete configuration for a QSEIS run, including all per-run
    parameters (receiver geometry, earth models, source mechanism) needed to
    render the FORTRAN input file via :py:meth:`string_for_config`.'''

    time_start = Float.T(default=0.0)
    time_reduction_velocity = Float.T(default=0.0)
    time_window = Float.T(default=900.0)

    source_depth = Float.T(default=10.0)
    source_mech = QSeisSourceMech.T(
        optional=True,
        default=QSeisSourceMechMT.D())

    receiver_depth = Float.T(default=0.0)
    receiver_distances = List.T(Float.T())
    nsamples = Int.T(default=256)

    gf_sw_source_types = Tuple.T(6, Int.T(), default=(1, 1, 1, 1, 0, 0))

    gf_filenames = Tuple.T(6, String.T(), default=qseis_greenf_names)

    seismogram_filename = String.T(default='seis')

    receiver_azimuths = List.T(Float.T())

    earthmodel_1d = gf.meta.Earthmodel1D.T(optional=True)
    earthmodel_receiver_1d = gf.meta.Earthmodel1D.T(optional=True)

    @staticmethod
    def example():
        '''Return a minimal, runnable example configuration.'''
        conf = QSeisConfigFull()
        conf.receiver_distances = [2000.]
        conf.receiver_azimuths = [0.]
        conf.time_start = -10.0
        conf.time_reduction_velocity = 15.0
        conf.earthmodel_1d = cake.load_model().extract(depth_max='cmb')
        conf.earthmodel_receiver_1d = None
        conf.sw_flat_earth_transform = 1
        return conf

    def get_output_filenames(self, rundir):
        '''Paths of the seismogram output files, one per component.'''
        return [
            pjoin(rundir, self.seismogram_filename + '.t' + c)
            for c in qseis_components]

    def get_output_filenames_gf(self, rundir):
        '''Paths of the Green's function output files (per GF, per
        component).'''
        return [
            pjoin(rundir, fn + '.t' + c)
            for fn in self.gf_filenames for c in qseis_components]

    def string_for_config(self):
        '''Render the QSEIS input file contents as ASCII bytes.

        Fills the embedded qseis06 input-file template with values from this
        configuration. Raises AssertionError if receiver geometry or the
        earth model is missing or inconsistent.
        '''

        def aggregate(l):
            # count of items plus their config strings, each on its own line
            return len(l), '\n'.join([''] + [x.string_for_config() for x in l])

        assert len(self.receiver_distances) > 0
        assert len(self.receiver_distances) == len(self.receiver_azimuths)
        assert self.earthmodel_1d is not None

        # template substitution draws directly on the instance attribute dict
        d = self.__dict__.copy()

        # fixing these switches here to reduce the amount of wrapper code
        d['sw_distance_unit'] = 1    # always give distances in [km]
        d['sw_t_reduce'] = 1         # time reduction always as velocity [km/s]
        d['sw_equidistant'] = 0      # always give all distances and azimuths
        d['sw_irregular_azimuths'] = 1

        d['n_distances'] = len(self.receiver_distances)
        d['str_distances'] = str_float_vals(self.receiver_distances)
        d['str_azimuths'] = str_float_vals(self.receiver_azimuths)

        model_str, nlines = cake_model_to_config(self.earthmodel_1d)
        d['n_model_lines'] = nlines
        d['model_lines'] = model_str

        if self.earthmodel_receiver_1d:
            model_str, nlines = cake_model_to_config(
                self.earthmodel_receiver_1d)
        else:
            # zero receiver-side model lines means: same as source site
            model_str = "# no receiver side model"
            nlines = 0

        d['n_model_receiver_lines'] = nlines
        d['model_receiver_lines'] = model_str

        d['str_slowness_window'] = str_float_vals(self.slowness_window)
        d['n_depth_ranges'], d['str_depth_ranges'] = \
            aggregate(self.propagation_filters)

        if self.wavelet_type == 0:  # user wavelet
            d['str_w_samples'] = '\n' \
                + '%i\n' % len(self.user_wavelet_samples) \
                + str_float_vals(self.user_wavelet_samples)
        else:
            d['str_w_samples'] = ''

        if self.receiver_filter:
            d['str_receiver_filter'] = self.receiver_filter.string_for_config(
                self.qseis_version)
        else:
            # neutral filter; the 2006a variant expects a complex constant
            if self.qseis_version == '2006a':
                d['str_receiver_filter'] = '(1.0,0.0)\n0\n#\n0'
            else:
                d['str_receiver_filter'] = '1.0\n0\n#\n0'

        d['str_gf_sw_source_types'] = str_int_vals(self.gf_sw_source_types)
        d['str_gf_filenames'] = str_str_vals(self.gf_filenames)

        if self.source_mech:
            d['str_source'] = '%s \'%s\'' % (
                self.source_mech.string_for_config(),
                self.seismogram_filename)
        else:
            d['str_source'] = '0'

        # NOTE(review): line structure of this template was reconstructed
        # from a whitespace-mangled copy; verify against the qseis06 input
        # format before relying on exact column layout.
        template = '''# autogenerated QSEIS input by qseis.py
#
# This is the input file of FORTRAN77 program "qseis06" for calculation of
# synthetic seismograms based on a layered halfspace earth model.
#
# by
# Rongjiang Wang <*****@*****.**>
# GeoForschungsZentrum Potsdam
# Telegrafenberg, D-14473 Potsdam, Germany
#
# Last modified: Potsdam, Nov., 2006
#
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
# If not specified, SI Unit System is used overall!
#
# Coordinate systems:
# cylindrical (z,r,t) with z = downward,
# r = from source outward,
# t = azmuth angle from north to east;
# cartesian (x,y,z) with x = north,
# y = east,
# z = downward;
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
#
# SOURCE PARAMETERS
# =================
# 1. source depth [km]
#------------------------------------------------------------------------------
%(source_depth)e |dble: source_depth;
#------------------------------------------------------------------------------
#
# RECEIVER PARAMETERS
# ===================
# 1. receiver depth [km]
# 2. switch for distance sampling role (1/0 = equidistant/irregular); switch
# for unit used (1/0 = km/deg)
# 3. number of distance samples
# 4. if equidistant, then start and end trace distance (> 0); else distance
# list (please order the receiver distances from small to large)
# 5. (reduced) time begin [sec] & length of time window [sec], number of time
# samples (<= 2*nfmax in qsglobal.h)
# 6. switch for unit of the following time reduction parameter: 1 = velocity
# [km/sec], 0 = slowness [sec/deg]; time reduction parameter
#------------------------------------------------------------------------------
%(receiver_depth)e |dble: receiver_depth;
%(sw_equidistant)i %(sw_distance_unit)i |int: sw_equidistant, sw_d_unit;
%(n_distances)i |int: no_distances;
%(str_distances)s |dble: d_1,d_n; or d_1,d_2, ...(no comments in between!);
%(time_start)e %(time_window)e %(nsamples)i |dble: t_start,t_window; int: no_t_samples;
%(sw_t_reduce)i %(time_reduction_velocity)e |int: sw_t_reduce; dble: t_reduce;
#------------------------------------------------------------------------------
#
# WAVENUMBER INTEGRATION PARAMETERS
# =================================
# 1. select slowness integration algorithm (0 = suggested for full wave-field
# modelling; 1 or 2 = suggested when using a slowness window with narrow
# taper range - a technique for suppressing space-domain aliasing);
# 2. 4 parameters for low and high slowness (Note 1) cut-offs [s/km] with
# tapering: 0 < slw1 < slw2 defining cosine taper at the lower end, and 0 <
# slw3 < slw4 defining the cosine taper at the higher end. default values
# will be used in case of inconsistent input of the cut-offs (possibly with
# much more computational effort);
# 3. parameter for sampling rate of the wavenumber integration (1 = sampled
# with the spatial Nyquist frequency, 2 = sampled with twice higher than
# the Nyquist, and so on: the larger this parameter, the smaller the space-
# domain aliasing effect, but also the more computation effort);
# 4. the factor for suppressing time domain aliasing (> 0 and <= 1) (Note 2).
#------------------------------------------------------------------------------
%(sw_algorithm)i |int: sw_algorithm;
%(str_slowness_window)s |dble: slw(1-4);
%(wavenumber_sampling)e |dble: sample_rate;
%(aliasing_suppression_factor)e |dble: supp_factor;
#------------------------------------------------------------------------------
#
# OPTIONS FOR PARTIAL SOLUTIONS
# (only applied to the source-site structure)
# ===========================================
#
# 1. switch for filtering free surface effects (0 = with free surface, i.e.,
# do not select this filter; 1 = without free surface; 2 = without free
# surface but with correction on amplitude and wave form. Note switch 2
# can only be used for receivers at the surface)
# 2. switch for filtering waves with a shallow penetration depth (concerning
# their whole trace from source to receiver), penetration depth limit [km]
#
# if this option is selected, waves whose travel path never exceeds the
# given depth limit will be filtered ("seismic nuting"). the condition for
# selecting this filter is that the given shallow path depth limit should
# be larger than both source and receiver depth.
#
# 3. number of depth ranges where the following selected up/down-sp2oing P or
# SV waves should be filtered
# 4. the 1. depth range: upper and lower depth [km], switch for filtering P
# or SV wave in this depth range:
#
# switch no: 1 2 3 4 other
# filtered phase: P(up) P(down) SV(up) SV(down) Error
#
# 5. the 2. ...
#
# The partial solution options are useful tools to increase the numerical
# significance of desired wave phases. Especially when the desired phases
# are smaller than the undesired phases, these options should be selected
# and carefully combined.
#------------------------------------------------------------------------------
%(filter_surface_effects)i |int: isurf;
%(filter_shallow_paths)i %(filter_shallow_paths_depth)e |int: sw_path_filter; dble:shallow_depth_limit;
%(n_depth_ranges)i %(str_depth_ranges)s
#------------------------------------------------------------------------------
#
# SOURCE TIME FUNCTION (WAVELET) PARAMETERS (Note 3)
# ==================================================
# 1. wavelet duration [unit = time sample rather than sec!], that is about
# equal to the half-amplitude cut-off period of the wavelet (> 0. if <= 0,
# then default value = 2 time samples will be used), and switch for the
# wavelet form (0 = user's own wavelet; 1 = default wavelet: normalized
# square half-sinusoid for simulating a physical delta impulse; 2 = tapered
# Heaviside wavelet, i.e. integral of wavelet 1)
# 2. IF user's own wavelet is selected, then number of the wavelet time samples
# (<= 1024), and followed by
# 3. equidistant wavelet time samples
# 4 ...(continue) (! no comment lines allowed between the time sample list!)
# IF default, delete line 2, 3, 4 ... or comment them out!
#------------------------------------------------------------------------------
%(wavelet_duration_samples)e %(wavelet_type)i%(str_w_samples)s
#------------------------------------------------------------------------------
#
# FILTER PARAMETERS OF RECEIVERS (SEISMOMETERS OR HYDROPHONES)
# ============================================================
# 1. constant coefficient (normalization factor)
# 2. number of roots (<= nrootmax in qsglobal.h)
# 3. list of the root positions in the complex format (Re,Im). If no roots,
# comment out this line
# 4. number of poles (<= npolemax in qsglobal.h)
# 5. list of the pole positions in the complex format (Re,Im). If no poles,
# comment out this line
#------------------------------------------------------------------------------
%(str_receiver_filter)s
#------------------------------------------------------------------------------
#
# OUTPUT FILES FOR GREEN'S FUNCTIONS (Note 4)
# ===========================================
# 1. selections of source types (yes/no = 1/0)
# 2. file names of Green's functions (please give the names without extensions,
# which will be appended by the program automatically: *.tz, *.tr, *.tt
# and *.tv are for the vertical, radial, tangential, and volume change (for
# hydrophones) components, respectively)
#------------------------------------------------------------------------------
# explosion strike-slip dip-slip clvd single_f_v single_f_h
#------------------------------------------------------------------------------
%(str_gf_sw_source_types)s
%(str_gf_filenames)s
#------------------------------------------------------------------------------
# OUTPUT FILES FOR AN ARBITRARY POINT DISLOCATION SOURCE
# (for applications to earthquakes)
# ======================================================
# 1. selection (0 = not selected; 1 or 2 = selected), if (selection = 1), then
# the 6 moment tensor elements [N*m]: Mxx, Myy, Mzz, Mxy, Myz, Mzx (x is
# northward, y is eastward and z is downard); else if (selection = 2), then
# Mis [N*m] = isotropic moment part = (MT+MN+MP)/3, Mcl = CLVD moment part
# = (2/3)(MT+MP-2*MN), Mdc = double-couple moment part = MT-MN, Strike [deg],
# Dip [deg] and Rake [deg].
#
# Note: to use this option, the Green's functions above should be computed
# (selection = 1) if they do not exist already.
#
# north(x)
# /
# /\\ strike
# *-----------------------> east(y)
# |\\ \\
# |-\\ \\
# | \\ fault plane \\
# |90 \\ \\
# |-dip\\ \\
# | \\ \\
# | \\ \\
# downward(z) \\-----------------------\\\\
#
# 2. switch for azimuth distribution of the stations (0 = uniform azimuth,
# else = irregular azimuth angles)
# 3. list of the azimuth angles [deg] for all stations given above (if the
# uniform azimuth is selected, then only one azimuth angle is required)
#
#------------------------------------------------------------------------------
# Mis Mcl Mdc Strike Dip Rake File
#------------------------------------------------------------------------------
# 2 0.00 1.00 6.0E+19 120.0 30.0 25.0 'seis'
#------------------------------------------------------------------------------
# Mxx Myy Mzz Mxy Myz Mzx File
#------------------------------------------------------------------------------
%(str_source)s
%(sw_irregular_azimuths)i %(str_azimuths)s
#------------------------------------------------------------------------------
#
# GLOBAL MODEL PARAMETERS (Note 5)
# ================================
# 1. switch for flat-earth-transform
# 2. gradient resolution [%%] of vp, vs, and ro (density), if <= 0, then default
# values (depending on wave length at cut-off frequency) will be used
#------------------------------------------------------------------------------
%(sw_flat_earth_transform)i |int: sw_flat_earth_transform;
%(gradient_resolution_vp)e %(gradient_resolution_vs)e %(gradient_resolution_density)e |dble: vp_res, vs_res, ro_res;
#------------------------------------------------------------------------------
#
# LAYERED EARTH MODEL
# (SHALLOW SOURCE + UNIFORM DEEP SOURCE/RECEIVER STRUCTURE)
# =========================================================
# 1. number of data lines of the layered model (source site)
#------------------------------------------------------------------------------
%(n_model_lines)i |int: no_model_lines;
#------------------------------------------------------------------------------
#
# MULTILAYERED MODEL PARAMETERS (source site)
# ===========================================
# no depth[km] vp[km/s] vs[km/s] ro[g/cm^3] qp qs
#------------------------------------------------------------------------------
%(model_lines)s
#------------------------------------------------------------------------------
#
# LAYERED EARTH MODEL
# (ONLY THE SHALLOW RECEIVER STRUCTURE)
# =====================================
# 1. number of data lines of the layered model
#
# Note: if the number = 0, then the receiver site is the same as the
# source site, else different receiver-site structure is considered.
# please be sure that the lowest interface of the receiver-site
# structure given given below can be found within the source-site
# structure, too.
#
#------------------------------------------------------------------------------
%(n_model_receiver_lines)i |int: no_model_lines;
#------------------------------------------------------------------------------
#
# MULTILAYERED MODEL PARAMETERS (shallow receiver-site structure)
# ===============================================================
# no depth[km] vp[km/s] vs[km/s] ro[g/cm^3] qp qs
#------------------------------------------------------------------------------
%(model_receiver_lines)s
#---------------------------------end of all inputs----------------------------

Note 1:

The slowness is defined by inverse value of apparent wave velocity =
sin(i)/v with i = incident angle and v = true wave velocity.

Note 2:

The suppression of the time domain aliasing is achieved by using the
complex frequency technique. The suppression factor should be a value
between 0 and 1. If this factor is set to 0.1, for example, the aliasing
phase at the reduced time begin is suppressed to 10%%.

Note 3:

The default basic wavelet function (option 1) is (2/tau)*sin^2(pi*t/tau),
for 0 < t < tau, simulating physical delta impuls. Its half-amplitude cut-off
frequency is 1/tau. To avoid high-frequency noise, tau should not be smaller
than 4-5 time samples.

Note 4:

Double-Couple m11/ m22/ m33/ m12/ m23/ m31 Azimuth_Factor_(tz,tr,tv)/(tt)
============================================================================
explosion 1.0/ 1.0/ 1.0/ -- / -- / -- 1.0 / 0.0
strike-slip -- / -- / -- / 1.0/ -- / -- sin(2*azi) / cos(2*azi)
 1.0/-1.0/ -- / -- / -- / -- cos(2*azi) / -sin(2*azi)
dip-slip -- / -- / -- / -- / -- / 1.0 cos(azi) / sin(azi)
 -- / -- / -- / -- / 1.0/ -- sin(azi) / -cos(azi)
clvd -0.5/-0.5/ 1.0/ -- / -- / -- 1.0 / 0.0
============================================================================
Single-Force fx / fy / fz Azimuth_Factor_(tz,tr,tv)/(tt)
============================================================================
fz -- / -- / 1.0 1.0 / 0.0
fx 1.0/ -- / -- cos(azi) / sin(azi)
fy -- / 1.0/ -- sin(azi) / -cos(azi)
============================================================================

Note 5:

Layers with a constant gradient will be discretized with a number of
homogeneous sublayers. The gradient resolutions are then used to determine
the maximum allowed thickness of the sublayers. If the resolutions of Vp,
Vs and Rho (density) require different thicknesses, the smallest is first
chosen. If this is even smaller than 1%% of the characteristic wavelength,
then the latter is taken finally for the sublayer thickness.
'''  # noqa

        return (template % d).encode('ascii')
class FitsWaveformEnsemblePlot(PlotConfig):
    ''' Plot showing all waveform fits for the ensemble of solutions'''

    name = 'fits_waveform_ensemble'
    size_cm = Tuple.T(
        2, Float.T(),
        default=(9., 5.),
        help='width and length of the figure in cm')
    nx = Int.T(
        default=1,
        help='horizontal number of subplots on every page')
    ny = Int.T(
        default=1,
        help='vertical number of subplots on every page')
    misfit_cutoff = Float.T(
        optional=True,
        help='Plot fits for models up to this misfit value')
    color_parameter = String.T(
        default='misfit',
        help='Choice of value to color, options: dist and misfit')
    font_size = Float.T(
        default=8,
        help='Font Size of all fonts, except title')
    font_size_title = Float.T(
        default=10,
        help='Font size of title')

    def make(self, environ):
        '''Register the figure group with the plot collection manager.'''
        cm = environ.get_plot_collection_manager()
        mpl_init(fontsize=self.font_size)
        environ.setup_modelling()
        ds = environ.get_dataset()
        history = environ.get_history(subset='harvest')
        optimiser = environ.get_optimiser()

        cm.create_group_mpl(
            self,
            self.draw_figures(ds, history, optimiser),
            title=u'Waveform fits for the ensemble',
            section='fits',
            feather_icon='activity',
            description=u'''
Plot showing waveform (attribute) fits for the ensemble of solutions.

Waveform fits for every nth model in the ensemble of bootstrap solutions.
Depending on the target configuration different types of comparisons are
possible: (i) time domain waveform differences, (ii) amplitude spectra, (iii)
envelopes, (iv) cross correlation functions. Each waveform plot gives a number
of details:

1) Target information (left side, from top to bottom) gives station name with
component, distance to source, azimuth of station with respect to source,
target weight, target misfit and starting time of the waveform relative to the
origin time.

2) The background gray area shows the applied taper function.

3) The waveforms shown are: the restituted and filtered observed trace without
tapering (light grey) and the same trace with tapering and processing (dark
gray), the synthetic trace (light red) and the filtered, tapered and (if
enabled) shifted and processed synthetic trace (colored). The colors of the
synthetic traces indicate how well the corresponding models fit in the global
weighting scheme (when all bootstrap weights are equal), from better fit (red)
to worse fit (blue). The amplitudes of the traces are scaled according to the
target weight (small weight, small amplitude) and normed relative to the
maximum amplitude of the targets of the corresponding normalisation family.

4) The bottom panel shows, depending on the type of comparison, sample-wise
residuals for time domain comparisons (red filled), spectra of observed and
synthetic traces for amplitude spectrum comparisons, or cross correlation
traces.''')

    def draw_figures(self, ds, history, optimiser):
        '''Yield ``(PlotItem, Figure)`` pairs, one per page of waveform-fit
        subplots, for every nth model of the (sorted) solution ensemble.'''
        color_parameter = self.color_parameter
        misfit_cutoff = self.misfit_cutoff
        fontsize = self.font_size
        fontsize_title = self.font_size_title
        nxmax = self.nx
        nymax = self.ny

        problem = history.problem

        for target in problem.targets:
            target.set_dataset(ds)

        # map each target to its (start, end) slice in the misfit vector
        target_index = {}
        i = 0
        for target in problem.targets:
            target_index[target] = i, i+target.nmisfits
            i += target.nmisfits

        # worst-to-best ordering so the best models are drawn last (on top)
        gms = history.get_sorted_primary_misfits()[::-1]
        models = history.get_sorted_primary_models()[::-1]

        if misfit_cutoff is not None:
            ibest = gms < misfit_cutoff
            gms = gms[ibest]
            models = models[ibest]

        # subsample: every 10th model of the ensemble
        gms = gms[::10]
        models = models[::10]

        nmodels = models.shape[0]
        if color_parameter == 'dist':
            mx = num.mean(models, axis=0)
            cov = num.cov(models.T)
            mdists = core.mahalanobis_distance(models, mx, cov)
            icolor = meta.ordersort(mdists)

        elif color_parameter == 'misfit':
            iorder = num.arange(nmodels)
            icolor = iorder

        elif color_parameter in problem.parameter_names:
            ind = problem.name_to_index(color_parameter)
            icolor = problem.extract(models, ind)

        # NOTE(review): no else-branch — an unknown color_parameter leaves
        # `icolor` unbound and fails later with NameError; confirm inputs
        # are validated upstream.

        target_to_results = defaultdict(list)
        all_syn_trs = []

        # dtraces[imodel] holds one difference/cc trace (or None) per target
        dtraces = []
        for imodel in range(nmodels):
            model = models[imodel, :]
            source = problem.get_source(model)
            results = problem.evaluate(model)

            dtraces.append([])

            for target, result in zip(problem.targets, results):
                w = target.get_combined_weight()

                if isinstance(result, gf.SeismosizerError) or \
                        not isinstance(target, WaveformMisfitTarget) or \
                        not num.all(num.isfinite(w)):
                    dtraces[-1].extend([None] * target.nmisfits)
                    continue

                itarget, itarget_end = target_index[target]
                assert itarget_end == itarget + 1

                if target.misfit_config.domain == 'cc_max_norm':
                    tref = (
                        result.filtered_obs.tmin + result.filtered_obs.tmax) \
                        * 0.5

                    # normalise obs and syn in place and apply time shifts
                    for tr_filt, tr_proc, tshift in (
                            (result.filtered_obs,
                             result.processed_obs,
                             0.),
                            (result.filtered_syn,
                             result.processed_syn,
                             result.tshift)):

                        norm = num.sum(num.abs(tr_proc.ydata)) \
                            / tr_proc.data_len()
                        tr_filt.ydata /= norm
                        tr_proc.ydata /= norm

                        tr_filt.shift(tshift)
                        tr_proc.shift(tshift)

                    ctr = result.cc
                    ctr.shift(tref)

                    dtrace = ctr

                else:
                    # weight-scale all traces of this target in place
                    for tr in (
                            result.filtered_obs,
                            result.filtered_syn,
                            result.processed_obs,
                            result.processed_syn):

                        tr.ydata *= w

                    if result.tshift is not None and result.tshift != 0.0:
                        # result.filtered_syn.shift(result.tshift)
                        result.processed_syn.shift(result.tshift)

                    dtrace = make_norm_trace(
                        result.processed_syn, result.processed_obs,
                        problem.norm_exponent)

                target_to_results[target].append(result)

                dtrace.meta = dict(
                    normalisation_family=target.normalisation_family,
                    path=target.path)

                dtraces[-1].append(dtrace)

                result.processed_syn.meta = dict(
                    normalisation_family=target.normalisation_family,
                    path=target.path)

                all_syn_trs.append(result.processed_syn)

        if not all_syn_trs:
            # NOTE(review): logger.warn is a deprecated alias of
            # logger.warning.
            logger.warn('No traces to show!')
            return

        def skey(tr):
            # traces are grouped by (normalisation family, target path)
            return tr.meta['normalisation_family'], tr.meta['path']

        trace_minmaxs = trace.minmax(all_syn_trs, skey)

        dtraces_all = []
        for dtraces_group in dtraces:
            dtraces_all.extend(dtraces_group)

        dminmaxs = trace.minmax([
            dtrace_ for dtrace_ in dtraces_all if dtrace_ is not None], skey)

        # normalise residual traces per group to [-1, 1]
        for tr in dtraces_all:
            if tr:
                dmin, dmax = dminmaxs[skey(tr)]
                tr.ydata /= max(abs(dmin), abs(dmax))

        cg_to_targets = meta.gather(
            problem.waveform_targets,
            lambda t: (t.path, t.codes[3]),
            filter=lambda t: t in target_to_results)

        cgs = sorted(cg_to_targets.keys())

        from matplotlib import colors
        cmap = cm.ScalarMappable(
            norm=colors.Normalize(
                vmin=num.min(icolor),
                vmax=num.max(icolor)),
            cmap=plt.get_cmap('coolwarm'))

        imodel_to_color = []
        for imodel in range(nmodels):
            imodel_to_color.append(cmap.to_rgba(icolor[imodel]))

        for cg in cgs:
            targets = cg_to_targets[cg]

            frame_to_target, nx, ny, nxx, nyy = layout(
                source, targets, nxmax, nymax)

            # one figure per (page-row, page-column)
            figures = {}
            for iy in range(ny):
                for ix in range(nx):
                    if (iy, ix) not in frame_to_target:
                        continue

                    ixx = ix // nxmax
                    iyy = iy // nymax
                    if (iyy, ixx) not in figures:
                        title = '_'.join(x for x in cg if x)
                        item = PlotItem(
                            name='fig_%s_%i_%i' % (title, ixx, iyy))
                        item.attributes['targets'] = []
                        figures[iyy, ixx] = (
                            item, plt.figure(figsize=self.size_inch))

                        figures[iyy, ixx][1].subplots_adjust(
                            left=0.03,
                            right=1.0 - 0.03,
                            bottom=0.03,
                            top=1.0 - 0.06,
                            wspace=0.2,
                            hspace=0.2)

                    item, fig = figures[iyy, ixx]

                    target = frame_to_target[iy, ix]

                    item.attributes['targets'].append(target.string_id())

                    amin, amax = trace_minmaxs[
                        target.normalisation_family, target.path]
                    absmax = max(abs(amin), abs(amax))

                    ny_this = nymax  # min(ny, nymax)
                    nx_this = nxmax  # min(nx, nxmax)
                    i_this = (iy % ny_this) * nx_this + (ix % nx_this) + 1

                    axes2 = fig.add_subplot(ny_this, nx_this, i_this)

                    # lower panel (axes2) hosts the residual/cc trace, the
                    # twin (axes) the waveforms themselves
                    space = 0.5
                    space_factor = 1.0 + space
                    axes2.set_axis_off()
                    axes2.set_ylim(-1.05 * space_factor, 1.05)

                    axes = axes2.twinx()
                    axes.set_axis_off()

                    if target.misfit_config.domain == 'cc_max_norm':
                        axes.set_ylim(-10. * space_factor, 10.)
                    else:
                        axes.set_ylim(
                            -absmax * 1.33 * space_factor, absmax * 1.33)

                    itarget, itarget_end = target_index[target]
                    assert itarget_end == itarget + 1

                    for imodel, result in enumerate(
                            target_to_results[target]):

                        syn_color = imodel_to_color[imodel]
                        dtrace = dtraces[imodel][itarget]

                        tap_color_annot = (0.35, 0.35, 0.25)
                        tap_color_edge = (0.85, 0.85, 0.80)
                        tap_color_fill = (0.95, 0.95, 0.90)

                        plot_taper(
                            axes2,
                            result.processed_obs.get_xdata(),
                            result.taper,
                            fc=tap_color_fill, ec=tap_color_edge, alpha=0.2)

                        obs_color = mpl_color('aluminium5')
                        obs_color_light = light(obs_color, 0.5)

                        plot_dtrace(
                            axes2, dtrace, space, 0., 1.,
                            fc='none',
                            ec=syn_color)

                        # plot_trace(
                        #     axes, result.filtered_syn,
                        #     color=syn_color_light, lw=1.0)

                        if imodel == 0:
                            plot_trace(
                                axes, result.filtered_obs,
                                color=obs_color_light, lw=0.75)

                        plot_trace(
                            axes, result.processed_syn,
                            color=syn_color, lw=1.0, alpha=0.3)

                        plot_trace(
                            axes, result.processed_obs,
                            color=obs_color, lw=0.75, alpha=0.3)

                        # annotations below are drawn only for the first
                        # model of the ensemble
                        if imodel != 0:
                            continue
                        xdata = result.filtered_obs.get_xdata()
                        axes.set_xlim(xdata[0], xdata[-1])

                        tmarks = [
                            result.processed_obs.tmin,
                            result.processed_obs.tmax]

                        for tmark in tmarks:
                            axes2.plot(
                                [tmark, tmark], [-0.9, 0.1],
                                color=tap_color_annot)

                        dur = tmarks[1] - tmarks[0]
                        for tmark, text, ha in [
                                (tmarks[0],
                                 '$\\,$ ' + meta.str_duration(
                                    tmarks[0] - source.time),
                                 'left'),
                                (tmarks[1],
                                 '$\\Delta$ ' + meta.str_duration(dur),
                                 'right')]:

                            axes2.annotate(
                                text,
                                xy=(tmark, -0.9),
                                xycoords='data',
                                xytext=(
                                    fontsize * 0.4 * [-1, 1][ha == 'left'],
                                    fontsize * 0.2),
                                textcoords='offset points',
                                ha=ha,
                                va='bottom',
                                color=tap_color_annot,
                                fontsize=fontsize)

                        axes2.set_xlim(
                            tmarks[0] - dur*0.1, tmarks[1] + dur*0.1)

                    scale_string = None

                    if target.misfit_config.domain == 'cc_max_norm':
                        scale_string = 'Syn/obs scales differ!'

                    infos = []
                    if scale_string:
                        infos.append(scale_string)

                    if self.nx == 1 and self.ny == 1:
                        infos.append(target.string_id())
                    else:
                        infos.append('.'.join(x for x in target.codes if x))

                    dist = source.distance_to(target)
                    azi = source.azibazi_to(target)[0]
                    infos.append(meta.str_dist(dist))
                    infos.append(u'%.0f\u00B0' % azi)

                    axes2.annotate(
                        '\n'.join(infos),
                        xy=(0., 1.),
                        xycoords='axes fraction',
                        xytext=(2., 2.),
                        textcoords='offset points',
                        ha='left',
                        va='top',
                        fontsize=fontsize,
                        fontstyle='normal')

                    # single-subplot layout: emit each figure immediately
                    if (self.nx == 1 and self.ny == 1):
                        yield item, fig
                        del figures[iyy, ixx]

            if not (self.nx == 1 and self.ny == 1):
                for (iyy, ixx), (_, fig) in figures.items():
                    title = '.'.join(x for x in cg if x)
                    if len(figures) > 1:
                        title += ' (%i/%i, %i/%i)' % (iyy+1, nyy, ixx+1, nxx)

                    fig.suptitle(title, fontsize=fontsize_title)

            # emit whatever figures remain for this channel group
            for item, fig in figures.values():
                yield item, fig
class NoiseAnalyserResult(AnalyserResult):
    '''Per-target outcome of the noise analysis: a single weight value.'''

    weight = Float.T(
        help=(
            'The inverse of the pre-event data variance or standard '
            'deviation. If traces were checked for other event phase '
            'arrivals, the weight can be zero for contaminated traces.'))
class DataGeneratorBase(Object):
    '''This is the base class for all generators.

    This class provides functionality to dump and load data to/from all
    subclasses into TFRecordDatasets.
    '''
    # Path of the TFRecord file to read examples from (if already written).
    fn_tfrecord = String.T(optional=True)
    noise = Noise.T(optional=True, help='Add noise to feature')

    station_dropout_rate = Float.T(
        default=0.,
        help='Rate by which to mask all channels of station')

    station_dropout_distribution = Bool.T(
        default=True,
        help='If *true*, station dropout will be drawn from a uniform '
             'distribution limited by this station_dropout.')

    nmax = Int.T(optional=True)
    labeled = Bool.T(default=True)
    blacklist = List.T(optional=True, help='List of indices to ignore.')
    random_seed = Int.T(default=0)

    def __init__(self, *args, **kwargs):
        # `config` is not a guts property; pop it before Object.__init__
        # validates the remaining keyword arguments.
        self.config = kwargs.pop('config', None)
        super().__init__(**kwargs)
        # Keep blacklist as a set for O(1) membership tests in filter_iter.
        self.blacklist = set() if not self.blacklist else set(self.blacklist)
        self.n_classes = self.config.n_classes
        self.evolution = 0

    def normalize_label(self, label):
        # Delegate label normalization to the config when labels are used.
        if self.labeled:
            return self.config.normalize_label(label)
        return label

    def set_config(self, pinky_config):
        self.config = pinky_config
        self.setup()

    def setup(self):
        # Hook for subclasses; intentionally a no-op here.
        ...

    def reset(self):
        # Restart the augmentation epoch counter (affects RNG seeding
        # in `generate`).
        self.evolution = 0

    @property
    def tensor_shape(self):
        return self.config.tensor_shape

    @property
    def n_samples(self):
        return self.config._n_samples

    @n_samples.setter
    def n_samples(self, v):
        self.config._n_samples = v

    # NOTE(review): `lru_cache` on a property caches on `self` and keeps
    # instances alive for the lifetime of the cache (ruff B019) — consider
    # `functools.cached_property`; verify no reliance on cross-instance reuse.
    @property
    @lru_cache(maxsize=1)
    def nsl_to_indices(self):
        ''' Returns a dictionary which maps nsl codes to indexing arrays.'''
        indices = OrderedDict()
        for nslc, index in self.nslc_to_index.items():
            # Group channel indices by station (first 3 codes of nslc).
            key = nslc[:3]
            _v = indices.get(key, [])
            _v.append(index)
            indices[key] = _v

        for k in indices.keys():
            indices[k] = num.array(indices[k])

        return indices

    @property
    @lru_cache(maxsize=1)
    def nsl_indices(self):
        ''' Returns a list of index arrays of channels, one per station.'''
        return [v for v in self.nsl_to_indices.values()]

    @property
    def nslc_to_index(self):
        ''' Returns a dictionary which maps nslc codes to trace indices.'''
        d = OrderedDict()
        idx = 0
        for nslc in self.config.channels:
            # Channels matching the config blacklist get no index at all.
            if not util.match_nslc(self.config.blacklist, nslc):
                d[nslc] = idx
                idx += 1
        return d

    def reject_blacklisted(self, tr):
        '''Returns `False` if nslc codes of `tr` match any of the
        blacklisting patterns.

        Otherwise returns `True`'''
        return not util.match_nslc(self.config.blacklist, tr.nslc_id)

    def filter_iter(self, iterator):
        '''Apply *blacklist*ing by example indices

        :param iterator: producing iterator
        '''
        for i, item in enumerate(iterator):
            if i not in self.blacklist:
                yield i, item

    @property
    def generate_output_types(self):
        '''Return data types of features and labels'''
        return tf.float32, tf.float32

    def unpack_examples(self, record_iterator):
        '''Parse examples stored in TFRecordData to `tf.train.Example`'''
        for string_record in record_iterator:
            example = tf.train.Example()
            example.ParseFromString(string_record)

            chunk = example.features.feature['data'].bytes_list.value[0]
            label = example.features.feature['label'].bytes_list.value[0]

            # NOTE(review): num.fromstring is deprecated in favour of
            # num.frombuffer — behaviour is the same for these byte inputs.
            chunk = num.fromstring(chunk, dtype=num.float32)
            chunk = chunk.reshape((self.config.n_channels, -1))

            label = num.fromstring(label, dtype=num.float32)
            yield chunk, label

    @property
    def tstart_data(self):
        # Base class has no notion of a data start time.
        return None

    def iter_chunked(self, tinc):
        # if data has been written to tf records:
        return self.iter_examples_and_labels()

    def iter_examples_and_labels(self):
        '''Subclass this method!

        Yields: feature, label

        Chunks that are all NAN will be skipped.
        '''
        record_iterator = tf.python_io.tf_record_iterator(
            path=self.fn_tfrecord)

        for chunk, label in self.unpack_examples(record_iterator):
            if all_NAN(chunk):
                logger.debug('all NAN. skipping...')
                continue

            yield chunk, label

    def generate_chunked(self, tinc=1):
        '''Takes the output of `iter_examples_and_labels` and applies post
        processing (see: `process_chunk`).
        '''
        for i, (chunk, label) in self.filter_iter(self.iter_chunked(tinc)):
            yield self.process_chunk(chunk), self.normalize_label(label)

    def generate(self, return_gaps=False):
        '''Takes the output of `iter_examples_and_labels` and applies post
        processing (see: `process_chunk`).
        '''
        # Re-seed per epoch so augmentation is reproducible but varies
        # between epochs.
        self.evolution += 1
        num.random.seed(self.random_seed + self.evolution)
        for i, (chunk, label) in self.filter_iter(
                self.iter_examples_and_labels()):
            yield self.process_chunk(chunk, return_gaps=return_gaps), \
                self.normalize_label(label)

    def extract_labels(self):
        '''Overwrite this method!'''
        # Returns None implicitly when labeled; subclasses provide labels.
        if not self.labeled:
            return UNLABELED

    def iter_labels(self):
        '''Iterate through labels.'''
        for i, (_, label) in self.filter_iter(
                self.iter_examples_and_labels()):
            yield label

    @property
    def text_labels(self):
        '''Returns a list of strings to identify the labels.

        Overwrite this method for more meaningful identifiers.'''
        return ['%i' % (i) for i, d in
                self.filter_iter(self.iter_examples_and_labels())]

    def gaps(self):
        '''Returns a list containing the gaps of each example'''
        gaps = []
        for (_, gap), _ in self.generate(return_gaps=True):
            gaps.append(gap)

        return gaps

    def snrs(self, split_factor):
        # Signal-to-noise ratios of all processed examples.
        snrs = []
        for chunk, _ in self.generate():
            snrs.append(snr(chunk, split_factor))
        return snrs

    @property
    def output_shapes(self):
        return (self.config.output_shapes)

    def get_dataset(self):
        # Full example stream as a TF Dataset.
        return tf.data.Dataset.from_generator(
            self.generate,
            self.generate_output_types,
            output_shapes=self.output_shapes)

    def get_chunked_dataset(self, tinc=1.):
        gen = partial(self.generate_chunked, tinc=tinc)
        return tf.data.Dataset.from_generator(
            gen,
            self.generate_output_types,
            output_shapes=self.output_shapes)

    def get_raw_data_chunk(self, shape):
        '''Return an array of size (Nchannels x Nsamples_max) filled with
        NANs.'''
        empty_array = num.empty(shape, dtype=num.float32)
        empty_array.fill(num.nan)
        return empty_array

    def pack_examples(self):
        '''Serialize Examples to strings.'''
        for ydata, label in self.iter_examples_and_labels():
            yield tf.train.Example(
                features=tf.train.Features(
                    feature={
                        'data': _BytesFeature(ydata.tobytes()),
                        'label': _BytesFeature(num.array(
                            label, dtype=num.float32).tobytes()),
                    }))

    def mask(self, chunk, rate):
        '''For data augmentation: Mask traces in chunks with NaNs.

        NaNs will be filled by the imputation method provided by the
        config file.

        :param rate: probability with which traces are NaN-ed
        '''
        # print(rate)
        indices = self.nsl_indices
        a = num.random.random(len(indices))
        i = num.where(a < rate)[0]
        for ii in i:
            # NaN all channels of the selected station at once.
            chunk[indices[ii], :] = num.nan

    def random_trim(self, chunk, margin):
        '''For data augmentation: Randomly trim examples in time domain
        with *margin* seconds.'''
        sample_margin = int(margin / self.config.effective_deltat)
        nstart = num.random.randint(low=0, high=sample_margin)

        _, n_samples = self.config.tensor_shape
        nstop = nstart + n_samples
        # Zero out everything outside the shifted window.
        chunk[:, :nstart] = 0.
        chunk[:, nstop:] = 0.

    def process_chunk(self, chunk, return_gaps=False):
        '''Performs preprocessing of data chunks.'''
        # Order matters: trim -> noise -> normalize -> dropout -> impute.
        if self.config.t_translation_max:
            self.random_trim(chunk, self.config.t_translation_max)

        # add noise
        if self.noise:
            self.noise(chunk)

        # apply normalization
        self.config.normalization(chunk)

        # apply station dropout
        if self.station_dropout_rate:
            if self.station_dropout_distribution:
                self.mask(chunk, num.random.uniform(
                    high=self.station_dropout_rate))
            else:
                self.mask(chunk, self.station_dropout_rate)

        # fill gaps
        if self.config.imputation:
            gaps = num.isnan(chunk)
            chunk[gaps] = self.config.imputation(chunk)

        if not return_gaps:
            return chunk
        else:
            return chunk, gaps

    def write(self, directory):
        '''Write example data to TFRecordDataset using `self.writer`.'''
        logger.debug('writing TFRecordDataset: %s' % directory)
        writer = tf.python_io.TFRecordWriter(directory)

        for ex in self.pack_examples():
            writer.write(ex.SerializeToString())

    def cleanup(self):
        '''Remove remaining folders'''
        delete_if_exists(self.fn_tfrecord)
class Noise(ChunkOperation):
    '''Base class for noise augmentation applied to a data chunk.'''

    # Relative amplitude level of the noise to add.
    level = Float.T(default=1., optional=True)

    def __call__(self, chunk):
        # No-op in the base class; subclasses modify `chunk` in place.
        pass
class SatelliteTargetDisplacement(PlotConfig):
    ''' Maps showing surface displacements from satellite and modelled data '''

    name = 'satellite'
    dpi = Int.T(
        default=250)
    size_cm = Tuple.T(
        2, Float.T(),
        default=(22., 12.))
    colormap = String.T(
        default='RdBu',
        help='Colormap for the surface displacements')
    relative_coordinates = Bool.T(
        default=False,
        help='Show relative coordinates, initial location centered at 0N, 0E')

    def make(self, environ):
        # Register the InSAR displacement maps as an MPL plot group.
        cm = environ.get_plot_collection_manager()
        history = environ.get_history(subset='harvest')
        optimiser = environ.get_optimiser()
        ds = environ.get_dataset()

        environ.setup_modelling()

        cm.create_group_mpl(
            self,
            self.draw_static_fits(ds, history, optimiser),
            title=u'InSAR Displacements',
            section='fits',
            feather_icon='navigation',
            description=u'''
Maps showing subsampled surface displacements as observed, modelled and the
residual (observed minus modelled).

The displacement values predicted by the orbit-ambiguity ramps are added to
the modelled displacements (middle panels). The color shows the LOS
displacement values associated with, and the extent of, every quadtree box.
The light grey dots show the focal point of pixels combined in the quadtree
box. This point corresponds to the position of the modelled data point.

The large dark grey dot shows the reference source position. The grey filled
box shows the surface projection of the modelled source, with the thick-lined
edge marking the upper fault edge. Complete data extent is shown.
''')

    def draw_static_fits(self, ds, history, optimiser, closeup=False):
        '''Yield (PlotItem, figure) pairs, one per satellite target.'''
        from pyrocko.orthodrome import latlon_to_ne_numpy
        problem = history.problem
        sat_targets = problem.satellite_targets
        for target in sat_targets:
            target.set_dataset(ds)

        source = history.get_best_source()
        best_model = history.get_best_model()

        results = problem.evaluate(best_model, targets=sat_targets)

        def initAxes(ax, scene, title, last_axes=False):
            # Configure labels/aspect/tick scaling for the scene's frame
            # type (metric local frame vs. geographic degrees).
            ax.set_title(title)
            ax.tick_params(length=2)

            if scene.frame.isMeter():
                ax.set_xlabel('Easting [km]')
                scale_x = {'scale': 1. / km}
                scale_y = {'scale': 1. / km}
                if not self.relative_coordinates:
                    import utm
                    utm_E, utm_N, utm_zone, utm_zone_letter =\
                        utm.from_latlon(source.effective_lat,
                                        source.effective_lon)
                    scale_x['offset'] = utm_E
                    scale_y['offset'] = utm_N

                    if last_axes:
                        ax.text(0.975, 0.025,
                                'UTM Zone %d%s' % (utm_zone, utm_zone_letter),
                                va='bottom', ha='right',
                                fontsize=8, alpha=.7,
                                transform=ax.transAxes)
                ax.set_aspect('equal')

            elif scene.frame.isDegree():
                ax.set_xlabel('Lon [°]')
                scale_x = {'scale': 1.}
                scale_y = {'scale': 1.}
                if not self.relative_coordinates:
                    scale_x['offset'] = source.effective_lon
                    scale_y['offset'] = source.effective_lat
                # Shrink the x axis with latitude so distances look right.
                ax.set_aspect(1. / num.cos(source.effective_lat * d2r))

            scale_axes(ax.get_xaxis(), **scale_x)
            scale_axes(ax.get_yaxis(), **scale_y)

        def drawSource(ax, scene):
            # Outline of the modelled source, shifted so it is centered.
            if scene.frame.isMeter():
                fn, fe = source.outline(cs='xy').T
                fn -= fn.mean()
                fe -= fe.mean()
            elif scene.frame.isDegree():
                fn, fe = source.outline(cs='latlon').T
                fn -= source.effective_lat
                fe -= source.effective_lon

            # source is centered
            ax.scatter(0., 0., color='black', s=3, alpha=.5, marker='o')
            ax.fill(fe, fn,
                    edgecolor=(0., 0., 0.),
                    facecolor=(.5, .5, .5), alpha=0.7)
            # Thick line marks the upper fault edge.
            ax.plot(fe[0:2], fn[0:2], 'k', linewidth=1.3)

        def mapDisplacementGrid(displacements, scene):
            # Paint each quadtree leaf's value onto the full scene grid.
            arr = num.full_like(scene.displacement, fill_value=num.nan)
            qt = scene.quadtree

            for syn_v, leaf in zip(displacements, qt.leaves):
                arr[leaf._slice_rows, leaf._slice_cols] = syn_v

            arr[scene.displacement_mask] = num.nan
            return arr

        def drawLeaves(ax, scene, offset_e=0., offset_n=0.):
            rects = scene.quadtree.getMPLRectangles()
            for r in rects:
                r.set_edgecolor((.4, .4, .4))
                r.set_linewidth(.5)
                r.set_facecolor('none')
                r.set_x(r.get_x() - offset_e)
                r.set_y(r.get_y() - offset_n)

            # FIX: this used `map(ax.add_artist, rects)`, which is lazy in
            # Python 3 and never evaluated — the rectangles were silently
            # never added to the axes. Iterate explicitly instead.
            for r in rects:
                ax.add_artist(r)

            ax.scatter(scene.quadtree.leaf_coordinates[:, 0] - offset_e,
                       scene.quadtree.leaf_coordinates[:, 1] - offset_n,
                       s=.25, c='black', alpha=.1)

        def addArrow(ax, scene):
            # LOS and azimuth direction arrows derived from the mean phi.
            phi = num.nanmean(scene.phi)
            los_dx = num.cos(phi + num.pi) * .0625
            los_dy = num.sin(phi + num.pi) * .0625

            az_dx = num.cos(phi - num.pi / 2) * .125
            az_dy = num.sin(phi - num.pi / 2) * .125

            anchor_x = .9 if los_dx < 0 else .1
            anchor_y = .85 if los_dx < 0 else .975

            az_arrow = patches.FancyArrow(
                x=anchor_x - az_dx, y=anchor_y - az_dy,
                dx=az_dx, dy=az_dy,
                head_width=.025,
                alpha=.5, fc='k',
                head_starts_at_zero=False,
                length_includes_head=True,
                transform=ax.transAxes)

            los_arrow = patches.FancyArrow(
                x=anchor_x - az_dx / 2, y=anchor_y - az_dy / 2,
                dx=los_dx, dy=los_dy,
                head_width=.02,
                alpha=.5, fc='k',
                head_starts_at_zero=False,
                length_includes_head=True,
                transform=ax.transAxes)

            ax.add_artist(az_arrow)
            ax.add_artist(los_arrow)

        # Common axis limits covering the data extent of all targets.
        urE, urN, llE, llN = (0., 0., 0., 0.)
        for target in sat_targets:

            if target.scene.frame.isMeter():
                off_n, off_e = map(float, latlon_to_ne_numpy(
                    target.scene.frame.llLat, target.scene.frame.llLon,
                    source.effective_lat, source.effective_lon))
            if target.scene.frame.isDegree():
                off_n = source.effective_lat - target.scene.frame.llLat
                off_e = source.effective_lon - target.scene.frame.llLon

            turE, turN, tllE, tllN = zip(
                *[(leaf.gridE.max() - off_e,
                   leaf.gridN.max() - off_n,
                   leaf.gridE.min() - off_e,
                   leaf.gridN.min() - off_n)
                  for leaf in target.scene.quadtree.leaves])

            turE, turN = map(max, (turE, turN))
            tllE, tllN = map(min, (tllE, tllN))
            urE, urN = map(max, ((turE, urE), (urN, turN)))
            llE, llN = map(min, ((tllE, llE), (llN, tllN)))

        def generate_plot(sat_target, result, ifig):
            # One figure: observed | modelled | residual + shared colorbar.
            scene = sat_target.scene

            fig = plt.figure()
            fig.set_size_inches(*self.size_inch)
            gs = gridspec.GridSpec(
                2, 3,
                wspace=.15, hspace=.2,
                left=.1, right=.975, top=.95,
                height_ratios=[12, 1])

            item = PlotItem(
                name='fig_%i' % ifig,
                attributes={'targets': [sat_target.path]},
                title=u'Satellite Surface Displacements - %s'
                      % scene.meta.scene_title,
                description=u'''
Surface displacements derived from satellite data.
(Left) the input data, (center) the modelled data and (right) the model
residual.
'''.format(meta=scene.meta))

            stat_obs = result.statics_obs
            stat_syn = result.statics_syn['displacement.los']
            res = stat_obs - stat_syn

            if scene.frame.isMeter():
                offset_n, offset_e = map(float, latlon_to_ne_numpy(
                    scene.frame.llLat, scene.frame.llLon,
                    source.effective_lat, source.effective_lon))
            elif scene.frame.isDegree():
                offset_n = source.effective_lat - scene.frame.llLat
                offset_e = source.effective_lon - scene.frame.llLon

            im_extent = (scene.frame.E.min() - offset_e,
                         scene.frame.E.max() - offset_e,
                         scene.frame.N.min() - offset_n,
                         scene.frame.N.max() - offset_n)

            # Symmetric color range over all three panels.
            abs_displ = num.abs([stat_obs.min(), stat_obs.max(),
                                 stat_syn.min(), stat_syn.max(),
                                 res.min(), res.max()]).max()

            cmw = cm.ScalarMappable(cmap=self.colormap)
            cmw.set_clim(vmin=-abs_displ, vmax=abs_displ)
            cmw.set_array(stat_obs)

            axes = [fig.add_subplot(gs[0, 0]),
                    fig.add_subplot(gs[0, 1]),
                    fig.add_subplot(gs[0, 2])]

            ax = axes[0]
            ax.imshow(mapDisplacementGrid(stat_obs, scene),
                      extent=im_extent, cmap=self.colormap,
                      vmin=-abs_displ, vmax=abs_displ,
                      origin='lower')
            drawLeaves(ax, scene, offset_e, offset_n)
            drawSource(ax, scene)
            addArrow(ax, scene)
            initAxes(ax, scene, 'Observed')

            ax.text(.025, .025, 'Scene ID: %s' % scene.meta.scene_id,
                    fontsize=8, alpha=.7,
                    va='bottom', transform=ax.transAxes)
            if scene.frame.isDegree():
                ax.set_ylabel('Lat [°]')
            elif scene.frame.isMeter():
                ax.set_ylabel('Northing [km]')

            ax = axes[1]
            ax.imshow(mapDisplacementGrid(stat_syn, scene),
                      extent=im_extent, cmap=self.colormap,
                      vmin=-abs_displ, vmax=abs_displ,
                      origin='lower')
            drawLeaves(ax, scene, offset_e, offset_n)
            drawSource(ax, scene)
            addArrow(ax, scene)
            initAxes(ax, scene, 'Model')
            ax.get_yaxis().set_visible(False)

            ax = axes[2]
            ax.imshow(mapDisplacementGrid(res, scene),
                      extent=im_extent, cmap=self.colormap,
                      vmin=-abs_displ, vmax=abs_displ,
                      origin='lower')
            drawLeaves(ax, scene, offset_e, offset_n)
            drawSource(ax, scene)
            addArrow(ax, scene)
            initAxes(ax, scene, 'Residual', last_axes=True)
            ax.get_yaxis().set_visible(False)

            for ax in axes:
                ax.set_xlim(llE, urE)
                ax.set_ylim(llN, urN)

            if closeup:
                # Zoom all panels onto the fault trace.
                if scene.frame.isMeter():
                    fn, fe = source.outline(cs='xy').T
                elif scene.frame.isDegree():
                    fn, fe = source.outline(cs='latlon').T
                    fn -= source.effective_lat
                    fe -= source.effective_lon

                if fn.size > 1:
                    off_n = (fn[0] + fn[1]) / 2
                    off_e = (fe[0] + fe[1]) / 2
                else:
                    off_n = fn[0]
                    off_e = fe[0]

                fault_size = 2 * num.sqrt(max(abs(fn - off_n))**2
                                          + max(abs(fe - off_e))**2)
                fault_size *= self.map_scale
                if fault_size == 0.0:
                    extent = (scene.frame.N[-1] + scene.frame.E[-1]) / 2
                    fault_size = extent * .25

                for ax in axes:
                    ax.set_xlim(-fault_size / 2 + off_e,
                                fault_size / 2 + off_e)
                    ax.set_ylim(-fault_size / 2 + off_n,
                                fault_size / 2 + off_n)

            cax = fig.add_subplot(gs[1, :])
            cbar = fig.colorbar(cmw, cax=cax, orientation='horizontal',
                                use_gridspec=True)

            cbar.set_label('LOS Displacement [m]')

            return (item, fig)

        for ifig, (sat_target, result) in enumerate(
                zip(sat_targets, results)):
            yield generate_plot(sat_target, result, ifig)
class MTSourceWithMagnitude(gf.SourceWithMagnitude):
    '''
    A moment tensor point source.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    mnn = Float.T(
        default=1.,
        help='north-north component of moment tensor')

    mee = Float.T(
        default=1.,
        help='east-east component of moment tensor')

    mdd = Float.T(
        default=1.,
        help='down-down component of moment tensor')

    mne = Float.T(
        default=0.,
        help='north-east component of moment tensor')

    mnd = Float.T(
        default=0.,
        help='north-down component of moment tensor')

    med = Float.T(
        default=0.,
        help='east-down component of moment tensor')

    def __init__(self, **kwargs):
        # Allow a 6-component vector `m6` as a shortcut for the six
        # scalar moment tensor entries.
        if 'm6' in kwargs:
            for (k, v) in zip('mnn mee mdd mne mnd med'.split(),
                              kwargs.pop('m6')):
                kwargs[k] = float(v)

        Source.__init__(self, **kwargs)

    @property
    def m6(self):
        # Moment tensor entries as a 6-vector (NED convention).
        return num.array(self.m6_astuple)

    @property
    def scaled_m6(self):
        '''Moment tensor 6-vector normalized to unit scalar moment.'''
        m9 = mtm.symmat6(*self.m6)
        m0_unscaled = math.sqrt(num.sum(m9.A ** 2)) / math.sqrt(2.)
        m9 /= m0_unscaled
        m6 = mtm.to6(m9)
        return m6

    @property
    def scaled_m6_dict(self):
        # Normalized entries keyed by their property names.
        keys = ['mnn', 'mee', 'mdd', 'mne', 'mnd', 'med']
        return {k: m for k, m in zip(keys, self.scaled_m6.tolist())}

    @property
    def m6_astuple(self):
        return (self.mnn, self.mee, self.mdd, self.mne, self.mnd, self.med)

    @m6.setter
    def m6(self, value):
        self.mnn, self.mee, self.mdd, self.mne, self.mnd, self.med = value

    def base_key(self):
        return Source.base_key(self) + self.m6_astuple

    def discretize_basesource(self, store, target=None):
        '''Discretize to a DiscretizedMTSource honouring the effective STF.'''
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, 0.0)

        # Scale the normalized tensor by the moment from the magnitude.
        m0 = mtm.magnitude_to_moment(self.magnitude)
        m6s = self.scaled_m6 * m0
        return meta.DiscretizedMTSource(
            m6s=m6s[num.newaxis, :] * amplitudes[:, num.newaxis],
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self):
        return mtm.MomentTensor(m=mtm.symmat6(*self.m6_astuple) * self.moment)

    def pyrocko_event(self, **kwargs):
        # Reuse the computed tensor instead of building it twice.
        mt = self.pyrocko_moment_tensor()
        return Source.pyrocko_event(
            self,
            moment_tensor=mt,
            magnitude=float(mt.moment_magnitude()),
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        d = {}
        mt = ev.moment_tensor
        if mt:
            # FIX: was `m6=map(float, mt.m6())` — on Python 3 `map` returns
            # a one-shot iterator, which breaks any repeated iteration or
            # serialization of the kwargs; materialize it as a list.
            d.update(m6=list(map(float, mt.m6())))

        d.update(kwargs)
        return super(MTSourceWithMagnitude, cls).from_pyrocko_event(ev, **d)
class B(Object):
    '''Aggregates of A: a list, a fixed 3-tuple, an int-keyed dict, plus
    a plain float.'''
    a_list = List.T(A.T())
    a_tuple = Tuple.T(3, A.T())
    a_dict = Dict.T(Int.T(), A.T())
    b = Float.T()
class A(Object):
    '''Holds a mapping of int keys to float values.'''
    d = Dict.T(Int.T(), Float.T())
class Duration(Object):
    '''Duration value mapped to XML: unit as attribute, value as element
    content.'''
    unit = String.T(optional=True, xmlstyle='attribute')
    uncertainty = Float.T(optional=True)
    value = Float.T(optional=True, xmlstyle='content')

    xmltagname = 'duration'
class RectangularProblem(Problem):
    '''Inversion problem for a rectangular (finite) fault source.'''

    # nucleation_x
    # nucleation_y
    # time
    # stf

    problem_parameters = [
        Parameter('east_shift', 'm', label='Easting', **as_km),
        Parameter('north_shift', 'm', label='Northing', **as_km),
        Parameter('depth', 'm', label='Depth', **as_km),
        Parameter('length', 'm', label='Length', **as_km),
        Parameter('width', 'm', label='Width', **as_km),
        Parameter('slip', 'm', label='Slip'),
        Parameter('strike', 'deg', label='Strike'),
        Parameter('dip', 'deg', label='Dip'),
        Parameter('rake', 'deg', label='Rake')
    ]

    problem_waveform_parameters = [
        Parameter('nucleation_x', 'offset', label='Nucleation X'),
        Parameter('nucleation_y', 'offset', label='Nucleation Y'),
        Parameter('time', 's', label='Time'),
    ]

    dependants = []

    distance_min = Float.T(default=0.0)

    def pack(self, source):
        '''Model vector for *source*, with time made relative to base.'''
        model = self.get_parameter_array(source)
        for index, param in enumerate(self.parameters):
            if param.name == 'time':
                model[index] -= self.base_source.time
        return model

    def get_source(self, x):
        '''Build a source object from model vector *x*.'''
        values = self.get_parameter_dict(x)
        updates = {
            key: float(self.ranges[key].make_relative(
                self.base_source[key], values[key]))
            for key in self.base_source.keys()
            if key in values}

        return self.base_source.clone(**updates)

    def random_uniform(self, xbounds, rstate):
        '''Draw one random model, uniform within the given bounds.'''
        return num.array([
            rstate.uniform(xbounds[ipar, 0], xbounds[ipar, 1])
            for ipar in range(self.nparameters)])

    def preconstrain(self, x):
        # Distance-based rejection is currently disabled:
        # source = self.get_source(x)
        # if any(self.distance_min > source.distance_to(t)
        #        for t in self.targets):
        #     raise Forbidden()
        return x

    @classmethod
    def get_plot_classes(cls):
        '''Plot classes applicable to this problem type.'''
        return super(RectangularProblem, cls).get_plot_classes()
class GNSSStation(Location):
    ''' Representation of a GNSS station during a campaign measurement

    For more information see
    http://kb.unavco.org/kb/assets/660/UNAVCO_Campaign_GPS_GNSS_Handbook.pdf
    '''
    code = String.T(
        help='Four letter station code',
        optional=True)

    style = StringChoice.T(
        choices=['static', 'rapid_static', 'kinematic'],
        default='static')

    survey_start = DateTimestamp.T(
        optional=True,
        help='Survey start time')

    survey_end = DateTimestamp.T(
        optional=True,
        help='Survey end time')

    correlation_ne = Float.T(
        default=0.,
        help='North-East component correlation')

    correlation_eu = Float.T(
        default=0.,
        help='East-Up component correlation')

    correlation_nu = Float.T(
        default=0.,
        help='North-Up component correlation')

    north = GNSSComponent.T(optional=True)
    east = GNSSComponent.T(optional=True)
    up = GNSSComponent.T(optional=True)

    def __eq__(self, other):
        # Stations are identified by their four-letter code only.
        try:
            return self.code == other.code
        except AttributeError:
            return False

    def get_covariance_matrix(self):
        '''Covariance matrix over the components that are present.

        :returns: symmetric (ncomponents x ncomponents) array.
        '''
        components = self.components.values()
        ncomponents = self.ncomponents

        covar = num.zeros((ncomponents, ncomponents))
        for ic1, comp1 in enumerate(components):
            for ic2, comp2 in enumerate(components):
                corr = self._get_comp_correlation(comp1, comp2)
                covar[ic1, ic2] = corr * comp1.sigma * comp2.sigma

        # This floating point operation is inaccurate:
        # corr * comp1.sigma * comp2.sigma
        #   != corr * comp2.sigma * comp1.sigma
        #
        # Hence this identity
        covar[num.tril_indices_from(covar, k=-1)] = \
            covar[num.triu_indices_from(covar, k=1)]

        return covar

    def get_correlation_matrix(self):
        '''Correlation matrix over the components that are present.

        NOTE(review): the diagonal is filled with each component's sigma,
        not with 1.0 — this mirrors the original behaviour; confirm with
        downstream consumers before changing.
        '''
        components = self.components.values()
        ncomponents = self.ncomponents

        corr = num.zeros((ncomponents, ncomponents))
        corr[num.diag_indices_from(corr)] = num.array(
            [c.sigma for c in components])

        for ic1, comp1 in enumerate(components):
            for ic2, comp2 in enumerate(components):
                if comp1 is comp2:
                    continue
                corr[ic1, ic2] = self._get_comp_correlation(comp1, comp2)

        # See comment at get_covariance_matrix
        corr[num.tril_indices_from(corr, k=-1)] = \
            corr[num.triu_indices_from(corr, k=1)]

        return corr

    def get_displacement_data(self):
        '''Shift values of all present components as an array.'''
        return num.array([c.shift for c in self.components.values()])

    def get_component_mask(self):
        '''Boolean mask flagging which of (north, east, up) are set.'''
        # FIX: dtype was `num.bool`, an alias deprecated in NumPy 1.20 and
        # removed in 1.24; the builtin `bool` is the documented replacement
        # and yields the identical dtype.
        return num.array(
            [self.__getattribute__(name) is not None
             for name in ('north', 'east', 'up')],
            dtype=bool)

    @property
    def components(self):
        '''Ordered mapping name -> component for components present.'''
        return OrderedDict(
            [(name, self.__getattribute__(name))
             for name in ('north', 'east', 'up')
             if self.__getattribute__(name) is not None])

    @property
    def ncomponents(self):
        '''Number of components present.'''
        return len(self.components)

    def _get_comp_correlation(self, comp1, comp2):
        # Self-correlation is 1; cross terms are looked up in both orders,
        # defaulting to False (numerically 0.).
        if comp1 is comp2:
            return 1.

        s = self

        correlation_map = {
            (s.north, s.east): s.correlation_ne,
            (s.east, s.up): s.correlation_eu,
            (s.north, s.up): s.correlation_nu
        }

        return correlation_map.get(
            (comp1, comp2),
            correlation_map.get((comp2, comp1), False))
class Event(Object):
    '''Seismic event representation

    :param lat: latitude of hypocenter (default 0.0)
    :param lon: longitude of hypocenter (default 0.0)
    :param time: origin time as float in seconds after '1970-01-01 00:00:00
    :param name: event identifier as string (optional)
    :param depth: source depth (optional)
    :param magnitude: magnitude of event (optional)
    :param region: source region (optional)
    :param catalog: name of catalog that lists this event (optional)
    :param moment_tensor: moment tensor as
        :py:class:`moment_tensor.MomentTensor` instance (optional)
    :param duration: source duration as float (optional)
    '''
    lat = Float.T(default=0.0)
    lon = Float.T(default=0.0)
    time = Timestamp.T(default=util.str_to_time('1970-01-01 00:00:00'))
    name = String.T(default='', optional=True)
    depth = Float.T(optional=True)
    magnitude = Float.T(optional=True)
    magnitude_type = String.T(optional=True)
    region = Unicode.T(optional=True)
    catalog = String.T(optional=True)
    moment_tensor = moment_tensor.MomentTensor.T(optional=True)
    duration = Float.T(optional=True)

    def __init__(self, lat=0., lon=0., time=0., name='', depth=None,
                 magnitude=None, magnitude_type=None, region=None, load=None,
                 loadf=None, catalog=None, moment_tensor=None, duration=None):

        # Legacy text-format loading: `load` (filename) / `loadf` (file
        # object) override all other constructor arguments.
        vals = None
        if load is not None:
            vals = Event.oldload(load)
        elif loadf is not None:
            vals = Event.oldloadf(loadf)

        if vals:
            lat, lon, time, name, depth, magnitude, magnitude_type, region, \
                catalog, moment_tensor, duration = vals

        Object.__init__(
            self, lat=lat, lon=lon, time=time, name=name, depth=depth,
            magnitude=magnitude, magnitude_type=magnitude_type,
            region=region, catalog=catalog, moment_tensor=moment_tensor,
            duration=duration)

    def time_as_string(self):
        return util.time_to_str(self.time)

    def set_name(self, name):
        self.name = name

    def olddump(self, filename):
        # Write in the legacy key = value text format.
        with open(filename, 'w') as file:
            self.olddumpf(file)

    def olddumpf(self, file):
        '''Write this event to *file* in the legacy key = value format.'''
        file.write('name = %s\n' % self.name)
        file.write('time = %s\n' % util.time_to_str(self.time))

        if self.lat is not None:
            file.write('latitude = %.12g\n' % self.lat)
        if self.lon is not None:
            file.write('longitude = %.12g\n' % self.lon)
        if self.magnitude is not None:
            file.write('magnitude = %g\n' % self.magnitude)
            file.write('moment = %g\n' %
                       moment_tensor.magnitude_to_moment(self.magnitude))
        if self.magnitude_type is not None:
            file.write('magnitude_type = %s\n' % self.magnitude_type)
        if self.depth is not None:
            file.write('depth = %.10g\n' % self.depth)
        if self.region is not None:
            file.write('region = %s\n' % self.region)
        if self.catalog is not None:
            file.write('catalog = %s\n' % self.catalog)

        if self.moment_tensor is not None:
            m = self.moment_tensor.m()
            sdr1, sdr2 = self.moment_tensor.both_strike_dip_rake()
            file.write(
                ('mnn = %g\nmee = %g\nmdd = %g\nmne = %g\nmnd = %g\n'
                 'med = %g\n'
                 'strike1 = %g\ndip1 = %g\nrake1 = %g\n'
                 'strike2 = %g\ndip2 = %g\nrake2 = %g\n') %
                ((m[0, 0], m[1, 1], m[2, 2], m[0, 1], m[0, 2], m[1, 2])
                 + sdr1 + sdr2))

        if self.duration is not None:
            file.write('duration = %g\n' % self.duration)

    @staticmethod
    def unique(events, deltat=10.,
               group_cmp=(lambda a, b: cmp(a.catalog, b.catalog))):
        '''Deduplicate *events*, keeping per time-group the last event
        under the ordering induced by *group_cmp*.

        NOTE(review): the default comparator uses `cmp`, presumably
        provided at module level for Python 3 — confirm.
        '''
        from functools import cmp_to_key

        groups = Event.grouped(events, deltat)

        events = []
        for group in groups:
            if group:
                # FIX: `group.sort(group_cmp)` passed a py2-style cmp
                # function positionally, which raises TypeError on
                # Python 3; adapt it with cmp_to_key.
                group.sort(key=cmp_to_key(group_cmp))
                events.append(group[-1])

        return events

    @staticmethod
    def grouped(events, deltat=10.):
        '''Group events whose origin times are within *deltat* seconds.'''
        events = list(events)
        groups = []
        for ia, a in enumerate(events):
            groups.append([])
            haveit = False
            for ib, b in enumerate(events[:ia]):
                if abs(b.time - a.time) < deltat:
                    groups[ib].append(a)
                    haveit = True
                    break

            if not haveit:
                groups[ia].append(a)

        groups = [g for g in groups if g]
        groups.sort(key=lambda g: sum(e.time for e in g) // len(g))
        return groups

    @staticmethod
    def dump_catalog(events, filename=None, stream=None):
        '''Write *events* to *filename* or *stream*, separated by dashes.'''
        if filename is not None:
            file = open(filename, 'w')
        else:
            file = stream
        try:
            for ev in events:
                ev.olddumpf(file)
                file.write('--------------------------------------------\n')
        finally:
            # Only close files we opened ourselves.
            if filename is not None:
                file.close()

    @staticmethod
    def oldload(filename):
        with open(filename, 'r') as file:
            return Event.oldloadf(file)

    @staticmethod
    def oldloadf(file):
        '''Parse one event in the legacy key = value format from *file*.

        :raises EOF: when nothing could be read.
        :raises EmptyEvent: when only the separator line was found.
        :raises FileParseError: on malformed input.
        '''
        d = {}
        try:
            for line in file:
                if line.lstrip().startswith('#'):
                    continue

                toks = line.split(' = ', 1)
                if len(toks) == 2:
                    k, v = toks[0].strip(), toks[1].strip()
                    if k in ('name', 'region', 'catalog', 'magnitude_type'):
                        d[k] = v
                    # FIX: 'moment' was written by olddumpf but never
                    # parsed here, leaving the `'moment' in d` branch
                    # below unreachable.
                    if k in (('latitude longitude magnitude depth duration '
                              'mnn mee mdd mne mnd med strike1 dip1 rake1 '
                              'strike2 dip2 rake2 moment').split()):
                        d[k] = float(v)

                    if k == 'time':
                        d[k] = util.str_to_time(v)

                if line.startswith('---'):
                    d['have_separator'] = True
                    break

        except Exception as e:
            raise FileParseError(e)

        if not d:
            raise EOF()

        if 'have_separator' in d and len(d) == 1:
            raise EmptyEvent()

        mt = None
        m6 = [d[x] for x in 'mnn mee mdd mne mnd med'.split() if x in d]
        if len(m6) == 6:
            mt = moment_tensor.MomentTensor(m=moment_tensor.symmat6(*m6))
        else:
            sdr = [d[x] for x in 'strike1 dip1 rake1'.split() if x in d]
            if len(sdr) == 3:
                moment = 1.0
                if 'moment' in d:
                    moment = d['moment']
                elif 'magnitude' in d:
                    moment = moment_tensor.magnitude_to_moment(d['magnitude'])

                mt = moment_tensor.MomentTensor(
                    strike=sdr[0], dip=sdr[1], rake=sdr[2],
                    scalar_moment=moment)

        return (
            d.get('latitude', 0.0),
            d.get('longitude', 0.0),
            d.get('time', 0.0),
            d.get('name', ''),
            d.get('depth', None),
            d.get('magnitude', None),
            d.get('magnitude_type', None),
            d.get('region', None),
            d.get('catalog', None),
            mt,
            d.get('duration', None))

    @staticmethod
    def load_catalog(filename):
        '''Generator yielding events from a legacy-format catalog file.'''
        file = open(filename, 'r')

        try:
            while True:
                try:
                    yield Event(loadf=file)
                except EmptyEvent:
                    pass

        except EOF:
            pass

        finally:
            # FIX: close in `finally` so the file is not leaked when an
            # exception other than EOF propagates (e.g. FileParseError).
            file.close()

    def get_hash(self):
        '''Stable hash string over time, location, magnitude and names.'''
        e = self
        # High-precision timestamps get more sub-second digits.
        if isinstance(e.time, util.hpfloat):
            stime = util.time_to_str(e.time, format='%Y-%m-%d %H:%M:%S.6FRAC')
        else:
            stime = util.time_to_str(e.time, format='%Y-%m-%d %H:%M:%S.3FRAC')

        s = float_or_none_to_str

        return ehash(', '.join((
            stime,
            s(e.lat), s(e.lon), s(e.depth), s(e.magnitude),
            str(e.catalog), str(e.name), str(e.region))))

    def human_str(self):
        '''Multi-line, human-readable summary of this event.'''
        s = [
            'Latitude [deg]: %g' % self.lat,
            'Longitude [deg]: %g' % self.lon,
            'Time [UTC]: %s' % util.time_to_str(self.time)]

        if self.name:
            s.append('Name: %s' % self.name)

        if self.depth is not None:
            s.append('Depth [km]: %g' % (self.depth / 1000.))

        if self.magnitude is not None:
            s.append('Magnitude [%s]: %3.1f' % (
                self.magnitude_type or 'M?', self.magnitude))

        if self.region:
            s.append('Region: %s' % self.region)

        if self.catalog:
            s.append('Catalog: %s' % self.catalog)

        if self.moment_tensor:
            s.append(str(self.moment_tensor))

        return '\n'.join(s)
class CheckWaveformsPlot(PlotConfig):
    ''' Plot for checking the waveforms fit with a number of synthetics '''
    name = 'check_waveform'

    size_cm = Tuple.T(
        2, Float.T(),
        default=(9., 7.5),
        help='width and length of the figure in cm')
    n_random_synthetics = Int.T(
        default=10,
        help='Number of Synthetics to generate')

    def make(self, environ):
        # Evaluate either the reference model or a number of randomly
        # drawn models and register the comparison figures.
        cm = environ.get_plot_collection_manager()
        mpl_init(fontsize=self.font_size)

        environ.setup_modelling()

        problem = environ.get_problem()
        results_list = []
        sources = []
        if self.n_random_synthetics == 0:
            x = problem.preconstrain(problem.get_reference_model())
            sources.append(problem.base_source)
            results = problem.evaluate(x)
            results_list.append(results)

        else:
            for _ in range(self.n_random_synthetics):
                x = problem.get_random_model()
                sources.append(problem.get_source(x))
                results = problem.evaluate(x)
                results_list.append(results)

        cm.create_group_mpl(
            self,
            self.draw_figures(sources, problem.targets, results_list),
            title=u'Waveform Check',
            section='checks',
            feather_icon='activity',
            description=u'''
Plot to judge waveform time window settings and source model parameter ranges.

For each waveform target, observed and synthetic waveforms are shown. For the
latter, models are randomly drawn from the configured parameter search space.

The top panel shows the observed waveform; filtered (faint gray), and filtered
and tapered (black). The colored outline around the observed trace shows the
taper position for each drawn model in a different color. The middle panel
shows the filtered synthetic waveforms of the drawn models and the bottom plot
shows the corresponding filtered and tapered synthetic waveforms. The colors of
taper and synthetic traces are consistent for each random model. The given
time is relative to the reference event origin time.
''')

    def draw_figures(self, sources, targets, results_list):
        # results_list arrives as [imodel][itarget]; regroup so each
        # target gets the results of all models.
        results_list = list(zip(*results_list))
        for itarget, target, results in zip(
                range(len(targets)), targets, results_list):

            if isinstance(target, WaveformMisfitTarget) and results:
                item = PlotItem(name='t%i' % itarget)
                item.attributes['targets'] = [target.string_id()]
                fig = self.draw_figure(sources, target, results)
                if fig is not None:
                    yield item, fig

    def draw_figure(self, sources, target, results):
        '''Draw one figure comparing observed and synthetic traces for
        *target* over all drawn *sources*.'''
        t0_mean = num.mean([s.time for s in sources])

        # distances = [
        #     s.distance_to(target) for s in sources]

        # distance_min = num.min(distances)
        # distance_max = num.max(distances)

        yabsmaxs = []

        for result in results:
            if isinstance(result, WaveformMisfitResult):
                yabsmaxs.append(
                    num.max(num.abs(
                        result.filtered_obs.get_ydata())))

        if yabsmaxs:
            yabsmax = max(yabsmaxs) or 1.0
        else:
            # NOTE(review): if no result is a WaveformMisfitResult,
            # yabsmax stays None; the loop below then never uses it
            # because every iteration hits `continue`.
            yabsmax = None

        fontsize = self.font_size

        fig = plt.figure(figsize=self.size_inch)

        labelpos = plot.mpl_margins(
            fig, nw=1, nh=1, w=1., h=5., units=fontsize)

        axes = fig.add_subplot(1, 1, 1)

        labelpos(axes, 2.5, 2.0)
        axes.set_frame_on(False)
        axes.set_ylim(1., 4.)
        axes.get_yaxis().set_visible(False)
        axes.set_title('%s' % target.string_id())
        axes.set_xlabel('Time [s]')
        ii = 0

        for source, result in zip(sources, results):
            if not isinstance(result, WaveformMisfitResult):
                continue

            # Align traces on the synthetic pick when a time shift was
            # applied, otherwise on the mean source time.
            if result.tobs_shift != 0.0:
                t0 = result.tsyn_pick
            else:
                t0 = t0_mean

            t = result.filtered_obs.get_xdata()
            ydata = result.filtered_obs.get_ydata() / yabsmax
            # Observed trace at y ~ 3.5.
            axes.plot(
                t-t0, ydata*0.40 + 3.5, color='black', lw=1.0)

            color = plot.mpl_graph_color(ii)

            t = result.filtered_syn.get_xdata()
            ydata = result.filtered_syn.get_ydata()
            ydata = ydata / (num.max(num.abs(ydata)) or 1.0)

            # Filtered synthetic at y ~ 2.5.
            axes.plot(t-t0, ydata*0.47 + 2.5, color=color, alpha=0.5, lw=1.0)

            t = result.processed_syn.get_xdata()
            ydata = result.processed_syn.get_ydata()
            ydata = ydata / (num.max(num.abs(ydata)) or 1.0)

            # Filtered + tapered synthetic at y ~ 1.5.
            axes.plot(t-t0, ydata*0.47 + 1.5, color=color, alpha=0.5, lw=1.0)
            if result.tobs_shift != 0.0:
                axes.axvline(
                    result.tsyn_pick - t0,
                    color=(0.7, 0.7, 0.7),
                    zorder=2)

            t = result.processed_syn.get_xdata()
            taper = result.taper

            y = num.ones(t.size) * 0.9
            taper(y, t[0], t[1] - t[0])
            y2 = num.concatenate((y, -y[::-1]))
            t2 = num.concatenate((t, t[::-1]))
            # Taper outline drawn around the observed trace.
            axes.plot(t2-t0, y2 * 0.47 + 3.5, color=color, alpha=0.2, lw=1.0)
            ii += 1

        return fig
class Parameter(Object): name__ = String.T() unit = Unicode.T(optional=True) scale_factor = Float.T(default=1., optional=True) scale_unit = Unicode.T(optional=True) label = Unicode.T(optional=True) optional = Bool.T(default=True, optional=True) def __init__(self, *args, **kwargs): if len(args) >= 1: kwargs['name'] = args[0] if len(args) >= 2: kwargs['unit'] = newstr(args[1]) self.groups = [None] self._name = None Object.__init__(self, **kwargs) def get_label(self, with_unit=True): lbl = [self.label or self.name] if with_unit: unit = self.get_unit_label() if unit: lbl.append('[%s]' % unit) return ' '.join(lbl) def set_groups(self, groups): if not isinstance(groups, list): raise AttributeError('Groups must be a list of strings.') self.groups = groups def _get_name(self): if None not in self.groups: return '%s.%s' % ('.'.join(self.groups), self._name) return self._name def _set_name(self, value): self._name = value name = property(_get_name, _set_name) @property def name_nogroups(self): return self._name def get_value_label(self, value, format='%(value)g%(unit)s'): value = self.scaled(value) unit = self.get_unit_suffix() return format % dict(value=value, unit=unit) def get_unit_label(self): if self.scale_unit is not None: return self.scale_unit elif self.unit: return self.unit else: return None def get_unit_suffix(self): unit = self.get_unit_label() if not unit: return '' else: return ' %s' % unit def scaled(self, x): if isinstance(x, tuple): return tuple(v / self.scale_factor for v in x) if isinstance(x, list): return list(v / self.scale_factor for v in x) else: return x / self.scale_factor def inv_scaled(self, x): if isinstance(x, tuple): return tuple(v * self.scale_factor for v in x) if isinstance(x, list): return list(v * self.scale_factor for v in x) else: return x * self.scale_factor
class FitsWaveformPlot(PlotConfig):
    ''' Plot showing the waveform fits for the best model '''
    name = 'fits_waveform'
    size_cm = Tuple.T(
        2, Float.T(),
        default=(9., 5.),
        help='width and length of the figure in cm')
    nx = Int.T(
        default=1,
        help='horizontal number of subplots on every page')
    ny = Int.T(
        default=1,
        help='vertical number of subplots on every page')
    font_size = Float.T(
        default=8,
        help='Font Size of all fonts, except title')
    font_size_title = Float.T(
        default=10,
        help='Font Size of title')

    def make(self, environ):
        # Register the figure group with the plot collection manager; the
        # figures themselves are produced lazily by draw_figures().
        cm = environ.get_plot_collection_manager()
        mpl_init(fontsize=self.font_size)
        environ.setup_modelling()
        ds = environ.get_dataset()
        optimiser = environ.get_optimiser()
        environ.setup_modelling()
        history = environ.get_history(subset='harvest')

        cm.create_group_mpl(
            self,
            self.draw_figures(ds, history, optimiser),
            title=u'Waveform fits for best model',
            section='fits',
            feather_icon='activity',
            description=u'''
Plot showing observed and synthetic waveform (attributes) for the best fitting
model.

Best model's waveform fits for all targets. Depending on the target
configurations different types of comparisons are possible: (i) time domain
waveform differences, (ii) amplitude spectra, (iii) envelopes, (iv) cross
correlation functions. Each waveform plot gives a number of details:

1) Target information (left side, from top to bottom) gives station name with
component, distance to source, azimuth of station with respect to source,
target weight, target misfit and starting time of the waveform relative to the
origin time.

2) The background gray area shows the applied taper function.

3) The waveforms shown are: the restituted and filtered observed trace without
tapering (light grey) and the same trace with tapering and processing (dark
gray), the synthetic trace (light red) and the filtered, tapered and (if
enabled) shifted and processed synthetic target trace (red). The traces are
scaled according to the target weight (small weight, small amplitude) and
normed relative to the maximum amplitude of the targets of the corresponding
normalisation family.

4) The bottom panel shows, depending on the type of comparison, sample-wise
residuals for time domain comparisons (red filled), spectra of observed and
synthetic traces for amplitude spectrum comparisons, or cross correlation
traces.

5) Colored boxes on the upper right show the relative weight of the target
within the entire dataset of the optimisation (top box, orange) and the
relative misfit contribution to the global misfit of the optimisation (bottom
box, red).
''')

    def draw_figures(self, ds, history, optimiser):
        '''Yield (PlotItem, Figure) pairs with waveform fits for the best
        model, grouped by (path, channel) and paginated nx x ny per page.'''
        fontsize = self.font_size
        fontsize_title = self.font_size_title

        nxmax = self.nx
        nymax = self.ny

        problem = history.problem

        for target in problem.targets:
            target.set_dataset(ds)

        # Map each target to its (start, end) slice in the misfit arrays.
        target_index = {}
        i = 0
        for target in problem.targets:
            target_index[target] = i, i+target.nmisfits
            i += target.nmisfits

        xbest = history.get_best_model()
        misfits = history.misfits[history.get_sorted_misfits_idx(chain=0), ...]

        ws = problem.get_target_weights()

        # Per-target contributions to the global misfit of the best model.
        gcms = problem.combine_misfits(
            misfits[:1, :, :],
            extra_correlated_weights=optimiser.get_correlated_weights(problem),
            get_contributions=True)[0, :]

        w_max = num.nanmax(ws)
        gcm_max = num.nanmax(gcms)

        source = problem.get_source(xbest)

        target_to_result = {}
        all_syn_trs = []
        all_syn_specs = []
        results = problem.evaluate(xbest)

        dtraces = []
        for target, result in zip(problem.targets, results):
            if not isinstance(result, WaveformMisfitResult):
                # Keep dtraces index-aligned with target_index.
                dtraces.extend([None] * target.nmisfits)
                continue

            itarget, itarget_end = target_index[target]
            assert itarget_end == itarget + 1

            w = target.get_combined_weight()

            if target.misfit_config.domain == 'cc_max_norm':
                # Cross-correlation comparison: normalise obs/syn by mean
                # absolute amplitude and show the shifted cc trace.
                tref = (
                    result.filtered_obs.tmin + result.filtered_obs.tmax) * 0.5

                for tr_filt, tr_proc, tshift in (
                        (result.filtered_obs, result.processed_obs, 0.),
                        (result.filtered_syn, result.processed_syn,
                         result.tshift)):

                    norm = num.sum(num.abs(tr_proc.ydata)) / tr_proc.data_len()
                    tr_filt.ydata /= norm
                    tr_proc.ydata /= norm

                    tr_filt.shift(tshift)
                    tr_proc.shift(tshift)

                ctr = result.cc
                ctr.shift(tref)

                dtrace = ctr

            else:
                # Time/frequency domain: scale all traces by the combined
                # target weight, apply the time shift, and build the
                # residual ("norm") trace.
                for tr in (
                        result.filtered_obs,
                        result.filtered_syn,
                        result.processed_obs,
                        result.processed_syn):

                    tr.ydata *= w

                for spec in (
                        result.spectrum_obs,
                        result.spectrum_syn):

                    if spec is not None:
                        spec.ydata *= w

                if result.tshift is not None and result.tshift != 0.0:
                    # result.filtered_syn.shift(result.tshift)
                    result.processed_syn.shift(result.tshift)

                dtrace = make_norm_trace(
                    result.processed_syn, result.processed_obs,
                    problem.norm_exponent)

            target_to_result[target] = result

            dtrace.meta = dict(
                normalisation_family=target.normalisation_family,
                path=target.path)

            dtraces.append(dtrace)

            result.processed_syn.meta = dict(
                normalisation_family=target.normalisation_family,
                path=target.path)

            all_syn_trs.append(result.processed_syn)

            if result.spectrum_syn:
                result.spectrum_syn.meta = dict(
                    normalisation_family=target.normalisation_family,
                    path=target.path)

                all_syn_specs.append(result.spectrum_syn)

        if not all_syn_trs:
            # NOTE(review): logger.warn is deprecated; logger.warning is
            # the supported spelling.
            logger.warn('No traces to show!')
            return

        def skey(tr):
            # Normalisation group key: traces in the same family/path share
            # their amplitude scaling.
            return tr.meta['normalisation_family'], tr.meta['path']

        trace_minmaxs = trace.minmax(all_syn_trs, skey)

        amp_spec_maxs = amp_spec_max(all_syn_specs, skey)

        dminmaxs = trace.minmax([x for x in dtraces if x is not None], skey)

        for tr in dtraces:
            if tr:
                dmin, dmax = dminmaxs[skey(tr)]
                tr.ydata /= max(abs(dmin), abs(dmax))

        # Group targets by (path, channel-code) for plotting.
        cg_to_targets = meta.gather(
            problem.waveform_targets,
            lambda t: (t.path, t.codes[3]),
            filter=lambda t: t in target_to_result)

        cgs = sorted(cg_to_targets.keys())

        for cg in cgs:
            targets = cg_to_targets[cg]

            frame_to_target, nx, ny, nxx, nyy = layout(
                source, targets, nxmax, nymax)

            figures = {}
            for iy in range(ny):
                for ix in range(nx):
                    if (iy, ix) not in frame_to_target:
                        continue

                    # Page index for this subplot cell.
                    ixx = ix // nxmax
                    iyy = iy // nymax
                    if (iyy, ixx) not in figures:
                        title = '_'.join(x for x in cg if x)
                        item = PlotItem(
                            name='fig_%s_%i_%i' % (title, ixx, iyy))
                        item.attributes['targets'] = []
                        figures[iyy, ixx] = (
                            item, plt.figure(figsize=self.size_inch))

                        figures[iyy, ixx][1].subplots_adjust(
                            left=0.03,
                            right=1.0 - 0.03,
                            bottom=0.03,
                            top=1.0 - 0.06,
                            wspace=0.2,
                            hspace=0.2)

                    item, fig = figures[iyy, ixx]

                    target = frame_to_target[iy, ix]

                    item.attributes['targets'].append(target.string_id())

                    amin, amax = trace_minmaxs[
                        target.normalisation_family, target.path]
                    absmax = max(abs(amin), abs(amax))

                    ny_this = nymax  # min(ny, nymax)
                    nx_this = nxmax  # min(nx, nxmax)
                    i_this = (iy % ny_this) * nx_this + (ix % nx_this) + 1

                    axes2 = fig.add_subplot(ny_this, nx_this, i_this)

                    # Twin axes: axes2 carries taper/residual/annotations,
                    # axes carries the waveforms (different y scaling).
                    space = 0.5
                    space_factor = 1.0 + space
                    axes2.set_axis_off()
                    axes2.set_ylim(-1.05 * space_factor, 1.05)

                    axes = axes2.twinx()
                    axes.set_axis_off()

                    if target.misfit_config.domain == 'cc_max_norm':
                        axes.set_ylim(-10. * space_factor, 10.)
                    else:
                        axes.set_ylim(
                            -absmax * 1.33 * space_factor, absmax * 1.33)

                    itarget, itarget_end = target_index[target]
                    assert itarget_end == itarget + 1

                    result = target_to_result[target]

                    dtrace = dtraces[itarget]

                    tap_color_annot = (0.35, 0.35, 0.25)
                    tap_color_edge = (0.85, 0.85, 0.80)
                    tap_color_fill = (0.95, 0.95, 0.90)

                    plot_taper(
                        axes2, result.processed_obs.get_xdata(), result.taper,
                        fc=tap_color_fill, ec=tap_color_edge)

                    obs_color = mpl_color('aluminium5')
                    obs_color_light = light(obs_color, 0.5)

                    syn_color = mpl_color('scarletred2')
                    syn_color_light = light(syn_color, 0.5)

                    misfit_color = mpl_color('scarletred2')
                    weight_color = mpl_color('chocolate2')

                    cc_color = mpl_color('aluminium5')

                    if target.misfit_config.domain == 'cc_max_norm':
                        tref = (result.filtered_obs.tmin +
                                result.filtered_obs.tmax) * 0.5

                        plot_dtrace(
                            axes2, dtrace, space, -1., 1.,
                            fc=light(cc_color, 0.5),
                            ec=cc_color)

                        plot_dtrace_vline(
                            axes2, tref, space, color=tap_color_annot)

                    elif target.misfit_config.domain == 'frequency_domain':
                        asmax = amp_spec_maxs[
                            target.normalisation_family, target.path]
                        fmin, fmax = \
                            target.misfit_config.get_full_frequency_range()

                        plot_spectrum(
                            axes2,
                            result.spectrum_syn,
                            result.spectrum_obs,
                            fmin, fmax,
                            space, 0., asmax,
                            syn_color=syn_color,
                            obs_color=obs_color,
                            syn_lw=1.0,
                            obs_lw=0.75,
                            color_vline=tap_color_annot,
                            fontsize=fontsize)

                    else:
                        plot_dtrace(
                            axes2, dtrace, space, 0., 1.,
                            fc=light(misfit_color, 0.3),
                            ec=misfit_color)

                    plot_trace(
                        axes, result.filtered_syn,
                        color=syn_color_light, lw=1.0)

                    plot_trace(
                        axes, result.filtered_obs,
                        color=obs_color_light, lw=0.75)

                    plot_trace(
                        axes, result.processed_syn,
                        color=syn_color, lw=1.0)

                    plot_trace(
                        axes, result.processed_obs,
                        color=obs_color, lw=0.75)

                    # xdata = result.filtered_obs.get_xdata()

                    tmarks = [
                        result.processed_obs.tmin,
                        result.processed_obs.tmax]

                    for tmark in tmarks:
                        axes2.plot(
                            [tmark, tmark], [-0.9, 0.1],
                            color=tap_color_annot)

                    dur = tmarks[1] - tmarks[0]
                    for tmark, text, ha in [
                            (tmarks[0],
                             '$\\,$ ' + meta.str_duration(
                                 tmarks[0] - source.time),
                             'left'),
                            (tmarks[1],
                             '$\\Delta$ ' + meta.str_duration(dur),
                             'right')]:

                        axes2.annotate(
                            text,
                            xy=(tmark, -0.9),
                            xycoords='data',
                            xytext=(
                                fontsize * 0.4 * [-1, 1][ha == 'left'],
                                fontsize * 0.2),
                            textcoords='offset points',
                            ha=ha,
                            va='bottom',
                            color=tap_color_annot,
                            fontsize=fontsize)

                    axes2.set_xlim(tmarks[0] - dur*0.1, tmarks[1] + dur*0.1)

                    # Upper-right indicator bars: relative weight (orange)
                    # and relative misfit contribution (red).
                    rel_w = ws[itarget] / w_max
                    rel_c = gcms[itarget] / gcm_max

                    sw = 0.25
                    sh = 0.1
                    ph = 0.01

                    for (ih, rw, facecolor, edgecolor) in [
                            (0, rel_w, light(weight_color, 0.5),
                             weight_color),
                            (1, rel_c, light(misfit_color, 0.5),
                             misfit_color)]:

                        bar = patches.Rectangle(
                            (1.0 - rw * sw, 1.0 - (ih + 1) * sh + ph),
                            rw * sw,
                            sh - 2 * ph,
                            facecolor=facecolor, edgecolor=edgecolor,
                            zorder=10,
                            transform=axes.transAxes, clip_on=False)

                        axes.add_patch(bar)

                    scale_string = None

                    if target.misfit_config.domain == 'cc_max_norm':
                        scale_string = 'Syn/obs scales differ!'

                    infos = []
                    if scale_string:
                        infos.append(scale_string)

                    if self.nx == 1 and self.ny == 1:
                        infos.append(target.string_id())
                    else:
                        infos.append('.'.join(x for x in target.codes if x))

                    dist = source.distance_to(target)
                    azi = source.azibazi_to(target)[0]
                    infos.append(meta.str_dist(dist))
                    infos.append('%.0f\u00B0' % azi)
                    infos.append('%.3g' % ws[itarget])
                    infos.append('%.3g' % gcms[itarget])

                    axes2.annotate(
                        '\n'.join(infos),
                        xy=(0., 1.),
                        xycoords='axes fraction',
                        xytext=(2., 2.),
                        textcoords='offset points',
                        ha='left',
                        va='top',
                        fontsize=fontsize,
                        fontstyle='normal')

                    # Single-subplot pages can be yielded immediately.
                    if (self.nx == 1 and self.ny == 1):
                        yield item, fig
                        del figures[iyy, ixx]

            if not (self.nx == 1 and self.ny == 1):
                for (iyy, ixx), (_, fig) in figures.items():
                    title = '.'.join(x for x in cg if x)

                    if len(figures) > 1:
                        title += ' (%i/%i, %i/%i)' % (
                            iyy + 1, nyy, ixx + 1, nxx)

                    fig.suptitle(title, fontsize=fontsize_title)

            for item, fig in figures.values():
                yield item, fig
class PsGrnConfigFull(PsGrnConfig):
    '''Full input parameter set for the PSGRN Fortran code.

    :py:meth:`string_for_config` renders the parameters into the text
    format expected by the "psgrn08a" program.
    '''

    earthmodel_1d = gf.meta.Earthmodel1D.T(optional=True)
    psgrn_outdir = String.T(default='psgrn_green/')

    sampling_interval = Float.T(default=1.0)    # 1.0 for equidistant

    sw_source_regime = Int.T(default=1)     # 1-continental, 0-ocean
    sw_gravity = Int.T(default=0)

    accuracy_wavenumber_integration = Float.T(default=0.025)

    displ_filenames = Tuple.T(3, String.T(), default=psgrn_displ_names)
    stress_filenames = Tuple.T(6, String.T(), default=psgrn_stress_names)
    # NOTE(review): the two traits below pass the default positionally,
    # unlike the two above which use the `default=` keyword — presumably
    # equivalent, but confirm against the guts Tuple.T signature.
    tilt_filenames = Tuple.T(3, String.T(), psgrn_tilt_names)
    gravity_filenames = Tuple.T(2, String.T(), psgrn_gravity_names)

    @staticmethod
    def example():
        '''Return an example configuration with a truncated earth model.'''
        conf = PsGrnConfigFull()
        conf.earthmodel_1d = cake.load_model().extract(depth_max=100 * km)
        conf.psgrn_outdir = 'TEST_psgrn_functions/'
        return conf

    def string_for_config(self):
        '''Render this configuration as a psgrn08a input file string.'''
        assert self.earthmodel_1d is not None

        d = self.__dict__.copy()

        model_str, nlines = cake_model_to_config(self.earthmodel_1d)
        d['n_model_lines'] = nlines
        d['model_lines'] = model_str

        # Output directory in the rendered file is always './' — the
        # configured psgrn_outdir is presumably applied by the caller
        # when running psgrn (TODO confirm).
        d['str_psgrn_outdir'] = "'%s'" % './'

        d['str_displ_filenames'] = str_str_vals(self.displ_filenames)
        d['str_stress_filenames'] = str_str_vals(self.stress_filenames)
        d['str_tilt_filenames'] = str_str_vals(self.tilt_filenames)
        d['str_gravity_filenames'] = str_str_vals(self.gravity_filenames)

        d['str_distance_grid'] = self.distance_grid.string_for_config()
        d['str_depth_grid'] = self.depth_grid.string_for_config()

        template = '''# autogenerated PSGRN input by psgrn.py
#=============================================================================
# This is input file of FORTRAN77 program "psgrn08a" for computing responses
# (Green's functions) of a multi-layered viscoelastic halfspace to point
# dislocation sources buried at different depths. All results will be stored in
# the given directory and provide the necessary data base for the program
# "pscmp07a" for computing time-dependent deformation, geoid and gravity
# changes induced by an earthquake with extended fault planes via linear
# superposition. For more details, please read the accompanying READ.ME file.
#
# written by Rongjiang Wang
# GeoForschungsZentrum Potsdam
# e-mail: [email protected]
# phone +49 331 2881209
# fax +49 331 2881204
#
# Last modified: Potsdam, Jan, 2008
#
#################################################################
##                                                             ##
## Cylindrical coordinates (Z positive downwards!) are used.   ##
##                                                             ##
## If not specified otherwise, SI Unit System is used overall! ##
##                                                             ##
#################################################################
#
#------------------------------------------------------------------------------
#
# PARAMETERS FOR SOURCE-OBSERVATION CONFIGURATIONS
# ================================================
# 1. the uniform depth of the observation points [km], switch for oceanic (0)
#    or continental(1) earthquakes;
# 2. number of (horizontal) observation distances (> 1 and <= nrmax defined in
#    psgglob.h), start and end distances [km], ratio (>= 1.0) between max. and
#    min. sampling interval (1.0 for equidistant sampling);
# 3. number of equidistant source depths (>= 1 and <= nzsmax defined in
#    psgglob.h), start and end source depths [km];
#
#    r1,r2 = minimum and maximum horizontal source-observation
#            distances (r2 > r1).
#    zs1,zs2 = minimum and maximum source depths (zs2 >= zs1 > 0).
#
#    Note that the same sampling rates dr_min and dzs will be used later by the
#    program "pscmp07a" for discretizing the finite source planes to a 2D grid
#    of point sources.
#------------------------------------------------------------------------------
 %(observation_depth)e %(sw_source_regime)i
 %(str_distance_grid)s %(sampling_interval)e
 %(str_depth_grid)s
#------------------------------------------------------------------------------
#
# PARAMETERS FOR TIME SAMPLING
# ============================
# 1. number of time samples (<= ntmax def. in psgglob.h) and time window [days].
#
#    Note that nt (> 0) should be power of 2 (the fft-rule). If nt = 1, the
#    coseismic (t = 0) changes will be computed; If nt = 2, the coseismic
#    (t = 0) and steady-state (t -> infinity) changes will be computed;
#    Otherwise, time series for the given time samples will be computed.
#
#------------------------------------------------------------------------------
 %(n_snapshots)i %(max_time)f
#------------------------------------------------------------------------------
#
# PARAMETERS FOR WAVENUMBER INTEGRATION
# =====================================
# 1. relative accuracy of the wave-number integration (suggested: 0.1 - 0.01)
# 2. factor (> 0 and < 1) for including influence of earth's gravity on the
#    deformation field (e.g. 0/1 = without / with 100percent gravity effect).
#------------------------------------------------------------------------------
 %(accuracy_wavenumber_integration)e %(sw_gravity)i
#------------------------------------------------------------------------------
#
# PARAMETERS FOR OUTPUT FILES
# ===========================
#
# 1. output directory
# 2. file names for 3 displacement components (uz, ur, ut)
# 3. file names for 6 stress components (szz, srr, stt, szr, srt, stz)
# 4. file names for radial and tangential tilt components (as measured by a
#    borehole tiltmeter), rigid rotation of horizontal plane, geoid and gravity
#    changes (tr, tt, rot, gd, gr)
#
#    Note that all file or directory names should not be longer than 80
#    characters. Directory and subdirectoy names must be separated and ended
#    by / (unix) or \ (dos)! All file names should be given without extensions
#    that will be appended automatically by ".ep" for the explosion (inflation)
#    source, ".ss" for the strike-slip source, ".ds" for the dip-slip source,
#    and ".cl" for the compensated linear vector dipole source)
#
#------------------------------------------------------------------------------
 %(str_psgrn_outdir)s
 %(str_displ_filenames)s
 %(str_stress_filenames)s
 %(str_tilt_filenames)s
 %(str_gravity_filenames)s
#------------------------------------------------------------------------------
#
# GLOBAL MODEL PARAMETERS
# =======================
# 1. number of data lines of the layered model (<= lmax as defined in psgglob.h)
#
#    The surface and the upper boundary of the half-space as well as the
#    interfaces at which the viscoelastic parameters are continuous, are all
#    defined by a single data line; All other interfaces, at which the
#    viscoelastic parameters are discontinuous, are all defined by two data
#    lines (upper-side and lower-side values). This input format could also be
#    used for a graphic plot of the layered model. Layers which have different
#    parameter values at top and bottom, will be treated as layers with a
#    constant gradient, and will be discretised to a number of homogeneous
#    sublayers. Errors due to the discretisation are limited within about
#    5percent (changeable, see psgglob.h).
#
# 2.... parameters of the multilayered model
#
#    Burgers rheology (a Kelvin-Voigt body and a Maxwell body in series
#    connection) for relaxation of shear modulus is implemented. No relaxation
#    of compressional modulus is considered.
#
#    eta1  = transient viscosity (dashpot of the Kelvin-Voigt body; <= 0 means
#            infinity value)
#    eta2  = steady-state viscosity (dashpot of the Maxwell body; <= 0 means
#            infinity value)
#    alpha = ratio between the effective and the unrelaxed shear modulus
#            = mu1/(mu1+mu2) (> 0 and <= 1)
#
#    Special cases:
#        (1) Elastic: eta1 and eta2 <= 0 (i.e. infinity); alpha meaningless
#        (2) Maxwell body: eta1 <= 0 (i.e. eta1 = infinity)
#                          or alpha = 1 (i.e. mu1 = infinity)
#        (3) Standard-Linear-Solid: eta2 <= 0 (i.e. infinity)
#------------------------------------------------------------------------------
 %(n_model_lines)i |int: no_model_lines;
#------------------------------------------------------------------------------
# no depth[km] vp[km/s] vs[km/s] rho[kg/m^3] eta1[Pa*s] eta2[Pa*s] alpha
#------------------------------------------------------------------------------
%(model_lines)s
#=======================end of input===========================================
'''  # noqa
        return template % d
class CPTLevel(Object): vmin = Float.T() vmax = Float.T() color_min = Tuple.T(3, Float.T()) color_max = Tuple.T(3, Float.T())
class TargetBalancingAnalyserResult(AnalyserResult): weight = Float.T()
class NumeratorCoefficient(Object): i = Int.T(optional=True, xmlstyle='attribute') value = Float.T(xmlstyle='content')
class WaveformGenerator(TargetGenerator):
    '''Generates synthetic waveforms for a set of generated stations.

    Stations come from ``station_generator``, synthetic noise is added by
    ``noise_generator`` and forward modelling uses the Green's function
    store ``store_id``.
    '''

    station_generator = StationGenerator.T(
        default=RandomStationGenerator.D(),
        help='The StationGenerator for creating the stations.')
    noise_generator = WaveformNoiseGenerator.T(
        default=WhiteNoiseGenerator.D(),
        help='Add Synthetic noise on the waveforms.')
    store_id = gf.StringID.T(
        default=DEFAULT_STORE_ID,
        help='The GF store to use for forward-calculations.')
    seismogram_quantity = StringChoice.T(
        choices=['displacement', 'velocity', 'acceleration', 'counts'],
        default='displacement')
    vmin_cut = Float.T(
        default=2000.,
        help='Minimum seismic velocity to consider in the model.')
    vmax_cut = Float.T(
        default=8000.,
        help='Maximum seismic velocity to consider in the model.')
    fmin = Float.T(
        default=0.01,
        help='Minimum frequency/wavelength to resolve in the'
             ' synthetic waveforms.')

    def get_stations(self):
        '''Return the stations provided by the station generator.'''
        return self.station_generator.get_stations()

    def get_targets(self):
        '''Build one :py:class:`gf.Target` per station channel.

        Channels are taken from the station if present; otherwise the
        default components BHZ, BHE, BHN are assumed, with azimuth and dip
        guessed from the channel name.
        '''
        targets = []
        for station in self.get_stations():
            channel_data = []
            channels = station.get_channels()
            if channels:
                for channel in channels:
                    channel_data.append([
                        channel.name, channel.azimuth, channel.dip])
            else:
                for c_name in ['BHZ', 'BHE', 'BHN']:
                    channel_data.append([
                        c_name,
                        model.guess_azimuth_from_name(c_name),
                        model.guess_dip_from_name(c_name)])

            for c_name, c_azi, c_dip in channel_data:
                target = gf.Target(
                    codes=(
                        station.network,
                        station.station,
                        station.location,
                        c_name),
                    quantity='displacement',
                    lat=station.lat,
                    lon=station.lon,
                    depth=station.depth,
                    store_id=self.store_id,
                    optimization='enable',
                    interpolation='nearest_neighbor',
                    azimuth=c_azi,
                    dip=c_dip)

                targets.append(target)

        return targets

    def get_time_range(self, sources):
        '''Return (tmin, tmax) covering all arrivals from ``sources``,
        padded by 10 dominant periods on either side.'''
        dmin, dmax = self.station_generator.get_distance_range(sources)

        # num.float was removed from numpy (1.24); the builtin works the
        # same here.
        times = num.array(
            [source.time for source in sources], dtype=float)

        tmin_events = num.min(times)
        tmax_events = num.max(times)

        tmin = tmin_events + dmin / self.vmax_cut - 10.0 / self.fmin
        tmax = tmax_events + dmax / self.vmin_cut + 10.0 / self.fmin

        return tmin, tmax

    def get_codes_to_deltat(self, engine, sources):
        '''Map target codes to the sampling interval of their GF store.

        ``sources`` is kept for interface compatibility; the mapping does
        not depend on it (the old implementation redundantly recomputed it
        once per source).
        '''
        deltats = {}
        for target in self.get_targets():
            deltats[target.codes] = engine.get_store(
                target.store_id).config.deltat

        return deltats

    def get_useful_time_increment(self, engine, sources):
        '''Return a time window length, rounded to a multiple of the least
        common multiple of all store sampling intervals.'''
        _, dmax = self.station_generator.get_distance_range(sources)
        tinc = dmax / self.vmin_cut + 2.0 / self.fmin

        deltats = set(self.get_codes_to_deltat(engine, sources).values())
        deltat = reduce(util.lcm, deltats)
        tinc = int(round(tinc / deltat)) * deltat
        return tinc

    def get_waveforms(self, engine, sources, tmin=None, tmax=None):
        '''Generate noisy synthetic traces for all targets.

        Traces are initialised with synthetic noise; the forward-modelled
        contribution of each source is added on top.
        '''
        trs = {}
        tmin_all, tmax_all = self.get_time_range(sources)

        tmin = tmin if tmin is not None else tmin_all
        tmax = tmax if tmax is not None else tmax_all
        tts = util.time_to_str

        for nslc, deltat in self.get_codes_to_deltat(engine, sources).items():
            # Snap the window to the trace's sampling raster.
            tr_tmin = int(round(tmin / deltat)) * deltat
            tr_tmax = (int(round(tmax / deltat)) - 1) * deltat
            nsamples = int(round((tr_tmax - tr_tmin) / deltat)) + 1

            tr = trace.Trace(
                *nslc,
                tmin=tr_tmin,
                ydata=num.zeros(nsamples),
                deltat=deltat)

            self.noise_generator.add_noise(tr)

            trs[nslc] = tr

        logger.debug('Calculating waveforms between %s - %s...'
                     % (tts(tmin, format='%Y-%m-%d_%H-%M-%S'),
                        tts(tmax, format='%Y-%m-%d_%H-%M-%S')))

        for source in sources:
            targets = self.get_targets()
            resp = engine.process(source, targets)

            for _, target, tr in resp.iter_results():
                # Bugfix: the old code rebound `resp` (the response object
                # being iterated) to the transfer function here.
                transfer = self.get_transfer_function(target.codes)
                if transfer:
                    tr = tr.transfer(transfer_function=transfer)

                trs[target.codes].add(tr)

        return list(trs.values())

    def get_transfer_function(self, codes):
        '''Return the response converting displacement to the configured
        seismogram quantity, or None for plain displacement.

        :raises NotImplementedError: for the 'counts' quantity.
        '''
        if self.seismogram_quantity == 'displacement':
            return None
        elif self.seismogram_quantity == 'velocity':
            return trace.DifferentiationResponse(1)
        elif self.seismogram_quantity == 'acceleration':
            return trace.DifferentiationResponse(2)
        elif self.seismogram_quantity == 'counts':
            # Bugfix: was `raise NotImplemented()` which raises TypeError
            # because NotImplemented is not an exception type.
            raise NotImplementedError(
                'seismogram_quantity "counts" is not supported')

    def dump_data(self, engine, sources, path,
                  tmin=None, tmax=None, overwrite=False):
        '''Write waveforms and station metadata below ``path``; return the
        list of created directories.'''
        fns = []
        fns.extend(
            self.dump_waveforms(engine, sources, path, tmin, tmax, overwrite))
        fns.extend(self.dump_responses(path))
        return fns

    def dump_waveforms(self, engine, sources, path,
                       tmin=None, tmax=None, overwrite=False):
        '''Write synthetic waveforms windowed into chunks of a useful time
        increment as MiniSEED files below ``path``.'''
        path_waveforms = op.join(path, 'waveforms')
        util.ensuredir(path_waveforms)

        path_traces = op.join(
            path_waveforms,
            '%(wmin_year)s',
            '%(wmin_month)s',
            '%(wmin_day)s',
            'waveform_%(network)s_%(station)s_' +
            '%(location)s_%(channel)s_%(tmin)s_%(tmax)s.mseed')

        tmin_all, tmax_all = self.get_time_range(sources)
        tmin = tmin if tmin is not None else tmin_all
        tmax = tmax if tmax is not None else tmax_all
        tts = util.time_to_str

        tinc = self.get_useful_time_increment(engine, sources)
        tmin = math.floor(tmin / tinc) * tinc
        tmax = math.ceil(tmax / tinc) * tinc

        nwin = int(round((tmax - tmin) / tinc))

        for iwin in range(nwin):
            tmin_win = max(tmin, tmin + iwin * tinc)
            tmax_win = min(tmax, tmin + (iwin + 1) * tinc)

            if tmax_win <= tmin_win:
                continue

            trs = self.get_waveforms(engine, sources, tmin_win, tmax_win)

            try:
                io.save(
                    trs, path_traces,
                    additional=dict(
                        wmin_year=tts(tmin_win, format='%Y'),
                        wmin_month=tts(tmin_win, format='%m'),
                        wmin_day=tts(tmin_win, format='%d'),
                        wmin=tts(tmin_win, format='%Y-%m-%d_%H-%M-%S'),
                        wmax_year=tts(tmax_win, format='%Y'),
                        wmax_month=tts(tmax_win, format='%m'),
                        wmax_day=tts(tmax_win, format='%d'),
                        wmax=tts(tmax_win, format='%Y-%m-%d_%H-%M-%S')),
                    overwrite=overwrite)
            except FileSaveError as e:
                # Best effort: an existing file is not an error here.
                logger.debug('Waveform exists %s' % e)

        return [path_waveforms]

    def dump_responses(self, path):
        '''Write a StationXML file with a flat sensitivity response for all
        station channels; return the metadata directory.'''
        from pyrocko.io import stationxml

        logger.debug('Writing out StationXML...')

        path_responses = op.join(path, 'meta')
        util.ensuredir(path_responses)

        fn_stationxml = op.join(path_responses, 'stations.xml')

        stations = self.station_generator.get_stations()
        sxml = stationxml.FDSNStationXML.from_pyrocko_stations(stations)

        sunit = {
            'displacement': 'M',
            'velocity': 'M/S',
            'acceleration': 'M/S**2',
            'counts': 'COUNTS'}[self.seismogram_quantity]

        response = stationxml.Response(
            instrument_sensitivity=stationxml.Sensitivity(
                value=1.,
                frequency=1.,
                input_units=stationxml.Units(sunit),
                output_units=stationxml.Units('COUNTS')),
            stage_list=[])

        for net, station, channel in sxml.iter_network_station_channels():
            channel.response = response

        sxml.dump_xml(filename=fn_stationxml)

        return [path_responses]

    def add_map_artists(self, engine, sources, automap):
        '''Add the generated stations to an automap instance.'''
        automap.add_stations(self.get_stations())
class WaveformTargetGroup(TargetGroup):
    '''Handles seismogram targets or other targets of dynamic ground motion.
    '''
    distance_min = Float.T(
        optional=True,
        help='excludes targets nearer to source, along a great circle')
    distance_max = Float.T(
        optional=True,
        help='excludes targets farther from source, along a great circle')
    distance_3d_min = Float.T(
        optional=True,
        help='excludes targets nearer from source (direct distance)')
    distance_3d_max = Float.T(
        optional=True,
        help='excludes targets farther from source (direct distance)')
    depth_min = Float.T(
        optional=True,
        help='excludes targets with smaller depths')
    depth_max = Float.T(
        optional=True,
        help='excludes targets with larger depths')
    limit = Int.T(optional=True)
    # NOTE(review): '\[' and '\]' are invalid escape sequences in a plain
    # string literal (SyntaxWarning on recent Pythons); the help text is
    # presumably meant to read "['Z','T']".
    channels = List.T(
        String.T(),
        optional=True,
        help='set channels to include, e.g. \[\'Z\',\'T\'\]')
    misfit_config = WaveformMisfitConfig.T()

    def get_targets(self, ds, event, default_path):
        '''Build one WaveformMisfitTarget per station/channel combination.

        Targets are filtered against the blacklist and the configured
        distance/depth bounds (each exclusion is logged); component
        azimuth/dip is assigned for R, T and Z channels. When ``limit``
        is set, the target set is weeded down to that count.
        '''
        logger.debug('Selecting waveform targets...')
        origin = event
        targets = []

        for st in ds.get_stations():
            for cha in self.channels:
                nslc = st.nsl() + (cha, )

                target = WaveformMisfitTarget(
                    quantity='displacement',
                    codes=nslc,
                    lat=st.lat,
                    lon=st.lon,
                    depth=st.depth,
                    interpolation=self.interpolation,
                    store_id=self.store_id,
                    misfit_config=self.misfit_config,
                    manual_weight=self.weight,
                    normalisation_family=self.normalisation_family,
                    path=self.path or default_path)

                if ds.is_blacklisted((st.nsl() + (cha, ))):
                    log_exclude(target, 'blacklisted')
                    continue

                if self.distance_min is not None and \
                        target.distance_to(origin) < self.distance_min:
                    log_exclude(target, 'distance < distance_min')
                    continue

                if self.distance_max is not None and \
                        target.distance_to(origin) > self.distance_max:
                    log_exclude(target, 'distance > distance_max')
                    continue

                if self.distance_3d_min is not None and \
                        target.distance_3d_to(origin) < self.distance_3d_min:
                    log_exclude(target, 'distance_3d < distance_3d_min')
                    continue

                if self.distance_3d_max is not None and \
                        target.distance_3d_to(origin) > self.distance_3d_max:
                    log_exclude(target, 'distance_3d > distance_3d_max')
                    continue

                if self.depth_min is not None and \
                        target.depth < self.depth_min:
                    log_exclude(target, 'depth < depth_min')
                    continue

                if self.depth_max is not None and \
                        target.depth > self.depth_max:
                    log_exclude(target, 'depth > depth_max')
                    continue

                # Orient horizontal components relative to the
                # source-receiver azimuth; Z points up (dip -90).
                azi, _ = target.azibazi_to(origin)
                if cha == 'R':
                    target.azimuth = azi - 180.
                    target.dip = 0.
                elif cha == 'T':
                    target.azimuth = azi - 90.
                    target.dip = 0.
                elif cha == 'Z':
                    target.azimuth = 0.
                    target.dip = -90.

                target.set_dataset(ds)
                targets.append(target)

        if self.limit:
            return weed(origin, targets, self.limit)[0]
        else:
            return targets
class DirectedSamplerPhase(SamplerPhase):
    '''Sampler phase drawing new models around current highscore models.'''

    scatter_scale = Float.T(
        optional=True,
        help='Scales search radius around the current `highscore` models')
    scatter_scale_begin = Float.T(
        optional=True,
        help='Scaling factor at beginning of the phase.')
    scatter_scale_end = Float.T(
        optional=True,
        help='Scaling factor at the end of the directed phase.')
    starting_point = SamplerStartingPointChoice.T(
        default='excentricity_compensated',
        help='Tunes to the center value of the sampler distribution.'
             'May increase the likelihood to draw a highscore member model'
             ' off-center to the mean value')
    sampler_distribution = SamplerDistributionChoice.T(
        default='normal',
        help='Distribution new models are drawn from.')
    standard_deviation_estimator = StandardDeviationEstimatorChoice.T(
        default='median_density_single_chain')
    ntries_sample_limit = Int.T(default=1000)

    def get_scatter_scale_factor(self, iiter):
        '''Return the scatter scale for iteration ``iiter``.

        If begin/end scales are configured, decay exponentially from
        ``scatter_scale_begin`` (iteration 0) to ``scatter_scale_end``
        (last iteration); otherwise return the constant ``scatter_scale``
        (default 1.0).
        '''
        s = self.scatter_scale
        sa = self.scatter_scale_begin
        sb = self.scatter_scale_end

        # Constant scale and begin/end scales are mutually exclusive.
        assert s is None or (sa is None and sb is None)

        if sa != sb:
            tb = float(self.niterations - 1)
            tau = tb / (math.log(sa) - math.log(sb))
            t0 = math.log(sa) * tau
            t = float(iiter)
            return num.exp(-(t - t0) / tau)

        else:
            return s or 1.0

    def get_raw_sample(self, problem, iiter, chains):
        '''Draw a new candidate model around a highscore chain member.

        The base model is chosen according to ``starting_point``; the
        candidate is drawn from ``sampler_distribution`` scaled by the
        per-parameter standard deviation and the iteration-dependent
        scatter scale. Falls back to uniform draws when no in-bounds
        candidate is found within ``ntries_sample_limit`` tries.
        '''
        rstate = self.get_rstate()
        factor = self.get_scatter_scale_factor(iiter)
        npar = problem.nparameters
        pnames = problem.parameter_names
        xbounds = problem.get_parameter_bounds()

        ilink_choice = None
        # Work on the chain with the fewest accepted models so far.
        ichain_choice = num.argmin(chains.accept_sum)

        if self.starting_point == 'excentricity_compensated':
            models = chains.models(ichain_choice)
            ilink_choice = excentricity_compensated_choice(
                models,
                chains.standard_deviation_models(
                    ichain_choice, self.standard_deviation_estimator),
                2., rstate)

            xchoice = chains.model(ichain_choice, ilink_choice)

        elif self.starting_point == 'random':
            ilink_choice = rstate.randint(0, chains.nlinks)
            xchoice = chains.model(ichain_choice, ilink_choice)

        elif self.starting_point == 'mean':
            xchoice = chains.mean_model(ichain_choice)

        else:
            assert False, 'invalid starting_point choice: %s' % (
                self.starting_point)

        ntries_sample = 0
        if self.sampler_distribution == 'normal':
            # Bugfix: num.float was removed from numpy (1.24); use the
            # builtin dtype.
            x = num.zeros(npar, dtype=float)
            sx = chains.standard_deviation_models(
                ichain_choice, self.standard_deviation_estimator)

            for ipar in range(npar):
                ntries = 0
                while True:
                    if sx[ipar] > 0.:
                        v = rstate.normal(
                            xchoice[ipar],
                            factor * sx[ipar])
                    else:
                        v = xchoice[ipar]

                    if xbounds[ipar, 0] <= v and \
                            v <= xbounds[ipar, 1]:
                        break

                    if ntries > self.ntries_sample_limit:
                        logger.warning(
                            'failed to produce a suitable '
                            'candidate sample from normal '
                            'distribution for parameter \'%s\''
                            ' - drawing from uniform instead.' %
                            pnames[ipar])
                        v = rstate.uniform(xbounds[ipar, 0],
                                           xbounds[ipar, 1])
                        break

                    ntries += 1

                x[ipar] = v

        elif self.sampler_distribution == 'multivariate_normal':
            # Bugfix: num.int was removed from numpy (1.24).
            ok_mask_sum = num.zeros(npar, dtype=int)
            while True:
                ntries_sample += 1
                xcandi = rstate.multivariate_normal(
                    xchoice, factor**2 * chains.cov(ichain_choice))

                ok_mask = num.logical_and(
                    xbounds[:, 0] <= xcandi, xcandi <= xbounds[:, 1])

                if num.all(ok_mask):
                    break

                ok_mask_sum += ok_mask

                if ntries_sample > self.ntries_sample_limit:
                    logger.warning(
                        'failed to produce a suitable candidate '
                        'sample from multivariate normal '
                        'distribution, (%s) - drawing from uniform instead' %
                        ', '.join('%s:%i' % xx for xx in
                                  zip(pnames, ok_mask_sum)))
                    xbounds = problem.get_parameter_bounds()
                    xcandi = problem.random_uniform(xbounds, rstate)
                    break

            x = xcandi

        imodel_base = None
        if ilink_choice is not None:
            imodel_base = chains.imodel(ichain_choice, ilink_choice)

        return Sample(
            model=x,
            ichain_base=ichain_choice,
            ilink_base=ilink_choice,
            imodel_base=imodel_base)
class GNSSTargetMisfitPlot(PlotConfig):
    '''
    Maps showing horizontal surface displacements of a GNSS campaign and model
    '''

    name = 'gnss'

    size_cm = Tuple.T(
        2, Float.T(),
        default=(30., 30.),
        help='width and length of the figure in cm')
    show_topo = Bool.T(
        default=False,
        help='show topography')
    show_grid = Bool.T(
        default=True,
        help='show the lat/lon grid')
    show_rivers = Bool.T(
        default=True,
        help='show rivers on the map')
    radius = Float.T(
        optional=True,
        help='radius of the map around campaign center lat/lon')

    def make(self, environ):
        '''
        Create the GNSS fits plot group and register it with the
        plot collection manager.
        '''
        cm = environ.get_plot_collection_manager()
        history = environ.get_history(subset='harvest')
        optimiser = environ.get_optimiser()
        ds = environ.get_dataset()

        environ.setup_modelling()

        cm.create_group_automap(
            self,
            self.draw_gnss_fits(ds, history, optimiser),
            title=u'GNSS Displacements',
            section='fits',
            feather_icon='map',
            description=u'''
Maps showing station positions and station names of the GNSS targets.

Arrows show the observed surface displacements (black arrows) and synthetic
displacements (red arrows). The top plot shows the horizontal displacements
and the bottom plot the vertical displacements. The grey filled box shows the
surface projection of the modelled source, with the thick-lined edge marking
the upper fault edge.
''')

    def draw_gnss_fits(self, ds, history, optimiser, vertical=False):
        '''
        Yield ``(PlotItem, Map)`` pairs, one map per GNSS campaign, first
        for horizontal and then for vertical displacements.
        '''
        problem = history.problem

        gnss_targets = problem.gnss_targets
        for target in gnss_targets:
            target.set_dataset(ds)

        xbest = history.get_best_model()
        source = history.get_best_source()

        results = problem.evaluate(
            xbest, result_mode='full', targets=gnss_targets)

        def plot_gnss(gnss_target, result, ifig, vertical=False):
            # Build one automap for a single campaign, with observed
            # (black) and modelled (red) displacement vectors.
            campaign = gnss_target.campaign
            item = PlotItem(
                name='fig_%i' % ifig,
                attributes={'targets': gnss_target.path},
                title=u'Static GNSS Surface Displacements - Campaign %s'
                      % campaign.name,
                description=u'''
Static surface displacement from GNSS campaign %s (black vectors) and
displacements derived from best model (red).
''' % campaign.name)

            event = source.pyrocko_event()
            locations = campaign.stations + [event]

            lat, lon = od.geographic_midpoint_locations(locations)

            # Fix: assign radius up-front so a user-supplied value does not
            # leave the variable unbound (previously raised NameError below).
            radius = self.radius
            if radius is None:
                coords = num.array(
                    [loc.effective_latlon for loc in locations])
                # Fix: measure the distance from the midpoint to every
                # station (previously all latitudes were collapsed with
                # .max() before the distance computation).
                radius = od.distance_accurate50m_numpy(
                    lat[num.newaxis], lon[num.newaxis],
                    coords[:, 0], coords[:, 1]).max()
                radius *= 1.1

            if radius < 30. * km:
                logger.warning(
                    'Radius of GNSS campaign %s too small, defaulting'
                    ' to 30 km' % campaign.name)
                radius = 30 * km

            model_camp = gnss.GNSSCampaign(
                stations=copy.deepcopy(campaign.stations),
                name='grond model')

            for ista, sta in enumerate(model_camp.stations):
                sta.north.shift = result.statics_syn['displacement.n'][ista]
                sta.north.sigma = 0.

                sta.east.shift = result.statics_syn['displacement.e'][ista]
                sta.east.sigma = 0.

                if sta.up:
                    # Up component is the negated downward displacement.
                    sta.up.shift = \
                        -result.statics_syn['displacement.d'][ista]
                    sta.up.sigma = 0.

            m = automap.Map(
                width=self.size_cm[0],
                height=self.size_cm[1],
                lat=lat,
                lon=lon,
                radius=radius,
                show_topo=self.show_topo,
                show_grid=self.show_grid,
                show_rivers=self.show_rivers,
                color_wet=(216, 242, 254),
                color_dry=(238, 236, 230))

            # Common arrow scale over observed and modelled stations.
            all_stations = campaign.stations + model_camp.stations
            offset_scale = num.zeros(len(all_stations))

            for ista, sta in enumerate(all_stations):
                for comp in sta.components.values():
                    offset_scale[ista] += comp.shift

            offset_scale = num.sqrt(offset_scale**2).max()

            m.add_gnss_campaign(
                campaign,
                psxy_style={
                    'G': 'black',
                    'W': '0.8p,black',
                },
                offset_scale=offset_scale,
                vertical=vertical)

            m.add_gnss_campaign(
                model_camp,
                psxy_style={
                    'G': 'red',
                    'W': '0.8p,red',
                    't': 30,
                },
                offset_scale=offset_scale,
                vertical=vertical,
                labels=False)

            if isinstance(problem, CMTProblem) \
                    or isinstance(problem, VLVDProblem):
                from pyrocko import moment_tensor
                from pyrocko.plot import gmtpy

                mt = event.moment_tensor.m_up_south_east()
                ev_lat, ev_lon = event.effective_latlon

                # Deviatoric part: subtract the isotropic component.
                # (num.matrix is deprecated - use a plain identity array.)
                xx = num.trace(mt) / 3.
                mc = mt - xx * num.eye(3)
                # Scale to a fixed reference magnitude for plotting.
                mc = mc / event.moment_tensor.scalar_moment() * \
                    moment_tensor.magnitude_to_moment(5.0)
                m6 = tuple(moment_tensor.to6(mc))
                symbol_size = 20.
                m.gmt.psmeca(
                    S='%s%g' % ('d', symbol_size / gmtpy.cm),
                    in_rows=[(ev_lon, ev_lat, 10) + m6 + (1, 0, 0)],
                    M=True,
                    *m.jxyr)

            elif isinstance(problem, RectangularProblem):
                m.gmt.psxy(
                    in_rows=source.outline(cs='lonlat'),
                    L='+p2p,black',
                    W='1p,black',
                    G='black',
                    t=60,
                    *m.jxyr)

            elif isinstance(problem, VolumePointProblem):
                ev_lat, ev_lon = event.effective_latlon
                dV = abs(source.volume_change)
                # Radius of a sphere with the modelled volume change.
                sphere_radius = num.cbrt(dV / (4. / 3. * num.pi))

                volcanic_circle = [ev_lon, ev_lat, '%fe' % sphere_radius]
                m.gmt.psxy(
                    S='E-',
                    in_rows=[volcanic_circle],
                    W='1p,black',
                    G='orange3',
                    *m.jxyr)

            return (item, m)

        ifig = 0
        for vertical in (False, True):
            for gnss_target, result in zip(problem.gnss_targets, results):
                yield plot_gnss(gnss_target, result, ifig, vertical)
                ifig += 1
class HighScoreOptimiser(Optimiser):
    '''Monte-Carlo-based directed search optimisation with bootstrap.'''

    sampler_phases = List.T(SamplerPhase.T())
    chain_length_factor = Float.T(default=8.)
    nbootstrap = Int.T(default=100)
    bootstrap_type = BootstrapTypeChoice.T(default='bayesian')
    bootstrap_seed = Int.T(default=23)

    # Unicode block characters used for the terminal spark plots.
    SPARKS = u'\u2581\u2582\u2583\u2584\u2585\u2586\u2587\u2588'
    # Number of most recent iterations averaged for the acceptance rate.
    ACCEPTANCE_AVG_LEN = 100

    def __init__(self, **kwargs):
        Optimiser.__init__(self, **kwargs)
        self._bootstrap_weights = None
        self._bootstrap_residuals = None
        self._correlated_weights = None
        self._status_chains = None
        self._rstate_bootstrap = None
        # Timestamp of the last progress log message; initialised here so
        # log_progress() is safe even before optimise() has run.
        self._tlog_last = 0

    def get_rstate_bootstrap(self):
        '''Lazily create the RNG used for all bootstrap draws (seeded).'''
        if self._rstate_bootstrap is None:
            self._rstate_bootstrap = num.random.RandomState(
                self.bootstrap_seed)

        return self._rstate_bootstrap

    def init_bootstraps(self, problem):
        '''Initialise bootstrap weights and residuals for ``problem``.'''
        self.init_bootstrap_weights(problem)
        self.init_bootstrap_residuals(problem)

    def init_bootstrap_weights(self, problem):
        '''
        Assign Bayesian bootstrap weights to all targets which support
        weight bootstrapping; other targets get unit weights.
        '''
        logger.info('Initializing Bayesian bootstrap weights.')

        nmisfits_w = sum(
            t.nmisfits for t in problem.targets
            if t.can_bootstrap_weights)

        ws = make_bayesian_weights(
            self.nbootstrap,
            nmisfits=nmisfits_w,
            rstate=self.get_rstate_bootstrap())

        imf = 0
        for t in problem.targets:
            if t.can_bootstrap_weights:
                t.set_bootstrap_weights(ws[:, imf:imf + t.nmisfits])
                imf += t.nmisfits
            else:
                t.set_bootstrap_weights(
                    num.ones((self.nbootstrap, t.nmisfits)))

    def init_bootstrap_residuals(self, problem):
        '''
        Let targets which support residual bootstrapping draw their
        synthetic residuals; other targets get zero residuals.
        '''
        logger.info('Initializing Bayesian bootstrap residuals.')

        for t in problem.targets:
            if t.can_bootstrap_residuals:
                t.init_bootstrap_residuals(
                    self.nbootstrap, rstate=self.get_rstate_bootstrap(),
                    nthreads=self._nthreads)
            else:
                t.set_bootstrap_residuals(
                    num.zeros((self.nbootstrap, t.nmisfits)))

    def get_bootstrap_weights(self, problem):
        '''
        Return the cached (nbootstrap+1, nmisfits) weight matrix; row 0
        holds unit weights for the global (non-bootstrap) chain.
        '''
        if self._bootstrap_weights is None:
            try:
                problem.targets[0].get_bootstrap_weights()
            except Exception:
                # Targets have not been initialised yet - do it now.
                self.init_bootstraps(problem)

            bootstrap_weights = num.hstack(
                [t.get_bootstrap_weights()
                 for t in problem.targets])

            self._bootstrap_weights = num.vstack((
                num.ones((1, problem.nmisfits)),
                bootstrap_weights))

        return self._bootstrap_weights

    def get_bootstrap_residuals(self, problem):
        '''
        Return the cached (nbootstrap+1, nmisfits) residual matrix; row 0
        holds zero residuals for the global (non-bootstrap) chain.
        '''
        if self._bootstrap_residuals is None:
            try:
                problem.targets[0].get_bootstrap_residuals()
            except Exception:
                # Targets have not been initialised yet - do it now.
                self.init_bootstraps(problem)

            bootstrap_residuals = num.hstack(
                [t.get_bootstrap_residuals()
                 for t in problem.targets])

            self._bootstrap_residuals = num.vstack((
                num.zeros((1, problem.nmisfits)),
                bootstrap_residuals))

        return self._bootstrap_residuals

    def get_correlated_weights(self, problem):
        '''
        Return a dict mapping each target's first misfit index to its
        correlated weight matrix (targets without one are skipped).
        '''
        if self._correlated_weights is None:
            corr = dict()
            # NOTE: dtype uses builtin int - the num.int alias was removed
            # in NumPy 1.24.
            misfit_idx = num.cumsum(
                [0.] + [t.nmisfits for t in problem.targets], dtype=int)

            for it, target in enumerate(problem.targets):
                weights = target.get_correlated_weights(
                    nthreads=self._nthreads)
                if weights is None:
                    continue
                corr[misfit_idx[it]] = weights

            self._correlated_weights = corr

        return self._correlated_weights

    @property
    def nchains(self):
        # One extra chain for the global (unweighted) misfit.
        return self.nbootstrap + 1

    def chains(self, problem, history):
        '''Create the highscore chains bookkeeping object.'''
        nlinks_cap = int(round(
            self.chain_length_factor * problem.nparameters + 1))

        return Chains(
            problem, history,
            nchains=self.nchains, nlinks_cap=nlinks_cap)

    def get_sampler_phase(self, iiter):
        '''
        Map the global iteration index ``iiter`` to
        ``(iphase, phase, iiter_phase)``.
        '''
        niter = 0
        for iphase, phase in enumerate(self.sampler_phases):
            if iiter < niter + phase.niterations:
                return iphase, phase, iiter - niter

            niter += phase.niterations

        assert False, 'sample out of bounds'

    def log_progress(self, problem, iiter, niter, phase, iiter_phase):
        '''
        Log progress, rate-limited to once per 10 s, but always at the
        first and last iteration of a phase.
        '''
        t = time.time()
        if self._tlog_last < t - 10. \
                or iiter_phase == 0 \
                or iiter_phase == phase.niterations - 1:

            logger.info(
                '%s at %i/%i (%s, %i/%i)' % (
                    problem.name,
                    iiter + 1, niter,
                    phase.__class__.__name__,
                    iiter_phase, phase.niterations))

            self._tlog_last = t

    def optimise(self, problem, rundir=None):
        '''
        Run the full optimisation for ``problem``, appending every sampled
        model and its (bootstrap) misfits to the run's model history.

        :param problem: problem definition to optimise
        :param rundir: optional run directory; when given, the optimiser
            configuration and the model history are persisted there
        :raises BadProblem: on inconsistent data availability between
            iterations or when all target misfits are NaN
        '''
        if rundir is not None:
            self.dump(filename=op.join(rundir, 'optimiser.yaml'))

        history = ModelHistory(
            problem, nchains=self.nchains, path=rundir, mode='w')

        chains = self.chains(problem, history)

        niter = self.niterations
        isbad_mask = None
        self._tlog_last = 0
        for iiter in range(niter):
            iphase, phase, iiter_phase = self.get_sampler_phase(iiter)
            self.log_progress(problem, iiter, niter, phase, iiter_phase)

            sample = phase.get_sample(problem, iiter_phase, chains)
            sample.iphase = iphase

            # Skip targets which produced NaN misfits in earlier iterations.
            if isbad_mask is not None and num.any(isbad_mask):
                isok_mask = num.logical_not(isbad_mask)
            else:
                isok_mask = None

            misfits = problem.misfits(
                sample.model, mask=isok_mask, nthreads=self._nthreads)

            bootstrap_misfits = problem.combine_misfits(
                misfits,
                extra_weights=self.get_bootstrap_weights(problem),
                extra_residuals=self.get_bootstrap_residuals(problem),
                extra_correlated_weights=self.get_correlated_weights(
                    problem))

            isbad_mask_new = num.isnan(misfits[:, 0])
            if isbad_mask is not None and num.any(
                    isbad_mask != isbad_mask_new):

                errmess = [
                    'problem %s: inconsistency in data availability'
                    ' at iteration %i' % (problem.name, iiter)]

                for target, isbad_new, isbad in zip(
                        problem.targets, isbad_mask_new, isbad_mask):

                    if isbad_new != isbad:
                        errmess.append('  %s, %s -> %s' % (
                            target.string_id(), isbad, isbad_new))

                raise BadProblem('\n'.join(errmess))

            isbad_mask = isbad_mask_new

            if num.all(isbad_mask):
                raise BadProblem(
                    'Problem %s: all target misfit values are NaN.'
                    % problem.name)

            history.append(
                sample.model, misfits,
                bootstrap_misfits,
                sample.pack_context())

    @property
    def niterations(self):
        # Total iteration count over all configured sampler phases.
        return sum([ph.niterations for ph in self.sampler_phases])

    def get_status(self, history):
        '''
        Build an :py:class:`OptimiserStatus` snapshot (per-parameter
        statistics and terminal spark plots) for the current history.
        '''
        if self._status_chains is None:
            self._status_chains = self.chains(history.problem, history)

        self._status_chains.goto(history.nmodels)

        chains = self._status_chains
        problem = history.problem

        row_names = [p.name_nogroups for p in problem.parameters]
        row_names.append('Misfit')

        def column_array(data):
            # Pad per-parameter data with NaN up to the number of rows.
            arr = num.full(len(row_names), fill_value=num.nan)
            arr[:data.size] = data
            return arr

        phase = self.get_sampler_phase(history.nmodels - 1)[1]

        bs_mean = column_array(chains.mean_model(ichain=None))
        bs_std = column_array(chains.standard_deviation_models(
            ichain=None, estimator='standard_deviation_all_chains'))

        glob_mean = column_array(chains.mean_model(ichain=0))
        glob_mean[-1] = num.mean(chains.misfits(ichain=0))

        glob_std = column_array(chains.standard_deviation_models(
            ichain=0, estimator='standard_deviation_single_chain'))
        glob_std[-1] = num.std(chains.misfits(ichain=0))

        glob_best = column_array(chains.best_model(ichain=0))
        glob_best[-1] = chains.best_model_misfit()

        glob_misfits = chains.misfits(ichain=0)

        acceptance_latest = chains.acceptance_history[
            :, -min(chains.acceptance_history.shape[1], self.ACCEPTANCE_AVG_LEN):]  # noqa
        acceptance_avg = acceptance_latest.mean(axis=1)

        def spark_plot(data, bins):
            # Render a histogram as a line of unicode block characters.
            hist, _ = num.histogram(data, bins)
            hist_max = num.max(hist)
            if hist_max == 0.0:
                hist_max = 1.0
            hist = hist / hist_max
            vec = num.digitize(hist, num.linspace(0., 1., len(self.SPARKS)))
            return ''.join([self.SPARKS[b - 1] for b in vec])

        return OptimiserStatus(
            row_names=row_names,
            column_data=OrderedDict(
                zip(['BS mean', 'BS std',
                     'Glob mean', 'Glob std', 'Glob best'],
                    [bs_mean, bs_std,
                     glob_mean, glob_std, glob_best])),
            extra_header=  # noqa
                u'Optimiser phase: {phase}, exploring {nchains} BS chains\n'  # noqa
                u'Global chain misfit distribution: \u2080{mf_dist}\xb9\n'
                u'Acceptance rate distribution: \u2080{acceptance}'
                u'\u2081\u2080\u2080\ufe6a (Median {acceptance_med:.1f}%)'
                .format(
                    phase=phase.__class__.__name__,
                    nchains=chains.nchains,
                    mf_dist=spark_plot(
                        glob_misfits, num.linspace(0., 1., 25)),
                    acceptance=spark_plot(
                        acceptance_avg,
                        num.linspace(0., 1., 25)),
                    acceptance_med=num.median(acceptance_avg) * 100.))

    def get_movie_maker(
            self, problem, history, xpar_name, ypar_name, movie_filename):
        '''Create the animated optimiser plot for two chosen parameters.'''
        from . import plot
        return plot.HighScoreOptimiserPlot(
            self, problem, history, xpar_name, ypar_name, movie_filename)

    @classmethod
    def get_plot_classes(cls):
        '''Extend the base optimiser plots with the acceptance plot.'''
        from .plot import HighScoreAcceptancePlot
        plots = Optimiser.get_plot_classes()
        plots.append(HighScoreAcceptancePlot)
        return plots