def __call__(self, sources, sites=None):
    """
    Filter the given sources against the site collection, yielding
    pairs (source, filtered_sites) for each source close enough to
    at least one site. Sources with no close sites are dropped.

    :param sources: an iterable of seismic sources
    :param sites: a (complete or filtered) site collection; defaults
        to ``self.sitecol``
    """
    if sites is None:
        sites = self.sitecol
    if self.sitecol is None:  # no site collection at all: do not filter
        for source in sources:
            yield source, sites
        return
    for src in sources:
        if not self.integration_distance:  # filtering disabled
            yield src, sites
        elif self.use_rtree:  # Rtree filtering, used in the controller
            bbox = self.get_affected_box(src)
            sids = numpy.array(sorted(self.index.intersection(bbox)))
            if len(set(sids)) < len(sids):
                # MS: sanity check against rtree bugs; what happened to me
                # is that by following the advice in
                # http://toblerity.org/rtree/performance.html#use-stream-loading
                # self.index.intersection(box) started reporting duplicate
                # and wrong sids!
                raise ValueError('sids=%s' % sids)
            if len(sids):
                src.nsites = len(sids)
                yield src, FilteredSiteCollection(sids, sites.complete)
        else:  # normal filtering, used in the workers
            maxdist = self.integration_distance(src.tectonic_region_type)
            with context(src):
                close = src.filter_sites_by_distance_to_source(maxdist, sites)
                if close is not None:
                    src.nsites = len(close)
                    yield src, close
def __call__(self, sources, sites=None):
    """
    Yield pairs (source, filtered_sites) for the sources affecting the
    given site collection. Rtree filtering is attempted first when
    enabled; otherwise distance-based filtering is applied, where the
    integration distance may be a dictionary keyed on the tectonic
    region type or a plain scalar.

    :param sources: an iterable of seismic sources
    :param sites: a site collection; defaults to ``self.sitecol``
    """
    if sites is None:
        sites = self.sitecol
    if self.sitecol is None:  # nothing to filter against
        for src in sources:
            yield src, sites
        return
    for src in sources:
        if self.use_rtree:  # Rtree filtering
            bbox = self.get_affected_box(src)
            site_ids = numpy.array(sorted(self.index.intersection(bbox)))
            if len(site_ids):
                src.nsites = len(site_ids)
                yield src, FilteredSiteCollection(site_ids, sites.complete)
        elif not self.integration_distance:  # filtering disabled
            yield src, sites
        else:  # normal filtering
            try:
                # integration_distance as a dictionary by TRT
                maxdist = self.integration_distance[src.tectonic_region_type]
            except TypeError:  # passed a scalar, not a dictionary
                maxdist = self.integration_distance
            with context(src):
                close = src.filter_sites_by_distance_to_source(maxdist, sites)
                if close is not None:
                    src.nsites = len(close)
                    yield src, close
def get_gmfs_by_imt(fname, sitecol, imts):
    """
    Return a list of dictionaries with a ground motion field per IMT,
    one dictionary per rupture, sorted by rupture tag.

    :param fname: path to the CSV file (tag, space-separated site
        indices, then one space-separated column of GMVs per IMT)
    :param sitecol: the underlying (complete) site collection
    :param imts: the IMTs corresponding to the columns in the CSV file
    """
    dicts = []
    with open(fname) as f:
        for row in csv.reader(f):
            # list comprehensions, not map(): under Python 3 map() returns
            # an iterator, so numpy.array(map(...)) would build a useless
            # 0-d object array and the indices iterator could only be
            # consumed once
            indices = [int(s) for s in row[1].split()]
            sc = FilteredSiteCollection(indices, sitecol)
            dic = AccumDict()
            for imt, col in zip(imts, row[2:]):
                gmf = numpy.array([float(v) for v in col.split()])
                # expand the GMF to the full site collection, with 0
                # on the sites not in `indices`
                dic[imt] = sc.expand(gmf, 0)
            dic.tag = row[0]
            dicts.append(dic)
    return sorted(dicts, key=lambda dic: dic.tag)
def __iter__(self):
    """
    Yield a single :class:`GmfSet` collecting one
    :class:`GroundMotionField` per (IMT, rupture) pair.
    """
    fields = []
    for imt_str in self.imts:
        gmfs = self.gmfs_by_imt[imt_str]
        imt, sa_period, sa_damping = from_string(imt_str)
        for rupture, gmf in zip(self.ruptures, gmfs):
            if hasattr(rupture, 'indices'):  # event based
                if rupture.indices is None:
                    indices = list(range(len(self.sitecol)))
                else:
                    indices = rupture.indices
                sites = FilteredSiteCollection(indices, self.sitecol)
            else:  # scenario
                sites = self.sitecol
            # lazily build the per-site nodes for this field
            nodes = (GroundMotionFieldNode(gmv, site.location)
                     for site, gmv in zip(sites, gmf))
            fields.append(GroundMotionField(
                imt, sa_period, sa_damping, rupture.tag, nodes))
    yield GmfSet(fields, self.investigation_time)
def __call__(self, sources, sites=None):
    """
    Yield pairs (source, filtered_sites) for the sources close enough
    to the given sites. Uses Rtree filtering when the `rtree` module
    is available, otherwise falls back to distance filtering with the
    integration distance for the source tectonic region type.

    :param sources: an iterable of seismic sources
    :param sites: a site collection; defaults to ``self.sitecol``
    """
    if sites is None:
        sites = self.sitecol
    for src in sources:
        if rtree:  # Rtree filtering
            bbox = self.get_affected_box(src)
            site_ids = numpy.array(sorted(self.index.intersection(bbox)))
            if len(site_ids):
                src.nsites = len(site_ids)
                yield src, FilteredSiteCollection(site_ids, sites.complete)
        else:  # normal filtering
            with context(src):
                close = src.filter_sites_by_distance_to_source(
                    self.integration_distance[src.tectonic_region_type],
                    sites)
                if close is not None:
                    src.nsites = len(close)
                    yield src, close