Example #1
# Assumed imports (a guess at the old OpenQuake engine layout; the
# snippet's original module header is not shown):
from openquake.engine.db import models
from openquake.hazardlib.geo import Point
from openquake.hazardlib.site import Site


def make_site_coll(lon, lat, n):
    assert n <= 1000
    sites = []
    for i in range(n):
        # sites are laid out westwards, 0.001 degrees of longitude apart
        site = Site(Point(lon - float(i) / 1000, lat),
                    800., 'measured', 50., 2.5, i)
        sites.append(site)
    return models.SiteCollection(sites)
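A minimal usage sketch under the assumptions above (the import paths and the
Site signature are guesses, since the original context is not shown):

# Hypothetical usage: ten sites spaced 0.001 degrees apart, west of (0, 0)
site_coll = make_site_coll(0.0, 0.0, 10)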
Example #2
    def __init__(self, imt, site_collection, sites_assets, truncation_level,
                 gsims, correlation_model):
        """
        :param str imt:
            the intensity measure type considered
        :param site_collection:
            a :class:`openquake.engine.db.models.SiteCollection` instance
            holding all the sites of the hazard calculation from which the
            ruptures have been computed
        :param sites_assets:
            an iterator over tuples of the form (site_id, assets), where
            site_id is the id of a
            :class:`openquake.engine.db.models.HazardSite` object and
            assets is a list of asset objects associated with that site
        :param float truncation_level:
            the truncation level of the normal distribution used to generate
            random numbers. If ``None``, a non-truncated normal is used
        :param gsims:
            a dictionary of the GSIMs considered, keyed by the tectonic
            region type
        :param correlation_model:
            Instance of correlation model object. See
            :mod:`openquake.hazardlib.correlation`. Can be ``None``, in which
            case non-correlated ground motion fields are calculated.
            Correlation model is not used if ``truncation_level`` is zero.
        """

        self.imt = general.imt_to_hazardlib(imt)
        self.site_collection = site_collection
        self.sites_assets = sites_assets
        self.truncation_level = truncation_level
        self.sites = models.SiteCollection([
            self.site_collection.get_by_id(site_id)
            for site_id, _assets in self.sites_assets
        ])

        all_site_ids = [s.id for s in self.site_collection]
        self.sites_dict = dict(
            (all_site_id, i) for i, all_site_id in enumerate(all_site_ids))

        self.generate_epsilons = truncation_level != 0
        self.correlation_matrix = None
        if self.generate_epsilons:
            if truncation_level is None:
                self.distribution = scipy.stats.norm()
            elif truncation_level > 0:
                self.distribution = scipy.stats.truncnorm(
                    -truncation_level, truncation_level)

            if correlation_model is not None:
                c = correlation_model.get_lower_triangle_correlation_matrix(
                    site_collection, self.imt)
                self.correlation_matrix = c

        self.gsims = gsims
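The epsilon-distribution branching in this constructor can be read in
isolation; below is a minimal, self-contained sketch of the same three-way
logic (the helper name pick_distribution is made up for illustration):

import scipy.stats

def pick_distribution(truncation_level):
    # Mirrors the constructor: 0 -> no epsilons generated at all,
    # None -> full (non-truncated) normal, positive -> truncated normal
    if truncation_level == 0:
        return None
    if truncation_level is None:
        return scipy.stats.norm()
    return scipy.stats.truncnorm(-truncation_level, truncation_level)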
Example #3
    def setUp(self):
        self.imt = "SA(0.15)"

        points = [Point(0, 0), Point(10, 10), Point(20, 20)]
        sites = [Site(point, 10, False, 2, 3, id=i)
                 for i, point in enumerate(points)]
        self.sites = models.SiteCollection(sites)

        assets = [mock.Mock() for _ in range(5)]  # five distinct mock assets
        self.sites_assets = ((0, assets[0:1]),
                             (1, assets[1:]),
                             (2, assets[2:]))

        # a MagicMock is needed so that self.gsims[trt] (i.e. __getitem__)
        # works: dunder methods set on a plain Mock instance are ignored
        self.gsims = mock.MagicMock()
        self.cormo = JB2009CorrelationModel(vs30_clustering=False)
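For reference, the sites_assets fixture built above yields (site_id, assets)
pairs in exactly the shape the constructor of Example #2 expects; a
standalone sketch of that shape:

import mock  # on Python 3: from unittest import mock

assets = [mock.Mock() for _ in range(5)]
sites_assets = ((0, assets[0:1]), (1, assets[1:]), (2, assets[2:]))
for site_id, site_assets in sites_assets:
    print(site_id, len(site_assets))  # prints: 0 1, then 1 4, then 2 3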
Example #4
    def compute_gmf_arg_gen(self):
        """
        Argument generator for the task compute_gmf. For each SES, yields
        tuples of the form (job_id, params, imt, gsims, ses, site_coll,
        rupture_ids, rupture_seeds).
        """
        rnd = random.Random()
        rnd.seed(self.hc.random_seed)
        site_coll = self.hc.site_collection
        params = dict(
            correl_model=haz_general.get_correl_model(self.hc),
            truncation_level=self.hc.truncation_level,
            maximum_distance=self.hc.maximum_distance)
        for lt_rlz in self._get_realizations():
            ltp = logictree.LogicTreeProcessor.from_hc(self.hc)
            gsims = ltp.parse_gmpe_logictree_path(lt_rlz.gsim_lt_path)
            all_ses = models.SES.objects.filter(
                ses_collection__lt_realization=lt_rlz,
                ordinal__isnull=False).order_by('ordinal')
            for ses in all_ses:
                # collect the ids of the ruptures in the given SES
                rupture_ids = models.SESRupture.objects.filter(
                    ses=ses).values_list('id', flat=True)
                if not rupture_ids:
                    continue
                # compute the associated seeds
                rupture_seeds = [rnd.randint(0, models.MAX_SINT_32)
                                 for _ in range(len(rupture_ids))]
                # split on IMTs to generate more tasks and save memory
                for imt in self.hc.intensity_measure_types:
                    if self.hc.ground_motion_correlation_model is None:
                        # split on sites to avoid running out of memory
                        # on the workers for computations like the full Japan
                        for sites in block_splitter(site_coll, BLOCK_SIZE):
                            yield (self.job.id, params, imt, gsims, ses,
                                   models.SiteCollection(sites),
                                   rupture_ids, rupture_seeds)
                    else:
                        # split on ruptures to avoid running out of memory
                        rupt_iter = block_splitter(rupture_ids, BLOCK_SIZE)
                        seed_iter = block_splitter(rupture_seeds, BLOCK_SIZE)
                        for rupts, seeds in zip(rupt_iter, seed_iter):
                            yield (self.job.id, params, imt, gsims, ses,
                                   site_coll, rupts, seeds)
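Both branches above rely on block_splitter to chunk the work. A stand-in with
the assumed semantics (consecutive blocks of at most block_size items), useful
for reading the generator without the engine installed:

def block_splitter(items, block_size):
    # Assumed semantics: yield consecutive lists of at most block_size items
    block = []
    for item in items:
        block.append(item)
        if len(block) == block_size:
            yield block
            block = []
    if block:  # final, possibly shorter, block
        yield block

# list(block_splitter(range(5), 2)) -> [[0, 1], [2, 3], [4]]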
Example #5
    def task_arg_gen(self, block_size, _check_num_task=True):
        """
        Loop through realizations and sources to generate a sequence of
        task arg tuples. Each tuple of args applies to a single task.

        Yielded results are 6-tuples of the form (job_id,
        sites, rupture_id, gmfcoll_id, task_seed, realizations)
        (task_seed will be used to seed numpy for temporal occurrence
        sampling).

        :param int block_size:
            The number of work items for each task. Fixed to 1.
        """
        rnd = random.Random()
        rnd.seed(self.hc.random_seed)

        rupture_id = self.job.parsedrupture.id

        for sites in block_splitter(self.hc.site_collection, BLOCK_SIZE):
            task_seed = rnd.randint(0, models.MAX_SINT_32)
            yield (self.job.id, models.SiteCollection(sites),
                   rupture_id, self.gmfcoll.id, task_seed,
                   self.hc.number_of_ground_motion_fields)
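The seeding pattern shared by Examples #4 and #5 is deterministic: reseeding
a private random.Random with the hazard calculation's random_seed reproduces
the same task seeds on every run. A small illustration (MAX_SINT_32 stands in
for models.MAX_SINT_32, assumed here to be 2**31 - 1):

import random

MAX_SINT_32 = 2 ** 31 - 1  # stand-in for models.MAX_SINT_32

rnd = random.Random()
rnd.seed(42)
first = [rnd.randint(0, MAX_SINT_32) for _ in range(3)]
rnd.seed(42)
assert first == [rnd.randint(0, MAX_SINT_32) for _ in range(3)]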