Example #1
0
 def full_enum(self):
     """Compute the mean hazard curve by full enumeration of the
     logic-tree paths.

     :returns: a tuple (mean_curve, sources, effective_ruptures, weights)
     """
     all_sources = []
     path_weights = []
     # walk every logic-tree path; the path index doubles as the group id
     for gid, (wei, branches) in enumerate(self.bs0.enumerate_paths()):
         branch_ids = tuple(br.branch_id for br in branches)
         values = self.bs0.get_bset_values(branch_ids)
         # e.g. first path: [(<b01 b02>, (4.6, 1.1)), (<b11 b12>, 7.0)]
         group = lt.apply_uncertainties(values, self.sg)
         for src in group:
             src.grp_id = gid
         all_sources.extend(group)
         path_weights.append(wei)
     # assign sequential source ids across all groups
     for idx, src in enumerate(all_sources):
         src.id = idx
     oqparams = dict(imtls=self.imtls, truncation_level2=2,
                     collapse_ctxs=True)
     res = classical(all_sources, self.srcfilter, self.gsims, oqparams)
     pmap = res['pmap']
     effrups = sum(nr for nr, ns, dt in res['calc_times'].values())
     # one curve per group, weighted-averaged over the path weights
     curves = [pmap[gid].array[0, :, 0] for gid in sorted(pmap)]
     mean = numpy.average(curves, axis=0, weights=path_weights)
     return mean, all_sources, effrups, path_weights
Example #2
0
 def full_enum(self):
     """Compute the hazard curve by full enumeration of the
     logic-tree paths.

     :returns: a tuple (curve, sources, effective_ruptures, weights)
     """
     all_sources = []
     path_weights = []
     # one group per logic-tree path; in the original code grp_id and
     # trt_smr are incremented in lockstep, so a single index serves both
     for gid, (wei, branches) in enumerate(self.bs0.enumerate_paths()):
         branch_ids = tuple(br.branch_id for br in branches)
         values = self.bs0.get_bset_values(branch_ids)
         # e.g. first path: [(<b01 b02>, (4.6, 1.1)), (<b11 b12>, 7.0)]
         group = lt.apply_uncertainties(values, self.sg)
         for src in group:
             src.grp_id = gid
             src.trt_smr = gid
         all_sources.extend(group)
         path_weights.append(wei)
     # assign sequential source ids across all groups
     for idx, src in enumerate(all_sources):
         src.id = idx
     nsites = len(self.sitecol.complete)
     time_span = all_sources[0].temporal_occurrence_model.time_span
     idist = calc.filters.IntegrationDistance.new('200')
     params = dict(imtls=self.imtls,
                   truncation_level2=2,
                   collapse_level=2,
                   investigation_time=time_span,
                   maximum_distance=idist('default'))
     cmaker = contexts.ContextMaker(
         all_sources[0].tectonic_region_type, self.gsims, params)
     res = classical(all_sources, self.sitecol, cmaker)
     pmap = res['pmap']
     effrups = sum(nr for nr, ns, dt in res['calc_times'].values())
     curve = pmap.array(nsites)[0, :, 0]
     return curve, all_sources, effrups, path_weights
Example #3
0
 def full_enum(self):
     """Compute the hazard curve by full enumeration of the
     logic-tree paths.

     :returns: a tuple (curve, sources, effective_ruptures, weights)
     """
     all_sources = []
     path_weights = []
     # one group per logic-tree path; in the original code grp_id and
     # et_id are incremented in lockstep, so a single index serves both
     for gid, (wei, branches) in enumerate(self.bs0.enumerate_paths()):
         branch_ids = tuple(br.branch_id for br in branches)
         values = self.bs0.get_bset_values(branch_ids)
         # e.g. first path: [(<b01 b02>, (4.6, 1.1)), (<b11 b12>, 7.0)]
         group = lt.apply_uncertainties(values, self.sg)
         for src in group:
             src.grp_id = gid
             src.et_id = gid
         all_sources.extend(group)
         path_weights.append(wei)
     # assign sequential source ids across all groups
     for idx, src in enumerate(all_sources):
         src.id = idx
     nsites = len(self.srcfilter.sitecol.complete)
     res = classical(
         all_sources, self.srcfilter, self.gsims,
         dict(imtls=self.imtls, truncation_level2=2, collapse_level=2))
     pmap = res['pmap']
     effrups = sum(nr for nr, ns, dt in res['calc_times'].values())
     curve = pmap.array(nsites)[0, :, 0]
     return curve, all_sources, effrups, path_weights
Example #4
0
def _build_groups(full_lt, smdict):
    """Build all the possible source groups from the full logic tree.

    :param full_lt: a full logic tree exposing `source_model_lt`,
        `sm_rlzs` and `get_grp_id`
    :param smdict: a dictionary absolute file path -> source model
    :returns: a list of source groups
    """
    smlt_file = full_lt.source_model_lt.filename
    smlt_dir = os.path.dirname(smlt_file)

    def _read(value):
        # collect source groups and source ids from a space-separated
        # sequence of file names relative to the smlt directory
        grps = []
        for name in value.split():
            fname = os.path.abspath(os.path.join(smlt_dir, name))
            grps.extend(smdict[fname].src_groups)
        ids = set(src.source_id for grp in grps for src in grp)
        return grps, ids

    groups = []
    for rlz in full_lt.sm_rlzs:
        src_groups, source_ids = _read(rlz.value)
        bset_values = full_lt.source_model_lt.bset_values(rlz)
        if bset_values and bset_values[0][0].uncertainty_type == 'extendModel':
            # the first branchset extends the model with extra sources,
            # which must not duplicate the ones already present
            (bset, value), *bset_values = bset_values
            extra, extra_ids = _read(value)
            dupl = source_ids & extra_ids
            if dupl:
                raise InvalidFile(
                    '%s contains source(s) %s already present in %s' %
                    (value, dupl, rlz.value))
            src_groups.extend(extra)
        for src_group in src_groups:
            grp_id = full_lt.get_grp_id(src_group.trt, rlz.ordinal)
            sg = apply_uncertainties(bset_values, src_group)
            for src in sg:
                src.grp_id = grp_id
                if rlz.samples > 1:
                    src.samples = rlz.samples
            groups.append(sg)

        # sanity check on applyToSources: every referenced source
        # must exist in the realization's source model
        sm_branch = rlz.lt_path[0]
        for srcid in full_lt.source_model_lt.info.applytosources[sm_branch]:
            if srcid not in source_ids:
                raise ValueError(
                    "The source %s is not in the source model,"
                    " please fix applyToSources in %s or the "
                    "source model(s) %s" % (srcid, smlt_file,
                                            rlz.value.split()))
    return groups