Code Example #1
File: imt_test.py Project: g-weatherill/oq-hazardlib
 def test_from_string(self):
     sa = imt_module.from_string('SA(0.1)')
     self.assertEqual(sa, ('SA', 0.1, 5.0))
     pga = imt_module.from_string('PGA')
     self.assertEqual(pga, ('PGA', None, None))
     with self.assertRaises(ValueError):
         imt_module.from_string('XXX')
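
This test pins down the basic contract of from_string: the parsed IMT behaves like a (name, period, damping) triple, which is how most of the engine code below unpacks it, and unknown names raise ValueError. A minimal usage sketch, assuming openquake.hazardlib is importable; newer hazardlib versions expose attributes such as .name and .period instead (compare Code Examples #18 and #23).

from openquake.hazardlib.imt import from_string

sa = from_string('SA(0.1)')
imt_name, sa_period, sa_damping = sa    # tuple-style unpacking used throughout these examples
print(imt_name, sa_period, sa_damping)  # SA 0.1 5.0 in the hazardlib version this test targets
print(from_string('PGA'))               # parsing a scalar IMT such as PGA also works

try:
    from_string('XXX')                  # unknown names are rejected, as asserted above
except ValueError as exc:
    print('rejected:', exc)
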
Code Example #2
File: imt_test.py Project: g-weatherill/oq-hazardlib
 def test_ordering_and_equality(self):
     a = imt_module.from_string('SA(0.1)')
     b = imt_module.from_string('SA(0.10)')
     c = imt_module.from_string('SA(0.2)')
     self.assertLess(a, c)
     self.assertGreater(c, a)
     self.assertEqual(a, b)
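
Equality and ordering are what make parsed IMTs usable as dictionary keys and sortable collections in the examples below; the same objects serve as dict keys in Code Examples #7 and #20, so they are hashable and strings such as 'SA(0.1)' and 'SA(0.10)' collapse to one object once parsed. A quick sketch, assuming openquake.hazardlib is importable:

from openquake.hazardlib.imt import from_string

a = from_string('SA(0.1)')
b = from_string('SA(0.10)')   # trailing zero: same period, equal object
c = from_string('SA(0.2)')

print(a == b, a < c)          # True True, matching the assertions above
print(len({a, b, c}))         # 2: the duplicated period collapses in a set
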
Code Example #3
File: oqvalidation.py Project: amirj700/oq-risklib
    def set_risk_imtls(self, risk_models):
        """
        :param risk_models:
            a dictionary taxonomy -> loss_type -> risk_function

        Set the attribute risk_imtls.
        """
        # NB: different loss types may have different IMLs for the same IMT
        # in that case we merge the IMLs
        imtls = {}
        for taxonomy, risk_functions in risk_models.items():
            for loss_type, rf in risk_functions.items():
                imt = rf.imt
                from_string(imt)  # make sure it is a valid IMT
                imls = list(rf.imls)
                if imt in imtls and imtls[imt] != imls:
                    logging.debug(
                        'Different levels for IMT %s: got %s, expected %s',
                        imt, imls, imtls[imt])
                    imtls[imt] = sorted(set(imls + imtls[imt]))
                else:
                    imtls[imt] = imls
        self.risk_imtls = imtls

        if self.uniform_hazard_spectra:
            self.check_uniform_hazard_spectra()
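
The merging rule does not depend on the surrounding class: when two loss types quote different IMLs for the same IMT, the union of the levels is kept, sorted. A standalone sketch of that behaviour with hypothetical data, using plain dictionaries instead of risk-function objects:

imtls = {}
# hypothetical per-loss-type IMLs for the same IMT
candidates = [('PGA', [0.1, 0.2, 0.4]),        # e.g. from the 'structural' loss type
              ('PGA', [0.2, 0.4, 0.8]),        # e.g. from the 'nonstructural' loss type
              ('SA(0.3)', [0.05, 0.1])]
for imt, imls in candidates:
    if imt in imtls and imtls[imt] != imls:
        # different levels for the same IMT: merge them
        imtls[imt] = sorted(set(imls + imtls[imt]))
    else:
        imtls[imt] = imls
print(imtls)   # {'PGA': [0.1, 0.2, 0.4, 0.8], 'SA(0.3)': [0.05, 0.1]}
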
Code Example #4
File: core.py Project: luisera/oq-engine
def compute_gmfs(job_id, sites, rupture, gmf_id, realizations):
    """
    Compute ground motion fields and store them in the db.

    :param job_id:
        ID of the currently running job.
    :param sites:
        The subset of the full SiteCollection scanned by this task
    :param rupture:
        The hazardlib rupture from which we will generate
        ground motion fields.
    :param gmf_id:
        the id of a :class:`openquake.engine.db.models.Gmf` record
    :param realizations:
        Number of realizations to create.
    """
    hc = models.HazardCalculation.objects.get(oqjob=job_id)
    imts = [from_string(x) for x in hc.intensity_measure_types]
    gsim = AVAILABLE_GSIMS[hc.gsim]()  # instantiate the GSIM class
    correlation_model = haz_general.get_correl_model(hc)

    with EnginePerformanceMonitor('computing gmfs', job_id, gmfs):
        return ground_motion_fields(
            rupture, sites, imts, gsim,
            hc.truncation_level, realizations=realizations,
            correlation_model=correlation_model)
Code Example #5
def import_rows(job, ses_coll, gmf_coll, sorted_tags, rows):
    """
    Import a list of records into the gmf_data and hazard_site tables.

    :param job:
        :class:`openquake.engine.db.models.OqJob` instance
    :param gmf_coll:
        :class:`openquake.engine.db.models.Gmf` instance
    :param rows:
        a list of records (imt_str, rupture_tag, data), where data is a list
        of pairs (wkt, gmv)
    """
    gmfs = []  # list of GmfData instances
    site_id = {}  # dictionary wkt -> site id
    rupture = fake_rupture()
    prob_rup_id, ses_rup_ids, seeds = create_db_ruptures(
        rupture, ses_coll, sorted_tags, seed=42)
    tag2id = dict(zip(sorted_tags, ses_rup_ids))

    for imt_str, tag, data in rows:
        imt = from_string(imt_str)
        rup_id = tag2id[tag]
        for wkt, gmv in data:
            if wkt not in site_id:  # create a new site
                site_id[wkt] = models.HazardSite.objects.create(
                    hazard_calculation=job, location=wkt).id
            gmfs.append(
                models.GmfData(
                    imt=imt[0], sa_period=imt[1], sa_damping=imt[2],
                    gmvs=[gmv], rupture_ids=[rup_id],
                    site_id=site_id[wkt], gmf=gmf_coll, task_no=0))
    writer.CacheInserter.saveall(gmfs)
Code Example #6
File: core.py Project: ChristieHale/oq-engine
    def save_gmfs(self, rlzs_assoc):
        """
        Helper method to save the computed GMF data to the database.

        :param rlzs_assoc:
            a :class:`openquake.commonlib.source.RlzsAssoc` instance
        """
        samples = rlzs_assoc.csm_info.get_num_samples(self.trt_model_id)
        for gname, imt_str, site_id in self.gmvs_per_site:
            rlzs = rlzs_assoc[self.trt_model_id, gname]
            if samples > 1:
                # save only the data for the realization corresponding
                # to the current SESCollection
                rlzs = [rlz for rlz in rlzs if self.col_idx in rlz.col_ids]
            for rlz in rlzs:
                imt_name, sa_period, sa_damping = from_string(imt_str)
                inserter.add(models.GmfData(
                    gmf=models.Gmf.objects.get(lt_realization=rlz.id),
                    task_no=0,
                    imt=imt_name,
                    sa_period=sa_period,
                    sa_damping=sa_damping,
                    site_id=site_id,
                    gmvs=self.gmvs_per_site[gname, imt_str, site_id],
                    rupture_ids=self.ruptures_per_site[gname, imt_str, site_id]
                ))
        inserter.flush()
        self.gmvs_per_site.clear()
        self.ruptures_per_site.clear()
Code Example #7
def hazard_curves_per_trt(
        sources, sites, imtls, gsims, truncation_level=None,
        source_site_filter=filters.source_site_noop_filter,
        rupture_site_filter=filters.rupture_site_noop_filter,
        monitor=DummyMonitor()):
    """
    Compute the hazard curves for a set of sources belonging to the same
    tectonic region type for all the GSIMs associated to that TRT.
    The arguments are the same as in :func:`calc_hazard_curves`, except
    for ``gsims``, which is a list of GSIM instances.

    :returns:
        A list of G arrays of size N, where N is the number of sites and
        G the number of gsims. Each array contains records with fields given
        by the intensity measure types; the size of each field is given by the
        number of levels in ``imtls``.
    """
    gnames = list(map(str, gsims))
    imt_dt = numpy.dtype([(imt, float, len(imtls[imt]))
                          for imt in sorted(imtls)])
    imts = {from_string(imt): imls for imt, imls in imtls.items()}
    curves = [numpy.ones(len(sites), imt_dt) for gname in gnames]
    sources_sites = ((source, sites) for source in sources)
    ctx_mon = monitor('making contexts', measuremem=False)
    rup_mon = monitor('getting ruptures', measuremem=False)
    pne_mon = monitor('computing poes', measuremem=False)
    monitor.calc_times = []  # pairs (src_id, delta_t)
    for source, s_sites in source_site_filter(sources_sites):
        t0 = time.time()
        try:
            with rup_mon:
                rupture_sites = list(rupture_site_filter(
                    (rupture, s_sites) for rupture in source.iter_ruptures()))
            for rupture, r_sites in rupture_sites:
                for i, gsim in enumerate(gsims):
                    with ctx_mon:
                        sctx, rctx, dctx = gsim.make_contexts(r_sites, rupture)
                    with pne_mon:
                        for imt in imts:
                            poes = gsim.get_poes(
                                sctx, rctx, dctx, imt, imts[imt],
                                truncation_level)
                            pno = rupture.get_probability_no_exceedance(poes)
                            expanded_pno = r_sites.expand(pno, placeholder=1)
                            curves[i][str(imt)] *= expanded_pno
        except Exception as err:
            etype, err, tb = sys.exc_info()
            msg = 'An error occurred with source id=%s. Error: %s'
            msg %= (source.source_id, str(err))
            raise_(etype, msg, tb)

        # we are attaching the calculation times to the monitor
        # so that oq-lite (and the engine) can store them
        monitor.calc_times.append((source.id, time.time() - t0))
        # NB: source.id is an integer; it should not be confused
        # with source.source_id, which is a string
    for i in range(len(gnames)):
        for imt in imtls:
            curves[i][imt] = 1. - curves[i][imt]
    return curves
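
The per-GSIM curves are held in a NumPy structured array with one field per IMT string, each field wide enough for that IMT's levels. A self-contained sketch of building and indexing that layout with hypothetical IMTs and levels:

import numpy

imtls = {'PGA': [0.1, 0.2, 0.4], 'SA(0.5)': [0.05, 0.1]}   # hypothetical levels
n_sites = 3
imt_dt = numpy.dtype([(imt, float, len(imtls[imt])) for imt in sorted(imtls)])
curves = numpy.ones(n_sites, imt_dt)   # start from probability of no exceedance = 1

# fields are addressed by the IMT string, exactly as curves[i][str(imt)] above
curves['PGA'][0] *= 0.9                # multiply in a no-exceedance probability
curves['PGA'] = 1. - curves['PGA']     # convert to probability of exceedance
print(curves['PGA'].shape)             # (3, 3): n_sites x n_levels
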
Code Example #8
File: general.py Project: serkansevilgen/oq-engine
    def save_hazard_curves(self):
        """
        Post-execution actions. At the moment, all we do is finalize the hazard
        curve results.
        """
        imtls = self.hc.intensity_measure_types_and_levels
        points = self.hc.points_to_compute()
        sorted_imts = sorted(imtls)
        curves_by_imt = dict((imt, []) for imt in sorted_imts)

        for rlz in self._get_realizations():
            # create a multi-imt curve
            multicurve = models.Output.objects.create_output(
                self.job, "hc-multi-imt-rlz-%s" % rlz.id,
                "hazard_curve_multi")
            models.HazardCurve.objects.create(
                output=multicurve, lt_realization=rlz,
                investigation_time=self.hc.investigation_time)

            with self.monitor('building curves per realization'):
                imt_curves = zip(
                    sorted_imts, models.build_curves(rlz, self.curves))
            for imt, curves in imt_curves:
                curves_by_imt[imt].append(curves)

                # create a new `HazardCurve` 'container' record for each
                # realization for each intensity measure type
                hc_im_type, sa_period, sa_damping = from_string(imt)

                # save output
                hco = models.Output.objects.create(
                    oq_job=self.job,
                    display_name="Hazard Curve rlz-%s-%s" % (rlz.id, imt),
                    output_type='hazard_curve',
                )

                # save hazard_curve
                haz_curve = models.HazardCurve.objects.create(
                    output=hco,
                    lt_realization=rlz,
                    investigation_time=self.hc.investigation_time,
                    imt=hc_im_type,
                    imls=imtls[imt],
                    sa_period=sa_period,
                    sa_damping=sa_damping,
                )

                # save hazard_curve_data
                logs.LOG.info('saving %d hazard curves for %s, imt=%s',
                              len(points), hco, imt)
                writer.CacheInserter.saveall([models.HazardCurveData(
                    hazard_curve=haz_curve,
                    poes=list(poes),
                    location='POINT(%s %s)' % (p.longitude, p.latitude),
                    weight=rlz.weight)
                    for p, poes in zip(points, curves)])

        self.curves = {}  # save memory for the post-processing phase
        if self.hc.mean_hazard_curves or self.hc.quantile_hazard_curves:
            self.curves_by_imt = curves_by_imt
Code Example #9
File: oq_wgi.py Project: Weginger/hello-world
    def get_ground_motion_values(self):
        """
        Runs the GMPE calculations to retrieve ground motion values
        :returns:
            Nested dictionary of values
            {'GMPE1': {'IM1': , 'IM2': },
             'GMPE2': {'IM1': , 'IM2': }}
        """
        gmvs = OrderedDict()
        for gmpe in self.gsims:
            gmvs.update([(gmpe.__class__.__name__, {})])
            for i_m in self.imts:
                gmvs[gmpe.__class__.__name__][i_m] = np.zeros(
                    [len(self.rctx), self.nsites], dtype=float)
                for iloc, rct in enumerate(self.rctx):
                    try:
                        means, _ = gmpe.get_mean_and_stddevs(
                            self.sctx,
                            rct,
                            self.dctx,
                            imt.from_string(i_m),
                            [self.stddevs])

                        gmvs[gmpe.__class__.__name__][i_m][iloc, :] = np.exp(means)
                    except KeyError:
                        gmvs[gmpe.__class__.__name__][i_m] = []
                        break
        self.gmvs = gmvs
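
The returned container is a two-level dictionary: GMPE class name -> IMT string -> array of shape (number of rupture contexts, number of sites). A self-contained sketch of that structure with hypothetical names and sizes, using NumPy only:

from collections import OrderedDict
import numpy as np

gmpe_names = ['HypotheticalGMPE1', 'HypotheticalGMPE2']   # stand-ins for gmpe.__class__.__name__
imts = ['PGA', 'SA(1.0)']
n_rctx, n_sites = 4, 10

gmvs = OrderedDict()
for name in gmpe_names:
    gmvs[name] = {}
    for i_m in imts:
        # one (ruptures x sites) array per GMPE and IMT, as in the method above
        gmvs[name][i_m] = np.zeros([n_rctx, n_sites], dtype=float)
print(gmvs['HypotheticalGMPE1']['PGA'].shape)   # (4, 10)
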
Code Example #10
    def get_observations(self, context, component="Geometric"):
        """
        Get the observed ground motions from the database
        """
        select_records = self.database.select_from_event_id(context["EventID"])
        observations = OrderedDict([(imtx, []) for imtx in self.imts])
        selection_string = "IMS/H/Spectra/Response/Acceleration/"
        for record in select_records:
            fle = h5py.File(record.datafile, "r")
            for imtx in self.imts:
                if imtx in SCALAR_IMTS:
                    if imtx == "PGA":
                        observations[imtx].append(
                            get_scalar(fle, imtx, component) / 981.0)
                    else:
                        observations[imtx].append(
                            get_scalar(fle, imtx, component))

                elif "SA(" in imtx:
                    target_period = imt.from_string(imtx).period
                    
                    spectrum = fle[selection_string + component 
                                   + "/damping_05"].value
                    periods = fle["IMS/H/Spectra/Response/Periods"].value
                    observations[imtx].append(get_interpolated_period(
                        target_period, periods, spectrum) / 981.0)
                else:
                    raise "IMT %s is unsupported!" % imtx
            fle.close()
        for imtx in self.imts:
            observations[imtx] = np.array(observations[imtx])
        context["Observations"] = observations
        context["Num. Sites"] = len(select_records)
        return context
Code Example #11
File: kotha_2019.py Project: digitalsatori/oq-engine
    def __init__(self, sigma_mu_epsilon=0.0, c3=None):
        """
        Instantiate setting the sigma_mu_epsilon and c3 terms
        """
        super().__init__()
        if isinstance(c3, dict):
            # Inputting c3 as a dictionary keyed by the string representation
            # of the IMT
            c3in = {}
            for c3key in c3:
                c3in[from_string(c3key)] = {"c3": c3[c3key]}
            self.c3 = CoeffsTable(sa_damping=5, table=c3in)
        else:
            self.c3 = c3

        self.sigma_mu_epsilon = sigma_mu_epsilon
        if self.sigma_mu_epsilon:
            # Connect to hdf5 and load tables into memory
            self.retreive_sigma_mu_data()
        else:
            # No adjustments, so skip this step
            self.mags = None
            self.dists = None
            self.s_a = None
            self.pga = None
            self.pgv = None
            self.periods = None
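
The c3 handling illustrates a pattern that recurs in these GSIM wrappers (see also Code Example #26): a user-supplied dictionary keyed by IMT strings is re-keyed by IMT objects before being wrapped in a coefficients table. A sketch of just that conversion step, assuming openquake.hazardlib is importable; the numerical values are hypothetical:

from openquake.hazardlib.imt import from_string

c3 = {'PGA': -0.60, 'SA(0.2)': -0.55, 'SA(1.0)': -0.40}   # hypothetical adjustment terms
c3in = {from_string(key): {"c3": value} for key, value in c3.items()}
# c3in is now keyed by IMT objects, ready to be wrapped in a CoeffsTable as above
for imt_obj, row in c3in.items():
    print(str(imt_obj), row)
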
Code Example #12
File: core.py Project: scm20008/oq-engine
 def pre_execute(self):
     """
     Do pre-execution work. At the moment, this work entails:
     parsing and initializing sources, parsing and initializing the
     site model (if there is one), parsing vulnerability and
     exposure files, and generating logic tree realizations. (The
     latter piece basically defines the work to be done in the
     `execute` phase.)
     """
     self.parse_risk_models()
     self.initialize_sources()
     self.initialize_site_model()
     self.create_ruptures()
     n_imts = len(distinct(from_string(imt)
                           for imt in self.hc.intensity_measure_types))
     n_sites = len(self.hc.site_collection)
     n_gmf = self.hc.number_of_ground_motion_fields
     output_weight = n_sites * n_imts * n_gmf
     logs.LOG.info('Expected output size=%s', output_weight)
     models.JobInfo.objects.create(
         oq_job=self.job,
         num_sites=n_sites,
         num_realizations=1,
         num_imts=n_imts,
         num_levels=0,
         input_weight=0,
         output_weight=output_weight)
     self.check_limits(input_weight=0, output_weight=output_weight)
     return 0, output_weight
Code Example #13
 def get_ground_motion_values(self):
     """
      Runs the GMPE calculations to retrieve ground motion values
     :returns:
         Nested dictionary of values
         {'GMPE1': {'IM1': , 'IM2': },
          'GMPE2': {'IM1': , 'IM2': }}
     """
     gmvs = OrderedDict()
     for gmpe in self.gsims:
         gmvs.update([(gmpe.__class__.__name__, {})])
         for i_m in self.imts:
             gmvs[gmpe.__class__.__name__][i_m] = np.zeros([len(self.rctx),
                                                            self.nsites],
                                                            dtype=float)
             for iloc, rct in enumerate(self.rctx):
                 try:
                     _, sigmas = gmpe.get_mean_and_stddevs(
                          self.sctx,
                          rct,
                          self.dctx,
                          imt.from_string(i_m),
                          [self.stddevs])
                     gmvs[gmpe.__class__.__name__][i_m][iloc, :] = sigmas[0]
                 except KeyError:
                     gmvs[gmpe.__class__.__name__][i_m] = []
                     break
     return gmvs
Code Example #14
File: hazard.py Project: gem/oq-risklib
def export_hazard_curves_xml(key, dest, sitecol, curves_by_imt,
                             imtls, investigation_time):
    """
    Export the curves of the given realization into XML.

    :param key: output_type and export_type
    :param dest: name of the exported file
    :param sitecol: site collection
    :param curves_by_imt: dictionary with the curves keyed by IMT
    :param imtls: dictionary with the intensity measure types and levels
    :param investigation_time: investigation time in years
    """
    mdata = []
    hcurves = []
    for imt_str, imls in sorted(imtls.items()):
        hcurves.append(
            [HazardCurve(site.location, poes)
             for site, poes in zip(sitecol, curves_by_imt[imt_str])])
        imt = from_string(imt_str)
        mdata.append({
            'quantile_value': None,
            'statistics': None,
            'smlt_path': '',
            'gsimlt_path': '',
            'investigation_time': investigation_time,
            'imt': imt[0],
            'sa_period': imt[1],
            'sa_damping': imt[2],
            'imls': imls,
        })
    writer = hazard_writers.MultiHazardCurveXMLWriter(dest, mdata)
    with floatformat('%12.8E'):
        writer.serialize(hcurves)
    return {dest: dest}
Code Example #15
File: loop.py Project: mhearne-usgs/Correlation
def calculate_corr(dist_mat, voi, JB_cor_model, var, inc_sta_indices, intensity_factor = 0.9):
    #####
    # Calculates correlation model for distance matrix and voi
    # IN: dist_mat- reduced distance matrix
    #     voi- variable of interest
    #     JB_cor_model- correlation model from correlation in oq-hazardlib
    #OUT: Sig12, Sig11inv- partitions of correlation matrix
    #     R - Sqrt of sigma
    #####
    correlation_model = JB_cor_model._get_correlation_model(dist_mat, from_string(voi))
    
    intensity = var['intensity'][inc_sta_indices]
    if np.size(intensity) != 0:
        for i in range(0,np.size(intensity)):
            if intensity[i] == 1:
                correlation_model[i,i+1:] = correlation_model[i,i+1:].copy()*intensity_factor
                correlation_model[i+1:,i] = correlation_model[i+1:,i].copy()*intensity_factor

    Sig11 = np.mat(correlation_model[0:-1, 0:-1])
    Sig12 = np.mat(correlation_model[0:-1, -1]).T
    Sig22 = np.mat(correlation_model[-1,-1])

    Sig11inv = np.mat(np.linalg.pinv(Sig11))
    sigma = Sig22 - (Sig12.T*Sig11inv*Sig12)
    R = np.sqrt(sigma)
    
    return {'Sig12':Sig12, 'Sig11inv':Sig11inv, 'R':R}
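
The closing matrix algebra is the standard conditioning formula: with the correlation matrix partitioned into the observed block Sig11, the cross terms Sig12 and the target entry Sig22, the conditional variance of the target station given the others is the Schur complement Sig22 - Sig12.T * Sig11inv * Sig12, and its square root R is what gets returned. A self-contained NumPy sketch on a synthetic distance matrix, with a simple exponential decay standing in for the JB correlation model:

import numpy as np

# hypothetical station layout: distances (km) along a line, last station is the target
dists = np.array([0.0, 5.0, 12.0, 20.0])
dist_mat = np.abs(dists[:, None] - dists[None, :])
# simple exponential decay as an illustrative stand-in for the JB correlation model
corr = np.exp(-3.0 * dist_mat / 30.0)

Sig11 = corr[:-1, :-1]                 # correlations among the observed stations
Sig12 = corr[:-1, -1].reshape(-1, 1)   # cross-correlations with the target station
Sig22 = corr[-1, -1]                   # unit variance of the target station

Sig11inv = np.linalg.pinv(Sig11)
sigma = Sig22 - Sig12.T @ Sig11inv @ Sig12   # conditional variance (Schur complement)
R = np.sqrt(sigma)                           # square root used when sampling the residual
print(sigma.item(), R.item())
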
Code Example #16
File: hazard.py Project: gem/oq-risklib
def export_disagg_xml(ekey, dstore):
    oq = dstore['oqparam']
    rlzs = dstore['csm_info'].get_rlzs_assoc().realizations
    group = dstore['disagg']
    fnames = []
    writercls = hazard_writers.DisaggXMLWriter
    for key in group:
        matrix = pickle.loads(group[key].value)
        attrs = group[key].attrs
        rlz = rlzs[attrs['rlzi']]
        poe = attrs['poe']
        iml = attrs['iml']
        imt, sa_period, sa_damping = from_string(attrs['imt'])
        fname = dstore.export_path(key + '.xml')
        lon, lat = attrs['location']
        # TODO: add poe=poe below
        writer = writercls(
            fname, investigation_time=oq.investigation_time,
            imt=imt, smlt_path='_'.join(rlz.sm_lt_path),
            gsimlt_path=rlz.gsim_rlz.uid, lon=lon, lat=lat,
            sa_period=sa_period, sa_damping=sa_damping,
            mag_bin_edges=attrs['mag_bin_edges'],
            dist_bin_edges=attrs['dist_bin_edges'],
            lon_bin_edges=attrs['lon_bin_edges'],
            lat_bin_edges=attrs['lat_bin_edges'],
            eps_bin_edges=attrs['eps_bin_edges'],
            tectonic_region_types=attrs['trts'],
        )
        data = [DisaggMatrix(poe, iml, dim_labels, matrix[i])
                for i, dim_labels in enumerate(disagg.pmf_map)]
        writer.serialize(data)
        fnames.append(fname)
    return sorted(fnames)
Code Example #17
File: valid.py Project: gem/oq-engine
def intensity_measure_types(value):
    """
    :param value: input string
    :returns: non-empty list of Intensity Measure Type objects

    >>> intensity_measure_types('PGA')
    ['PGA']
    >>> intensity_measure_types('PGA, SA(1.00)')
    ['PGA', 'SA(1.0)']
    >>> intensity_measure_types('SA(0.1), SA(0.10)')
    Traceback (most recent call last):
      ...
    ValueError: Duplicated IMTs in SA(0.1), SA(0.10)
    >>> intensity_measure_types('SA(1), PGA')
    Traceback (most recent call last):
    ...
    ValueError: The IMTs are not sorted by period: SA(1), PGA
    """
    imts = []
    for chunk in value.split(','):
        imts.append(imt.from_string(chunk.strip()))
    sorted_imts = sorted(imts, key=lambda im: getattr(im, 'period', 1))
    if len(distinct(imts)) < len(imts):
        raise ValueError('Duplicated IMTs in %s' % value)
    if sorted_imts != imts:
        raise ValueError('The IMTs are not sorted by period: %s' % value)
    return [str(imt) for imt in imts]
Code Example #18
File: hazard.py Project: digitalsatori/oq-engine
def export_disagg_xml(ekey, dstore):
    oq = dstore['oqparam']
    rlzs = dstore['csm_info'].get_rlzs_assoc().realizations
    group = dstore['disagg']
    fnames = []
    writercls = hazard_writers.DisaggXMLWriter
    trts = dstore.get_attr('csm_info', 'trts')
    for key in group:
        matrix = dstore['disagg/' + key]
        attrs = group[key].attrs
        rlz = rlzs[attrs['rlzi']]
        poe_agg = attrs['poe_agg']
        iml = attrs['iml']
        imt = from_string(attrs['imt'])
        fname = dstore.export_path(key + '.xml')
        lon, lat = attrs['location']
        writer = writercls(
            fname, investigation_time=oq.investigation_time,
            imt=imt.name, smlt_path='_'.join(rlz.sm_lt_path),
            gsimlt_path=rlz.gsim_rlz.uid, lon=lon, lat=lat,
            sa_period=getattr(imt, 'period', None) or None,
            sa_damping=getattr(imt, 'damping', None),
            mag_bin_edges=attrs['mag_bin_edges'],
            dist_bin_edges=attrs['dist_bin_edges'],
            lon_bin_edges=attrs['lon_bin_edges'],
            lat_bin_edges=attrs['lat_bin_edges'],
            eps_bin_edges=attrs['eps_bin_edges'],
            tectonic_region_types=trts)
        data = []
        for poe, k in zip(poe_agg, oq.disagg_outputs or disagg.pmf_map):
            data.append(DisaggMatrix(poe, iml, k.split('_'), matrix[k]))
        writer.serialize(data)
        fnames.append(fname)
    return sorted(fnames)
Code Example #19
File: oqvalidation.py Project: gem/oq-engine
    def check_gsims(self, gsims):
        """
        :param gsims: a sequence of GSIM instances
        """
        imts = set(from_string(imt).name for imt in self.imtls)
        for gsim in gsims:
            restrict_imts = gsim.DEFINED_FOR_INTENSITY_MEASURE_TYPES
            if restrict_imts:
                names = set(cls.__name__ for cls in restrict_imts)
                invalid_imts = ', '.join(imts - names)
                if invalid_imts:
                    raise ValueError(
                        'The IMT %s is not accepted by the GSIM %s' %
                        (invalid_imts, gsim))

            if 'site_model' not in self.inputs:
                # look at the required sites parameters: they must have
                # a valid value; the other parameters can keep a NaN
                # value since they are not used by the calculator
                for param in gsim.REQUIRES_SITES_PARAMETERS:
                    if param in ('lon', 'lat'):  # no check
                        continue
                    param_name = self.siteparam[param]
                    param_value = getattr(self, param_name)
                    if (isinstance(param_value, float) and
                            numpy.isnan(param_value)):
                        raise ValueError(
                            'Please set a value for %r, this is required by '
                            'the GSIM %s' % (param_name, gsim))
Code Example #20
def hazard_curves(
    sources,
    sites,
    imtls,
    gsim_by_trt,
    truncation_level=None,
    source_site_filter=filters.source_site_noop_filter,
    rupture_site_filter=filters.rupture_site_noop_filter,
):
    """
    Deprecated. It does the same job of
    :func:`openquake.hazardlib.calc.hazard_curve.calc_hazard_curves`,
    with the only difference that the intensity measure types in input
    and output are hazardlib objects instead of simple strings.
    """
    imtls = {str(imt): imls for imt, imls in imtls.items()}
    curves_by_imt = calc_hazard_curves(
        sources,
        sites,
        imtls,
        gsim_by_trt,
        truncation_level,
        source_site_filter=filters.source_site_noop_filter,
        rupture_site_filter=filters.rupture_site_noop_filter,
    )
    return {from_string(imt): curves_by_imt[imt] for imt in imtls}
Code Example #21
File: hazard_getters.py Project: julgp/oq-engine
    def _get_data(self, ho):
        # extract the poes for each site from the given hazard output
        imt_type, sa_period, sa_damping = from_string(self.imt)
        oc = ho.output_container
        if oc.output.output_type == 'hazard_curve':
            imls = oc.imls
        elif oc.output.output_type == 'hazard_curve_multi':
            oc = models.HazardCurve.objects.get(
                output__oq_job=oc.output.oq_job,
                output__output_type='hazard_curve',
                statistics=oc.statistics,
                lt_realization=oc.lt_realization,
                imt=imt_type,
                sa_period=sa_period,
                sa_damping=sa_damping)
            imls = oc.imls

        cursor = models.getcursor('job_init')
        query = """\
        SELECT hzrdr.hazard_curve_data.poes
        FROM hzrdr.hazard_curve_data
        WHERE hazard_curve_id = %s AND location = %s
        """
        all_curves = []
        for site_id in self.site_ids:
            location = models.HazardSite.objects.get(pk=site_id).location
            cursor.execute(query, (oc.id, 'SRID=4326; ' + location.wkt))
            poes = cursor.fetchall()[0][0]
            all_curves.append(zip(imls, poes))
        return all_curves
Code Example #22
def export_disagg_csv(ekey, dstore):
    oq = dstore['oqparam']
    rlzs = dstore['csm_info'].get_rlzs_assoc().realizations
    group = dstore['disagg']
    fnames = []
    for key in group:
        matrix = dstore['disagg/' + key]
        attrs = group[key].attrs
        rlz = rlzs[attrs['rlzi']]
        poe = attrs['poe']
        iml = attrs['iml']
        imt, sa_period, sa_damping = from_string(attrs['imt'])
        lon, lat = attrs['location']
        metadata = collections.OrderedDict()
        # Loads "disaggMatrices" nodes
        metadata['smlt_path'] = '_'.join(rlz.sm_lt_path)
        metadata['gsimlt_path'] = rlz.gsim_rlz.uid
        metadata['imt'] = imt
        metadata['investigation_time'] = oq.investigation_time
        metadata['lon'] = lon
        metadata['lat'] = lat
        metadata['Mag'] = attrs['mag_bin_edges']
        metadata['Dist'] = attrs['dist_bin_edges']
        metadata['Lon'] = attrs['lon_bin_edges']
        metadata['Lat'] = attrs['lat_bin_edges']
        metadata['Eps'] = attrs['eps_bin_edges']
        metadata['TRT'] = attrs['trts']
        data = {}
        for dim_labels in disagg.pmf_map:
            label = '_'.join(dim_labels)
            fname = dstore.export_path(key + '_%s.csv' % label)
            data[dim_labels] = poe, iml, matrix[label].value, fname
            fnames.append(fname)
        save_disagg_to_csv(metadata, data)
    return fnames
Code Example #23
File: hazard.py Project: digitalsatori/oq-engine
def export_hcurves_xml(ekey, dstore):
    key, kind, fmt = get_kkf(ekey)
    len_ext = len(fmt) + 1
    oq = dstore['oqparam']
    sitemesh = get_mesh(dstore['sitecol'])
    rlzs_assoc = dstore['csm_info'].get_rlzs_assoc()
    R = len(rlzs_assoc.realizations)
    fnames = []
    writercls = hazard_writers.HazardCurveXMLWriter
    for kind in oq.get_kinds(kind, R):
        if kind.startswith('rlz-'):
            rlz = rlzs_assoc.realizations[int(kind[4:])]
            smlt_path = '_'.join(rlz.sm_lt_path)
            gsimlt_path = rlz.gsim_rlz.uid
        else:
            smlt_path = ''
            gsimlt_path = ''
        name = hazard_curve_name(dstore, ekey, kind)
        hcurves = extract(dstore, 'hcurves?kind=' + kind)[kind]
        for im in oq.imtls:
            slc = oq.imtls(im)
            imt = from_string(im)
            fname = name[:-len_ext] + '-' + im + '.' + fmt
            data = [HazardCurve(Location(site), poes[slc])
                    for site, poes in zip(sitemesh, hcurves)]
            writer = writercls(fname,
                               investigation_time=oq.investigation_time,
                               imls=oq.imtls[im], imt=imt.name,
                               sa_period=getattr(imt, 'period', None) or None,
                               sa_damping=getattr(imt, 'damping', None),
                               smlt_path=smlt_path, gsimlt_path=gsimlt_path)
            writer.serialize(data)
            fnames.append(fname)
    return sorted(fnames)
Code Example #24
def export_hcurves_xml_json(ekey, dstore):
    export_type = ekey[1]
    len_ext = len(export_type) + 1
    oq = dstore['oqparam']
    sitemesh = get_mesh(dstore['sitecol'])
    rlzs_assoc = dstore['csm_info'].get_rlzs_assoc()
    hcurves = dstore[ekey[0]]
    fnames = []
    writercls = (hazard_writers.HazardCurveGeoJSONWriter
                 if export_type == 'geojson' else
                 hazard_writers.HazardCurveXMLWriter)
    for kind in hcurves:
        if kind.startswith('rlz-'):
            rlz = rlzs_assoc.realizations[int(kind[4:])]
            smlt_path = '_'.join(rlz.sm_lt_path)
            gsimlt_path = rlz.gsim_rlz.uid
        else:
            smlt_path = ''
            gsimlt_path = ''
        curves = dstore[ekey[0] + '/' + kind].convert(oq.imtls, len(sitemesh))
        name = hazard_curve_name(dstore, ekey, kind, rlzs_assoc)
        for imt in oq.imtls:
            imtype, sa_period, sa_damping = from_string(imt)
            fname = name[:-len_ext] + '-' + imt + '.' + export_type
            data = [HazardCurve(Location(site), poes[imt])
                    for site, poes in zip(sitemesh, curves)]
            writer = writercls(fname,
                               investigation_time=oq.investigation_time,
                               imls=oq.imtls[imt], imt=imtype,
                               sa_period=sa_period, sa_damping=sa_damping,
                               smlt_path=smlt_path, gsimlt_path=gsimlt_path)
            writer.serialize(data)
            fnames.append(fname)
    return sorted(fnames)
Code Example #25
File: gmf.py Project: digitalsatori/oq-engine
 def __init__(self, rupture, sitecol, imts, cmaker,
              truncation_level=None, correlation_model=None):
     if len(sitecol) == 0:
         raise ValueError('No sites')
     elif len(imts) == 0:
         raise ValueError('No IMTs')
     elif len(cmaker.gsims) == 0:
         raise ValueError('No GSIMs')
     self.rupture = rupture
     self.imts = [from_string(imt) for imt in imts]
     self.gsims = sorted(cmaker.gsims)
     self.truncation_level = truncation_level
     self.correlation_model = correlation_model
     # `rupture` can be an EBRupture instance
     if hasattr(rupture, 'srcidx'):
         self.srcidx = rupture.srcidx  # the source the rupture comes from
         rupture = rupture.rupture  # the underlying rupture
     else:
         self.srcidx = '?'
     try:
         self.sctx, self.dctx = rupture.sctx, rupture.dctx
     except AttributeError:
         self.sctx, self.dctx = cmaker.make_contexts(sitecol, rupture)
     self.sids = self.sctx.sids
     if correlation_model:  # store the filtered sitecol
         self.sites = sitecol.complete.filtered(self.sids)
Code Example #26
    def __init__(self, gmpe_name, branch="central",
                 homoskedastic_sigma=False,  scaling_factor=None,
                 vskappa=None, phi_ds2s=True):
        super().__init__(gmpe_name=gmpe_name)
        self.gmpe = registry[gmpe_name]()
        # Update the required_parameters
        for name in uppernames:
            setattr(self, name,
                    frozenset(getattr(self, name) | getattr(self.gmpe, name)))

        # If a scaling factor is given, take its natural log
        if scaling_factor:
            self.scaling_factor = np.log(scaling_factor)
        else:
            self.scaling_factor = None

        # If vs-kappa is passed as a dictionary then transform to CoeffsTable
        if isinstance(vskappa, dict):
            in_vskappa = {}
            for key in vskappa:
                in_vskappa[from_string(key)] = {"vskappa":
                                                np.log(vskappa[key])}
            self.vskappa = CoeffsTable(sa_damping=5, table=in_vskappa)
        else:
            self.vskappa = None
        self.branch = branch
        self.homoskedastic_sigma = homoskedastic_sigma
        self.phi_ds2s = phi_ds2s
Code Example #27
File: core.py Project: MohsenKohrangi/oq-engine
def save_disagg_result(job_id, site_id, bin_edges, trt_names, matrix,
                       rlz_id, investigation_time, imt_str, iml, poe):
    """
    Save a computed disaggregation matrix to `hzrdr.disagg_result` (see
    :class:`~openquake.engine.db.models.DisaggResult`).

    :param int job_id:
        id of the current job.
    :param int site_id:
        id of the current site
    :param bin_edges:
        The 5-uple mag, dist, lon, lat, eps
    :param trt_names:
        The list of Tectonic Region Types
    :param matrix:
        A probability array
    :param rlz_id:
        id of the :class:`openquake.engine.db.models.LtRealization` to which
        these results belong.
    :param float investigation_time:
        Investigation time (years) for the calculation.
    :param imt_str:
        Intensity measure type (PGA, SA, etc.)
    :param float iml:
        Intensity measure level interpolated (using ``poe``) from the hazard
        curve at the ``site``.
    :param float poe:
        Disaggregation probability of exceedance value for this result.
    """
    job = models.OqJob.objects.get(id=job_id)

    site_wkt = models.HazardSite.objects.get(pk=site_id).location.wkt

    disp_name = _DISAGG_RES_NAME_FMT % dict(
        poe=poe, rlz=rlz_id, imt=imt_str, wkt=site_wkt)

    output = models.Output.objects.create_output(
        job, disp_name, 'disagg_matrix')

    imt, sa_period, sa_damping = from_string(imt_str)
    mag, dist, lon, lat, eps = bin_edges
    models.DisaggResult.objects.create(
        output=output,
        lt_realization_id=rlz_id,
        investigation_time=investigation_time,
        imt=imt,
        sa_period=sa_period,
        sa_damping=sa_damping,
        iml=iml,
        poe=poe,
        mag_bin_edges=mag,
        dist_bin_edges=dist,
        lon_bin_edges=lon,
        lat_bin_edges=lat,
        eps_bin_edges=eps,
        trts=trt_names,
        location=site_wkt,
        matrix=matrix,
    )
Code Example #28
File: disagg.py Project: gem/oq-hazardlib
def _collect_bins_data(trt_num, sources, site, curves, src_group_id,
                       rlzs_assoc, gsims, imtls, poes, truncation_level,
                       n_epsilons, iml_disagg, mon):
    # returns a BinData instance
    sitecol = SiteCollection([site])
    mags = []
    dists = []
    lons = []
    lats = []
    trts = []
    pnes = collections.defaultdict(list)
    sitemesh = sitecol.mesh
    make_ctxt = mon('making contexts', measuremem=False)
    disagg_poe = mon('disaggregate_poe', measuremem=False)
    cmaker = ContextMaker(gsims)
    for source in sources:
        try:
            tect_reg = trt_num[source.tectonic_region_type]
            for rupture in source.iter_ruptures():
                with make_ctxt:
                    try:
                        sctx, rctx, dctx = cmaker.make_contexts(
                            sitecol, rupture)
                    except filters.FarAwayRupture:
                        continue
                # extract rupture parameters of interest
                mags.append(rupture.mag)
                dists.append(dctx.rjb[0])  # single site => single distance
                [closest_point] = rupture.surface.get_closest_points(sitemesh)
                lons.append(closest_point.longitude)
                lats.append(closest_point.latitude)
                trts.append(tect_reg)
                # a dictionary rlz.id, poe, imt_str -> (iml, prob_no_exceed)
                for gsim in gsims:
                    gs = str(gsim)
                    for imt_str, imls in imtls.items():
                        imt = from_string(imt_str)
                        imls = numpy.array(imls[::-1])
                        for rlz in rlzs_assoc[src_group_id, gs]:
                            rlzi = rlz.ordinal
                            iml = iml_disagg.get(imt_str)
                            curve_poes = curves[rlzi, imt_str][::-1]
                            for k, v in _disagg(
                                    iml, poes, curve_poes, imls, gsim, rupture,
                                    rlzi, imt, imt_str, sctx, rctx, dctx,
                                    truncation_level, n_epsilons, disagg_poe):
                                pnes[k].append(v)
        except Exception as err:
            etype, err, tb = sys.exc_info()
            msg = 'An error occurred with source id=%s. Error: %s'
            msg %= (source.source_id, err)
            raise_(etype, msg, tb)

    return BinData(numpy.array(mags, float),
                   numpy.array(dists, float),
                   numpy.array(lons, float),
                   numpy.array(lats, float),
                   numpy.array(trts, int),
                   pnes)
Code Example #29
def gmf_to_hazard_curve_arg_gen(job):
    """
    Generate a sequence of args for the GMF to hazard curve post-processing job
    for a given ``job``. These are task args.

    Yielded arguments are as follows:

    * job ID
    * point geometry
    * logic tree realization ID
    * IMT
    * IMLs
    * hazard curve "collection" ID
    * investigation time
    * duration
    * SA period
    * SA damping

    See :func:`gmf_to_hazard_curve_task` for more information about these
    arguments.

    As a side effect, :class:`openquake.engine.db.models.HazardCurve`
    records are
    created for each :class:`openquake.engine.db.models.LtRealization` and IMT.

    :param job:
        :class:`openquake.engine.db.models.OqJob` instance.
    """
    hc = job.hazard_calculation
    sites = models.HazardSite.objects.filter(hazard_calculation=hc)

    lt_realizations = models.LtRealization.objects.filter(
        lt_model__hazard_calculation=hc.id)

    invest_time = hc.investigation_time
    duration = hc.ses_per_logic_tree_path * invest_time

    for raw_imt, imls in hc.intensity_measure_types_and_levels.iteritems():
        imt, sa_period, sa_damping = from_string(raw_imt)

        for lt_rlz in lt_realizations:
            hc_output = models.Output.objects.create_output(
                job,
                HAZ_CURVE_DISP_NAME_FMT % dict(imt=raw_imt, rlz=lt_rlz.id),
                'hazard_curve')

            # Create the hazard curve "collection":
            hc_coll = models.HazardCurve.objects.create(
                output=hc_output,
                lt_realization=lt_rlz,
                investigation_time=invest_time,
                imt=imt,
                imls=imls,
                sa_period=sa_period,
                sa_damping=sa_damping)

            for site in sites:
                yield (job.id, site, lt_rlz.id, imt, imls, hc_coll.id,
                       invest_time, duration, sa_period, sa_damping)
Code Example #30
File: core.py Project: monellid/oq-engine
def gmfs(job_id, ses_ruptures, sitecol, gmf_id):
    """
    :param int job_id: the current job ID
    :param ses_ruptures: a set of `SESRupture` instances
    :param sitecol: a `SiteCollection` instance
    :param int gmf_id: the ID of a `Gmf` instance
    """
    job = models.OqJob.objects.get(pk=job_id)
    hc = job.hazard_calculation
    # distinct is here to make sure that IMTs such as
    # SA(0.8) and SA(0.80) are considered the same
    imts = distinct(from_string(x) for x in sorted(hc.intensity_measure_types))
    gsim = AVAILABLE_GSIMS[hc.gsim]()  # instantiate the GSIM class
    correlation_model = models.get_correl_model(job)

    cache = collections.defaultdict(list)  # {site_id, imt -> gmvs}
    inserter = writer.CacheInserter(models.GmfData, 1000)
    # insert GmfData in blocks of 1000 sites

    # NB: ses_ruptures is a non-empty list produced by the block_splitter
    rupture = ses_ruptures[0].rupture  # ProbabilisticRupture instance
    with EnginePerformanceMonitor('computing gmfs', job_id, gmfs):
        gmf = GmfComputer(rupture, sitecol, imts, [gsim], hc.truncation_level,
                          correlation_model)
        gname = gsim.__class__.__name__
        for ses_rup in ses_ruptures:
            for (gname, imt), gmvs in gmf.compute(ses_rup.seed):
                for site_id, gmv in zip(sitecol.sids, gmvs):
                    # float may be needed below to convert 1x1 matrices
                    cache[site_id, imt].append((gmv, ses_rup.id))

    with EnginePerformanceMonitor('saving gmfs', job_id, gmfs):
        for (site_id, imt_str), data in cache.iteritems():
            imt = from_string(imt_str)
            gmvs, rup_ids = zip(*data)
            inserter.add(
                models.GmfData(
                    gmf_id=gmf_id,
                    task_no=0,
                    imt=imt[0],
                    sa_period=imt[1],
                    sa_damping=imt[2],
                    site_id=site_id,
                    rupture_ids=rup_ids,
                    gmvs=gmvs))
        inserter.flush()
Code Example #31
File: gmf.py Project: larsbutler/oq-hazardlib
def ground_motion_fields(rupture,
                         sites,
                         imts,
                         gsim,
                         truncation_level,
                         realizations,
                         correlation_model=None,
                         rupture_site_filter=filters.rupture_site_noop_filter,
                         seed=None):
    """
    Given an earthquake rupture, the ground motion field calculator computes
    ground shaking over a set of sites, by randomly sampling a ground shaking
    intensity model. A ground motion field represents a possible 'realization'
    of the ground shaking due to an earthquake rupture. If a non-trivial
    filtering function is passed, the final result is expanded and filled
    with zeros in the places corresponding to the filtered out sites.

    .. note::

     This calculator is using random numbers. In order to reproduce the
     same results numpy random numbers generator needs to be seeded, see
     http://docs.scipy.org/doc/numpy/reference/generated/numpy.random.seed.html

    :param openquake.hazardlib.source.rupture.Rupture rupture:
        Rupture to calculate ground motion fields radiated from.
    :param openquake.hazardlib.site.SiteCollection sites:
        Sites of interest to calculate GMFs.
    :param imts:
        List of intensity measure type objects (see
        :mod:`openquake.hazardlib.imt`).
    :param gsim:
        Ground-shaking intensity model, instance of subclass of either
        :class:`~openquake.hazardlib.gsim.base.GMPE` or
        :class:`~openquake.hazardlib.gsim.base.IPE`.
    :param truncation_level:
        Float, number of standard deviations for truncation of the intensity
        distribution, or ``None``.
    :param realizations:
        Integer number of GMF realizations to compute.
    :param correlation_model:
        Instance of correlation model object. See
        :mod:`openquake.hazardlib.correlation`. Can be ``None``, in which case
        non-correlated ground motion fields are calculated. Correlation model
        is not used if ``truncation_level`` is zero.
    :param rupture_site_filter:
        Optional rupture-site filter function. See
        :mod:`openquake.hazardlib.calc.filters`.
    :param int seed:
        The seed used in the numpy random number generator
    :returns:
        Dictionary mapping intensity measure type objects (same
        as in parameter ``imts``) to 2d numpy arrays of floats,
        representing different realizations of ground shaking intensity
        for all sites in the collection. First dimension represents
        sites and second one is for realizations.
    """
    ruptures_sites = list(rupture_site_filter([(rupture, sites)]))
    if not ruptures_sites:
        return dict(
            (imt, numpy.zeros((len(sites), realizations))) for imt in imts)
    [(rupture, sites)] = ruptures_sites

    gc = GmfComputer(rupture, sites, map(str, imts), [gsim], truncation_level,
                     correlation_model)
    result = gc._compute(seed, gsim, realizations)
    for imt, gmf in result.iteritems():
        # make sure the length of the arrays in output matches the number of sites
        if rupture_site_filter is not filters.rupture_site_noop_filter:
            result[imt] = sites.expand(gmf, placeholder=0)

    return {from_string(imt): result[imt] for imt in result}
Code Example #32
def hazard_curves_per_trt(sources,
                          sites,
                          imtls,
                          gsims,
                          truncation_level=None,
                          source_site_filter=filters.source_site_noop_filter,
                          rupture_site_filter=filters.rupture_site_noop_filter,
                          monitor=DummyMonitor()):
    """
    Compute the hazard curves for a set of sources belonging to the same
    tectonic region type for all the GSIMs associated to that TRT.
    The arguments are the same as in :func:`calc_hazard_curves`, except
    for ``gsims``, which is a list of GSIM instances.

    :returns:
        A list of G arrays of size N, where N is the number of sites and
        G the number of gsims. Each array contains records with fields given
        by the intensity measure types; the size of each field is given by the
        number of levels in ``imtls``.
    """
    gnames = list(map(str, gsims))
    imt_dt = numpy.dtype([(imt, float, len(imtls[imt]))
                          for imt in sorted(imtls)])
    imts = {from_string(imt): imls for imt, imls in imtls.items()}
    curves = [numpy.ones(len(sites), imt_dt) for gname in gnames]
    sources_sites = ((source, sites) for source in sources)
    ctx_mon = monitor('making contexts', measuremem=False)
    rup_mon = monitor('getting ruptures', measuremem=False)
    pne_mon = monitor('computing poes', measuremem=False)
    monitor.calc_times = []  # pairs (src_id, delta_t)
    for source, s_sites in source_site_filter(sources_sites):
        t0 = time.time()
        try:
            with rup_mon:
                rupture_sites = list(
                    rupture_site_filter((rupture, s_sites)
                                        for rupture in source.iter_ruptures()))
            for rupture, r_sites in rupture_sites:
                for i, gsim in enumerate(gsims):
                    with ctx_mon:
                        sctx, rctx, dctx = gsim.make_contexts(r_sites, rupture)
                    with pne_mon:
                        for imt in imts:
                            poes = gsim.get_poes(sctx, rctx, dctx, imt,
                                                 imts[imt], truncation_level)
                            pno = rupture.get_probability_no_exceedance(poes)
                            expanded_pno = r_sites.expand(pno, placeholder=1)
                            curves[i][str(imt)] *= expanded_pno
        except Exception as err:
            etype, err, tb = sys.exc_info()
            msg = 'An error occurred with source id=%s. Error: %s'
            msg %= (source.source_id, str(err))
            raise_(etype, msg, tb)

        # we are attaching the calculation times to the monitor
        # so that oq-lite (and the engine) can store them
        monitor.calc_times.append((source.id, time.time() - t0))
        # NB: source.id is an integer; it should not be confused
        # with source.source_id, which is a string
    for i in range(len(gnames)):
        for imt in imtls:
            curves[i][imt] = 1. - curves[i][imt]
    return curves
Code Example #33
    def execute(self):
        """
        Raises:
            NotADirectoryError: When the event data directory does not exist.
            FileNotFoundError: When the shake_result HDF file does not
                exist.
        """
        install_path, data_path = get_config_paths()
        datadir = os.path.join(data_path, self._eventid, 'current', 'products')
        if not os.path.isdir(datadir):
            raise NotADirectoryError('%s is not a valid directory.' % datadir)
        datafile = os.path.join(datadir, 'shake_result.hdf')
        if not os.path.isfile(datafile):
            raise FileNotFoundError('%s does not exist.' % datafile)

        # Open the ShakeMapOutputContainer and extract the data
        oc = ShakeMapOutputContainer.load(datafile)
        if oc.getDataType() != 'grid':
            raise NotImplementedError('plotregr module can only operate on '
                                      'gridded data not sets of points')

        # get the path to the products.conf file, load the config
        config_file = os.path.join(install_path, 'config', 'products.conf')
        spec_file = get_configspec('products')
        validator = get_custom_validator()
        config = ConfigObj(config_file, configspec=spec_file)
        results = config.validate(validator)
        if not isinstance(results, bool) or not results:
            config_error(config, results)

        # If mapping runs in parallel, then we want this module to run in parallel as well.
        # Otherwise we get weird errors from matplotlib
        max_workers = config['products']['mapping']['max_workers']

        #
        # Cheating here a bit by assuming that the IMTs are the same
        # as the regression IMTs
        #
        rockgrid = {}
        soilgrid = {}
        rocksd = {}
        soilsd = {}
        imtlist = oc.getIMTs('GREATER_OF_TWO_HORIZONTAL')
        for myimt in imtlist:
            rockgrid[myimt], _ = oc.getArray(['attenuation', 'rock', myimt],
                                             'mean')
            soilgrid[myimt], _ = oc.getArray(['attenuation', 'soil', myimt],
                                             'mean')
            rocksd[myimt], _ = oc.getArray(['attenuation', 'rock', myimt],
                                           'std')
            soilsd[myimt], _ = oc.getArray(['attenuation', 'soil', myimt],
                                           'std')
        distances, _ = oc.getArray(['attenuation', 'distances'], 'rrup')

        stations = oc.getStationDict()

        #
        # Make plots
        #
        alist = []
        for myimt in imtlist:
            a = {
                'myimt': myimt,
                'rockgrid': rockgrid,
                'soilgrid': soilgrid,
                'rocksd': rocksd,
                'soilsd': soilsd,
                'stations': stations,
                'distances': distances,
                'eventid': self._eventid,
                'datadir': datadir
            }
            alist.append(a)
            if myimt == 'MMI':
                self.contents.addFile(
                    'miRegr', 'Intensity Regression',
                    'Regression plot of macroseismic '
                    'intensity.', 'mmi_regr.png', 'image/png')
            elif myimt == 'PGA':
                self.contents.addFile(
                    'pgaRegr', 'PGA Regression', 'Regression plot of peak '
                    'ground acceleration (%g).', 'pga_regr.png', 'image/png')
            elif myimt == 'PGV':
                self.contents.addFile(
                    'pgvRegr', 'PGV Regression',
                    'Regression plot of peak ground '
                    'velocity (cm/s).', 'pgv_regr.png', 'image/png')
            else:
                oqimt = imt.from_string(myimt)
                period = str(oqimt.period)
                filebase = oq_to_file(myimt)
                psacap = 'Regression plot of ' + period + ' sec 5% damped ' \
                         'pseudo-spectral acceleration(%g).'
                self.contents.addFile(filebase + 'Regr',
                                      'PSA ' + period + ' sec Regression',
                                      psacap, filebase + '_regr.png',
                                      'image/png')

        if max_workers > 0:
            with cf.ProcessPoolExecutor(max_workers=max_workers) as ex:
                results = ex.map(make_plots, alist)
                list(results)
        else:
            for adict in alist:
                make_plots(adict)

        #
        # Make attenuation_curves.json
        #
        jdict = {'eventid': self._eventid}
        jdict['gmpe'] = {}
        for site in ['soil', 'rock']:
            jdict['gmpe'][site] = {}
            for myimt in imtlist:
                jdict['gmpe'][site][myimt] = {}
                jdict['gmpe'][site][myimt]['mean'] = oc.getArray(
                    ['attenuation', site, myimt], 'mean')[0].tolist()
                jdict['gmpe'][site][myimt]['stddev'] = oc.getArray(
                    ['attenuation', site, myimt], 'std')[0].tolist()
        jdict['distances'] = {}
        for dtype in ['repi', 'rhypo', 'rjb', 'rrup']:
            jdict['distances'][dtype] = oc.getArray(
                ['attenuation', 'distances'], dtype)[0].tolist()
        jdict['mean_bias'] = {}
        info = oc.getMetadata()
        for myimt in imtlist:
            jdict['mean_bias'][myimt] = info['output']['ground_motions'][
                myimt]['bias']
        jstring = json.dumps(jdict, allow_nan=False)
        jfile = os.path.join(datadir, 'attenuation_curves.json')
        f = open(jfile, 'wt')
        f.write(jstring)
        f.close()
        oc.close()
        cap = "Nominal attenuation curves"
        self.contents.addFile('attenuationCurves', 'Attenuation Curves', cap,
                              'attenuation_curves.json', 'application/json')
Code Example #34
def signal_end(st,
               event_time,
               event_lon,
               event_lat,
               event_mag,
               method=None,
               vmin=None,
               floor=None,
               model=None,
               epsilon=2.0):
    """
    Estimate end of signal by using a model of the 5-95% significant
    duration, and adding this value to the "signal_split" time. This probably
    only works well when the split is estimated with a p-wave picker since
    the velocity method often ends up with split times that are well before
    the signal actually starts.

    Args:
        st (StationStream):
            Stream of data.
        event_time (UTCDateTime):
            Event origin time.
        event_mag (float):
            Event magnitude.
        event_lon (float):
            Event longitude.
        event_lat (float):
            Event latitude.
        method (str):
            Method for estimating signal end time. Either 'velocity'
            or 'model'.
        vmin (float):
            Velocity (km/s) for estimating end of signal. Only used if
            method="velocity".
        floor (float):
            Minimum duration (sec) applied along with vmin.
        model (str):
            Short name of duration model to use. Must be defined in the
            gmprocess/data/modules.yml file.
        epsilon (float):
            Number of standard deviations; if epsilon is 1.0, then the signal
            window duration is the mean Ds + 1 standard deviation. Only used
            for method="model".

    Returns:
        stream with the stats dict of each trace updated to include a
        stats['processing_parameters']['signal_end'] dictionary.

    """
    # Load openquake stuff if method="model"
    if method == "model":
        mod_file = pkg_resources.resource_filename(
            'gmprocess', os.path.join('data', 'modules.yml'))
        with open(mod_file, 'r') as f:
            mods = yaml.load(f, Loader=yaml.SafeLoader)

        # Import module
        cname, mpath = mods['modules'][model]
        dmodel = getattr(import_module(mpath), cname)()

        # Set some "conservative" inputs (in that they will tend to give
        # larger durations).
        sctx = SitesContext()
        sctx.vs30 = np.array([180.0])
        sctx.z1pt0 = np.array([0.51])
        rctx = RuptureContext()
        rctx.mag = event_mag
        rctx.rake = -90.0
        dur_imt = imt.from_string('RSD595')
        stddev_types = [const.StdDev.INTRA_EVENT]

    for tr in st:
        if not tr.hasParameter('signal_split'):
            continue
        if method == "velocity":
            if vmin is None:
                raise ValueError('Must specify vmin if method is "velocity".')
            if floor is None:
                raise ValueError('Must specify floor if method is "velocity".')
            epi_dist = gps2dist_azimuth(
                lat1=event_lat,
                lon1=event_lon,
                lat2=tr.stats['coordinates']['latitude'],
                lon2=tr.stats['coordinates']['longitude'])[0] / 1000.0
            end_time = event_time + max(floor, epi_dist / vmin)
        elif method == "model":
            if model is None:
                raise ValueError('Must specify model if method is "model".')
            epi_dist = gps2dist_azimuth(
                lat1=event_lat,
                lon1=event_lon,
                lat2=tr.stats['coordinates']['latitude'],
                lon2=tr.stats['coordinates']['longitude'])[0] / 1000.0
            dctx = DistancesContext()
            # Repi >= Rrup, so substitution here should be conservative
            # (leading to larger durations).
            dctx.rrup = np.array([epi_dist])
            lnmu, lnstd = dmodel.get_mean_and_stddevs(sctx, rctx, dctx,
                                                      dur_imt, stddev_types)
            duration = np.exp(lnmu + epsilon * lnstd[0])
            # Get split time
            split_time = tr.getParameter('signal_split')['split_time']
            end_time = split_time + float(duration)
        else:
            raise ValueError('method must be either "velocity" or "model".')
        # Update trace params
        end_params = {
            'end_time': end_time,
            'method': method,
            'vsplit': vmin,
            'floor': floor,
            'model': model,
            'epsilon': epsilon
        }
        tr.setParameter('signal_end', end_params)

    return st
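
A rough usage sketch of the "velocity" branch above, assuming ObsPy is available; the event coordinates, station coordinates, vmin and floor below are made-up values, and in the real code the station coordinates come from tr.stats['coordinates']:

from obspy import UTCDateTime
from obspy.geodetics import gps2dist_azimuth

event_time = UTCDateTime('2020-01-01T00:00:00')
event_lat, event_lon = 35.0, -118.0          # hypothetical epicenter
sta_lat, sta_lon = 35.5, -117.5              # hypothetical station
vmin, floor = 2.0, 120.0                     # km/s and seconds

# epicentral distance in km (gps2dist_azimuth returns meters)
epi_dist = gps2dist_azimuth(event_lat, event_lon, sta_lat, sta_lon)[0] / 1000.0
# the signal is assumed to end when a vmin-velocity phase would arrive,
# but no sooner than `floor` seconds after the origin time
end_time = event_time + max(floor, epi_dist / vmin)
print(end_time)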
Code example #35
0
 def execute(self):
     """
     Compute the conditional spectrum
     """
     oq = self.oqparam
     self.full_lt = self.datastore['full_lt']
     self.trts = list(self.full_lt.gsim_lt.values)
     self.imts = list(oq.imtls)
     imti = self.imts.index(oq.imt_ref)
     self.M = M = len(self.imts)
     dstore = (self.datastore.parent
               if self.datastore.parent else self.datastore)
     totrups = len(dstore['rup/mag'])
     logging.info('Reading {:_d} ruptures'.format(totrups))
     rdt = [('grp_id', U16), ('nsites', U16), ('idx', U32)]
     rdata = numpy.zeros(totrups, rdt)
     rdata['idx'] = numpy.arange(totrups)
     rdata['grp_id'] = dstore['rup/grp_id'][:]
     rdata['nsites'] = [len(sids) for sids in dstore['rup/sids_']]
     totweight = rdata['nsites'].sum()
     trt_smrs = dstore['trt_smrs'][:]
     rlzs_by_gsim = self.full_lt.get_rlzs_by_gsim_list(trt_smrs)
     _G = sum(len(rbg) for rbg in rlzs_by_gsim)
     self.periods = [from_string(imt).period for imt in self.imts]
     if oq.imls_ref:
         self.imls = oq.imls_ref
     else:  # extract imls from the "mean" hazard map
         curve = self.datastore.sel('hcurves-stats', stat='mean')[0, 0,
                                                                  imti]
         [self.imls] = compute_hazard_maps(curve, oq.imtls[oq.imt_ref],
                                           oq.poes)  # there is 1 site
     self.P = P = len(self.imls)
     self.datastore.create_dset('cs-rlzs', float,
                                (self.R, M, self.N, 2, self.P))
     self.datastore.set_shape_descr('cs-rlzs',
                                    rlz_id=self.R,
                                    period=self.periods,
                                    sid=self.N,
                                    cs=2,
                                    poe_id=P)
     self.datastore.create_dset('cs-stats', float, (1, M, self.N, 2, P))
     self.datastore.set_shape_descr('cs-stats',
                                    stat='mean',
                                    period=self.periods,
                                    sid=self.N,
                                    cs=['spec', 'std'],
                                    poe_id=P)
     self.datastore.create_dset('_c', float, (_G, M, self.N, 2, P))
     self.datastore.create_dset('_s', float, (_G, self.N, P))
     G = max(len(rbg) for rbg in rlzs_by_gsim)
     maxw = 2 * 1024**3 / (16 * G * self.M)  # at max 2 GB
     maxweight = min(numpy.ceil(totweight / (oq.concurrent_tasks or 1)),
                     maxw)
     U = 0
     Ta = 0
     self.cmakers = read_cmakers(self.datastore)
     self.datastore.swmr_on()
     smap = parallel.Starmap(conditional_spectrum, h5=self.datastore.hdf5)
     # IMPORTANT!! we rely on the fact that the classical part
     # of the calculation stores the ruptures in chunks of constant
     # grp_id, therefore it is possible to build (start, stop) slices
     for block in general.block_splitter(rdata, maxweight,
                                         operator.itemgetter('nsites'),
                                         operator.itemgetter('grp_id')):
         Ta += 1
         grp_id = block[0]['grp_id']
         G = len(rlzs_by_gsim[grp_id])
         cmaker = self.cmakers[grp_id]
         U = max(U, block.weight)
         slc = slice(block[0]['idx'], block[-1]['idx'] + 1)
         smap.submit((dstore, slc, cmaker, imti, self.imls))
     return smap.reduce()
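
The task loop above depends on openquake.baselib.general.block_splitter to cut the rupture records into blocks of bounded total weight that never mix grp_id values. The following is a simplified pure-Python re-implementation of that idea, not the library function itself, shown only to illustrate what each submitted block looks like:

def split_blocks(records, max_weight, weight, key):
    """Yield lists of records grouped by key, each with total weight at most
    max_weight (a single over-heavy record still forms its own block)."""
    block, tot, cur_key = [], 0, None
    for rec in records:
        k = key(rec)
        if block and (k != cur_key or tot + weight(rec) > max_weight):
            yield block
            block, tot = [], 0
        block.append(rec)
        tot += weight(rec)
        cur_key = k
    if block:
        yield block

rdata = [{'grp_id': 0, 'nsites': 5}, {'grp_id': 0, 'nsites': 7},
         {'grp_id': 1, 'nsites': 3}, {'grp_id': 1, 'nsites': 4}]
for blk in split_blocks(rdata, 10, lambda r: r['nsites'], lambda r: r['grp_id']):
    print(blk)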
Code example #36
0
def draw_map(adict, override_scenario=False):
    """If adict['imtype'] is MMI, draw a map of intensity draped over
    topography, otherwise Draw IMT contour lines over hill-shaded topography.

    Args:
        adict (dictionary): A dictionary containing the following keys:
            'imtype' (str): The intensity measure type
            'topogrid' (Grid2d): A topography grid
            'allcities' (Cities): A list of global cities,
            'states_provinces' (Cartopy Feature): States/province boundaries.
            'countries' (Cartopy Feature): Country boundaries.
            'oceans' (Cartopy Feature): Oceans.
            'lakes' (Cartopy Feature): Lakes.
            'roads' (Shapely Feature): Roads.
            'faults' (Shapely Feature): Fault traces
            'datadir' (str): The path into which to deposit products
            'operator' (str): The producer of this shakemap
            'filter_size' (int): The size of the filter used before contouring
            'info' (dictionary): The shakemap info structure
            'component' (str): The intensity measure component being plotted
            'imtdict' (dictionary): Dict containing the IMT grids
            'ruptdict' (dictionary): Dict containing the rupture data
            'stationdict' (dictionary): Dict of station data
            'config' (dictionary): The configuration data for this shakemap
            'tdict' (dictionary): The text strings to be printed on the map
                in the user's choice of language.
        override_scenario (bool): Turn off scenario watermark.

    Returns:
        Tuple of (Matplotlib figure, Matplotlib figure): Objects containing
        the map generated by this function, and the intensity legend,
        respectively. If the imtype of this map is not 'MMI', the second
        element of the tuple will be None.
    """
    imtype = adict['imtype']
    imtdict = adict['imtdict']      # mmidict
    imtdata = imtdict['mean']       # mmidata
    gd = GeoDict(imtdict['mean_metadata'])
    imtgrid = Grid2D(imtdata, gd)   # mmigrid

    gd = imtgrid.getGeoDict()

    # Retrieve the epicenter - this will get used on the map
    rupture = rupture_from_dict(adict['ruptdict'])
    origin = rupture.getOrigin()
    center_lat = origin.lat
    center_lon = origin.lon

    # load the cities data, limit to cities within shakemap bounds
    cities = adict['allcities'].limitByBounds((gd.xmin, gd.xmax,
                                               gd.ymin, gd.ymax))

    # get the map boundaries and figure size
    bounds, figsize, aspect = _get_map_info(gd)

    # Note: dimensions are: [left, bottom, width, height]
    dim_left = 0.1
    dim_bottom = 0.19
    dim_width = 0.8
    dim_height = dim_width/aspect

    # Create the MercatorMap object, which holds a separate but identical
    # axes object used to determine collisions between city labels.
    mmap = MercatorMap(
        bounds, figsize, cities, padding=0.5,
        dimensions=[dim_left, dim_bottom, dim_width, dim_height])
    fig = mmap.figure
    ax = mmap.axes
    # this needs to be done here so that city label collision
    # detection will work
    fig.canvas.draw()

    # get the geographic projection object
    geoproj = mmap.geoproj
    # get the mercator projection object
    proj = mmap.proj
    # get the proj4 string - used by Grid2D project() method
    projstr = proj.proj4_init

    # get the projected IMT and topo grids
    pimtgrid, ptopogrid = _get_projected_grids(imtgrid, adict['topogrid'],
                                               projstr)

    # get the projected geodict
    proj_gd = pimtgrid.getGeoDict()

    pimtdata = pimtgrid.getData()
    ptopo_data = ptopogrid.getData()

    mmimap = ColorPalette.fromPreset('mmi')

    if imtype == 'MMI':
        draped_hsv = _get_draped(pimtdata, ptopo_data, mmimap)
    else:
        # get the draped topo data
        topo_colormap = ColorPalette.fromPreset('shaketopo')
        draped_hsv = _get_shaded(ptopo_data, topo_colormap)
        # convert units
        if imtype == 'PGV':
            pimtdata = np.exp(pimtdata)
        else:
            pimtdata = np.exp(pimtdata) * 100

    plt.sca(ax)
    ax.set_xlim(proj_gd.xmin, proj_gd.xmax)
    ax.set_ylim(proj_gd.ymin, proj_gd.ymax)
    img_extent = (proj_gd.xmin, proj_gd.xmax, proj_gd.ymin, proj_gd.ymax)

    plt.imshow(draped_hsv, origin='upper', extent=img_extent,
               zorder=IMG_ZORDER, interpolation='none')

    config = adict['config']
    gmice = get_object_from_config('gmice', 'modeling', config)
    gmice_imts = gmice.DEFINED_FOR_INTENSITY_MEASURE_TYPES
    gmice_pers = gmice.DEFINED_FOR_SA_PERIODS

    oqimt = imt.from_string(imtype)

    if imtype != 'MMI' and (not isinstance(oqimt, tuple(gmice_imts)) or
                            (isinstance(oqimt, imt.SA) and
                             oqimt.period not in gmice_pers)):
        my_gmice = None
    else:
        my_gmice = gmice

    if imtype != 'MMI':
        # call the contour module in plotting to get the vertices of the
        # contour lines
        contour_objects = contour(imtdict, imtype, adict['filter_size'],
                                  my_gmice)

        # get a color palette for the levels we have
        # levels = [c['properties']['value'] for c in contour_objects]

        # cartopy shapely feature has some weird behaviors, so I had to go
        # rogue and draw contour lines/labels myself.
        # draw dashed contours first, the ones over land will be overridden by
        # solid contours
        npoints = []
        for contour_object in contour_objects:
            props = contour_object['properties']
            multi_lines = sShape(contour_object['geometry'])
            pmulti_lines = proj.project_geometry(multi_lines, src_crs=geoproj)
            for multi_line in pmulti_lines:
                pmulti_line = mapping(multi_line)['coordinates']
                x, y = zip(*pmulti_line)
                npoints.append(len(x))
                # color = imt_cmap.getDataColor(props['value'])
                ax.plot(x, y, color=props['color'], linestyle='dashed',
                        zorder=DASHED_CONTOUR_ZORDER)

        white_box = dict(
            boxstyle="round",
            ec=(0, 0, 0),
            fc=(1., 1, 1),
            color='k'
        )

        # only label lines with lots of points
        npoints = np.array(npoints)
        # min_npoints = npoints.mean() - (npoints.std()/2)
        min_npoints = npoints.mean()

        # draw solid contours next - the ones over water will be covered by
        # ocean polygon
        for contour_object in contour_objects:
            props = contour_object['properties']
            multi_lines = sShape(contour_object['geometry'])
            pmulti_lines = proj.project_geometry(multi_lines, src_crs=geoproj)
            for multi_line in pmulti_lines:
                pmulti_line = mapping(multi_line)['coordinates']
                x, y = zip(*pmulti_line)
                # color = imt_cmap.getDataColor(props['value'])
                ax.plot(x, y, color=props['color'], linestyle='solid',
                        zorder=CONTOUR_ZORDER)
                if len(x) > min_npoints:
                    # try to label each segment with black text in a white box
                    xc = x[int(len(x)/3)]
                    yc = y[int(len(y)/3)]
                    if _label_close_to_edge(
                            xc, yc, proj_gd.xmin, proj_gd.xmax,
                            proj_gd.ymin, proj_gd.ymax):
                        continue
                    # TODO: figure out if box is going to go outside the map,
                    # if so choose a different point on the line.
                    ax.text(xc, yc, '%.1f' % props['value'], size=8,
                            ha="center", va="center",
                            bbox=white_box, zorder=AXES_ZORDER-1)

    # make the border thicker
    lw = 2.0
    ax.outline_patch.set_zorder(BORDER_ZORDER)
    ax.outline_patch.set_linewidth(lw)
    ax.outline_patch.set_joinstyle('round')
    ax.outline_patch.set_capstyle('round')

    # Coastlines will get drawn when we draw the ocean edges
    # ax.coastlines(resolution="10m", zorder=COAST_ZORDER, linewidth=3)

    if adict['states_provinces']:
        ax.add_feature(adict['states_provinces'], edgecolor='0.5',
                       zorder=COAST_ZORDER)

    if adict['countries']:
        ax.add_feature(adict['countries'], edgecolor='black',
                       zorder=BORDER_ZORDER)

    if adict['oceans']:
        ax.add_feature(adict['oceans'], edgecolor='black',
                       zorder=OCEAN_ZORDER)

    if adict['lakes']:
        ax.add_feature(adict['lakes'], edgecolor='black',
                       zorder=OCEAN_ZORDER)

    if adict['faults'] is not None:
        ax.add_feature(adict['faults'], edgecolor='firebrick',
                       zorder=ROAD_ZORDER)

    if adict['roads'] is not None:
        ax.add_feature(adict['roads'], edgecolor='dimgray',
                       zorder=ROAD_ZORDER)

    # draw graticules, ticks, tick labels
    _draw_graticules(ax, *bounds)

    # is this event a scenario?
    info = adict['info']
    etype = info['input']['event_information']['event_type']
    is_scenario = etype == 'SCENARIO'

    if is_scenario and not override_scenario:
        plt.text(
            center_lon, center_lat,
            adict['tdict']['title_parts']['scenario'],
            fontsize=72,
            zorder=SCENARIO_ZORDER, transform=geoproj,
            alpha=WATERMARK_ALPHA, color=WATERMARK_COLOR,
            horizontalalignment='center',
            verticalalignment='center',
            rotation=45,
            path_effects=[
                path_effects.Stroke(linewidth=1, foreground='black')]
        )

    # Draw the map scale in the unoccupied lower corner.
    corner = 'll'
    draw_scale(ax, corner, pady=0.05, padx=0.05, zorder=SCALE_ZORDER)

    # draw cities
    mmap.drawCities(shadow=True, zorder=CITIES_ZORDER, draw_dots=True)

    # Draw the epicenter as a black star
    plt.sca(ax)
    plt.plot(center_lon, center_lat, 'k*', markersize=16,
             zorder=EPICENTER_ZORDER, transform=geoproj)

    # draw the rupture polygon(s) in black, if not point rupture
    point_source = True
    if not isinstance(rupture, PointRupture):
        point_source = False
        json_dict = rupture._geojson
        for feature in json_dict['features']:
            multi_poly = sShape(feature['geometry'])
            pmulti_poly = proj.project_geometry(multi_poly)
            mpoly = mapping(pmulti_poly)['coordinates']
            for poly in mpoly:
                for spoly in poly:
                    x, y = zip(*spoly)
                    ax.plot(x, y, 'k', lw=1, zorder=FAULT_ZORDER)

    # draw the station data on the map
    stations = adict['stationdict']
    _draw_stations(ax, stations, imtype, mmimap, geoproj)

    _draw_title(imtype, adict)

    process_time = info['processing']['shakemap_versions']['process_time']
    map_version = int(info['processing']['shakemap_versions']['map_version'])
    if imtype == 'MMI':
        _draw_mmi_legend(fig, mmimap, gmice, process_time,
                         map_version, point_source, adict['tdict'])
        # make a separate MMI legend
        fig2 = plt.figure(figsize=figsize)
        _draw_mmi_legend(fig2, mmimap, gmice, process_time,
                         map_version, point_source, adict['tdict'])

    else:
        _draw_imt_legend(fig, mmimap, imtype, gmice, process_time, map_version,
                         point_source, adict['tdict'])
        plt.draw()
        fig2 = None

    return (fig, fig2)
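
draw_map, and several of the functions below, decide whether the configured GMICE applies to the current IMT by comparing the openquake IMT object against the GMICE's declared IMT classes and SA periods. A small sketch of that check, assuming a hazardlib version in which PGA, PGV and SA are classes (as the code above does); the gmice_imts and gmice_pers values are placeholders standing in for the GMICE attributes:

from openquake.hazardlib import imt

# stand-ins for gmice.DEFINED_FOR_INTENSITY_MEASURE_TYPES and
# gmice.DEFINED_FOR_SA_PERIODS
gmice_imts = (imt.PGA, imt.PGV, imt.SA)
gmice_pers = [0.3, 1.0, 3.0]

for imtype in ['PGA', 'SA(0.3)', 'SA(0.6)']:
    oqimt = imt.from_string(imtype)
    # MMI is handled separately in the real code; every other IMT must be
    # declared by the GMICE, and SA additionally needs a supported period
    usable = (isinstance(oqimt, gmice_imts)
              and not (isinstance(oqimt, imt.SA)
                       and oqimt.period not in gmice_pers))
    print(imtype, usable)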
Code example #37
0
def _draw_mmi_legend(fig, palette, gmice, process_time, map_version,
                     point_source, tdict):
    """Create a legend axis for MMI plots.

    Args:
        fig (Figure): Matplotlib Figure object.
        palette (ColorPalette): ColorPalette using range of input data and
            IMT_CMAP.
        gmice: A gmice object.
        process_time (str): Process time.
        map_version (int): ShakeMap version.
        point_source (bool): Is the rupture a PointRupture?
        tdict (dict): Dictionary containing the text strings for printing
            on the maps (in the language of the user's choice).

    """
    cax = fig.add_axes([0.1, 0.00, 0.8, 0.15])
    plt.axis('off')
    cax_xmin, cax_xmax = cax.get_xlim()
    bottom, top = cax.get_ylim()
    plt.xlim(cax_xmin, cax_xmax)
    plt.ylim(bottom, top)

    acceleration = [tdict['mmi_scale']['acc_label']]
    velocity = [tdict['mmi_scale']['vel_label']]

    imt_edges = np.array([0.5, 1.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5, 10.5])
    mmi_centers = np.array([1.0, 2.5, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0])
    pga_values, _ = gmice.getGMfromMI(mmi_centers, imt.from_string('PGA'))
    pgv_values, _ = gmice.getGMfromMI(mmi_centers, imt.from_string('PGV'))
    pga_values = np.exp(pga_values)*100
    pgv_values = np.exp(pgv_values)
    pga_labels = ["{0:.3g}".format(set_num_precision(
        pga, 3, mode='float')) for pga in pga_values]
    pgv_labels = ["{0:.3g}".format(set_num_precision(
        pgv, 3, mode='float')) for pgv in pgv_values]
    pga_labels[0] = '<'+pga_labels[0]
    pga_labels[-1] = '>'+pga_labels[-1]
    pgv_labels[0] = '<'+pgv_labels[0]
    pgv_labels[-1] = '>'+pgv_labels[-1]
    acceleration += pga_labels
    velocity += pgv_labels

    yloc_first_row = 13/14
    yloc_second_row = 11/14
    yloc_third_row = 9/14
    yloc_fourth_row = 7/14
    yloc_fifth_row = 5/14
    yloc_sixth_row = 3/14
    yloc_seventh_row = 1.5/14

    yloc_first_line = 12/14
    yloc_second_line = 10/14
    yloc_third_line = 8/14
    yloc_fourth_line = 6/14
    # yloc_fifth_line = 4/14

    bottom = 4/14

    font0 = FontProperties()
    alignment = {
        'horizontalalignment': 'center',
        'verticalalignment': 'center'
    }
    font0.set_weight('bold')

    font1 = FontProperties()
    font1.set_weight('normal')

    # draw vertical cell separators
    sumwidth = 0.0
    gridleft = 0.0
    plt.plot([gridleft, gridleft], [bottom, top],
             'k', clip_on=False)  # left edge
    plt.plot([0, 1], [top, top], 'k', clip_on=False)
    plt.plot([0, 1], [bottom, bottom], 'k', clip_on=False)

    plt.plot([0, 1], [yloc_first_line, yloc_first_line],
             'k', clip_on=False)
    plt.plot([0, 1], [yloc_second_line, yloc_second_line],
             'k', clip_on=False)
    plt.plot([0, 1], [yloc_third_line, yloc_third_line],
             'k', clip_on=False)
    plt.plot([0, 1], [yloc_fourth_line, yloc_fourth_line],
             'k', clip_on=False)

    # Explanation of symbols: triangle is instrument, circle is mmi,
    # epicenter is black star
    # thick black line is rupture (if available)
    item_sep = [0.2, 0.28, 0.15]
    left_offset = 0.005
    label_pad = 0.02

    # Instrument
    triangle_marker_x = left_offset
    triangle_text_x = triangle_marker_x + label_pad
    plt.plot(triangle_marker_x, yloc_seventh_row, '^', markerfacecolor='w',
             markeredgecolor='k', markersize=6, mew=0.5, clip_on=False)
    plt.text(triangle_text_x,
             yloc_seventh_row,
             tdict['legend']['instrument'],
             va='center',
             ha='left')

    # Macroseismic
    circle_marker_x = triangle_text_x + item_sep[0]
    circle_text_x = circle_marker_x + label_pad
    plt.plot(circle_marker_x,
             yloc_seventh_row, 'o',
             markerfacecolor='w',
             markeredgecolor='k',
             markersize=4,
             mew=0.5)
    plt.text(circle_text_x,
             yloc_seventh_row,
             tdict['legend']['intensity'],
             va='center',
             ha='left')

    # Epicenter
    star_marker_x = circle_marker_x + item_sep[1]
    star_text_x = star_marker_x + label_pad
    plt.plot(star_marker_x,
             yloc_seventh_row, 'k*',
             markersize=12,
             mew=0.5)
    plt.text(star_text_x,
             yloc_seventh_row,
             tdict['legend']['epicenter'],
             va='center',
             ha='left')

    if not point_source:
        rup_marker_x = star_marker_x + item_sep[2]
        rup_text_x = rup_marker_x + label_pad
        rwidth = 0.02
        rheight = 0.05
        rup = patches.Rectangle(
            xy=(rup_marker_x - rwidth,
                yloc_seventh_row-0.5*rheight),
            width=rwidth,
            height=rheight,
            linewidth=2,
            edgecolor='k',
            facecolor='w'
        )
        cax.add_patch(rup)
        plt.text(rup_text_x,
                 yloc_seventh_row,
                 tdict['legend']['rupture'],
                 va='center',
                 ha='left')

    # Add conversion reference and shakemap version/process time
    version_x = 1.0
    tpl = (tdict['legend']['version'], map_version,
           tdict['legend']['processed'], process_time)
    plt.text(version_x, yloc_sixth_row,
             '%s %i: %s %s' % tpl,
             ha='right', va='center')

    ref = gmice.name
    refx = 0
    plt.text(refx, yloc_sixth_row,
             '%s %s' % (tdict['legend']['scale'], ref),
             va='center')

    nsteps = 10
    for i, width in enumerate(tdict['mmi_scale']['box_widths']):
        width /= 100
        textleft = sumwidth + width/2
        sumwidth += width
        plt.text(textleft, yloc_first_row,
                 tdict['mmi_scale']['shaking_labels'][i],
                 fontproperties=font1, **alignment)
        plt.text(textleft, yloc_second_row,
                 tdict['mmi_scale']['damage_labels'][i],
                 fontproperties=font1, **alignment)
        plt.text(textleft, yloc_third_row, acceleration[i],
                 fontproperties=font1, **alignment)
        plt.text(textleft, yloc_fourth_row, velocity[i],
                 fontproperties=font1, **alignment)

        if i == 0:
            font = font1
        else:
            font = font0
        th = plt.text(textleft, yloc_fifth_row,
                      tdict['mmi_scale']['intensity_labels'][i],
                      fontproperties=font, **alignment)
        th.set_path_effects([path_effects.Stroke(linewidth=2.0,
                                                 foreground='white'),
                             path_effects.Normal()])

        # draw right edge of cell
        plt.plot([gridleft+width, gridleft+width],
                 [bottom, top], 'k', clip_on=False)  # right

        # draw little colored rectangles inside the MMI cells
        if i > 0:
            left = gridleft
            ptop = yloc_fourth_line
            imt_min = imt_edges[i-1]
            imt_max = imt_edges[i]
            imts = np.linspace(imt_min, imt_max, nsteps)
            rights = np.linspace(gridleft, gridleft+width, nsteps)
            for mmi, right in zip(imts, rights):
                px = [left, right, right, left, left]
                py = [ptop, ptop, bottom, bottom, ptop]
                mmicolor = palette.getDataColor(mmi, color_format='hex')
                left = right
                plt.fill(px, py, mmicolor, ec=mmicolor)

        gridleft += width
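
The legend above converts MMI bin centers to PGA/PGV through the GMICE, exponentiates the values out of log space, and marks the extreme bins as open-ended. A small sketch of just that label-building step, using made-up log-PGA values in place of gmice.getGMfromMI output and plain '%.3g' formatting instead of ShakeMap's set_num_precision:

import numpy as np

# hypothetical ln(PGA in g) values for the nine MMI bin centers
ln_pga = np.array([-7.0, -5.5, -4.0, -3.2, -2.4, -1.7, -1.0, -0.4, 0.1])
pga_pct_g = np.exp(ln_pga) * 100          # convert to %g

labels = ['{0:.3g}'.format(v) for v in pga_pct_g]
labels[0] = '<' + labels[0]               # lowest bin is open-ended below
labels[-1] = '>' + labels[-1]             # highest bin is open-ended above
print(labels)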
Code example #38
0
File: check_gsim.py Project: maswiet/oq-engine
def _parse_csv_line(headers, values, req_site_params):
    """
    Parse a single line from data file.

    :param headers:
        A list of header names, the strings from the first line of csv file.
    :param values:
        A list of values of a single row to parse.
    :returns:
        A tuple of the following values (in specified order):

        sctx
            An instance of :class:`openquake.hazardlib.gsim.base.SitesContext`
            with attributes populated by the information from the row, in the
            form of single-element numpy arrays.
        rctx
            An instance of
            :class:`openquake.hazardlib.gsim.base.RuptureContext`.
        dctx
            An instance of
            :class:`openquake.hazardlib.gsim.base.DistancesContext`.
        stddev_types
            An empty list, if the ``result_type`` column says "MEAN"
            for that row, otherwise it is a list with one item --
            a requested standard deviation type.
        expected_results
            A dictionary mapping IMT-objects to one-element arrays of expected
            result values. Those results represent either standard deviation
            or mean value of corresponding IMT depending on ``result_type``.
        result_type
            A string literal, one of ``'STDDEV'`` or ``'MEAN'``. Value
            is taken from column ``result_type``.
    """
    rctx = RuptureContext()
    sctx = SitesContext(slots=req_site_params)
    dctx = DistancesContext()
    expected_results = {}
    stddev_types = result_type = damping = None

    for param, value in zip(headers, values):
        if param == 'result_type':
            value = value.upper()
            if value.endswith('_STDDEV'):
                # the row defines expected stddev results
                result_type = 'STDDEV'
                stddev_types = [getattr(const.StdDev, value[:-len('_STDDEV')])]
            else:
                # the row defines expected exponents of mean values
                assert value == 'MEAN'
                stddev_types = []
                result_type = 'MEAN'
        elif param == 'damping':
            damping = float(value)
        elif param.startswith('site_'):
            # value is sites context object attribute
            if param == 'site_vs30measured' or param == 'site_backarc':
                value = float(value) != 0
            elif param == 'site_siteclass':
                value = numpy.string_(value)
            else:
                value = float(value)
            # site_lons, site_lats, site_depths -> lon, lat, depth
            if param.endswith(('lons', 'lats', 'depths')):
                attr = param[len('site_'):-1]
            else:  # vs30s etc
                attr = param[len('site_'):]
            setattr(sctx, attr, numpy.array([value]))
        elif param.startswith('dist_'):
            # value is a distance measure
            value = float(value)
            setattr(dctx, param[len('dist_'):], numpy.array([value]))
        elif param.startswith('rup_'):
            # value is a rupture context attribute
            try:
                value = float(value)
            except ValueError:
                if value != 'undefined':
                    raise

            setattr(rctx, param[len('rup_'):], value)
        elif param == 'component_type':
            pass
        else:
            # value is the expected result (of result_type type)
            value = float(value)
            if param == 'arias':  # ugly legacy corner case
                param = 'ia'
            try:  # The title of the column should be IMT(args)
                imt = from_string(param.upper())
            except KeyError:  # Then it is just a period for SA
                imt = registry['SA'](float(param), damping)

            expected_results[imt] = numpy.array([value])

    assert result_type is not None
    return sctx, rctx, dctx, stddev_types, expected_results, result_type
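
In the loop above, a column title is first tried as a full IMT string and, failing that, treated as a bare SA period combined with the damping read earlier from the file. A small sketch of that fallback using openquake.hazardlib.imt; the exception type raised for an unrecognised string varies between hazardlib versions, so both are caught here:

from openquake.hazardlib import imt

def imt_from_column(title, damping=5.0):
    """Interpret a csv column title either as a full IMT string
    (e.g. 'PGA', 'SA(0.2)') or as a bare SA period (e.g. '0.2')."""
    try:
        return imt.from_string(title.upper())
    except (KeyError, ValueError):
        # not a recognised IMT name, so treat it as an SA period
        return imt.SA(float(title), damping)

print(imt_from_column('pga'))
print(imt_from_column('SA(0.2)'))
print(imt_from_column('0.2'))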
Code example #39
0
def create_polygons(container,
                    datadir,
                    logger,
                    max_workers,
                    method='pcontour'):
    """ Generates a set of closed polygons (with or without holes) using the
    specified method (either pcontour or skimage), and uses fiona to convert
    the resulting GeoJSON objects into ESRI-style shape files which are then
    zipped into an archive along with .prj, .lyr, and metadata .xml files. A
    warning will be emitted if .lyr or .xml files cannot be found for the
    ground motion parameter in question.

    Args:
        container (ShakeMapOutputContainer): An open ShakeMap output
            container object.
        datadir (str): The products directory for the event in question.
        logger (logger): This module's logger object.
        max_workers (int): Number of worker processes to use; 0 means run
            serially in the current process.
        method (str): Contouring implementation to use (either 'pcontour' or
            'skimage').

    Returns:
        (nothing): Nothing.
    """

    # gmice info for shakelib.plotting.contour
    config = container.getConfig()
    gmice = get_object_from_config('gmice', 'modeling', config)
    gmice_imts = gmice.DEFINED_FOR_INTENSITY_MEASURE_TYPES
    gmice_pers = gmice.DEFINED_FOR_SA_PERIODS

    component = list(container.getComponents())[0]
    imts = container.getIMTs(component)

    if method == 'pcontour':
        schema = {
            'properties':
            OrderedDict([('AREA', 'float:13.3'), ('PERIMETER', 'float:14.3'),
                         ('PGAPOL_', 'int:12'), ('PGAPOL_ID', 'int:12'),
                         ('GRID_CODE', 'int:12'),
                         ('PARAMVALUE', 'float:14.4')]),
            'geometry':
            'Polygon'
        }
    elif method == 'skimage':
        schema = {
            'properties':
            OrderedDict([('value', 'float:2.1'), ('units', 'str'),
                         ('color', 'str'), ('weight', 'float:13.3')]),
            'geometry':
            'MultiLineString'
        }
    else:
        raise ValueError('Unknown contouring method {}'.format(method))

    smdata = os.path.join(get_data_path(), 'gis')
    # Make a directory for the files to live in prior to being zipped
    alist = []
    with tempfile.TemporaryDirectory(dir=datadir) as tdir:
        for imt in imts:
            gdict = container.getIMTGrids(imt, component)
            fgrid = gdict['mean']
            if imt == 'MMI':
                fname = 'mi'
            elif imt == 'PGV':
                fname = 'pgv'
            else:
                fname = oq_to_file(imt)

            if method == 'pcontour':
                my_gmice = None
                if imt == 'MMI':
                    contour_levels = np.arange(0.1, 10.2, 0.2)
                elif imt == 'PGV':
                    fgrid = np.exp(fgrid)
                    cont_max = np.ceil(np.max(fgrid)) + 2.0
                    contour_levels = np.arange(1.0, cont_max, 2.0)
                    if contour_levels.size == 0:
                        contour_levels = np.array([1.0])
                else:
                    fgrid = np.exp(fgrid)
                    cont_max = (np.ceil(100 * np.max(fgrid)) + 2.0) / 100.0
                    contour_levels = np.arange(0.01, cont_max, 0.02)
                    if contour_levels.size == 0:
                        contour_levels = np.array([0.01])
            else:
                # skimage method chooses its own levels
                contour_levels = None
                # but wants gmice info
                oqimt = OQIMT.from_string(imt)
                if imt == 'MMI' or not isinstance(oqimt, tuple(gmice_imts)) or \
                   (isinstance(oqimt, OQIMT.SA) and oqimt.period not in gmice_pers):
                    my_gmice = None
                else:
                    my_gmice = gmice
            a = {
                'fgrid': fgrid,
                'dx': gdict['mean_metadata']['dx'],
                'dy': gdict['mean_metadata']['dy'],
                'xmin': gdict['mean_metadata']['xmin'],
                'ymax': gdict['mean_metadata']['ymax'],
                'contour_levels': contour_levels,
                'tdir': tdir,
                'fname': fname,
                'schema': schema,
                'imt': imt,
                'gmice': my_gmice,
                'gdict': gdict
            }
            alist.append(a)
            copyfile(os.path.join(smdata, 'WGS1984.prj'),
                     os.path.join(tdir, fname + '.prj'))
            lyrfile = os.path.join(smdata, fname + '.lyr')
            if not os.path.isfile(lyrfile):
                logger.warning("No " + fname + ".lyr file in " + smdata)
            else:
                copyfile(lyrfile, os.path.join(tdir, fname + '.lyr'))
            xmlfile = os.path.join(smdata, fname + '.shp.xml')
            if not os.path.isfile(xmlfile):
                logger.warning("No " + fname + ".shp.xml file in " + smdata)
            else:
                copyfile(xmlfile, os.path.join(tdir, fname + '.shp.xml'))

        worker = partial(make_shape_files, method=method)

        if max_workers > 0:
            with cf.ProcessPoolExecutor(max_workers=max_workers) as ex:
                results = ex.map(worker, alist)
                list(results)
        else:
            for adict in alist:
                worker(adict)

        zfilename = os.path.join(datadir, 'shape.zip')
        zfile = zipfile.ZipFile(zfilename,
                                mode='w',
                                compression=zipfile.ZIP_DEFLATED)
        filelist = []
        for (dirpath, dirnames, filenames) in os.walk(tdir):
            filelist.extend(filenames)
            break
        for sfile in filelist:
            zfile.write(os.path.join(tdir, sfile), sfile)
        zfile.close()
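
The pcontour branch above picks contour levels per IMT: a fixed ladder of 0.2-unit steps for MMI, 2 cm/s steps for PGV, and 0.02 g steps for the acceleration IMTs, in each case after exponentiating the mean grid out of log space and stopping just above the grid maximum. A self-contained sketch of that rule on made-up grids:

import numpy as np

def pcontour_levels(imt_name, mean_grid):
    """mean_grid is in natural-log units for everything except MMI."""
    if imt_name == 'MMI':
        return np.arange(0.1, 10.2, 0.2)
    grid = np.exp(mean_grid)
    if imt_name == 'PGV':
        cont_max = np.ceil(np.max(grid)) + 2.0
        levels = np.arange(1.0, cont_max, 2.0)
        return levels if levels.size else np.array([1.0])
    cont_max = (np.ceil(100 * np.max(grid)) + 2.0) / 100.0
    levels = np.arange(0.01, cont_max, 0.02)
    return levels if levels.size else np.array([0.01])

fake_ln_pga = np.log(np.array([[0.02, 0.05], [0.11, 0.08]]))  # in g
print(pcontour_levels('PGA', fake_ln_pga))
print(pcontour_levels('MMI', np.array([[3.0, 5.5]])))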
Code example #40
0
 def __init__(self, imtls, ampl_df, amplevels=None):
     # Check input
     if not imtls:
         raise ValueError('There are no intensity_measure_types!')
     # If available, get the filename containing the amplification function
     fname = getattr(ampl_df, 'fname', None)
     # Set the intensity measure types and levels on rock
     self.imtls = imtls
     # Set the intensity levels for which we compute poes on soil. Note
     # that we assume they are the same for all the intensity measure types
     # considered
     self.amplevels = amplevels
     # This is the reference Vs30 for the amplification function
     self.vs30_ref = ampl_df.vs30_ref
     has_levels = 'level' in ampl_df.columns
     has_mags = 'from_mag' in ampl_df.columns
     # Checking the input dataframe. The first case is for amplification
     # functions that depend on magnitude, distance and iml (the latter
      # could probably be removed since it is closely correlated to the
      # other two variables).
     if has_levels and 'from_mag' in ampl_df.keys():
         keys = ['ampcode', 'level', 'from_mag', 'from_rrup']
         check_unique(ampl_df, keys, fname)
     elif has_levels and 'level' in ampl_df.keys():
         check_unique(ampl_df, ['ampcode', 'level'], fname)
     else:
         check_unique(ampl_df, ['ampcode'], fname)
     missing = set(imtls) - set(ampl_df.columns[has_levels:])
     # Raise an error in case the hazard on rock does not contain
     # all the IMTs included in the amplification function
     if missing:
         raise ValueError('The amplification table does not contain %s' %
                          missing)
     if amplevels is None:  # for event based
         self.periods = [from_string(imt).period for imt in imtls]
     else:
         self.periods, levels = check_same_levels(imtls)
     # Create a dictionary containing for each site-category [key] a
     # dataframe [value] with the corresponding amplification function
     self.coeff = {}  # code -> dataframe
     self.ampcodes = []
     # This is a list with the names of the columns we will use to filter
     # the dataframe with the amplification function
     cols = list(imtls)
     if has_mags:
         cols.extend(['from_mag', 'from_rrup'])
     if has_levels:
         cols.append('level')
     # Appending to the list, the column names for sigma
     for col in ampl_df.columns:
         if col.startswith('sigma_'):
             cols.append(col)
     # Now we populate the dictionary containing for each site class the
     # corresponding dataframe with the amplification
     for code, df in ampl_df.groupby('ampcode'):
         self.ampcodes.append(code)
         if has_levels:
             self.coeff[code] = df[cols].set_index('level')
         else:
             self.coeff[code] = df[cols]
     # This is used in the case of the convolution method. We compute the
     # probability of occurrence for discrete intervals of ground motion
     # and we prepare values of median amplification and std for the
     # midlevels (i.e. ground motion on rock) for each IMT
     if amplevels is not None:
         self.imtls = imtls
         self.levels = levels
         self._set_alpha_sigma(mag=None, dst=None)
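
The constructor above ends up indexing the amplification table by site class ('ampcode') and, when present, by ground-motion 'level'. A sketch of that grouping with pandas on a toy table; the column names follow the convention above and the numbers are invented:

import pandas as pd

ampl_df = pd.DataFrame({
    'ampcode': ['A', 'A', 'B', 'B'],
    'level':   [0.1, 0.2, 0.1, 0.2],
    'PGA':     [1.2, 1.1, 1.5, 1.4],          # median amplification factors
    'sigma_PGA': [0.2, 0.2, 0.3, 0.3],
})

cols = ['PGA', 'level', 'sigma_PGA']
coeff = {}                                     # ampcode -> dataframe keyed by level
for code, df in ampl_df.groupby('ampcode'):
    coeff[code] = df[cols].set_index('level')

print(coeff['A'])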
Code example #41
0
def contour(imtdict, imtype, filter_size, gmice):
    """
    Generate contours of a specific IMT and return them as a list of
    GeoJSON-like features (Shapely MultiLineString geometries plus
    properties).

    Args:
        imtdict (dict): Dictionary holding the IMT mean grid and its
            metadata (the 'mean' and 'mean_metadata' keys).
        imtype (str): String containing the name of an Intensity
            Measure Type found in the output container.
        filter_size (int): Size of the median filter applied before
            contouring (see
            https://docs.scipy.org/doc/scipy-0.16.1/reference/generated/scipy.ndimage.filters.median_filter.html)
        gmice: GMICE object used to convert ground motion values to MMI
            for coloring the contours, or None.
    Returns:
        list: List of dictionaries containing two fields

                - geometry: GeoJSON-like representation of one of the objects
                  in https://toblerity.org/fiona/manual.html#geometry-types
                - properties: Dictionary of properties describing that
                  feature.

    Raises:
        NotImplementedError -- if the user attempts to contour a data file
            with sets of points rather than grids.
    """  # noqa
    oqimt = imt.from_string(imtype)

    intensity_colormap = ColorPalette.fromPreset('mmi')
    grid = imtdict['mean']
    metadata = imtdict['mean_metadata']
    if imtype == 'MMI':
        sgrid = grid
        units = 'mmi'
    elif imtype in ['PGV', 'IA']:
        sgrid = np.exp(grid)
        units = 'cms'
    elif imtype in ['PGD', 'IH']:
        sgrid = np.exp(grid)
        units = 'cm'
    else:
        sgrid = np.exp(grid) * 100.0
        units = 'pctg'
    if filter_size > 0:
        fgrid = median_filter(sgrid, size=int(filter_size))
    else:
        fgrid = sgrid

    interval_type = 'log'
    if imtype == 'MMI':
        interval_type = 'linear'

    grid_min = np.nanmin(fgrid)
    grid_max = np.nanmax(fgrid)
    if grid_max - grid_min:
        intervals = getContourLevels(grid_min, grid_max, itype=interval_type)
    else:
        intervals = np.array([])

    lonstart = metadata['xmin']
    latstart = metadata['ymin']

    lonend = metadata['xmax']
    if lonend < lonstart:
        lonstart -= 360

    lonspan = np.abs(lonend - lonstart)
    latspan = np.abs(metadata['ymax'] - latstart)
    nlon = metadata['nx']
    nlat = metadata['ny']

    line_strings = []  # dictionary of MultiLineStrings and props

    for cval in intervals:
        contours = measure.find_contours(fgrid, cval)
        #
        # Convert coords to geographic coordinates; the coordinates
        # are returned in row, column order (i.e., (y, x))
        #
        new_contours = []
        plot_contours = []
        for ic, coords in enumerate(contours):  # coords is a line segment
            #
            # This greatly reduces the number of points in the contours
            # without changing their shape too much
            #
            coords = measure.approximate_polygon(coords, filter_size / 20)

            mylons = np.around(coords[:, 1] * lonspan / nlon + lonstart,
                               decimals=6)
            mylats = np.around((nlat - coords[:, 0]) * latspan / nlat +
                               latstart, decimals=6)

            contours[ic] = np.hstack((mylons[:].reshape((-1, 1)),
                                      mylats[:].reshape((-1, 1))))
            plot_contours.append(contours[ic])
            new_contours.append(contours[ic].tolist())

        if len(new_contours):
            mls = MultiLineString(new_contours)
            props = {
                'value': cval,
                'units': units
            }
            if imtype == 'MMI':
                pass
            elif imtype == 'PGV':
                lcval = np.log(cval)
            else:
                lcval = np.log(cval / 100)
            if gmice:
                mmival = gmice.getMIfromGM(np.array([lcval]), oqimt)[0][0]
            elif imtype == 'MMI':
                mmival = cval
            else:
                mmival = 1
            color_array = np.array(intensity_colormap.getDataColor(mmival))
            color_rgb = np.array(color_array[0:3] * 255, dtype=int).tolist()
            props['color'] = '#%02x%02x%02x' % tuple(color_rgb)
            if imtype == 'MMI':
                if (cval * 2) % 2 == 1:
                    props['weight'] = 4
                else:
                    props['weight'] = 2
            else:
                props['weight'] = 4
            line_strings.append(
                {
                    'geometry': mapping(mls),
                    'properties': props
                }
            )
    return line_strings
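
contour() traces iso-value lines with scikit-image, thins the vertices, converts the (row, col) coordinates to lon/lat and packs the result into Shapely MultiLineStrings. A stripped-down sketch of that pipeline on a synthetic grid, assuming scikit-image and shapely are installed; the grid and the geographic extent are invented:

import numpy as np
from skimage import measure
from shapely.geometry import MultiLineString, mapping

# synthetic "ground motion" surface and its geographic extent
ny, nx = 50, 60
grid = np.hypot(*np.meshgrid(np.linspace(-1, 1, nx), np.linspace(-1, 1, ny)))
lonstart, lonspan = -120.0, 2.0
latstart, latspan = 34.0, 1.5

segments = []
for coords in measure.find_contours(grid, 0.5):         # coords are (row, col)
    coords = measure.approximate_polygon(coords, 0.05)   # thin out the vertices
    lons = coords[:, 1] * lonspan / nx + lonstart
    lats = (ny - coords[:, 0]) * latspan / ny + latstart
    segments.append(np.column_stack([lons, lats]).tolist())

if segments:
    geojson_geom = mapping(MultiLineString(segments))
    print(geojson_geom['type'], len(geojson_geom['coordinates']))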
Code example #42
0
import os.path
import unittest
import numpy
from openquake.hazardlib import geo, imt
from openquake.hazardlib.shakemap.maps import \
    get_sitecol_shakemap
from openquake.hazardlib.shakemap.gmfs import (to_gmfs, amplify_ground_shaking,
                                               spatial_correlation_array,
                                               spatial_covariance_array,
                                               cross_correlation_matrix,
                                               cholesky)

aae = numpy.testing.assert_almost_equal
F64 = numpy.float64
imts = [imt.from_string(x) for x in ['PGA', 'SA(0.3)', 'SA(1.0)', 'SA(3.0)']]
imt_dt = numpy.dtype([(str(imt), float) for imt in imts])
shakemap_dt = numpy.dtype([('lon', float), ('lat', float), ('val', imt_dt),
                           ('std', imt_dt), ('vs30', float)])
CDIR = os.path.dirname(__file__)

gmf_dict = {
    'kind': 'Silva&Horspool',
    'spatialcorr': 'yes',
    'crosscorr': 'yes',
    'cholesky_limit': 10000
}


def mean_std(shakemap, site_effects):
    gmf_dict.update({
        'kind': 'Silva&Horspool',
Code example #43
0
def contour_to_files(container,
                     output_dir,
                     logger,
                     contents,
                     filter_size=DEFAULT_FILTER_SIZE):
    """
    Generate contours of all IMT values.

    Args:
      container (ShakeMapOutputContainer): ShakeMapOutputContainer with
          ShakeMap output data.
      output_dir (str): Path to directory where output files will be written.
      logger (logging.Logger): Python logging Logger instance.
      contents: Object used to register the generated files (via its
          addFile method).
      filter_size (int): Size of the median filter applied before contouring.

    Raises:
        LookupError: When configured file format is not supported
    """

    # Right now geojson is all we support; if that changes, we'll have
    # to add a configuration or command-line option
    file_format = 'geojson'
    # open a file for writing
    driver, extension = FORMATS[file_format]
    sa_schema = {
        'geometry': 'MultiLineString',
        'properties': {
            'value': 'float',
            'units': 'str',
            'color': 'str',
            'weight': 'int'
        }
    }
    mmi_schema = {
        'geometry': 'MultiLineString',
        'properties': {
            'value': 'float',
            'units': 'str',
            'color': 'str',
            'weight': 'int'
        }
    }
    crs = {
        'no_defs': True,
        'ellps': 'WGS84',
        'datum': 'WGS84',
        'proj': 'longlat'
    }

    config = container.getConfig()
    gmice = get_object_from_config('gmice', 'modeling', config)
    gmice_imts = gmice.DEFINED_FOR_INTENSITY_MEASURE_TYPES
    gmice_pers = gmice.DEFINED_FOR_SA_PERIODS

    imtlist = container.getIMTs()
    for imtype in imtlist:
        component, imtype = imtype.split('/')
        fileimt = oq_to_file(imtype)
        oqimt = imt.from_string(imtype)
        if component == 'GREATER_OF_TWO_HORIZONTAL':
            fname = 'cont_%s.%s' % (fileimt, extension)
        else:
            fname = 'cont_%s_%s.%s' % (fileimt, component, extension)
        if imtype == 'MMI':
            contents.addFile('mmiContour', 'Intensity Contours',
                             'Contours of macroseismic intensity.', fname,
                             'application/json')
            contents.addFile('miContour', 'Intensity Contours (Legacy Naming)',
                             'Contours of macroseismic intensity.',
                             'cont_mi.json', 'application/json')
        elif imtype == 'PGA':
            contents.addFile(
                'pgaContour', 'PGA Contours',
                'Contours of ' + component + ' peak '
                'ground acceleration (%g).', fname, 'application/json')
        elif imtype == 'PGV':
            contents.addFile(
                'pgvContour', 'PGV Contours',
                'Contours of ' + component + ' peak '
                'ground velocity (cm/s).', fname, 'application/json')
        elif imtype == 'PGD':
            contents.addFile(
                'pgdContour', 'PGD Contours',
                'Contours of ' + component + ' peak '
                'ground displacement (cm).', fname, 'application/json')
        elif imtype == 'IA':
            contents.addFile(
                'iaContour', 'IA Contours',
                'Contours of ' + component + ' peak '
                'arias (cm/s).', fname, 'application/json')
        elif imtype == 'IH':
            contents.addFile(
                'ihContour', 'IH Contours',
                'Contours of ' + component + ' peak '
                'Housner (cm).', fname, 'application/json')
        else:
            contents.addFile(
                imtype + 'Contour',
                imtype.upper() + ' Contours',
                'Contours of ' + component + ' 5% damped ' +
                str(oqimt.period) + ' sec spectral acceleration (%g).', fname,
                'application/json')

        filename = os.path.join(output_dir, fname)
        if os.path.isfile(filename):
            fpath, fext = os.path.splitext(filename)
            flist = glob.glob(fpath + '.*')
            for fname in flist:
                os.remove(fname)

        if imtype == 'MMI' or not isinstance(oqimt, tuple(gmice_imts)) or \
           (isinstance(oqimt, imt.SA) and oqimt.period not in gmice_pers):
            my_gmice = None
        else:
            my_gmice = gmice

        # fiona spews a warning here when driver is geojson
        # this warning appears to be un-catchable using
        # with warnings.catch_warnings()
        # or
        # logging.captureWarning()
        # or
        # even redirecting stderr/stdout to IO streams
        # not sure where the warning is coming from,
        # but there appears to be no way to stop it...
        with fiona.Env():
            if imtype == 'MMI':
                selected_schema = mmi_schema
            else:
                selected_schema = sa_schema
            vector_file = fiona.open(filename,
                                     'w',
                                     driver=driver,
                                     schema=selected_schema,
                                     crs=crs)

            line_strings = contour(container.getIMTGrids(imtype, component),
                                   imtype, filter_size, my_gmice)

            for feature in line_strings:
                vector_file.write(feature)

            # Grab some metadata
            meta = container.getMetadata()
            event_info = meta['input']['event_information']
            mdict = {
                'eventid': event_info['event_id'],
                'longitude': float(event_info['longitude']),
                'latitude': float(event_info['latitude'])
            }

            logger.debug('Writing contour file %s' % filename)
            vector_file.close()

            # Get bounds
            tmp = fiona.open(filename)
            bounds = tmp.bounds
            tmp.close()

            # Read back in to add metadata/bounds
            data = json.load(open(filename))
            data['metadata'] = mdict
            data['bbox'] = bounds
            with open(filename, 'w') as outfile:
                json.dump(data, outfile)

            #####################################
            # Make an extra version of the MMI contour file
            # so that the current web rendering code can find it.
            # Delete this file once everyone has moved to new version
            # of ComCat code.

            if imtype == 'MMI':
                old_file = os.path.join(output_dir, 'cont_mi.json')
                shutil.copy(filename, old_file)
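
contour_to_files writes each contour layer with Fiona and then reopens the GeoJSON to attach event metadata and the layer bounding box. A compact sketch of that write-then-annotate cycle with a single dummy feature, assuming Fiona is installed; the file name, coordinates and event id are placeholders:

import json
import fiona

schema = {'geometry': 'MultiLineString',
          'properties': {'value': 'float', 'units': 'str'}}
crs = {'no_defs': True, 'ellps': 'WGS84', 'datum': 'WGS84', 'proj': 'longlat'}
feature = {
    'geometry': {'type': 'MultiLineString',
                 'coordinates': [[(-120.0, 34.0), (-119.5, 34.2)]]},
    'properties': {'value': 0.1, 'units': 'pctg'},
}

fname = 'cont_example.json'
with fiona.open(fname, 'w', driver='GeoJSON', schema=schema, crs=crs) as vf:
    vf.write(feature)

with fiona.open(fname) as vf:                  # re-open to grab the layer bounds
    bounds = vf.bounds

data = json.load(open(fname))
data['metadata'] = {'eventid': 'us1000dummy'}  # placeholder event id
data['bbox'] = bounds
with open(fname, 'w') as f:
    json.dump(data, f)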
Code example #44
0
File: disaggregation.py Project: griffij/oq-risklib
def _collect_bins_data(trt_num, source_ruptures, site, curves, trt_model_id,
                       rlzs_assoc, gsims, imtls, poes, truncation_level,
                       n_epsilons, mon):
    # returns a BinData instance
    sitecol = SiteCollection([site])
    mags = []
    dists = []
    lons = []
    lats = []
    trts = []
    pnes = []
    sitemesh = sitecol.mesh
    make_ctxt = mon('making contexts', measuremem=False)
    disagg_poe = mon('disaggregate_poe', measuremem=False)
    cmaker = ContextMaker(gsims)
    for source, ruptures in source_ruptures:
        try:
            tect_reg = trt_num[source.tectonic_region_type]
            for rupture in ruptures:
                with make_ctxt:
                    sctx, rctx, dctx = cmaker.make_contexts(sitecol, rupture)
                # extract rupture parameters of interest
                mags.append(rupture.mag)
                dists.append(dctx.rjb[0])  # single site => single distance
                [closest_point] = rupture.surface.get_closest_points(sitemesh)
                lons.append(closest_point.longitude)
                lats.append(closest_point.latitude)
                trts.append(tect_reg)

                pne_dict = {}
                # a dictionary rlz.id, poe, imt_str -> prob_no_exceed
                for gsim in gsims:
                    gs = str(gsim)
                    for imt_str, imls in imtls.items():
                        imt = from_string(imt_str)
                        imls = numpy.array(imls[::-1])
                        for rlz in rlzs_assoc[trt_model_id, gs]:
                            rlzi = rlz.ordinal
                            curve_poes = curves[rlzi, imt_str][::-1]
                            for poe in poes:
                                iml = numpy.interp(poe, curve_poes, imls)
                                # compute probability of exceeding iml given
                                # the current rupture and epsilon_bin, that is
                                # ``P(IMT >= iml | rup, epsilon_bin)``
                                # for each of the epsilon bins
                                with disagg_poe:
                                    [poes_given_rup_eps] = \
                                        gsim.disaggregate_poe(
                                            sctx, rctx, dctx, imt, iml,
                                            truncation_level, n_epsilons)
                                pne = rupture.get_probability_no_exceedance(
                                    poes_given_rup_eps)
                                pne_dict[rlzi, poe, imt_str] = (iml, pne)

                pnes.append(pne_dict)
        except Exception as err:
            etype, err, tb = sys.exc_info()
            msg = 'An error occurred with source id=%s. Error: %s'
            msg %= (source.source_id, err)
            raise etype(msg).with_traceback(tb)

    return BinData(numpy.array(mags, float), numpy.array(dists, float),
                   numpy.array(lons, float), numpy.array(lats, float),
                   numpy.array(trts, int), pnes)
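
The disaggregation loop above inverts a hazard curve: given a target probability of exceedance it interpolates the corresponding intensity level, reversing both arrays first because numpy.interp needs its x-coordinates in increasing order. A tiny sketch of that step with made-up numbers:

import numpy as np

imls = np.array([0.05, 0.1, 0.2, 0.4, 0.8])        # increasing intensity levels
curve_poes = np.array([0.9, 0.6, 0.3, 0.1, 0.02])  # decreasing PoE

def iml_at(poe):
    # PoEs decrease with increasing IML, so reverse both arrays for interp
    return np.interp(poe, curve_poes[::-1], imls[::-1])

for poe in (0.1, 0.02):
    print(poe, iml_at(poe))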
Code example #45
0
def get_conditional_gmfs(database,
                         rupture,
                         sites,
                         gsims,
                         imts,
                         number_simulations,
                         truncation_level,
                         correlation_model=DEFAULT_CORRELATION):
    """
    Get a set of random fields conditioned on a set of observations
    :param database:
        Ground motion records for the event as instance of :class:
        smtk.sm_database.GroundMotionDatabase
    :param rupture:
        Event rupture as instance of :class:
        openquake.hazardlib.source.rupture.Rupture
    :param sites:
        Target sites as instance of :class:
        openquake.hazardlib.site.SiteCollection
    :param list gsims:
        List of GMPEs required
    :param list imts:
        List of intensity measures required
    :param int number_simulations:
        Number of simulated fields required
    :param float truncation_level:
        Ground motion truncation level
    :param correlation_model:
        Spatial correlation model used when simulating the intra-event
        residuals
    """

    # Get known sites mesh
    known_sites = database.get_site_collection()

    # Get Observed Residuals
    residuals = Residuals(gsims, imts)
    residuals.get_residuals(database)
    # build a separate array dictionary for each GMPE so that the simulated
    # fields of one GMPE do not overwrite those of another
    gmfs = OrderedDict([
        (gmpe, OrderedDict([
            (imtx, np.zeros([len(sites.lons), number_simulations]))
            for imtx in imts]))
        for gmpe in gsims])
    gmpe_list = [GSIM_LIST[gmpe]() for gmpe in gsims]
    cmaker = ContextMaker(rupture.tectonic_region_type, gmpe_list)
    sctx, dctx = cmaker.make_contexts(sites, rupture)
    for gsim in gmpe_list:
        gmpe = gsim.__class__.__name__
        for imtx in imts:
            if truncation_level == 0:
                gmfs[gmpe][imtx], _ = gsim.get_mean_and_stddevs(
                    sctx, rupture, dctx, from_string(imtx), stddev_types=[])
                continue
            if "Intra event" in gsim.DEFINED_FOR_STANDARD_DEVIATION_TYPES:
                epsilon = conditional_simulation(
                    known_sites,
                    residuals.residuals[gmpe][imtx]["Intra event"], sites,
                    imtx, number_simulations, correlation_model)
                tau = np.unique(residuals.residuals[gmpe][imtx]["Inter event"])
                mean, [stddev_inter, stddev_intra] = gsim.get_mean_and_stddevs(
                    sctx, rupture, dctx, from_string(imtx),
                    ["Inter event", "Intra event"])
                for iloc in range(0, number_simulations):
                    gmfs[gmpe][imtx][:, iloc] = np.exp(mean +
                                                       (tau * stddev_inter) +
                                                       (epsilon[:, iloc] *
                                                        stddev_intra))
            else:
                epsilon = conditional_simulation(
                    known_sites, residuals.residuals[gmpe][imtx]["Total"],
                    sites, imtx, number_simulations, correlation_model)
                tau = None
                mean, [stddev_total
                       ] = gsim.get_mean_and_stddevs(sctx, rupture, dctx,
                                                     from_string(imtx),
                                                     ["Total"])
                for iloc in range(0, number_simulations):
                    gmfs[gmpe][imtx][:,
                                     iloc] = np.exp(mean + epsilon[:, iloc] *
                                                    stddev_total.flatten())
    return gmfs
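
As a quick illustration of the inter/intra-event branch in get_conditional_gmfs above, the conditioned field is assembled in log space as mean + tau * sigma_inter + epsilon * sigma_intra and then exponentiated. A self-contained numpy sketch with made-up values (not taken from the snippet):

import numpy as np

n_sites, n_sims = 5, 3
mean = np.full(n_sites, -2.0)               # ln of the median ground motion
sigma_inter, sigma_intra = 0.3, 0.5         # inter/intra-event std deviations
tau = 0.8                                   # single inter-event residual
epsilon = np.random.randn(n_sites, n_sims)  # simulated intra-event residuals

gmf = np.exp(mean[:, None] + tau * sigma_inter + epsilon * sigma_intra)
print(gmf.shape)  # (5, 3): one column per simulation
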
Code example #46
def _draw_imt_legend(fig, palette, imtype, gmice, process_time, map_version,
                     point_source, tdict):
    """Create a legend axis for non MMI plots.

    Args:
        fig (Figure): Matplotlib Figure object.
        palette (ColorPalette): ColorPalette using range of input data and
            IMT_CMAP.
        imtype (str): One of 'PGV','PGA','SA(0.3)',etc.
        gmice (GMICE object): The GMICE used for this map.
        process_time (str): The processing time of this map.
        map_version (str): The version of this map.
        point_source (bool): Is the rupture a point source?
        tdict (dict): Dictionary containing the text strings for printing
            on the maps (in the language of the user's choice).
    """

    imtlabel = imtype + ' ' + tdict['units'][imtype]
    # imtlabel = imtype

    cax = fig.add_axes([0.1, 0.13, 0.8, 0.02])
    plt.axis('off')
    cax_xmin, cax_xmax = cax.get_xlim()
    bottom, top = cax.get_ylim()
    plt.xlim(cax_xmin, cax_xmax)
    plt.ylim(bottom, top)

    firstcol_width = 0.15

    font0 = FontProperties()
    alignment = {
        'horizontalalignment': 'center',
        'verticalalignment': 'center'
    }
    font0.set_weight('bold')

    xloc = firstcol_width/2
    plt.text(xloc, 0.5, imtlabel,
             fontproperties=font0, **alignment)
    # draw top/bottom edges of table
    plt.plot([bottom, top], [bottom, bottom], 'k', clip_on=False)
    plt.plot([bottom, top], [top, top], 'k', clip_on=False)
    # draw left edge of table
    plt.plot([bottom, bottom], [bottom, top], 'k', clip_on=False)
    # draw right edge of first column
    plt.plot([firstcol_width, firstcol_width], [0, 1], 'k', clip_on=False)
    # draw right edge of table
    plt.plot([1, 1], [0, 1], 'k', clip_on=False)

    # get the MMI/IMT values we need
    itype = 'log'
    divisor = 1
    if imtype != 'PGV':
        divisor = 100
    dmin, dmax = IMT_RANGES[imtype]
    imt_values = np.log(getContourLevels(dmin, dmax, itype=itype)/divisor)
    if gmice.supports(imtype):
        mmi_values, _ = gmice.getMIfromGM(imt_values, imt.from_string(imtype))
    else:
        gmice = WGRW12()
        mmi_values, _ = gmice.getMIfromGM(imt_values, imt.from_string(imtype))
    mmi_colors = [palette.getDataColor(
        mmi, color_format='hex') for mmi in mmi_values]
    new_imts = []
    new_mmi_colors = []
    for mmic, imtv in zip(mmi_colors, imt_values):
        if mmic not in new_mmi_colors:
            new_imts.append(imtv)
            new_mmi_colors.append(mmic)

    width = (1 - firstcol_width)/len(new_imts)
    left = firstcol_width
    for mmic, imtv in zip(new_mmi_colors, new_imts):
        right = left + width
        px = [left, right, right, left, left]
        py = [top, top, bottom, bottom, top]
        plt.plot([right, right], [bottom, top], 'k')
        plt.fill(px, py, mmic, ec=mmic)
        xloc = left + width/2.0
        imtstr = "{0:.3g}".format(np.exp(imtv)*divisor)
        th = plt.text(xloc, 0.5, imtstr, fontproperties=font0, **alignment)
        th.set_path_effects(
            [path_effects.Stroke(linewidth=2.0,
                                 foreground='white'),
             path_effects.Normal()]
        )
        left = right

    # Explanation of symbols: triangle is instrument, circle is mmi,
    # epicenter is black star
    # thick black line is rupture (if available)
    cax = fig.add_axes([0.1, 0.09, 0.8, 0.04])
    plt.axis('off')
    cax_xmin, cax_xmax = cax.get_xlim()
    bottom, top = cax.get_ylim()
    plt.xlim(cax_xmin, cax_xmax)
    plt.ylim(bottom, top)
    item_sep = [0.2, 0.28, 0.15]
    left_offset = 0.005
    label_pad = 0.02

    yloc_sixth_row = 0.6
    yloc_seventh_row = 0.15

    # Instrument
    triangle_marker_x = left_offset
    triangle_text_x = triangle_marker_x + label_pad
    plt.plot(triangle_marker_x, yloc_seventh_row, '^', markerfacecolor='w',
             markeredgecolor='k', markersize=6, mew=0.5, clip_on=False)
    plt.text(triangle_text_x,
             yloc_seventh_row,
             tdict['legend']['instrument'],
             va='center',
             ha='left')

    # Macroseismic
    circle_marker_x = triangle_text_x + item_sep[0]
    circle_text_x = circle_marker_x + label_pad
    plt.plot(circle_marker_x,
             yloc_seventh_row, 'o',
             markerfacecolor='w',
             markeredgecolor='k',
             markersize=4,
             mew=0.5)
    plt.text(circle_text_x,
             yloc_seventh_row,
             tdict['legend']['intensity'],
             va='center',
             ha='left')

    # Epicenter
    star_marker_x = circle_marker_x + item_sep[1]
    star_text_x = star_marker_x + label_pad
    plt.plot(star_marker_x,
             yloc_seventh_row, 'k*',
             markersize=12,
             mew=0.5)
    plt.text(star_text_x,
             yloc_seventh_row,
             tdict['legend']['epicenter'],
             va='center',
             ha='left')

    if not point_source:
        rup_marker_x = star_marker_x + item_sep[2]
        rup_text_x = rup_marker_x + label_pad
        rwidth = 0.02
        rheight = 0.05
        rup = patches.Rectangle(
            xy=(rup_marker_x - rwidth,
                yloc_seventh_row-0.5*rheight),
            width=rwidth,
            height=rheight,
            linewidth=2,
            edgecolor='k',
            facecolor='w'
        )
        cax.add_patch(rup)
        plt.text(rup_text_x,
                 yloc_seventh_row,
                 tdict['legend']['rupture'],
                 va='center',
                 ha='left')

    # Add conversion reference and shakemap version/process time
    version_x = 1.0
    tpl = (tdict['legend']['version'], map_version,
           tdict['legend']['processed'], process_time)
    plt.text(version_x, yloc_sixth_row,
             '%s %i: %s %s' % tpl,
             ha='right', va='center')

    ref = gmice.name
    refx = 0
    plt.text(refx, yloc_sixth_row,
             '%s %s' % (tdict['legend']['scale'], ref),
             va='center')
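
The legend values above come from log-spaced levels expressed in display units (percent-g, or cm/s for PGV), divided by 100 to obtain g before taking the natural log expected by the GMICE, and converted back for the labels. A rough stand-alone sketch of that round trip (getContourLevels itself is not reproduced; the level count is an assumption):

import numpy as np

def legend_levels(dmin, dmax, n=10, divisor=100):
    levels = np.logspace(np.log10(dmin), np.log10(dmax), n)  # display units
    return np.log(levels / divisor)  # ln(g), as expected by the GMICE

ln_values = legend_levels(0.01, 200.0)
print(["{0:.3g}".format(v) for v in np.exp(ln_values) * 100][:3])  # back to %g
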
Code example #47
File: hazard.py Project: sara80ingv/oq-engine
def export_disagg_csv_xml(ekey, dstore):
    oq = dstore['oqparam']
    sitecol = dstore['sitecol']
    hmap4 = dstore['hmap4']
    N, M, P, Z = hmap4.shape
    imts = list(oq.imtls)
    rlzs = dstore['full_lt'].get_realizations()
    fnames = []
    writercls = hazard_writers.DisaggXMLWriter
    bins = {name: dset[:] for name, dset in dstore['disagg-bins'].items()}
    ex = 'disagg?kind=%s&imt=%s&site_id=%s&poe_id=%d&z=%d'
    skip_keys = ('Mag', 'Dist', 'Lon', 'Lat', 'Eps', 'TRT')
    for s, m, p, z in iproduct(N, M, P, Z):
        dic = {
            k: dstore['disagg/' + k][s, m, p, ..., z]
            for k in oq.disagg_outputs
        }
        if sum(arr.sum() for arr in dic.values()) == 0:  # no data
            continue
        imt = from_string(imts[m])
        r = hmap4.rlzs[s, z]
        rlz = rlzs[r]
        iml = hmap4[s, m, p, z]
        poe_agg = dstore['poe4'][s, m, p, z]
        fname = dstore.export_path('rlz-%d-%s-sid-%d-poe-%d.xml' %
                                   (r, imt, s, p))
        lon, lat = sitecol.lons[s], sitecol.lats[s]
        metadata = dstore.metadata
        metadata.update(investigation_time=oq.investigation_time,
                        imt=imt.name,
                        smlt_path='_'.join(rlz.sm_lt_path),
                        gsimlt_path=rlz.gsim_rlz.pid,
                        lon=lon,
                        lat=lat,
                        mag_bin_edges=bins['Mag'].tolist(),
                        dist_bin_edges=bins['Dist'].tolist(),
                        lon_bin_edges=bins['Lon'][s].tolist(),
                        lat_bin_edges=bins['Lat'][s].tolist(),
                        eps_bin_edges=bins['Eps'].tolist(),
                        tectonic_region_types=decode(bins['TRT'].tolist()))
        if ekey[1] == 'xml':
            metadata['sa_period'] = getattr(imt, 'period', None) or None
            metadata['sa_damping'] = getattr(imt, 'damping', None)
            writer = writercls(fname, **metadata)
            data = []
            for k in oq.disagg_outputs:
                data.append(DisaggMatrix(poe_agg, iml, k.split('_'), dic[k]))
            writer.serialize(data)
            fnames.append(fname)
        else:  # csv
            metadata['poe'] = poe_agg
            for k in oq.disagg_outputs:
                header = k.lower().split('_') + ['poe']
                com = {
                    key: value
                    for key, value in metadata.items()
                    if value is not None and key not in skip_keys
                }
                com.update(metadata)
                fname = dstore.export_path('rlz-%d-%s-sid-%d-poe-%d_%s.csv' %
                                           (r, imt, s, p, k))
                values = extract(dstore, ex % (k, imt, s, p, z))
                writers.write_csv(fname,
                                  values,
                                  header=header,
                                  comment=com,
                                  fmt='%.5E')
                fnames.append(fname)
    return sorted(fnames)
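
For reference, the ex template above expands into a datastore extract key of the following form (the values here are made up):

ex = 'disagg?kind=%s&imt=%s&site_id=%s&poe_id=%d&z=%d'
print(ex % ('Mag_Dist', 'SA(0.1)', 3, 0, 0))
# disagg?kind=Mag_Dist&imt=SA(0.1)&site_id=3&poe_id=0&z=0
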
Code example #48
    def do_aggregate_post_proc(self):
        """
        Grab hazard data for all realizations and sites from the database and
        compute mean and/or quantile aggregates (depending on which options are
        enabled in the calculation).

        Post-processing results will be stored directly into the database.
        """
        num_rlzs = len(self._realizations)
        if not num_rlzs:
            logs.LOG.warn('No realizations for hazard_calculation_id=%d',
                          self.job.id)
            return
        elif num_rlzs == 1 and self.quantile_hazard_curves:
            logs.LOG.warn(
                'There is only one realization, the configuration parameter '
                'quantile_hazard_curves should not be set')
            return

        weights = (None if self.oqparam.number_of_logic_tree_samples else
                   [rlz.weight for rlz in self._realizations])

        if self.oqparam.mean_hazard_curves:
            # create a new `HazardCurve` 'container' record for mean
            # curves (virtual container for multiple imts)
            models.HazardCurve.objects.create(
                output=models.Output.objects.create_output(
                    self.job, "mean-curves-multi-imt", "hazard_curve_multi"),
                statistics="mean",
                imt=None,
                investigation_time=self.oqparam.investigation_time)

        for quantile in self.quantile_hazard_curves:
            # create a new `HazardCurve` 'container' record for quantile
            # curves (virtual container for multiple imts)
            models.HazardCurve.objects.create(
                output=models.Output.objects.create_output(
                    self.job, 'quantile(%s)-curves' % quantile,
                    "hazard_curve_multi"),
                statistics="quantile",
                imt=None,
                quantile=quantile,
                investigation_time=self.oqparam.investigation_time)

        for imt, imls in self.oqparam.imtls.items():
            im_type, sa_period, sa_damping = from_string(imt)

            # prepare `output` and `hazard_curve` containers in the DB:
            container_ids = dict()
            if self.oqparam.mean_hazard_curves:
                mean_output = self.job.get_or_create_output(
                    display_name='Mean Hazard Curves %s' % imt,
                    output_type='hazard_curve')
                mean_hc = models.HazardCurve.objects.create(
                    output=mean_output,
                    investigation_time=self.oqparam.investigation_time,
                    imt=im_type,
                    imls=imls,
                    sa_period=sa_period,
                    sa_damping=sa_damping,
                    statistics='mean')
                self._hazard_curves.append(mean_hc)
                container_ids['mean'] = mean_hc.id

            for quantile in self.quantile_hazard_curves:
                q_output = self.job.get_or_create_output(
                    display_name=('%s quantile Hazard Curves %s' %
                                  (quantile, imt)),
                    output_type='hazard_curve')
                q_hc = models.HazardCurve.objects.create(
                    output=q_output,
                    investigation_time=self.oqparam.investigation_time,
                    imt=im_type,
                    imls=imls,
                    sa_period=sa_period,
                    sa_damping=sa_damping,
                    statistics='quantile',
                    quantile=quantile)
                self._hazard_curves.append(q_hc)
                container_ids['q%s' % quantile] = q_hc.id

            # num_rlzs * num_sites * num_levels
            # NB: different IMTs can have different num_levels
            all_curves_for_imt = numpy.array(self.curves_by_imt[imt])
            del self.curves_by_imt[imt]  # save memory

            inserter = writer.CacheInserter(models.HazardCurveData,
                                            max_cache_size=10000)

            # curve_poes below is an array num_rlzs * num_levels
            for i, site in enumerate(self.site_collection):
                wkt = site.location.wkt2d
                curve_poes = numpy.array(
                    [c_by_rlz[i] for c_by_rlz in all_curves_for_imt])

                # calc quantiles first
                for quantile in self.quantile_hazard_curves:
                    q_curve = scientific.quantile_curve(
                        curve_poes, quantile, weights)
                    inserter.add(
                        models.HazardCurveData(
                            hazard_curve_id=(container_ids['q%s' % quantile]),
                            poes=q_curve.tolist(),
                            location=wkt))

                # then means
                if self.mean_hazard_curves:
                    m_curve = scientific.mean_curve(curve_poes, weights)
                    inserter.add(
                        models.HazardCurveData(
                            hazard_curve_id=container_ids['mean'],
                            poes=m_curve.tolist(),
                            location=wkt))
            inserter.flush()
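
The statistics above are computed per site from curve_poes, an array of shape (num_rlzs, num_levels). A minimal numpy sketch of the unweighted case (the real implementations are scientific.mean_curve and scientific.quantile_curve, which also handle realization weights):

import numpy

curve_poes = numpy.array([[0.90, 0.50, 0.10],
                          [0.80, 0.40, 0.05],
                          [0.85, 0.45, 0.08]])  # 3 realizations, 3 IMLs

mean_curve = curve_poes.mean(axis=0)
median_curve = numpy.quantile(curve_poes, 0.5, axis=0)
print(mean_curve, median_curve)
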
Code example #49
File: imt_test.py Project: yasser64b/oq-engine
 def test_equivalent(self):
     sa1 = imt_module.from_string('SA(0.1)')
     sa2 = imt_module.from_string('SA(0.10)')
     self.assertEqual(sa1, sa2)
     self.assertEqual(set([sa1, sa2]), set([sa1]))
Code example #50
 def __init_subclass__(cls):
     # make sure the name of the outputs are valid IMTs
     for out in cls.outputs:
         imt.from_string(out)
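
A hypothetical, self-contained illustration of the hook above (the Base class here is an assumption, not part of the snippet): any subclass declaring an output that is not a valid IMT string fails as soon as the class is defined.

from openquake.hazardlib import imt

class Base:
    outputs = ()

    def __init_subclass__(cls):
        for out in cls.outputs:
            imt.from_string(out)  # raises for strings that are not valid IMTs

class SpectralOutputs(Base):
    outputs = ['PGA', 'SA(0.3)']  # valid IMTs, the class is created normally

# class BadOutputs(Base):
#     outputs = ['XXX']           # would raise at class-definition time
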
Code example #51
    def fillTables(self, source):
        """Populate tables with derived MMI/PGM values and distances.
        :param source:
          ShakeMap Source object.
        """
        gmice = WGRW12()
        #find all of the instrumented stations
        stationquery = 'SELECT id,lat,lon,code,network FROM station where instrumented = 1'
        self.cursor.execute(stationquery)
        rows = self.cursor.fetchall()
        emag = source.getEventDict()['mag']
        distances = ['rhypo', 'repi', 'rjb', 'rrup']

        #pre-fetch all of the IMT ids before looping
        imtdict = {}
        for imt in IMT_TYPES:
            imtquery = 'SELECT id FROM imt WHERE imt_type = "%s"' % imt
            self.cursor.execute(imtquery)
            try:
                imtdict[imt] = self.cursor.fetchone()[0]
            except Exception:
                # IMT not present in the imt table; leave it out of imtdict
                pass

        for row in rows:
            sid, lat, lon, code, network = row
            #calculate all distance types
            ddict = get_distance(distances, np.array([lat]), np.array([lon]),
                                 np.array([0]), source)
            values = []
            for d in distances:
                values.append(ddict[d][0])
            values.append(sid)
            values = tuple(values)
            station_update = 'UPDATE station set rhypo=%.2f,repi=%.2f,rjb=%.2f,rrup=%.2f WHERE id=%i' % values
            self.cursor.execute(station_update)
            self.db.commit()

            #calculate all derived mmi values
            for imt, imtid in imtdict.items():
                if imt.endswith('_mmi') or imt.startswith('mmi'):
                    continue
                #what distance measure to use here?
                ampquery = 'SELECT amp FROM amp WHERE station_id=%i AND imt_id=%i' % (
                    sid, imtid)
                self.cursor.execute(ampquery)
                imtvalue = self.cursor.fetchone()[0]
                gemimt = GEM_IMT.from_string(IMT_MAP[imt])
                dmmi = gmice.getMIfromGM(imtvalue,
                                         gemimt,
                                         dists=ddict['repi'][0],
                                         mag=emag)
                derived_mmi = imt + '_mmi'
                derived_imtid = imtdict[derived_mmi]
                self.cursor.execute(
                    'INSERT INTO amp (imt_id,amp,station_id,flag) VALUES (%i,%.2f,%i,"0")'
                    % (derived_imtid, dmmi, sid))
                self.db.commit()

            #calculate all derived pgm values
            mmiquery = 'SELECT amp FROM amp WHERE station_id=%i AND imt_id=%i' % (
                sid, imtdict['mmi'])
            self.cursor.execute(mmiquery)
            mmivalue = self.cursor.fetchone()[0]
            for imt, imtid in imtdict.items():
                if not imt.startswith('mmi_'):
                    continue
                #what distance measure to use here?
                tmp, derived_imt = imt.split('_')
                gemimt = GEM_IMT.from_string(IMT_MAP[derived_imt])
                dpgm = gmice.getGMfromMI(mmivalue,
                                         gemimt,
                                         dists=ddict['repi'],
                                         mag=emag)
                self.cursor.execute(
                    'INSERT INTO amp (imt_id,amp,station_id,flag) VALUES (%i,%.2f,%i,"0")'
                    % (imtid, dpgm, sid))
                self.db.commit()
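
The naming convention in fillTables above distinguishes derived-MMI rows (instrumental IMT plus the '_mmi' suffix) from derived-PGM rows (the 'mmi_' prefix plus the target IMT). A tiny stand-alone illustration of how those keys are built (the IMT list is made up):

imts = ['pga', 'pgv', 'mmi_pga', 'mmi_pgv', 'mmi']
derived_mmi = [i + '_mmi' for i in imts
               if not (i.startswith('mmi') or i.endswith('_mmi'))]
derived_pgm = [i.split('_')[1] for i in imts if i.startswith('mmi_')]
print(derived_mmi)  # ['pga_mmi', 'pgv_mmi']
print(derived_pgm)  # ['pga', 'pgv']
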
Code example #52
File: utils.py Project: g-weatherill/oq-engine
def read_cmaker_df(gsim, csvfnames):
    """
    :param gsim:
        a GSIM instance
    :param csvfnames:
        a list of pathnames to CSV files in the format used in
        hazardlib/tests/gsim/data, i.e. with fields rup_XXX, site_XXX,
        dist_XXX, result_type and periods
    :returns: a ContextMaker and a DataFrame with canonical IMT column names
    """
    # build a suitable ContextMaker
    dfs = [pandas.read_csv(fname) for fname in csvfnames]
    num_rows = sum(len(df) for df in dfs)
    if num_rows == 0:
        raise ValueError('The files %s are empty!' % ' '.join(csvfnames))
    logging.info('\n%s' % gsim)
    logging.info('num_checks = {:_d}'.format(num_rows))
    if not all_equals([sorted(df.columns) for df in dfs]):
        colset = set.intersection(*[set(df.columns) for df in dfs])
        cols = [col for col in dfs[0].columns if col in colset]
        extra = set()
        ncols = []
        for df in dfs:
            ncols.append(len(df.columns))
            extra.update(set(df.columns) - colset)
        print('\n%s\nThere are %d extra columns %s over a total of %s' %
              (csvfnames[0], len(extra), extra, ncols))
    else:
        cols = slice(None)
    df = pandas.concat(d[cols] for d in dfs)
    sizes = {r: len(d) for r, d in df.groupby('result_type')}
    if not all_equals(list(sizes.values())):
        raise ValueError('Inconsistent number of rows: %s' % sizes)
    imts = []
    cmap = {}
    for col in df.columns:
        try:
            im = str(imt.from_string(col.upper()))
        except KeyError:
            pass
        else:
            imts.append(im)
            cmap[col] = im
    if gsim.__class__.__name__.endswith('AvgSA'):  # special case
        imts.append('AvgSA')
    assert imts
    imtls = {im: [0] for im in sorted(imts)}
    trt = gsim.DEFINED_FOR_TECTONIC_REGION_TYPE
    cmaker = contexts.ContextMaker(trt.value if trt else "*", [gsim],
                                   {'imtls': imtls})
    for dist in cmaker.REQUIRES_DISTANCES:
        name = 'dist_' + dist
        df[name] = np.array(df[name].to_numpy(), cmaker.dtype[dist])
        logging.info(name, df[name].unique())
    for dist in cmaker.REQUIRES_SITES_PARAMETERS:
        name = 'site_' + dist
        df[name] = np.array(df[name].to_numpy(), cmaker.dtype[dist])
        logging.info(name, df[name].unique())
    for par in cmaker.REQUIRES_RUPTURE_PARAMETERS:
        name = 'rup_' + par
        if name not in df.columns:  # i.e. missing rake
            df[name] = np.zeros(len(df), cmaker.dtype[par])
        else:
            df[name] = np.array(df[name].to_numpy(), cmaker.dtype[par])
        logging.info(name, df[name].unique())
    logging.info('result_type', df['result_type'].unique())
    return cmaker, df.rename(columns=cmap)
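
The column scan above keeps only the names that parse as intensity measure types; a small stand-alone illustration of that filter (mirroring the try/except in the snippet, with an extra catch for the ValueError raised by older hazardlib versions):

from openquake.hazardlib import imt

for col in ['pga', 'sa(0.2)', 'rup_mag', 'result_type']:
    try:
        print(col, '->', imt.from_string(col.upper()))
    except (KeyError, ValueError):
        print(col, '-> not an IMT column')
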
Code example #53
def compute_disagg(dstore, slc, cmaker, hmap4, trti, magi, bin_edges, monitor):
    # see https://bugs.launchpad.net/oq-engine/+bug/1279247 for an explanation
    # of the algorithm used
    """
    :param dstore:
        a DataStore instance
    :param slc:
        a slice of ruptures
    :param cmaker:
        a :class:`openquake.hazardlib.gsim.base.ContextMaker` instance
    :param hmap4:
        an ArrayWrapper of shape (N, M, P, Z)
    :param trti:
        tectonic region type index
    :param magi:
        magnitude bin indices
    :param bin_edges:
        a quartet (dist_edges, lon_edges, lat_edges, eps_edges)
    :param monitor:
        monitor of the currently running job
    :yields:
        dictionaries with keys 'trti', 'magi' and (sid, imti) -> 6D-array
    """
    RuptureContext.temporal_occurrence_model = PoissonTOM(
        cmaker.investigation_time)
    with monitor('reading contexts', measuremem=True):
        dstore.open('r')
        allctxs, close_ctxs = read_ctxs(
            dstore, slc, req_site_params=cmaker.REQUIRES_SITES_PARAMETERS)
        for magidx, ctx in zip(magi, allctxs):
            ctx.magi = magidx
    dis_mon = monitor('disaggregate', measuremem=False)
    ms_mon = monitor('disagg mean_std', measuremem=True)
    N, M, P, Z = hmap4.shape
    g_by_z = AccumDict(accum={})  # dict s -> z -> g
    for g, rlzs in enumerate(cmaker.gsims.values()):
        for (s, z), r in numpy.ndenumerate(hmap4.rlzs):
            if r in rlzs:
                g_by_z[s][z] = g
    eps3 = disagg._eps3(cmaker.trunclevel, cmaker.num_epsilon_bins)
    imts = [from_string(im) for im in cmaker.imtls]
    for magi, ctxs in groupby(allctxs, operator.attrgetter('magi')).items():
        res = {'trti': trti, 'magi': magi}
        with ms_mon:
            # compute mean and std for a single IMT to save memory
            # the size is N * U * G * 16 bytes
            disagg.set_mean_std(ctxs, imts, cmaker.gsims)

        # disaggregate by site, IMT
        for s, iml3 in enumerate(hmap4):
            close = [ctx for ctx in close_ctxs[s] if ctx.magi == magi]
            if not g_by_z[s] or not close:
                # g_by_z[s] is empty in test case_7
                continue
            # dist_bins, lon_bins, lat_bins, eps_bins
            bins = (bin_edges[1], bin_edges[2][s], bin_edges[3][s],
                    bin_edges[4])
            iml2 = dict(zip(imts, iml3))
            with dis_mon:
                # 7D-matrix #distbins, #lonbins, #latbins, #epsbins, M, P, Z
                matrix = disagg.disaggregate(
                    close, g_by_z[s], iml2, eps3, s, bins)  # 7D-matrix
                for m in range(M):
                    mat6 = matrix[..., m, :, :]
                    if mat6.any():
                        res[s, m] = output(mat6)
        yield res
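
The groupby(allctxs, operator.attrgetter('magi')) call above buckets contexts by magnitude-bin index before computing the means and standard deviations; the helper is presumably openquake.baselib.general.groupby, but a plain-Python stand-in behaves like this (the Ctx class is only a stand-in):

import operator
from collections import defaultdict

class Ctx:  # minimal stand-in for a RuptureContext carrying a magi attribute
    def __init__(self, magi):
        self.magi = magi

def groupby(items, key):
    out = defaultdict(list)
    for item in items:
        out[key(item)].append(item)
    return dict(out)

ctxs = [Ctx(0), Ctx(1), Ctx(0)]
print({k: len(v) for k, v in groupby(ctxs, operator.attrgetter('magi')).items()})
# {0: 2, 1: 1}
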
Code example #54
    def __init__(self, **names_vals):
        # support legacy names
        for name in list(names_vals):
            if name == 'quantile_hazard_curves':
                names_vals['quantiles'] = names_vals.pop(name)
            elif name == 'mean_hazard_curves':
                names_vals['mean'] = names_vals.pop(name)
            elif name == 'max':
                names_vals['max'] = names_vals.pop(name)
        super().__init__(**names_vals)
        job_ini = self.inputs['job_ini']
        if 'calculation_mode' not in names_vals:
            raise InvalidFile('Missing calculation_mode in %s' % job_ini)
        if 'region_constraint' in names_vals:
            if 'region' in names_vals:
                raise InvalidFile('You cannot have both region and '
                                  'region_constraint in %s' % job_ini)
            logging.warning(
                'region_constraint is obsolete, use region instead')
            self.region = valid.wkt_polygon(
                names_vals.pop('region_constraint'))
        self.risk_investigation_time = (
            self.risk_investigation_time or self.investigation_time)
        self.collapse_level = int(self.collapse_level)
        if ('intensity_measure_types_and_levels' in names_vals and
                'intensity_measure_types' in names_vals):
            logging.warning('Ignoring intensity_measure_types since '
                            'intensity_measure_types_and_levels is set')
        if 'iml_disagg' in names_vals:
            self.iml_disagg.pop('default')
            # normalize things like SA(0.10) -> SA(0.1)
            self.iml_disagg = {str(from_string(imt)): val
                               for imt, val in self.iml_disagg.items()}
            self.hazard_imtls = self.iml_disagg
            if 'intensity_measure_types_and_levels' in names_vals:
                raise InvalidFile(
                    'Please remove the intensity_measure_types_and_levels '
                    'from %s: they will be inferred from the iml_disagg '
                    'dictionary' % job_ini)
        elif 'intensity_measure_types_and_levels' in names_vals:
            self.hazard_imtls = self.intensity_measure_types_and_levels
            delattr(self, 'intensity_measure_types_and_levels')
            lens = set(map(len, self.hazard_imtls.values()))
            if len(lens) > 1:
                dic = {imt: len(ls) for imt, ls in self.hazard_imtls.items()}
                warnings.warn(
                    'Each IMT must have the same number of levels, instead '
                    'you have %s' % dic, DeprecationWarning)
        elif 'intensity_measure_types' in names_vals:
            self.hazard_imtls = dict.fromkeys(self.intensity_measure_types)
            delattr(self, 'intensity_measure_types')
        self._risk_files = get_risk_files(self.inputs)

        self.check_source_model()
        if self.hazard_precomputed() and self.job_type == 'risk':
            self.check_missing('site_model', 'debug')
            self.check_missing('gsim_logic_tree', 'debug')
            self.check_missing('source_model_logic_tree', 'debug')

        # check the gsim_logic_tree
        if self.inputs.get('gsim_logic_tree'):
            if self.gsim != '[FromFile]':
                raise InvalidFile('%s: if `gsim_logic_tree_file` is set, there'
                                  ' must be no `gsim` key' % job_ini)
            path = os.path.join(
                self.base_path, self.inputs['gsim_logic_tree'])
            gsim_lt = logictree.GsimLogicTree(path, ['*'])

            # check the number of branchsets
            branchsets = len(gsim_lt._ltnode)
            if 'scenario' in self.calculation_mode and branchsets > 1:
                raise InvalidFile(
                    '%s: %s for a scenario calculation must contain a single '
                    'branchset, found %d!' % (job_ini, path, branchsets))

            # check the IMTs vs the GSIMs
            self._gsims_by_trt = gsim_lt.values
            for gsims in gsim_lt.values.values():
                self.check_gsims(gsims)
        elif self.gsim is not None:
            self.check_gsims([valid.gsim(self.gsim, self.base_path)])

        # check inputs
        unknown = set(self.inputs) - self.KNOWN_INPUTS
        if unknown:
            raise ValueError('Unknown key %s_file in %s' %
                             (unknown.pop(), self.inputs['job_ini']))

        # checks for disaggregation
        if self.calculation_mode == 'disaggregation':
            if not self.poes_disagg and not self.iml_disagg:
                raise InvalidFile('poes_disagg or iml_disagg must be set '
                                  'in %(job_ini)s' % self.inputs)
            elif self.poes_disagg and self.iml_disagg:
                raise InvalidFile(
                    '%s: iml_disagg and poes_disagg cannot be set '
                    'at the same time' % job_ini)
            for k in ('mag_bin_width', 'distance_bin_width',
                      'coordinate_bin_width', 'num_epsilon_bins'):
                if k not in vars(self):
                    raise InvalidFile('%s must be set in %s' % (k, job_ini))

        # checks for classical_damage
        if self.calculation_mode == 'classical_damage':
            if self.conditional_loss_poes:
                raise InvalidFile(
                    '%s: conditional_loss_poes are not defined '
                    'for classical_damage calculations' % job_ini)

        # checks for event_based_risk
        if (self.calculation_mode == 'event_based_risk' and
                self.asset_correlation not in (0, 1)):
            raise ValueError('asset_correlation != {0, 1} is no longer'
                             ' supported')

        # checks for ebrisk
        if self.calculation_mode == 'ebrisk':
            if self.risk_investigation_time is None:
                raise InvalidFile('Please set the risk_investigation_time in'
                                  ' %s' % job_ini)

        # check for GMFs from file
        if (self.inputs.get('gmfs', '').endswith('.csv')
                and 'sites' not in self.inputs and self.sites is None):
            raise InvalidFile('%s: You forgot sites|sites_csv'
                              % job_ini)
        elif self.inputs.get('gmfs', '').endswith('.xml'):
            raise InvalidFile('%s: GMFs in XML are not supported anymore'
                              % job_ini)

        # checks for event_based
        if 'event_based' in self.calculation_mode:
            if self.ses_per_logic_tree_path >= TWO32:
                raise ValueError('ses_per_logic_tree_path too big: %d' %
                                 self.ses_per_logic_tree_path)
            if self.number_of_logic_tree_samples >= TWO16:
                raise ValueError('number_of_logic_tree_samples too big: %d' %
                                 self.number_of_logic_tree_samples)

        # check grid + sites
        if self.region_grid_spacing and ('sites' in self.inputs or self.sites):
            raise ValueError('You are specifying grid and sites at the same '
                             'time: which one do you want?')

        # check for amplification
        if ('amplification' in self.inputs and self.imtls and
                self.calculation_mode in ['classical', 'classical_risk',
                                          'disaggregation']):
            check_same_levels(self.imtls)
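
The iml_disagg normalization above relies on from_string canonicalizing period strings, so keys such as 'SA(0.10)' and 'SA(0.1)' collapse to the same IMT. A quick check (the dictionary values are made up):

from openquake.hazardlib.imt import from_string

iml_disagg = {'SA(0.10)': 0.2, 'PGA': 0.1}
print({str(from_string(k)): v for k, v in iml_disagg.items()})
# {'SA(0.1)': 0.2, 'PGA': 0.1}
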
Code example #55
def signal_end(
    st,
    event_time,
    event_lon,
    event_lat,
    event_mag,
    method=None,
    vmin=None,
    floor=None,
    model=None,
    epsilon=2.0,
):
    """
    Estimate end of signal by using a model of the 5-95% significant
    duration, and adding this value to the "signal_split" time. This probably
    only works well when the split is estimated with a p-wave picker since
    the velocity method often ends up with split times that are well before
    the signal actually starts.

    Args:
        st (StationStream):
            Stream of data.
        event_time (UTCDateTime):
            Event origin time.
        event_mag (float):
            Event magnitude.
        event_lon (float):
            Event longitude.
        event_lat (float):
            Event latitude.
        method (str):
            Method for estimating signal end time. Either 'velocity'
            or 'model'.
        vmin (float):
            Velocity (km/s) for estimating end of signal. Only used if
            method="velocity".
        floor (float):
            Minimum duration (sec) applied along with vmin.
        model (str):
            Short name of duration model to use. Must be defined in the
            gmprocess/data/modules.yml file.
        epsilon (float):
            Number of standard deviations; if epsilon is 1.0, then the signal
            window duration is the mean Ds + 1 standard deviation. Only used
            for method="model".

    Returns:
        stream with each trace's stats dict updated to include a
        stats['processing_parameters']['signal_end'] dictionary.

    """
    # Load openquake stuff if method="model"
    if method == "model":
        dmodel = load_model(model)

        # Set some "conservative" inputs (in that they will tend to give
        # larger durations).
        rctx = RuptureContext()
        rctx.mag = event_mag
        rctx.rake = -90.0
        rctx.vs30 = np.array([180.0])
        rctx.z1pt0 = np.array([0.51])
        dur_imt = imt.from_string("RSD595")
        stddev_types = [const.StdDev.TOTAL]

    for tr in st:
        if not tr.hasParameter("signal_split"):
            continue
        if method == "velocity":
            if vmin is None:
                raise ValueError('Must specify vmin if method is "velocity".')
            if floor is None:
                raise ValueError('Must specify floor if method is "velocity".')
            epi_dist = (gps2dist_azimuth(
                lat1=event_lat,
                lon1=event_lon,
                lat2=tr.stats["coordinates"]["latitude"],
                lon2=tr.stats["coordinates"]["longitude"],
            )[0] / 1000.0)
            end_time = event_time + max(floor, epi_dist / vmin)
        elif method == "model":
            if model is None:
                raise ValueError('Must specify model if method is "model".')
            epi_dist = (gps2dist_azimuth(
                lat1=event_lat,
                lon1=event_lon,
                lat2=tr.stats["coordinates"]["latitude"],
                lon2=tr.stats["coordinates"]["longitude"],
            )[0] / 1000.0)
            # Repi >= Rrup, so substitution here should be conservative
            # (leading to larger durations).
            rctx.rrup = np.array([epi_dist])
            rctx.sids = np.array(range(np.size(rctx.rrup)))
            lnmu, lnstd = dmodel.get_mean_and_stddevs(rctx, rctx, rctx,
                                                      dur_imt, stddev_types)
            duration = np.exp(lnmu + epsilon * lnstd[0])
            # Get split time
            split_time = tr.getParameter("signal_split")["split_time"]
            end_time = split_time + float(duration)
        else:
            raise ValueError('method must be either "velocity" or "model".')
        # Update trace params
        end_params = {
            "end_time": end_time,
            "method": method,
            "vsplit": vmin,
            "floor": floor,
            "model": model,
            "epsilon": epsilon,
        }
        tr.setParameter("signal_end", end_params)

    return st
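
A hypothetical call of signal_end above using the simple velocity method; the stream and event values are assumptions, so the lines are left commented rather than presented as runnable output:

# st = signal_end(st, event_time=origin_time, event_lon=13.35, event_lat=42.35,
#                 event_mag=6.3, method="velocity", vmin=1.0, floor=120.0)
# for tr in st:
#     if tr.hasParameter("signal_end"):
#         print(tr.getParameter("signal_end")["end_time"])
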