Example #1
def read_cmakers(dstore, full_lt=None):
    """
    :param dstore: a DataStore-like object
    :param full_lt: a FullLogicTree instance, if given
    :returns: a list of ContextMaker instances, one per source group
    """
    from openquake.hazardlib.site_amplification import AmplFunction
    cmakers = []
    oq = dstore['oqparam']
    full_lt = full_lt or dstore['full_lt']
    trt_smrs = dstore['trt_smrs'][:]
    toms = dstore['toms'][:]
    rlzs_by_gsim_list = full_lt.get_rlzs_by_gsim_list(trt_smrs)
    trts = list(full_lt.gsim_lt.values)
    num_eff_rlzs = len(full_lt.sm_rlzs)
    start = 0
    for grp_id, rlzs_by_gsim in enumerate(rlzs_by_gsim_list):
        trti = trt_smrs[grp_id][0] // num_eff_rlzs  # index of the TRT
        trt = trts[trti]
        if ('amplification' in oq.inputs
                and oq.amplification_method == 'kernel'):
            df = AmplFunction.read_df(oq.inputs['amplification'])
            oq.af = AmplFunction.from_dframe(df)
        else:
            oq.af = None
        cmaker = ContextMaker(trt, rlzs_by_gsim, oq)
        cmaker.tom = registry[decode(toms[grp_id])](oq.investigation_time)
        cmaker.trti = trti
        cmaker.start = start
        cmaker.grp_id = grp_id
        start += len(rlzs_by_gsim)
        cmakers.append(cmaker)
    return cmakers
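A minimal usage sketch for the function above (hedged: the calc id and the
printed attributes are assumptions; datastore.read and the gsims attribute do
exist in the engine):

# hypothetical usage: rebuild the ContextMakers from a finished calculation
from openquake.commonlib import datastore

dstore = datastore.read(-1)  # -1 = the most recent calculation
for cmaker in read_cmakers(dstore):
    print(cmaker.grp_id, cmaker.trti, len(cmaker.gsims))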
Example #2
    def setUp(self):

        fname = gettemp(ampl_func)
        df = read_csv(fname, {
            'ampcode': ampcode_dt,
            None: numpy.float64
        },
                      index='ampcode')
        self.df = AmplFunction(df)

        # Set GMMs
        gmmA = BooreAtkinson2008()

        # Set parameters
        # NB: a longer distance list [10., 15., 20., 30., 40.] was overridden here
        dsts = [10.]
        imts = [PGA(), SA(1.0)]
        sites = Dummy.get_site_collection(len(dsts), vs30=760.0)
        self.mag = 5.5
        rup = Dummy.get_rupture(mag=self.mag)
        ctx = full_context(sites, rup)
        ctx.rjb = numpy.array(dsts)
        ctx.rrup = numpy.array(dsts)
        self.rrup = ctx.rrup

        # Compute GM on rock
        self.cmaker = ContextMaker('TRT', [gmmA],
                                   dict(imtls={str(im): [0]
                                               for im in imts}))
        [self.meastd] = self.cmaker.get_mean_stds([ctx], const.StdDev.TOTAL)
Example #3
    def setUp(self):

        fname = gettemp(ampl_func)
        df = read_csv(fname, {
            'ampcode': ampcode_dt,
            None: numpy.float64
        },
                      index='ampcode')
        self.df = AmplFunction(df)

        # Set GMMs
        gmmA = BooreAtkinson2008()
        gmmB = BooreEtAl2014()

        # Set parameters
        # NB: a longer distance list [10., 15., 20., 30., 40.] was overridden here
        dsts = [10.]
        imts = [PGA(), SA(1.0)]
        sites = Dummy.get_site_collection(len(dsts), vs30=760.0)
        self.mag = 5.5
        rup = Dummy.get_rupture(mag=self.mag)
        ctx = RuptureContext.full(rup, sites)
        ctx.rjb = numpy.array(dsts)
        ctx.rrup = numpy.array(dsts)
        self.rrup = ctx.rrup

        # Compute GM on rock
        self.meastd = gmmA.get_mean_std([ctx], imts)  # shape (2, N=1, M=2)
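Given the shape noted in the comment above, the rock-motion array can be
unpacked as in this sketch (the axis meanings are an assumption derived from
that comment):

# axis 0: 0 = mean, 1 = total std dev; axis 1: site; axis 2: IMT
mean_ln_pga = self.meastd[0, 0, 0]  # mean ln(PGA) on rock
std_ln_sa1 = self.meastd[1, 0, 1]   # total std dev of ln SA(1.0)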
Example #4
    def test01(self):

        fname = gettemp(ampl_func)
        df = read_csv(fname, {'ampcode': ampcode_dt, None: numpy.float64},
                      index='ampcode')
        sitecode = b'A'

        # NB: a shorter hand-picked list of IMLs was overridden here
        imls_soil = numpy.log(numpy.logspace(-2, 0, num=20))
        self.cmaker.loglevels = ll = DictArray(
            {'PGA': imls_soil, 'SA(1.0)': imls_soil})
        self.cmaker.af = AmplFunction.from_dframe(df)
        self.cmaker.truncation_level = tl = 3

        # The output has shape (1, x, 2): 1 site, x = the number of
        # intensity measure levels times the 2 IMTs, and 2 GMMs
        tmp = _get_poes(self.meastd, ll, tl)

        # NB: the function below is rather slow at the moment
        ctx = unittest.mock.Mock(mag=self.mag, rrup=self.rrup, sids=[0],
                                 sites=dict(ampcode=[sitecode]))
        res = get_poes_site(self.meastd, self.cmaker, ctx)

        if False:  # flip to True to visually compare the soil and rock curves
            import matplotlib.pyplot as plt
            plt.plot(numpy.exp(imls_soil), res[0, 0:len(imls_soil), 0], '-o',
                     label='soil')
            plt.plot(numpy.exp(imls_soil), tmp[0, 0:len(imls_soil), 0], '-o',
                     label='rock')
            plt.legend()
            plt.xscale('log')
            plt.yscale('log')
            plt.grid(which='both')
            plt.show()
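Instead of the disabled plot, the comparison can be made numeric; a minimal
sketch reusing the indexing of the plotting code (this assertion is an
assumption, not part of the original test):

        soil = res[0, :len(imls_soil), 0]
        rock = tmp[0, :len(imls_soil), 0]
        # the kernel amplification should move the soil curve away from rock
        assert not numpy.allclose(soil, rock)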
Example #5
    def setUp(self):
        fname = gettemp(ampl_func)
        df = read_csv(fname, {
            'ampcode': ampcode_dt,
            None: numpy.float64
        },
                      index='ampcode')
        self.af = AmplFunction.from_dframe(df)
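All of these setUp methods read a module-level ampl_func CSV fixture that the
listing does not show. A hypothetical sketch of its shape (the exact columns
and values are assumptions, modelled on the ampcode-indexed table used above):

# hypothetical fixture: one ampcode, one amplification column per IMT
ampl_func = '''\
ampcode,level,PGA,SA(1.0)
A,0.010,1.00,1.00
A,0.100,1.50,1.40
A,1.000,1.20,1.10
'''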
Example #6
    def read_inputs(self):
        """
        Read risk data and sources if any
        """
        oq = self.oqparam
        self._read_risk_data()
        self.check_overflow()  # check if self.sitecol is too large

        if ('amplification' in oq.inputs
                and oq.amplification_method == 'kernel'):
            logging.info('Reading %s', oq.inputs['amplification'])
            df = readinput.get_amplification(oq)
            check_amplification(df, self.sitecol)
            self.af = AmplFunction.from_dframe(df)

        if getattr(self, 'sitecol', None):
            # can be None for the ruptures-only calculator
            with hdf5.File(self.datastore.tempname, 'w') as tmp:
                tmp['sitecol'] = self.sitecol
        if (oq.calculation_mode == 'disaggregation'
                and oq.max_sites_disagg < len(self.sitecol)):
            raise ValueError('Please set max_sites_disagg=%d in %s' %
                             (len(self.sitecol), oq.inputs['job_ini']))
        elif oq.disagg_by_src and len(self.sitecol) > oq.max_sites_disagg:
            raise ValueError(
                'There are too many sites to use disagg_by_src=true')
        if ('source_model_logic_tree' in oq.inputs
                and oq.hazard_calculation_id is None):
            with self.monitor('composite source model', measuremem=True):
                self.csm = csm = readinput.get_composite_source_model(
                    oq, self.datastore.hdf5)
                srcs = [src for sg in csm.src_groups for src in sg]
                if not srcs:
                    raise RuntimeError('All sources were discarded!?')
                logging.info('Checking the sources bounding box')
                sids = self.src_filter().within_bbox(srcs)
                if len(sids) == 0:
                    raise RuntimeError('All sources were discarded!?')
                self.full_lt = csm.full_lt
        self.init()  # do this at the end of pre-execute

        if (not oq.hazard_calculation_id
                and oq.calculation_mode != 'preclassical'
                and not oq.save_disk_space):
            self.gzip_inputs()
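The kernel branch above is driven by two job.ini settings; a minimal sketch of
the relevant fragment (the key names are assumptions inferred from the
oq.inputs['amplification'] and oq.amplification_method lookups in the code):

# hypothetical job.ini fragment
amplification_csv = amplification.csv
amplification_method = kernel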
Example #7
File: base.py Project: ARosemary/oq-engine
    def read_inputs(self):
        """
        Read risk data and sources if any
        """
        oq = self.oqparam
        self._read_risk_data()
        self.check_overflow()  # check if self.sitecol is too large

        if ('amplification' in oq.inputs
                and oq.amplification_method == 'kernel'):
            logging.info('Reading %s', oq.inputs['amplification'])
            df = readinput.get_amplification(oq)
            check_amplification(df, self.sitecol)
            self.af = AmplFunction.from_dframe(df)

        if (oq.calculation_mode == 'disaggregation'
                and oq.max_sites_disagg < len(self.sitecol)):
            raise ValueError('Please set max_sites_disagg=%d in %s' %
                             (len(self.sitecol), oq.inputs['job_ini']))
        if ('source_model_logic_tree' in oq.inputs
                and oq.hazard_calculation_id is None):
            with self.monitor('composite source model', measuremem=True):
                self.csm = csm = readinput.get_composite_source_model(
                    oq, self.datastore.hdf5)
                srcs = [src for sg in csm.src_groups for src in sg]
                if not srcs:
                    raise RuntimeError('All sources were discarded!?')
                logging.info('Checking the sources bounding box')
                sids = self.src_filter().within_bbox(srcs)
                if len(sids) == 0:
                    raise RuntimeError('All sources were discarded!?')
                self.full_lt = csm.full_lt
        self.init()  # do this at the end of pre-execute

        if (not oq.hazard_calculation_id
                and oq.calculation_mode != 'preclassical'
                and not oq.save_disk_space):
            self.gzip_inputs()

        # check DEFINED_FOR_REFERENCE_VELOCITY
        if self.amplifier:
            gsim_lt = readinput.get_gsim_lt(oq)
            self.amplifier.check(self.sitecol.vs30, oq.vs30_tolerance,
                                 gsim_lt.values)
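The Amplifier.check call above guards against GSIMs whose reference rock
velocity is inconsistent with the site model. A hedged sketch of the idea
(not the engine's implementation; the name and signature are assumptions):

def check_reference_velocity(vs30, tolerance, gsims):
    # vs30: array of site velocities; gsims: flattened list of GSIM instances
    for gsim in gsims:
        ref = getattr(gsim, 'DEFINED_FOR_REFERENCE_VELOCITY', None)
        if ref and abs(vs30 - ref).max() > tolerance:
            raise ValueError('vs30=%s is far from the reference velocity '
                             '%s of %s' % (vs30, ref, gsim))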
Example #8
File: base.py Project: jotru/oq-engine
    def read_inputs(self):
        """
        Read risk data and sources if any
        """
        oq = self.oqparam
        self._read_risk_data()
        self.check_overflow()  # check if self.sitecol is too large

        if ('amplification' in oq.inputs
                and oq.amplification_method == 'kernel'):
            logging.info('Reading %s', oq.inputs['amplification'])
            df = readinput.get_amplification(oq)
            check_amplification(df, self.sitecol)
            self.af = AmplFunction.from_dframe(df)

        if (oq.calculation_mode == 'disaggregation'
                and oq.max_sites_disagg < len(self.sitecol)):
            raise ValueError('Please set max_sites_disagg=%d in %s' %
                             (len(self.sitecol), oq.inputs['job_ini']))
        if ('source_model_logic_tree' in oq.inputs
                and oq.hazard_calculation_id is None):
            with self.monitor('composite source model', measuremem=True):
                self.csm = csm = readinput.get_composite_source_model(
                    oq, self.datastore.hdf5)
                mags_by_trt = csm.get_mags_by_trt()
                oq.maximum_distance.interp(mags_by_trt)
                for trt in mags_by_trt:
                    self.datastore['source_mags/' + trt] = numpy.array(
                        mags_by_trt[trt])
                self.full_lt = csm.full_lt
        self.init()  # do this at the end of pre-execute
        self.pre_checks()

        if (not oq.hazard_calculation_id
                and oq.calculation_mode != 'preclassical'
                and not oq.save_disk_space):
            self.gzip_inputs()

        # check DEFINED_FOR_REFERENCE_VELOCITY
        if self.amplifier:
            gsim_lt = readinput.get_gsim_lt(oq)
            self.amplifier.check(self.sitecol.vs30, oq.vs30_tolerance,
                                 gsim_lt.values)
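Example #8 differs from #7 in that it interpolates a magnitude-dependent
maximum_distance per tectonic region type and stores the source magnitudes.
A hypothetical job.ini fragment enabling that feature (the magnitude-distance
pair syntax is an assumption and the values are made up):

maximum_distance = [(5.0, 100), (6.5, 200), (8.0, 300)]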
Example #9
File: base.py Project: ARosemary/oq-engine
    def _read_risk_data(self):
        # read the exposure (if any), the risk model (if any) and then the
        # site collection, possibly extracted from the exposure.
        oq = self.oqparam
        self.load_crmodel()  # must be called first

        if oq.hazard_calculation_id:
            with util.read(oq.hazard_calculation_id) as dstore:
                haz_sitecol = dstore['sitecol'].complete
                if ('amplification' in oq.inputs
                        and 'ampcode' not in haz_sitecol.array.dtype.names):
                    haz_sitecol.add_col('ampcode', site.ampcode_dt)
        else:
            haz_sitecol = readinput.get_site_collection(oq, self.datastore)
            if hasattr(self, 'rup'):
                # for scenario we reduce the site collection to the sites
                # within the maximum distance from the rupture
                haz_sitecol, _dctx = self.cmaker.filter(haz_sitecol, self.rup)
                haz_sitecol.make_complete()

            if 'site_model' in oq.inputs:
                self.datastore['site_model'] = readinput.get_site_model(oq)

        oq_hazard = (self.datastore.parent['oqparam']
                     if self.datastore.parent else None)
        if 'exposure' in oq.inputs:
            exposure = self.read_exposure(haz_sitecol)
            self.datastore['assetcol'] = self.assetcol
            self.datastore['cost_calculator'] = exposure.cost_calculator
            if hasattr(readinput.exposure, 'exposures'):
                self.datastore['assetcol/exposures'] = (numpy.array(
                    exposure.exposures, hdf5.vstr))
        elif 'assetcol' in self.datastore.parent:
            assetcol = self.datastore.parent['assetcol']
            if oq.region:
                region = wkt.loads(oq.region)
                self.sitecol = haz_sitecol.within(region)
            if oq.shakemap_id or 'shakemap' in oq.inputs:
                self.sitecol, self.assetcol = self.read_shakemap(
                    haz_sitecol, assetcol)
                self.datastore['assetcol'] = self.assetcol
                logging.info('Extracted %d/%d assets', len(self.assetcol),
                             len(assetcol))
                nsites = len(self.sitecol)
                if (oq.spatial_correlation != 'no'
                        and nsites > MAXSITES):  # hard-coded, heuristic
                    raise ValueError(CORRELATION_MATRIX_TOO_LARGE % nsites)
            elif hasattr(self, 'sitecol') and general.not_equal(
                    self.sitecol.sids, haz_sitecol.sids):
                self.assetcol = assetcol.reduce(self.sitecol)
                self.datastore['assetcol'] = self.assetcol
                logging.info('Extracted %d/%d assets', len(self.assetcol),
                             len(assetcol))
            else:
                self.assetcol = assetcol
        else:  # no exposure
            self.sitecol = haz_sitecol
            if self.sitecol:
                logging.info('Read N=%d hazard sites and L=%d hazard levels',
                             len(self.sitecol), len(oq.imtls.array))

        if oq_hazard:
            parent = self.datastore.parent
            if 'assetcol' in parent:
                check_time_event(oq, parent['assetcol'].occupancy_periods)
            elif oq.job_type == 'risk' and 'exposure' not in oq.inputs:
                raise ValueError('Missing exposure both in hazard and risk!')
            if oq_hazard.time_event and oq_hazard.time_event != oq.time_event:
                raise ValueError(
                    'The risk configuration file has time_event=%s but the '
                    'hazard was computed with time_event=%s' %
                    (oq.time_event, oq_hazard.time_event))

        if oq.job_type == 'risk':
            tmap_arr, tmap_lst = logictree.taxonomy_mapping(
                self.oqparam.inputs.get('taxonomy_mapping'),
                self.assetcol.tagcol.taxonomy)
            self.crmodel.tmap = tmap_lst
            if len(tmap_arr):
                self.datastore['taxonomy_mapping'] = tmap_arr
            taxonomies = set(taxo for items in self.crmodel.tmap
                             for taxo, weight in items if taxo != '?')
            # check that we are covering all the taxonomies in the exposure
            missing = taxonomies - set(self.crmodel.taxonomies)
            if self.crmodel and missing:
                raise RuntimeError('The exposure contains the taxonomies %s '
                                   'which are not in the risk model' % missing)
            if len(self.crmodel.taxonomies) > len(taxonomies):
                logging.info('Reducing risk model from %d to %d taxonomies',
                             len(self.crmodel.taxonomies), len(taxonomies))
                self.crmodel = self.crmodel.reduce(taxonomies)
                self.crmodel.tmap = tmap_lst
            self.crmodel.vectorize_cons_model(self.assetcol.tagcol)

        if hasattr(self, 'sitecol') and self.sitecol:
            if 'site_model' in oq.inputs:
                assoc_dist = (oq.region_grid_spacing * 1.414
                              if oq.region_grid_spacing else 5)  # Graeme's 5km
                sm = readinput.get_site_model(oq)
                self.sitecol.complete.assoc(sm, assoc_dist)
            self.datastore['sitecol'] = self.sitecol.complete

        # store amplification functions if any
        self.af = None
        if 'amplification' in oq.inputs:
            logging.info('Reading %s', oq.inputs['amplification'])
            df = readinput.get_amplification(oq)
            check_amplification(df, self.sitecol)
            self.amplifier = Amplifier(oq.imtls, df, oq.soil_intensities)
            if oq.amplification_method == 'kernel':
                # TODO: need to add additional checks on the main calculation
                # methodology since the kernel method is currently tested only
                # for classical PSHA
                self.af = AmplFunction.from_dframe(df)
                self.amplifier = None
        else:
            self.amplifier = None

        # manage secondary perils
        sec_perils = oq.get_sec_perils()
        for sp in sec_perils:
            sp.prepare(self.sitecol)  # add columns as needed

        self.param = dict(individual_curves=oq.individual_curves,
                          collapse_level=oq.collapse_level,
                          avg_losses=oq.avg_losses,
                          amplifier=self.amplifier,
                          sec_perils=sec_perils,
                          ses_seed=oq.ses_seed)

        # compute exposure stats
        if hasattr(self, 'assetcol'):
            save_exposed_values(self.datastore, self.assetcol, oq.loss_names,
                                oq.aggregate_by)
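The amplification handling in _read_risk_data reduces to a small decision;
restated here as a standalone sketch of the same branching (the function name
is hypothetical):

def choose_amplification(oq, df):
    """Return the (amplifier, af) pair implied by the code above."""
    if 'amplification' not in oq.inputs:
        return None, None
    if oq.amplification_method == 'kernel':
        return None, AmplFunction.from_dframe(df)  # kernel method
    return Amplifier(oq.imtls, df, oq.soil_intensities), None  # convolution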