    def test_trivial(self):
        # using the heaviside function, i.e. `amplify_one` has contributions
        # only for soil_intensity < a * mid_intensity with a=1;
        # in this case the minimum mid_intensity is 0.0015, which is smaller
        # than the minimum soil intensity 0.0020, so some contribution is
        # lost: this is why the first poe is 0.985 instead of 0.989
        fname = gettemp(trivial_ampl_func)
        df = read_csv(fname, {'ampcode': ampcode_dt, None: numpy.float64},
                      index='ampcode')
        a = Amplifier(self.imtls, df, self.soil_levels)
        a.check(self.vs30, 0)
        numpy.testing.assert_allclose(a.midlevels, [
            0.0015, 0.0035, 0.0075, 0.015, 0.035, 0.075, 0.15, 0.35, 0.75, 1.1
        ])
        poes = a.amplify_one(b'A', 'SA(0.1)', self.hcurve[1]).flatten()
        numpy.testing.assert_allclose(
            poes, [0.985, 0.98, 0.97, 0.94, 0.89, 0.79, 0.69], atol=1E-6)
        poes = a.amplify_one(b'A', 'SA(0.2)', self.hcurve[2]).flatten()
        numpy.testing.assert_allclose(
            poes, [0.985, 0.98, 0.97, 0.94, 0.89, 0.79, 0.69], atol=1E-6)
        poes = a.amplify_one(b'A', 'SA(0.5)', self.hcurve[3]).flatten()
        numpy.testing.assert_allclose(
            poes, [0.985, 0.98, 0.97, 0.94, 0.89, 0.79, 0.69], atol=1E-6)
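A note on the midlevels asserted above: each midlevel is the arithmetic
midpoint of two consecutive intensity levels, so the expected array can be
reproduced directly. In the sketch below the `imls` are inferred from the
asserted midpoints, not copied from the original fixture:

import numpy
imls = numpy.array([0.001, 0.002, 0.005, 0.01, 0.02, 0.05,
                    0.1, 0.2, 0.5, 1.0, 1.2])  # assumed intensity levels
midlevels = (imls[:-1] + imls[1:]) / 2  # midpoint of each consecutive pair
numpy.testing.assert_allclose(midlevels, [
    0.0015, 0.0035, 0.0075, 0.015, 0.035, 0.075, 0.15, 0.35, 0.75, 1.1])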
    def test_double(self):
        fname = gettemp(double_ampl_func)
        df = read_csv(fname, {'ampcode': ampcode_dt, None: numpy.float64},
                      index='ampcode')

        a = Amplifier(self.imtls, df, self.soil_levels)
        poes = a.amplify_one(b'A', 'SA(0.1)', self.hcurve[1]).flatten()
        numpy.testing.assert_allclose(poes, [
            0.985122, 0.979701, 0.975965, 0.96634, 0.922497, 0.886351, 0.790249
        ], atol=1E-6)

        poes = a.amplify_one(b'A', 'SA(0.2)', self.hcurve[2]).flatten()
        numpy.testing.assert_allclose(poes, [
            0.985122, 0.979701, 0.975965, 0.96634, 0.922497, 0.886351, 0.790249
        ], atol=1E-6)

        poes = a.amplify_one(b'A', 'SA(0.5)', self.hcurve[3]).flatten()
        numpy.testing.assert_allclose(poes, [
            0.985122, 0.979701, 0.975965, 0.96634, 0.922497, 0.886351, 0.790249
        ], atol=1E-6)

        # amplify GMFs without sigmas
        gmvs = a._amplify_gmvs(b'A', numpy.array([.1, .2, .3]), 'SA(0.5)')
        numpy.testing.assert_allclose(gmvs, [.2, .4, .6])
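The last assertion pins down what double_ampl_func encodes: with no sigmas in
the amplification function, amplifying a GMF reduces to multiplying each
ground motion value by the median factor, here 2. A minimal sketch of that
no-uncertainty path (the helper below is hypothetical; the factor comes from
the assertion above):

import numpy

def amplify_without_sigma(gmvs, factor=2.0):  # hypothetical helper
    # deterministic amplification: soil gmv = rock gmv * median factor
    return numpy.asarray(gmvs) * factor

numpy.testing.assert_allclose(amplify_without_sigma([.1, .2, .3]), [.2, .4, .6])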
    def test_dupl(self):
        fname = gettemp(dupl_ampl_func)
        df = read_csv(fname, {'ampcode': ampcode_dt, None: numpy.float64},
                      index='ampcode')
        with self.assertRaises(ValueError) as ctx:
            Amplifier(self.imtls, df, self.soil_levels)
        self.assertEqual(str(ctx.exception), "Found duplicates for b'A'")
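For context, the ValueError is presumably triggered by two rows sharing the
same ampcode and level in the amplification table. A hypothetical fixture of
that shape (not the actual dupl_ampl_func):

dupl_example = '''\
ampcode,level,PGA,SA(0.1)
A,0.010,1.0,1.0
A,0.010,1.5,1.5
'''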
    def test_simple(self):
        fname = gettemp(simple_ampl_func)
        df = read_csv(fname, {'ampcode': ampcode_dt, None: numpy.float64},
                      index='ampcode')
        a = Amplifier(self.imtls, df, self.soil_levels)
        # a.check(self.vs30, vs30_tolerance=1)
        poes = a.amplify_one(b'A', 'SA(0.1)', self.hcurve[1]).flatten()
        numpy.testing.assert_allclose(poes, [
            0.981141, 0.975771, 0.964955, 0.935616, 0.882413, 0.785659,
            0.636667
        ], atol=1e-6)

        poes = a.amplify_one(b'A', 'SA(0.2)', self.hcurve[2]).flatten()
        numpy.testing.assert_allclose(poes, [
            0.981141, 0.975771, 0.964955, 0.935616, 0.882413, 0.785659,
            0.636667
        ], atol=1e-6)

        poes = a.amplify_one(b'A', 'SA(0.5)', self.hcurve[3]).flatten()
        numpy.testing.assert_allclose(poes, [
            0.981681, 0.976563, 0.967238, 0.940109, 0.890456, 0.799286,
            0.686047
        ], atol=1e-6)

        # Amplify GMFs with sigmas
        numpy.random.seed(42)
        gmvs = a._amplify_gmvs(b'A', numpy.array([.005, .010, .015]), 'PGA')
        numpy.testing.assert_allclose(gmvs, [0.005401, 0.010356, 0.016704],
                                      atol=1E-5)
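The seeded values above differ slightly from a plain factor * gmv product
because simple_ampl_func evidently carries non-zero sigmas: as the
uncertainty test further below confirms, each amplified GMV is drawn with
multiplicative lognormal noise around the median amplification. A sketch of
the idea with made-up median and sigma (not the fixture's actual values):

import numpy
numpy.random.seed(42)
gmvs_rock = numpy.array([.005, .010, .015])
median, sigma = 1.0, 0.1  # hypothetical amplification median and sigma
eps = numpy.random.normal(0., sigma, len(gmvs_rock))  # log-space residuals
gmvs_soil = gmvs_rock * median * numpy.exp(eps)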
Example 5
    def test_double(self):
        fname = gettemp(double_ampl_func)
        aw = read_csv(fname, {'ampcode': 'S2', 'level': numpy.uint8,
                              None: numpy.float64})
        a = Amplifier(self.imtls, aw)
        poes = a.amplify_one(b'A', 'SA(0.1)', self.hcurve[1]).flatten()
        numpy.testing.assert_allclose(poes, [
            0.989, 0.989, 0.985, 0.98, 0.97, 0.94, 0.89, 0.79, 0.69, 0.09, 0.09
        ], atol=1E-6)

        poes = a.amplify_one(b'A', 'SA(0.2)', self.hcurve[2]).flatten()
        numpy.testing.assert_allclose(poes, [
            0.989, 0.989, 0.985, 0.98, 0.97, 0.94, 0.89, 0.79, 0.69, 0.09, 0.09
        ], atol=1E-6)

        poes = a.amplify_one(b'A', 'SA(0.5)', self.hcurve[3]).flatten()
        numpy.testing.assert_allclose(poes, [
            0.989, 0.989, 0.985, 0.98, 0.97, 0.94, 0.89, 0.79, 0.69, 0.09, 0.09
        ], atol=1E-6)
    def test_resampling(self):
        path = os.path.dirname(os.path.abspath(__file__))

        # Read AF
        f_af = os.path.join(path, 'data', 'convolution', 'amplification.csv')
        df_af = read_csv(f_af, {'ampcode': ampcode_dt, None: numpy.float64},
                         index='ampcode')

        # Read hc
        f_hc = os.path.join(path, 'data', 'convolution', 'hazard_curve.csv')
        df_hc = pd.read_csv(f_hc, skiprows=1)

        # Get imls from the hc
        imls = []
        pattern = r'poe-(\d*\.\d*)'
        for k in df_hc.columns:
            m = re.match(pattern, k)
            if m:
                imls.append(float(m.group(1)))
        imtls = DictArray({'PGA': imls})

        # Create a list with one ProbabilityCurve instance
        poes = numpy.squeeze(df_hc.iloc[0, 3:].to_numpy())
        tmp = numpy.expand_dims(poes, 1)
        pcurve = ProbabilityCurve(tmp)

        soil_levels = numpy.geomspace(0.001, 2, 50)
        a = Amplifier(imtls, df_af, soil_levels)
        res = a.amplify(b'MQ15', pcurve)

        tmp = 'hazard_curve_expected.csv'
        fname_expected = os.path.join(path, 'data', 'convolution', tmp)
        expected = numpy.loadtxt(fname_expected)

        numpy.testing.assert_allclose(numpy.squeeze(res.array), expected)
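The column parsing above relies on hazard-curve headers of the form
'poe-<iml>'; a quick standalone check of the pattern:

import re
pattern = r'poe-(\d*\.\d*)'
assert float(re.match(pattern, 'poe-0.005').group(1)) == 0.005
assert re.match(pattern, 'lon') is None  # non-poe columns are skipped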
Example 7
    def test_double(self):
        fname = gettemp(double_ampl_func)
        aw = read_csv(fname, {'ampcode': ampcode_dt, None: numpy.float64})
        a = Amplifier(self.imtls, aw)
        poes = a.amplify_one(b'A', 'SA(0.1)', self.hcurve[1]).flatten()
        numpy.testing.assert_allclose(poes, [
            0.989, 0.989, 0.985, 0.98, 0.97, 0.94, 0.89, 0.79, 0.69, 0.09, 0.09
        ], atol=1E-6)

        poes = a.amplify_one(b'A', 'SA(0.2)', self.hcurve[2]).flatten()
        numpy.testing.assert_allclose(poes, [
            0.989, 0.989, 0.985, 0.98, 0.97, 0.94, 0.89, 0.79, 0.69, 0.09, 0.09
        ], atol=1E-6)

        poes = a.amplify_one(b'A', 'SA(0.5)', self.hcurve[3]).flatten()
        numpy.testing.assert_allclose(poes, [
            0.989, 0.989, 0.985, 0.98, 0.97, 0.94, 0.89, 0.79, 0.69, 0.09, 0.09
        ], atol=1E-6)

        # amplify GMFs without sigmas
        gmvs = a._amplify_gmvs(b'A', numpy.array([.1, .2, .3]), 'SA(0.5)')
        numpy.testing.assert_allclose(gmvs, [.2, .4, .6])
Example 8
    def test_simple(self):
        fname = gettemp(simple_ampl_func)
        aw = read_csv(fname, {'ampcode': ampcode_dt, None: numpy.float64})
        a = Amplifier(self.imtls, aw, self.soil_levels)
        a.check(self.vs30, vs30_tolerance=1)
        poes = a.amplify_one(b'A', 'SA(0.1)', self.hcurve[1]).flatten()
        numpy.testing.assert_allclose(
            poes,
            [0.985002, 0.979997, 0.970004, 0.940069, 0.889961, 0.79, 0.690037],
            atol=1E-6)

        poes = a.amplify_one(b'A', 'SA(0.2)', self.hcurve[2]).flatten()
        numpy.testing.assert_allclose(
            poes,
            [0.985002, 0.979997, 0.970004, 0.940069, 0.889961, 0.79, 0.690037],
            atol=1E-6)

        poes = a.amplify_one(b'A', 'SA(0.5)', self.hcurve[3]).flatten()
        numpy.testing.assert_allclose(
            poes,
            [0.985002, 0.979996, 0.969991, 0.940012, 0.889958, 0.79, 0.690037],
            atol=1E-6)

        # amplify GMFs with sigmas
        numpy.random.seed(42)
        gmvs = a._amplify_gmvs(b'A', numpy.array([.005, .010, .015]), 'PGA')
        numpy.testing.assert_allclose(gmvs, [0.005307, 0.010093, 0.016804],
                                      atol=1E-5)
Example 9
    def test_gmf_with_uncertainty(self):
        fname = gettemp(gmf_ampl_func)
        aw = read_csv(fname, {'ampcode': ampcode_dt, None: numpy.float64})
        imtls = {'PGA': self.imls}
        a = Amplifier(imtls, aw, self.soil_levels)
        res = []
        nsim = 10000
        numpy.random.seed(42)  # must be fixed
        for i in range(nsim):
            gmvs = a._amplify_gmvs(b'A', numpy.array([.1, .2, .3]), 'PGA')
            res.append(list(gmvs))
        res = numpy.array(res)
        dat = numpy.reshape(numpy.tile([.1, .2, .3], nsim), (nsim, 3))
        computed = numpy.std(numpy.log(res / dat), axis=0)
        expected = numpy.array([0.3, 0.3, 0.3])
        msg = "Computed and expected std do not match"
        numpy.testing.assert_almost_equal(computed, expected, 2, err_msg=msg)
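The assertion works because, for multiplicative lognormal noise, the
log-ratio log(amplified / original) is normally distributed with standard
deviation equal to the sigma of the amplification function, here 0.3, and
with 10000 simulations the sample std is stable to about two decimals. A
standalone sanity check of the estimator on synthetic residuals (not using
the Amplifier at all):

import numpy
numpy.random.seed(42)
eps = numpy.random.normal(0., 0.3, 10000)  # synthetic log-space residuals
assert abs(numpy.std(eps) - 0.3) < 0.02    # std error is ~0.3/sqrt(2*10000)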
Example 10
    def test_gmf_cata(self):
        fname = gettemp(cata_ampl_func)
        df = read_csv(fname, {'ampcode': ampcode_dt, None: numpy.float64},
                      index='ampcode')
        imtls = DictArray({'PGA': [numpy.nan]})
        a = Amplifier(imtls, df)

        numpy.random.seed(42)  # must be fixed
        gmvs1 = a._amplify_gmvs(b'z1', numpy.array([.1, .2, .3]), 'PGA')
        aac(gmvs1, [0.217124, 0.399295, 0.602515], atol=1E-5)
        gmvs2 = a._amplify_gmvs(b'z2', numpy.array([.1, .2, .3]), 'PGA')
        aac(gmvs2, [0.266652, 0.334187, 0.510845], atol=1E-5)

        numpy.random.seed(43)  # changing the seed changes the results a lot
        gmvs1 = a._amplify_gmvs(b'z1', numpy.array([.1, .2, .3]), 'PGA')
        aac(gmvs1, [0.197304, 0.293422, 0.399669], atol=1E-5)
        gmvs2 = a._amplify_gmvs(b'z2', numpy.array([.1, .2, .3]), 'PGA')
        aac(gmvs2, [0.117069, 0.517284, 0.475571], atol=1E-5)
Example 11
    def calc_stats(self):
        oq = self.oqparam
        hstats = oq.hazard_stats()
        # initialize datasets
        N = len(self.sitecol.complete)
        P = len(oq.poes)
        M = len(oq.imtls)
        if oq.soil_intensities is not None:
            L = M * len(oq.soil_intensities)
        else:
            L = len(oq.imtls.array)
        R = len(self.rlzs_assoc.realizations)
        S = len(hstats)
        if (R > 1 and oq.individual_curves) or not hstats:
            self.datastore.create_dset('hcurves-rlzs', F32, (N, R, L))
            if oq.poes:
                self.datastore.create_dset('hmaps-rlzs', F32, (N, R, M, P))
        if hstats:
            self.datastore.create_dset('hcurves-stats', F32, (N, S, L))
            if oq.poes:
                self.datastore.create_dset('hmaps-stats', F32, (N, S, M, P))
        ct = oq.concurrent_tasks
        logging.info('Building hazard statistics with %d concurrent_tasks', ct)
        weights = [rlz.weight for rlz in self.rlzs_assoc.realizations]
        if 'amplification' in oq.inputs:
            amplifier = Amplifier(oq.imtls, self.datastore['amplification'],
                                  oq.soil_intensities)
            amplifier.check(self.sitecol.vs30, oq.vs30_tolerance)
        else:
            amplifier = None
        allargs = [  # this list is very fast to generate
            (getters.PmapGetter(self.datastore, weights, t.sids, oq.poes), N,
             hstats, oq.individual_curves, oq.max_sites_disagg, amplifier)
            for t in self.sitecol.split_in_tiles(ct)
        ]
        self.datastore.swmr_on()
        parallel.Starmap(build_hazard, allargs,
                         h5=self.datastore.hdf5).reduce(self.save_hazard)
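A note on the dataset shapes created above: without amplification the hazard
curves carry L = len(oq.imtls.array) levels, while with amplification each of
the M IMTs is resampled onto the common soil_intensities grid, so L becomes
M * len(oq.soil_intensities). In miniature, with made-up sizes:

import numpy
N, R, M, P = 10, 2, 3, 4  # hypothetical sites, realizations, IMTs, poes
soil_intensities = numpy.geomspace(0.001, 2, 5)
L = M * len(soil_intensities)  # 15 levels after amplification
hcurves_rlzs = numpy.zeros((N, R, L), numpy.float32)  # like 'hcurves-rlzs'
hmaps_rlzs = numpy.zeros((N, R, M, P), numpy.float32)  # like 'hmaps-rlzs'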
    def test_simple(self):
        #
        # MP: some values of the poes computed considering uncertainty
        # were checked with hand calculations
        #
        fname = gettemp(simple_ampl_func)
        df = read_csv(fname, {'ampcode': ampcode_dt, None: numpy.float64},
                      index='ampcode')
        a = Amplifier(self.imtls, df, self.soil_levels)
        a.check(self.vs30, vs30_tolerance=1)
        poes = a.amplify_one(b'A', 'SA(0.1)', self.hcurve[1]).flatten()
        numpy.testing.assert_allclose(poes, [
            0.985008, 0.980001, 0.970019, 0.94006, 0.890007, 0.790198,
            0.690201
        ], atol=1E-6)

        poes = a.amplify_one(b'A', 'SA(0.2)', self.hcurve[2]).flatten()
        numpy.testing.assert_allclose(poes, [
            0.985008, 0.980001, 0.970019, 0.94006, 0.890007, 0.790198,
            0.690201
        ], atol=1E-6)

        poes = a.amplify_one(b'A', 'SA(0.5)', self.hcurve[3]).flatten()
        numpy.testing.assert_allclose(poes, [
            0.985109, 0.980022, 0.970272, 0.940816, 0.890224, 0.792719,
            0.692719
        ], atol=1E-6)

        # Amplify GMFs with sigmas
        numpy.random.seed(42)
        gmvs = a._amplify_gmvs(b'A', numpy.array([.005, .010, .015]), 'PGA')
        numpy.testing.assert_allclose(gmvs, [0.005401, 0.010356, 0.016704],
                                      atol=1E-5)
Example 13
    def test_simple(self):
        fname = gettemp(simple_ampl_func)
        aw = read_csv(fname, {'ampcode': 'S2', 'level': numpy.uint8,
                              None: numpy.float64})
        a = Amplifier(self.imtls, aw, self.soil_levels)
        a.check(self.vs30, 1)
        poes = a.amplify_one(b'A', 'SA(0.1)', self.hcurve[1]).flatten()
        numpy.testing.assert_allclose(
            poes,
            [0.985002, 0.979997, 0.970004, 0.940069, 0.889961, 0.79, 0.690037],
            atol=1E-6)
        poes = a.amplify_one(b'A', 'SA(0.2)', self.hcurve[2]).flatten()
        numpy.testing.assert_allclose(
            poes,
            [0.985002, 0.979997, 0.970004, 0.940069, 0.889961, 0.79, 0.690037],
            atol=1E-6)
        poes = a.amplify_one(b'A', 'SA(0.5)', self.hcurve[3]).flatten()
        numpy.testing.assert_allclose(
            poes,
            [0.985002, 0.979996, 0.969991, 0.940012, 0.889958, 0.79, 0.690037],
            atol=1E-6)
Example 14
    def _read_risk_data(self):
        # read the risk model (if any), the exposure (if any) and then the
        # site collection, possibly extracted from the exposure.
        oq = self.oqparam
        self.load_crmodel()  # must be called first

        if oq.hazard_calculation_id:
            with util.read(oq.hazard_calculation_id) as dstore:
                haz_sitecol = dstore['sitecol'].complete
                if ('amplification' in oq.inputs and
                        'ampcode' not in haz_sitecol.array.dtype.names):
                    haz_sitecol.add_col('ampcode', site.ampcode_dt)
        else:
            haz_sitecol = readinput.get_site_collection(oq)
            if hasattr(self, 'rup'):
                # for scenario we reduce the site collection to the sites
                # within the maximum distance from the rupture
                haz_sitecol, _dctx = self.cmaker.filter(
                    haz_sitecol, self.rup)
                haz_sitecol.make_complete()

            if 'site_model' in oq.inputs:
                self.datastore['site_model'] = readinput.get_site_model(oq)

        oq_hazard = (self.datastore.parent['oqparam']
                     if self.datastore.parent else None)
        if 'exposure' in oq.inputs:
            exposure = self.read_exposure(haz_sitecol)
            self.datastore['assetcol'] = self.assetcol
            self.datastore['cost_calculator'] = exposure.cost_calculator
            if hasattr(readinput.exposure, 'exposures'):
                self.datastore['assetcol/exposures'] = (
                    numpy.array(exposure.exposures, hdf5.vstr))
        elif 'assetcol' in self.datastore.parent:
            assetcol = self.datastore.parent['assetcol']
            if oq.region:
                region = wkt.loads(oq.region)
                self.sitecol = haz_sitecol.within(region)
            if oq.shakemap_id or 'shakemap' in oq.inputs:
                self.sitecol, self.assetcol = self.read_shakemap(
                    haz_sitecol, assetcol)
                self.datastore['assetcol'] = self.assetcol
                logging.info('Extracted %d/%d assets',
                             len(self.assetcol), len(assetcol))
                nsites = len(self.sitecol)
                if (oq.spatial_correlation != 'no' and
                        nsites > MAXSITES):  # hard-coded, heuristic
                    raise ValueError(CORRELATION_MATRIX_TOO_LARGE % nsites)
            elif hasattr(self, 'sitecol') and general.not_equal(
                    self.sitecol.sids, haz_sitecol.sids):
                self.assetcol = assetcol.reduce(self.sitecol)
                self.datastore['assetcol'] = self.assetcol
                logging.info('Extracted %d/%d assets',
                             len(self.assetcol), len(assetcol))
            else:
                self.assetcol = assetcol
        else:  # no exposure
            self.sitecol = haz_sitecol
            if self.sitecol:
                logging.info('Read N=%d hazard sites and L=%d hazard levels',
                             len(self.sitecol), len(oq.imtls.array))

        if oq_hazard:
            parent = self.datastore.parent
            if 'assetcol' in parent:
                check_time_event(oq, parent['assetcol'].occupancy_periods)
            elif oq.job_type == 'risk' and 'exposure' not in oq.inputs:
                raise ValueError('Missing exposure both in hazard and risk!')
            if oq_hazard.time_event and oq_hazard.time_event != oq.time_event:
                raise ValueError(
                    'The risk configuration file has time_event=%s but the '
                    'hazard was computed with time_event=%s' % (
                        oq.time_event, oq_hazard.time_event))

        if oq.job_type == 'risk':
            tmap_arr, tmap_lst = logictree.taxonomy_mapping(
                self.oqparam.inputs.get('taxonomy_mapping'),
                self.assetcol.tagcol.taxonomy)
            self.crmodel.tmap = tmap_lst
            if len(tmap_arr):
                self.datastore['taxonomy_mapping'] = tmap_arr
            taxonomies = set(taxo for items in self.crmodel.tmap
                             for taxo, weight in items if taxo != '?')
            # check that we are covering all the taxonomies in the exposure
            missing = taxonomies - set(self.crmodel.taxonomies)
            if self.crmodel and missing:
                raise RuntimeError('The exposure contains the taxonomies %s '
                                   'which are not in the risk model' % missing)
            if len(self.crmodel.taxonomies) > len(taxonomies):
                logging.info('Reducing risk model from %d to %d taxonomies',
                             len(self.crmodel.taxonomies), len(taxonomies))
                self.crmodel = self.crmodel.reduce(taxonomies)
                self.crmodel.tmap = tmap_lst
            self.crmodel.vectorize_cons_model(self.assetcol.tagcol)

        if hasattr(self, 'sitecol') and self.sitecol:
            if 'site_model' in oq.inputs:
                assoc_dist = (oq.region_grid_spacing * 1.414
                              if oq.region_grid_spacing else 5)  # Graeme's 5km
                sm = readinput.get_site_model(oq)
                self.sitecol.complete.assoc(sm, assoc_dist)
            self.datastore['sitecol'] = self.sitecol.complete

        # store amplification functions if any
        if 'amplification' in oq.inputs:
            logging.info('Reading %s', oq.inputs['amplification'])
            self.datastore['amplification'] = readinput.get_amplification(oq)
            check_amplification(self.datastore)
            self.amplifier = Amplifier(
                oq.imtls, self.datastore['amplification'], oq.soil_intensities)
            self.amplifier.check(self.sitecol.vs30, oq.vs30_tolerance)
        else:
            self.amplifier = None

        # used in the risk calculators
        self.param = dict(individual_curves=oq.individual_curves,
                          avg_losses=oq.avg_losses, amplifier=self.amplifier)

        # compute exposure stats
        if hasattr(self, 'assetcol'):
            save_exposed_values(
                self.datastore, self.assetcol, oq.loss_names, oq.aggregate_by)
Example 15
    def _read_risk_data(self):
        # read the risk model (if any), the exposure (if any) and then the
        # site collection, possibly extracted from the exposure.
        oq = self.oqparam
        self.load_crmodel()  # must be called first
        if (not oq.imtls and 'shakemap' not in oq.inputs
                and oq.ground_motion_fields):
            raise InvalidFile('There are no intensity measure types in %s' %
                              oq.inputs['job_ini'])
        if oq.hazard_calculation_id:
            with util.read(oq.hazard_calculation_id) as dstore:
                haz_sitecol = dstore['sitecol'].complete
                if ('amplification' in oq.inputs
                        and 'ampcode' not in haz_sitecol.array.dtype.names):
                    haz_sitecol.add_col('ampcode', site.ampcode_dt)
        else:
            haz_sitecol = readinput.get_site_collection(oq, self.datastore)
            if hasattr(self, 'rup'):
                # for scenario we reduce the site collection to the sites
                # within the maximum distance from the rupture
                haz_sitecol, _dctx = self.cmaker.filter(haz_sitecol, self.rup)
                haz_sitecol.make_complete()

            if 'site_model' in oq.inputs:
                self.datastore['site_model'] = readinput.get_site_model(oq)

        oq_hazard = (self.datastore.parent['oqparam']
                     if self.datastore.parent else None)
        if 'exposure' in oq.inputs:
            exposure = self.read_exposure(haz_sitecol)
            self.datastore['assetcol'] = self.assetcol
            self.datastore['cost_calculator'] = exposure.cost_calculator
            if hasattr(readinput.exposure, 'exposures'):
                self.datastore['assetcol/exposures'] = (numpy.array(
                    exposure.exposures, hdf5.vstr))
        elif 'assetcol' in self.datastore.parent:
            assetcol = self.datastore.parent['assetcol']
            if oq.region:
                region = wkt.loads(oq.region)
                self.sitecol = haz_sitecol.within(region)
            if oq.shakemap_id or 'shakemap' in oq.inputs:
                self.sitecol, self.assetcol = self.read_shakemap(
                    haz_sitecol, assetcol)
                self.datastore['sitecol'] = self.sitecol
                self.datastore['assetcol'] = self.assetcol
                logging.info('Extracted %d/%d assets', len(self.assetcol),
                             len(assetcol))
                nsites = len(self.sitecol)
                if (oq.spatial_correlation != 'no'
                        and nsites > MAXSITES):  # hard-coded, heuristic
                    raise ValueError(CORRELATION_MATRIX_TOO_LARGE % nsites)
            elif hasattr(self, 'sitecol') and general.not_equal(
                    self.sitecol.sids, haz_sitecol.sids):
                self.assetcol = assetcol.reduce(self.sitecol)
                self.datastore['assetcol'] = self.assetcol
                logging.info('Extracted %d/%d assets', len(self.assetcol),
                             len(assetcol))
            else:
                self.assetcol = assetcol
        else:  # no exposure
            self.sitecol = haz_sitecol
            if self.sitecol and oq.imtls:
                logging.info('Read N=%d hazard sites and L=%d hazard levels',
                             len(self.sitecol), oq.imtls.size)

        if oq_hazard:
            parent = self.datastore.parent
            if 'assetcol' in parent:
                check_time_event(oq, parent['assetcol'].occupancy_periods)
            elif oq.job_type == 'risk' and 'exposure' not in oq.inputs:
                raise ValueError('Missing exposure both in hazard and risk!')
            if oq_hazard.time_event and oq_hazard.time_event != oq.time_event:
                raise ValueError(
                    'The risk configuration file has time_event=%s but the '
                    'hazard was computed with time_event=%s' %
                    (oq.time_event, oq_hazard.time_event))

        if oq.job_type == 'risk':
            tmap_arr, tmap_lst = logictree.taxonomy_mapping(
                self.oqparam.inputs.get('taxonomy_mapping'),
                self.assetcol.tagcol.taxonomy)
            self.crmodel.tmap = tmap_lst
            if len(tmap_arr):
                self.datastore['taxonomy_mapping'] = tmap_arr
            taxonomies = set(taxo for items in self.crmodel.tmap
                             for taxo, weight in items if taxo != '?')
            # check that we are covering all the taxonomies in the exposure
            missing = taxonomies - set(self.crmodel.taxonomies)
            if self.crmodel and missing:
                raise RuntimeError('The exposure contains the taxonomies %s '
                                   'which are not in the risk model' % missing)
            if len(self.crmodel.taxonomies) > len(taxonomies):
                logging.info('Reducing risk model from %d to %d taxonomies',
                             len(self.crmodel.taxonomies), len(taxonomies))
                self.crmodel = self.crmodel.reduce(taxonomies)
                self.crmodel.tmap = tmap_lst
            self.crmodel.reduce_cons_model(self.assetcol.tagcol)

        if hasattr(self, 'sitecol') and self.sitecol:
            if 'site_model' in oq.inputs:
                assoc_dist = (oq.region_grid_spacing * 1.414
                              if oq.region_grid_spacing else 5)  # Graeme's 5km
                sm = readinput.get_site_model(oq)
                self.sitecol.complete.assoc(sm, assoc_dist)
                self.datastore['sitecol'] = self.sitecol

        # store amplification functions if any
        self.af = None
        if 'amplification' in oq.inputs:
            logging.info('Reading %s', oq.inputs['amplification'])
            df = readinput.get_amplification(oq)
            check_amplification(df, self.sitecol)
            self.amplifier = Amplifier(oq.imtls, df, oq.soil_intensities)
            if oq.amplification_method == 'kernel':
                # TODO: need to add additional checks on the main calculation
                # methodology since the kernel method is currently tested only
                # for classical PSHA
                self.af = AmplFunction.from_dframe(df)
                self.amplifier = None
        else:
            self.amplifier = None

        # manage secondary perils
        sec_perils = oq.get_sec_perils()
        for sp in sec_perils:
            sp.prepare(self.sitecol)  # add columns as needed

        mal = {
            lt: getdefault(oq.minimum_asset_loss, lt)
            for lt in oq.loss_names
        }
        if mal:
            logging.info('minimum_asset_loss=%s', mal)
        self.param = dict(individual_curves=oq.individual_curves,
                          ps_grid_spacing=oq.ps_grid_spacing,
                          collapse_level=oq.collapse_level,
                          split_sources=oq.split_sources,
                          avg_losses=oq.avg_losses,
                          amplifier=self.amplifier,
                          sec_perils=sec_perils,
                          ses_seed=oq.ses_seed,
                          minimum_asset_loss=mal)

        # compute exposure stats
        if hasattr(self, 'assetcol'):
            save_agg_values(self.datastore, self.assetcol, oq.loss_names,
                            oq.aggregate_by)
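To summarize the dispatch above: the convolution approach instantiates an
Amplifier, while amplification_method == 'kernel' switches to an AmplFunction
and sets the amplifier to None. A condensed sketch of that logic, with a
simplified signature (not the actual calculator API):

def select_amplification(method, df, imtls, soil_intensities):
    if method == 'kernel':
        # kernel method: currently tested only for classical PSHA
        return None, AmplFunction.from_dframe(df)
    return Amplifier(imtls, df, soil_intensities), None  # convolution path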
Example 16
    def test_dupl(self):
        fname = gettemp(dupl_ampl_func)
        aw = read_csv(fname, {'ampcode': ampcode_dt, None: numpy.float64})
        with self.assertRaises(ValueError):
            Amplifier(self.imtls, aw)