Example #1
0
def get_oqparam(job_ini, pkg=None, calculators=None):
    """
    Build a validated :class:`openquake.commonlib.oqvalidation.OqParam`
    object from one or more INI-style configuration files.

    :param job_ini:
        path to a config file/archive (possibly comma-separated) or a
        dictionary of parameters
    :param pkg:
        Python package in which to look for the configuration file (optional)
    :param calculators:
        optional sequence of calculator names restricting the admissible
        values of `calculation_mode`
    :returns:
        an OqParam instance with validated and casted parameters, plus an
        'inputs' subdictionary mapping parameter names to the absolute
        paths of the files referenced in the job.ini
    """
    # deferred import to break a circular dependency
    from openquake.commonlib.calculators import base

    OqParam.calculation_mode.validator.choices = tuple(
        calculators or base.calculators)

    if isinstance(job_ini, dict):
        params = job_ini
    else:
        basedir = os.path.dirname(pkg.__file__) if pkg else ''
        paths = [os.path.join(basedir, ini) for ini in job_ini.split(',')]
        params = get_params(paths)
    oqparam = OqParam(**params)
    oqparam.validate()
    return oqparam
 def test_missing_export_dir(self):
     # when export_dir is not given, validate() must default it to
     # the user's home directory
     params = dict(
         calculation_mode='event_based',
         inputs={},
         sites='0.1 0.2',
         intensity_measure_types='PGA',
         maximum_distance=400)
     oq = OqParam(**params)
     oq.validate()
     self.assertEqual(oq.export_dir, os.path.expanduser('~'))
 def test_imts_and_imtls(self):
     # when both intensity_measure_types_and_levels and
     # intensity_measure_types are given, the former wins
     oq = OqParam(
         calculation_mode='event_based',
         inputs={},
         sites='0.1 0.2',
         maximum_distance=400,
         intensity_measure_types='PGV',
         intensity_measure_types_and_levels="{'PGA': [0.1, 0.2]}")
     oq.validate()
     self.assertEqual(list(oq.imtls.keys()), ['PGA'])
 def test_missing_export_dir(self):
     # without an export_dir, validate() falls back to the current
     # working directory
     oq = OqParam(
         calculation_mode='event_based',
         inputs=GST,
         sites='0.1 0.2',
         maximum_distance='400',
         reference_vs30_value='200',
         intensity_measure_types='PGA')
     oq.validate()
     self.assertEqual(oq.export_dir, os.getcwd())
Example #5
0
def show(calc_id, key=None, rlzs=None):
    """
    Show the content of a datastore.

    :param calc_id: numeric calculation ID; if 0, show all calculations
    :param key: key of the datastore
    :param rlzs: flag; if given, print out the realizations in order
    """
    if not calc_id:
        if not os.path.exists(datastore.DATADIR):
            return
        rows = []
        for calc_id in datastore.get_calc_ids(datastore.DATADIR):
            try:
                oq = OqParam.from_(datastore.DataStore(calc_id).attrs)
                cmode, descr = oq.calculation_mode, oq.description
            # NB: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; narrowed to Exception
            except Exception:  # invalid datastore directory
                logging.warn('Removed invalid calculation %d', calc_id)
                shutil.rmtree(os.path.join(
                    datastore.DATADIR, 'calc_%s' % calc_id))
            else:
                rows.append((calc_id, cmode, descr))
        for row in sorted(rows, key=lambda row: row[0]):  # by calc_id
            print('#%d %s: %s' % row)
        return
    ds = datastore.DataStore(calc_id)
    if key:
        if key in datastore.view:
            print(datastore.view(key, ds))
            return
        obj = ds[key]
        if hasattr(obj, 'value'):  # an array
            print(write_csv(io.StringIO(), obj.value))
        else:
            print(obj)
        return
    # print all keys
    oq = OqParam.from_(ds.attrs)
    print(oq.calculation_mode, 'calculation (%r) saved in %s contains:' %
          (oq.description, ds.hdf5path))
    for key in ds:
        print(key, humansize(ds.getsize(key)))

    # this part is experimental and not tested on purpose
    if rlzs and 'curves_by_trt_gsim' in ds:
        min_value = 0.01  # used in rmsep
        curves_by_rlz, mean_curves = combined_curves(ds)
        dists = []
        for rlz in sorted(curves_by_rlz):
            curves = curves_by_rlz[rlz]
            dist = sum(rmsep(mean_curves[imt], curves[imt], min_value)
                       for imt in mean_curves.dtype.fields)
            dists.append((dist, rlz))
        for dist, rlz in sorted(dists):
            print('rlz=%s, rmsep=%s' % (rlz, dist))
Example #6
0
def export_ses_xml(ekey, dstore):
    """
    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    :returns: the list of exported file names (empty for scenario
        calculators, which have no csm_info)
    """
    fmt = ekey[-1]
    oq = OqParam.from_(dstore.attrs)
    try:
        csm_info = dstore['rlzs_assoc'].csm_info
    except AttributeError:  # for scenario calculators don't export
        return []
    sescollection = dstore['sescollection']
    col_id = 0
    fnames = []
    for sm in csm_info.source_models:
        for trt_model in sm.trt_models:
            sesruptures = list(sescollection[col_id].values())
            col_id += 1
            ses_coll = SESCollection(
                groupby(sesruptures, operator.attrgetter('ses_idx')),
                sm.path, oq.investigation_time)
            smpath = '_'.join(sm.path)
            fname = 'ses-%d-smltp_%s.%s' % (trt_model.id, smpath, fmt)
            dest = os.path.join(dstore.export_dir, fname)
            globals()['_export_ses_' + fmt](dest, ses_coll)
            # append `dest` directly instead of recomputing the same join
            fnames.append(dest)
    return fnames
Example #7
0
def export_agg_curve(ekey, dstore):
    """
    Export the aggregate loss curves, one file per cost type,
    insured flag and realization.

    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    :returns: the sorted list of exported file names
    """
    oq = OqParam.from_(dstore.attrs)
    cost_types = dstore['cost_types']
    rlzs = dstore['rlzs_assoc'].realizations
    agg_curve = dstore[ekey[0]]
    n_types, n_rlzs = len(cost_types), len(rlzs)
    fnames = []
    for ct in cost_types:
        loss_type = ct['name']
        array = agg_curve[loss_type].value
        for ins in range(oq.insured_losses + 1):
            ins_tag = '_ins' if ins else ''
            for rlz in rlzs:
                if n_types == 1 and n_rlzs == 1:
                    suffix = ''
                else:
                    suffix = '-gsimltp_%s_%s' % (rlz.uid, loss_type)
                dest = dstore.export_path('agg_curve%s%s.%s' % (
                    suffix, ins_tag, ekey[1]))
                rec = array[rlz.ordinal, ins]
                curve = AggCurve(rec['losses'], rec['poes'], rec['avg'], None)
                writer = risk_writers.AggregateLossCurveXMLWriter(
                    dest, oq.investigation_time, loss_type,
                    source_model_tree_path='_'.join(rlz.sm_lt_path),
                    gsim_tree_path='_'.join(rlz.gsim_lt_path),
                    unit=ct['unit'])
                writer.serialize(curve)
                fnames.append(dest)
    return sorted(fnames)
Example #8
0
def export_hcurves_xml_json(ekey, dstore):
    """
    Export hazard curves in XML or GeoJSON format, one file per
    realization and IMT.

    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    :returns: the sorted list of exported file names
    """
    export_type = ekey[1]
    len_ext = len(export_type) + 1
    oq = OqParam.from_(dstore.attrs)
    sitemesh = dstore['sitemesh'].value
    rlzs_assoc = dstore['rlzs_assoc']
    if export_type == 'geojson':
        writercls = hazard_writers.HazardCurveGeoJSONWriter
    else:
        writercls = hazard_writers.HazardCurveXMLWriter
    fnames = []
    rlzs = iter(rlzs_assoc.realizations)
    for kind, curves in dstore[ekey[0]].items():
        rlz = next(rlzs)  # one realization per exported kind
        name = hazard_curve_name(
            dstore, ekey, kind, rlzs_assoc, oq.number_of_logic_tree_samples)
        for imt in oq.imtls:
            fname = '%s-%s.%s' % (name[:-len_ext], imt, export_type)
            data = [HazardCurve(Location(site), poes[imt])
                    for site, poes in zip(sitemesh, curves)]
            writercls(fname, investigation_time=oq.investigation_time,
                      imls=oq.imtls[imt],
                      smlt_path='_'.join(rlz.sm_lt_path),
                      gsimlt_path=rlz.gsim_rlz.uid).serialize(data)
            fnames.append(fname)
    return sorted(fnames)
Example #9
0
    def pre_execute(self):
        """
        Check if there is a pre_calculator or a previous calculation ID.
        If yes, read the inputs by invoking the precalculator or by retrieving
        the previous calculation; if not, read the inputs directly.
        """
        if self.pre_calculator is not None:
            # the parameter hazard_calculation_id is only meaningful if
            # there is a precalculator
            precalc_id = self.oqparam.hazard_calculation_id
            if precalc_id is None:  # recompute everything
                precalc = calculators[self.pre_calculator](
                    self.oqparam, self.monitor('precalculator'),
                    self.datastore.calc_id)
                precalc.run(clean_up=False)
                if 'scenario' not in self.oqparam.calculation_mode:
                    # keep the composite source model built by the
                    # precalculator for the current calculation
                    self.csm = precalc.csm
            else:  # read previously computed data
                self.datastore.set_parent(datastore.DataStore(precalc_id))
                # update oqparam with the attributes saved in the datastore
                self.oqparam = OqParam.from_(self.datastore.attrs)
                self.read_exposure_sitecol()

        else:  # we are in a basic calculator
            self.read_exposure_sitecol()
            self.read_sources()
        # make sure everything read so far reaches the HDF5 file on disk
        self.datastore.hdf5.flush()
Example #10
0
def export_gmf(ekey, dstore):
    """
    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    :returns: the list of exported file names, one per realization
    """
    sitecol = dstore['sitecol']
    rlzs_assoc = dstore['rlzs_assoc']
    rupture_by_tag = sum(dstore['sescollection'], AccumDict())
    all_tags = dstore['tags'].value
    oq = OqParam.from_(dstore.attrs)
    # scenario calculations have no investigation time
    investigation_time = (None if oq.calculation_mode == 'scenario'
                          else oq.investigation_time)
    samples = oq.number_of_logic_tree_samples
    fmt = ekey[-1]
    gmfs = dstore[ekey[0]]
    nbytes = gmfs.attrs['nbytes']
    logging.info('Internal size of the GMFs: %s', humansize(nbytes))
    if nbytes > GMF_MAX_SIZE:
        logging.warn(GMF_WARNING, dstore.hdf5path)
    fnames = []
    for rlz, gmf_by_idx in zip(
            rlzs_assoc.realizations, rlzs_assoc.combine_gmfs(gmfs)):
        tags = all_tags[list(gmf_by_idx)]
        # NB: use a distinct name; the original code rebound `gmfs` here,
        # shadowing the dataset read above, which was error-prone
        gmf_list = list(gmf_by_idx.values())
        if not gmf_list:
            continue
        ruptures = [rupture_by_tag[tag] for tag in tags]
        fname = build_name(dstore, rlz, 'gmf', fmt, samples)
        fnames.append(fname)
        globals()['export_gmf_%s' % fmt](
            ('gmf', fmt), fname, sitecol,
            ruptures, gmf_list, rlz, investigation_time)
    return fnames
Example #11
0
def get_data_transfer(dstore):
    """
    Determine the amount of data transferred from the controller node
    to the workers and back in a classical calculation.

    :param dstore: a :class:`openquake.commonlib.datastore.DataStore` instance
    :returns: (block_info, to_send_forward, to_send_back)
    """
    oqparam = OqParam.from_(dstore.attrs)
    sitecol = dstore['sitecol']
    rlzs_assoc = dstore['rlzs_assoc']
    info = dstore['job_info']
    sources = dstore['composite_source_model'].get_sources()
    num_gsims_by_trt = groupby(rlzs_assoc, operator.itemgetter(0),
                               lambda group: sum(1 for row in group))
    gsims_assoc = rlzs_assoc.gsims_by_trt_id
    forward_bytes = 0
    back_bytes = 0
    block_info = []
    for block in split_in_blocks(sources, oqparam.concurrent_tasks or 1,
                                 operator.attrgetter('weight'),
                                 operator.attrgetter('trt_model_id')):
        num_gsims = num_gsims_by_trt.get(block[0].trt_model_id, 0)
        # one float per (site, level, imt, gsim) comes back
        floats = info['n_sites'] * info['n_levels'] * info['n_imts'] * \
            num_gsims
        back_bytes += floats * 8  # 8 bytes per float
        args = (block, sitecol, gsims_assoc, PerformanceMonitor(''))
        forward_bytes += sum(len(p) for p in parallel.pickle_sequence(args))
        block_info.append((len(block), block.weight))
    return numpy.array(block_info, block_dt), forward_bytes, back_bytes
Example #12
0
 def get_oqparam(self):
     """
     Rebuild the OqParam object associated to this calculation by
     reading its datastore from the database directory.
     """
     ds = datastore.read(
         self.id, datadir=os.path.dirname(self.ds_calc_dir))
     return OqParam.from_(ds.attrs)
    def test_missing_maximum_distance(self):
        # maximum_distance is mandatory: a missing value must be rejected
        with self.assertRaises(ValueError):
            OqParam(
                calculation_mode='classical_risk', inputs=dict(site_model=''),
                hazard_calculation_id=None, hazard_output_id=None,
                sites='0.1 0.2').validate()

        # a maximum_distance of zero is invalid too
        with self.assertRaises(ValueError):
            OqParam(
                calculation_mode='classical_risk', inputs=dict(site_model=''),
                hazard_calculation_id=None, hazard_output_id=None,
                sites='0.1 0.2', maximum_distance='0').validate()

        # a per-TRT dictionary naming a TRT unknown to the gsims must be
        # flagged by is_valid_maximum_distance
        oq = OqParam(
            calculation_mode='event_based', inputs=GST,
            intensity_measure_types_and_levels="{'PGA': [0.1, 0.2]}",
            intensity_measure_types='PGV', sites='0.1 0.2',
            reference_vs30_value='200',
            maximum_distance='{"wrong TRT": 200}')
        oq.inputs['source_model_logic_tree'] = 'something'

        oq._gsims_by_trt = {'Active Shallow Crust': []}
        self.assertFalse(oq.is_valid_maximum_distance())
        self.assertIn('setting the maximum_distance for wrong TRT', oq.error)

        # a TRT with no distance and no default is an error as well
        oq._gsims_by_trt = {'Active Shallow Crust': [],
                            'Stable Continental Crust': []}
        oq.maximum_distance = {'Active Shallow Crust': 200}
        self.assertFalse(oq.is_valid_maximum_distance())
        self.assertEqual('missing distance for Stable Continental Crust '
                         'and no default', oq.error)
Example #14
0
def view_params(token, dstore):
    """
    Return a reST table with the most relevant job parameters read
    from the datastore attributes.
    """
    oq = OqParam.from_(dstore.attrs)
    names = ('calculation_mode', 'number_of_logic_tree_samples',
             'maximum_distance', 'investigation_time',
             'ses_per_logic_tree_path', 'truncation_level',
             'rupture_mesh_spacing', 'complex_fault_mesh_spacing',
             'width_of_mfd_bin', 'area_source_discretization',
             'random_seed', 'master_seed', 'concurrent_tasks')
    rows = [(name, getattr(oq, name)) for name in names]
    return rst_table(rows)
Example #15
0
def view_inputs(token, dstore):
    """
    Return a reST table (Name, File) with the input files of the
    calculation.
    """
    inputs = OqParam.from_(dstore.attrs).inputs.copy()
    source_models = []
    if 'source' in inputs:  # missing in scenario calculations
        source_models = [('source', fname) for fname in inputs.pop('source')]
    links = build_links(list(inputs.items()) + source_models)
    return rst_table(links, header=['Name', 'File'])
def view_gmfs_total_size(name, dstore):
    """
    :returns:
        the total size of the GMFs as a human readable string; it assumes
        4 bytes for the rupture index, 4 bytes for the realization index
        and 8 bytes for each float (there are num_imts floats per gmf)
    """
    num_imts = len(OqParam.from_(dstore.attrs).imtls)
    nbytes = sum(8 * counts['gmf'] * (num_imts + 1)
                 for counts in dstore['counts_per_rlz'])
    return humansize(nbytes)
Example #17
0
def get_oqparam(job_ini, pkg=None, calculators=None, hc_id=None, validate=1,
                **kw):
    """
    Build an :class:`openquake.commonlib.oqvalidation.OqParam` instance
    from an INI-style configuration file or a dictionary of parameters.

    :param job_ini:
        path to a config file/archive or dictionary of parameters
    :param pkg:
        Python package in which to look for the configuration file (optional)
    :param calculators:
        optional sequence of calculator names restricting the admissible
        values of `calculation_mode`
    :param hc_id:
        not None only when called from a post calculation
    :param validate:
        flag, true by default; if set, the parameters are validated
    :param kw:
        string-valued keyword arguments overriding the job.ini parameters
    :returns:
        an OqParam instance with validated and casted parameters, plus an
        'inputs' subdictionary mapping parameter names to the absolute
        paths of the files referenced in the job.ini
    """
    # deferred import to break a circular dependency
    from openquake.calculators import base

    OqParam.calculation_mode.validator.choices = tuple(
        calculators or base.calculators)
    if isinstance(job_ini, dict):
        params = job_ini
    else:
        basedir = os.path.dirname(pkg.__file__) if pkg else ''
        params = get_params([os.path.join(basedir, job_ini)])
    if hc_id:
        params.update(hazard_calculation_id=str(hc_id))
    params.update(kw)
    oqparam = OqParam(**params)
    if validate:
        oqparam.validate()
    return oqparam
Example #18
0
def avglosses_data_transfer(token, dstore):
    """
    Estimate how many bytes of average losses are sent from the workers
    to the controller node in a risk calculation.
    """
    oq = OqParam.from_(dstore.attrs)
    num_assets = len(dstore['assetcol'])
    num_rlzs = len(dstore['rlzs_assoc'].realizations)
    num_lts = len(dstore['riskmodel'].loss_types)
    tasks = oq.concurrent_tasks
    # two 8-byte floats per (asset, rlz, loss_type): loss and ins_loss
    size_bytes = num_assets * num_rlzs * num_lts * 2 * 8 * tasks
    return ('%d asset(s) x %d realization(s) x %d loss type(s) x 2 losses x '
            '8 bytes x %d tasks = %s' % (num_assets, num_rlzs, num_lts,
                                         tasks, humansize(size_bytes)))
 def test_disaggregation(self):
     # a disaggregation without poes_disagg nor iml_disagg is invalid
     with self.assertRaises(ValueError) as ctx:
         OqParam(
             calculation_mode='disaggregation',
             inputs=fakeinputs,
             gsim='BooreAtkinson2008',
             sites='0.1 0.2',
             poes='0.2',
             maximum_distance='400',
             reference_vs30_value='200',
             uniform_hazard_spectra='1',
             intensity_measure_types_and_levels="{'PGV': [0.1, 0.2, 0.3]}")
     self.assertIn("poes_disagg or iml_disagg must be set",
                   str(ctx.exception))
 def test_invalid_export_dir(self):
     # a non-existing export_dir must raise a clear error at
     # validation time
     # FIXME: apparently this fails only when --with-doctest is set
     raise unittest.SkipTest
     with self.assertRaises(ValueError) as ctx:
         OqParam(
             calculation_mode='event_based', inputs=GST,
             sites='0.1 0.2',
             maximum_distance='400',
             reference_vs30_value='200',
             intensity_measure_types='PGA',
             export_dir='/non/existing',
         ).validate()
     self.assertIn('The `export_dir` parameter must refer to a '
                   'directory', str(ctx.exception))
 def test_unknown_parameter(self):
     # an unknown parameter in the job.ini must produce a warning,
     # not an error
     with mock.patch('logging.warn') as w:
         OqParam(
             calculation_mode='classical',
             inputs=GST,
             hazard_calculation_id=None,
             hazard_output_id=None,
             sites='0.1 0.2',
             maximum_distance='10',
             reference_vs30_value='200',
             rupture_mesh_spacing='1.5',
             intensity_measure_types_and_levels="{'PGA': [0.1, 0.2]}",
             not_existing_param='XXX',
             export_dir=TMP).validate()
     self.assertEqual(
         w.call_args[0][0],
         "The parameter 'not_existing_param' is unknown, ignoring")
 def test_geometry(self):
     # region and sites are mutually exclusive
     with self.assertRaises(ValueError):
         OqParam(
             calculation_mode='classical_risk',
             hazard_calculation_id=None,
             hazard_output_id=None,
             inputs=fakeinputs,
             maximum_distance='10',
             region='-78.182 15.615, -78.152 15.615, -78.152 15.565, '
             '-78.182 15.565',
             region_grid_spacing='5',
             sites='0.1 0.2',
         ).validate()
 def test_create_export_dir(self):
     # validate() must create a missing export_dir
     # FIXME: apparently this fails only when --with-doctest is set
     raise unittest.SkipTest
     EDIR = os.path.join(TMP, 'nonexisting')
     OqParam(
         calculation_mode='event_based',
         sites='0.1 0.2',
         reference_vs30_value='200',
         intensity_measure_types='PGA',
         inputs=GST,
         maximum_distance='400',
         export_dir=EDIR,
     ).validate()
     self.assertTrue(os.path.exists(EDIR))
 def test_required_site_param(self):
     # a GSIM requiring a site parameter that is not provided must
     # raise a helpful error
     with self.assertRaises(ValueError) as ctx:
         OqParam(
             calculation_mode='scenario',
             gsim='AbrahamsonSilva2008',
             inputs=fakeinputs,
             sites='0.1 0.2',
             maximum_distance='400',
             intensity_measure_types='PGA',
             reference_vs30_value='760',
         ).validate()
     self.assertIn(
         "Please set a value for 'reference_depth_to_1pt0km_per_sec', this "
         "is required by the GSIM AbrahamsonSilva2008", str(ctx.exception))
 def test_event_based_risk(self):
     # conditional_loss_poes without asset_loss_table is an error
     with self.assertRaises(InvalidFile) as ctx:
         OqParam(
             calculation_mode='event_based_risk',
             inputs=fakeinputs,
             gsim='BooreAtkinson2008',
             sites='0.1 0.2',
             poes='0.2',
             maximum_distance='400',
             reference_vs30_value='200',
             conditional_loss_poes='0.02',
             intensity_measure_types_and_levels="{'PGV': [0.1, 0.2, 0.3]}")
     self.assertIn("asset_loss_table is not set, probably you want to "
                   "remove conditional_loss_poes", str(ctx.exception))
Example #26
0
def get_oqparam(job_ini, pkg=None, calculators=None, hc_id=None, validate=1):
    """
    Build a (possibly validated) OqParam instance from an INI-style
    config file or a dictionary of parameters.

    :param job_ini:
        path to a config file/archive or dictionary of parameters
    :param pkg:
        Python package in which to look for the configuration file (optional)
    :param calculators:
        optional sequence of calculator names restricting the admissible
        values of `calculation_mode`
    :param hc_id:
        not None only when called from a post calculation
    :param validate:
        flag, true by default; if set, the parameters are validated
    :returns:
        an :class:`openquake.commonlib.oqvalidation.OqParam` instance
        with validated/casted parameters and an 'inputs' subdictionary
        with the absolute paths of the files referenced in the job.ini,
        keyed by parameter name
    """
    # deferred import to break a circular dependency
    from openquake.calculators import base

    OqParam.calculation_mode.validator.choices = tuple(
        calculators or base.calculators)
    if not isinstance(job_ini, dict):
        basedir = os.path.dirname(pkg.__file__) if pkg else ''
        job_ini = get_params([os.path.join(basedir, job_ini)])
    if hc_id:
        job_ini.update(hazard_calculation_id=str(hc_id))
    oqparam = OqParam(**job_ini)
    if validate:
        oqparam.validate()
    # global side effect: propagate the minimum magnitude to the sources
    BaseSeismicSource.min_mag = oqparam.minimum_magnitude
    return oqparam
Example #27
0
def get_oqparam(job_ini, pkg=None, calculators=None, hc_id=None, validate=1):
    """
    Parse a dictionary of parameters from an INI-style config file.

    :param job_ini:
        Path to configuration file/archive or dictionary of parameters
    :param pkg:
        Python package where to find the configuration file (optional)
    :param calculators:
        Sequence of calculator names (optional) used to restrict the
        valid choices for `calculation_mode`
    :param hc_id:
        Not None only when called from a post calculation
    :param validate:
        Flag. By default it is true and the parameters are validated
    :returns:
        An :class:`openquake.commonlib.oqvalidation.OqParam` instance
        containing the validate and casted parameters/values parsed from
        the job.ini file as well as a subdictionary 'inputs' containing
        absolute paths to all of the files referenced in the job.ini, keyed by
        the parameter name.
    """
    # UGLY: this is here to avoid circular imports
    from openquake.calculators import base

    OqParam.calculation_mode.validator.choices = tuple(calculators
                                                       or base.calculators)
    if not isinstance(job_ini, dict):
        basedir = os.path.dirname(pkg.__file__) if pkg else ''
        job_ini = get_params(os.path.join(basedir, job_ini))
    if hc_id:
        job_ini.update(hazard_calculation_id=str(hc_id))
    # renamed from `re`, which shadowed the stdlib `re` module
    reduce_factor = os.environ.get('OQ_REDUCE')  # debugging facility
    if reduce_factor:
        # reduce the imtls to the first imt
        # reduce the logic tree to one random realization
        # reduce the sites by a factor of `reduce_factor`
        # reduce the ses by a factor of `reduce_factor`
        # set save_disk_space = true
        os.environ['OQ_SAMPLE_SITES'] = str(1 / float(reduce_factor))
        job_ini['number_of_logic_tree_samples'] = 1
        ses = job_ini.get('ses_per_logic_tree_path')
        if ses:
            ses = str(int(numpy.ceil(int(ses) / float(reduce_factor))))
            job_ini['ses_per_logic_tree_path'] = ses
        imtls = job_ini.get('intensity_measure_types_and_levels')
        if imtls:
            imtls = valid.intensity_measure_types_and_levels(imtls)
            imt = next(iter(imtls))
            job_ini['intensity_measure_types_and_levels'] = repr(
                {imt: imtls[imt]})
        job_ini['save_disk_space'] = True
    oqparam = OqParam(**job_ini)
    if validate and '_job_id' not in job_ini:
        oqparam.check_source_model()
        oqparam.validate()
    return oqparam
Example #28
0
 def test_uniform_hazard_spectra(self):
     # uniform_hazard_spectra requires SA(...) or PGA among the IMTs
     with self.assertRaises(ValueError) as ctx:
         OqParam(
             calculation_mode='classical',
             gsim='BooreAtkinson2008',
             sites='0.1 0.2',
             poes='0.2',
             maximum_distance='400',
             reference_vs30_value='200',
             uniform_hazard_spectra='1',
             intensity_measure_types_and_levels="{'PGV': [0.1, 0.2, 0.3]}",
         ).set_risk_imtls({})
     self.assertIn(
         "The `uniform_hazard_spectra` can be True only if "
         "the IMT set contains SA(...) or PGA", str(ctx.exception))
Example #29
0
def export_loss_map_xml_geojson(ekey, dstore):
    """
    Export the loss maps in XML or GeoJSON format, one file per
    loss type, insured/ground flag and realization.

    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    :returns: the sorted list of exported file names
    """
    oq = OqParam.from_(dstore.attrs)
    # map each loss type to the measurement unit of its cost type
    unit_by_lt = {
        riskmodels.cost_type_to_loss_type(ct['name']): ct['unit']
        for ct in dstore['cost_types']
    }
    unit_by_lt['fatalities'] = 'people'
    rlzs = dstore['rlzs_assoc'].realizations
    loss_map = dstore[ekey[0]]
    riskmodel = dstore['riskmodel']
    assetcol = dstore['assetcol']
    R = len(rlzs)  # number of realizations
    sitemesh = dstore['sitemesh']
    L = len(riskmodel.loss_types)  # number of loss types
    fnames = []
    export_type = ekey[1]
    writercls = (risk_writers.LossMapGeoJSONWriter if export_type == 'geojson'
                 else risk_writers.LossMapXMLWriter)
    loss_types = riskmodel.loss_types
    for lt in loss_types:
        alosses = loss_map[lt]
        # ins=0 -> ground losses; ins=1 -> insured losses (when enabled)
        for ins in range(oq.insured_losses + 1):
            means = alosses['mean' + ('_ins' if ins else '')]
            stddevs = alosses['stddev' + ('_ins' if ins else '')]
            for r in range(R):
                rlz = rlzs[r]
                unit = unit_by_lt[lt]
                # no suffix needed with a single (loss type, realization)
                suffix = '' if L == 1 and R == 1 else '-gsimltp_%s_%s' % (
                    rlz.uid, lt)
                root = ekey[0][:-5]  # strip -rlzs
                name = '%s%s%s.%s' % (root, suffix, '_ins' if ins else '',
                                      ekey[1])
                fname = dstore.export_path(name)
                data = []
                # one LossMap node per asset, located at the asset site
                for ass, mean, stddev in zip(assetcol, means[:, r],
                                             stddevs[:, r]):
                    loc = Location(sitemesh[ass['site_id']])
                    lm = LossMap(loc, ass['asset_ref'], mean, stddev)
                    data.append(lm)
                writer = writercls(fname,
                                   oq.investigation_time,
                                   poe=None,
                                   loss_type=lt,
                                   gsim_tree_path=rlz.uid,
                                   unit=unit)
                writer.serialize(data)
                fnames.append(fname)
    return sorted(fnames)
Example #30
0
def get_oq(text):
    """
    Build an OqParam instance from a configuration string. For instance:

    >>> get_oq('maximum_distance=200')
    <OqParam calculation_mode='classical', collapse_level=0, inputs={'job_ini': '<in-memory>'}, maximum_distance={'default': 200}, risk_investigation_time=None>
    """
    # UGLY: this is here to avoid circular imports
    from openquake.calculators import base
    OqParam.calculation_mode.validator.choices = tuple(base.calculators)
    parser = configparser.ConfigParser()
    parser.read_string('[general]\ncalculation_mode=classical\n' + text)
    params = dict(parser['general'])
    params['inputs'] = dict(job_ini='<in-memory>')
    return OqParam(**params)
Example #31
0
def export_avg_losses_stats(ekey, dstore):
    """
    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    """
    oq = OqParam.from_(dstore.attrs)
    avg_losses = dstore[ekey[0]].value
    # one column per statistic: the mean plus one per quantile
    stats = ['mean'] + ['quantile-%s' % q for q in oq.quantile_loss_curves]
    assets = get_assets(dstore)
    writer = writers.CsvWriter(fmt='%10.6E')
    for i, stat in enumerate(stats):
        dest = dstore.export_path('avg_losses-%s.csv' % stat)
        writer.save(compose_arrays(assets, avg_losses[:, i]), dest)
    return writer.getsaved()
 def test_duplicated_levels(self):
     # duplicated intensity levels must be rejected at validation time
     with self.assertRaises(ValueError) as ctx:
         OqParam(
             calculation_mode='classical', inputs={},
             sites='0.1 0.2',
             maximum_distance='400',
             reference_vs30_type='measured',
             reference_vs30_value='200',
             reference_depth_to_1pt0km_per_sec='150',
             reference_depth_to_2pt5km_per_sec='100',
             intensity_measure_types_and_levels='{"PGA": [0.4, 0.4, 0.6]}',
         ).validate()
     self.assertEqual(
         str(ctx.exception),
         'Found duplicated levels for PGA: [0.4, 0.4, 0.6]: could not '
         'convert to intensity_measure_types_and_levels: '
         'intensity_measure_types_and_levels={"PGA": [0.4, 0.4, 0.6]}')
Example #33
0
    def read_exposure_sitecol(self):
        """
        Read the exposure (if any) and then the site collection, possibly
        extracted from the exposure.
        """
        logging.info('Reading the site collection')
        with self.monitor('reading site collection', autoflush=True):
            haz_sitecol = readinput.get_site_collection(self.oqparam)
        inputs = self.oqparam.inputs
        if 'exposure' in inputs:
            logging.info('Reading the exposure')
            with self.monitor('reading exposure', autoflush=True):
                self.exposure = readinput.get_exposure(self.oqparam)
                self.sitecol, self.assets_by_site = (
                    readinput.get_sitecol_assets(self.oqparam, self.exposure))
                if len(self.exposure.cost_types):
                    self.cost_types = self.exposure.cost_types
                # fixed-width byte strings, at most 100 chars per taxonomy
                self.taxonomies = numpy.array(
                    sorted(self.exposure.taxonomies), '|S100')
            num_assets = self.count_assets()
            if self.datastore.parent:
                # a precalculation provides the hazard site collection
                haz_sitecol = self.datastore.parent['sitecol']
            if haz_sitecol is not None and haz_sitecol != self.sitecol:
                # associate the assets to the hazard sites; assets that
                # cannot be associated are discarded and counted below
                with self.monitor('assoc_assets_sites'):
                    self.sitecol, self.assets_by_site = \
                        self.assoc_assets_sites(haz_sitecol.complete)
                ok_assets = self.count_assets()
                num_sites = len(self.sitecol)
                logging.warn('Associated %d assets to %d sites, %d discarded',
                             ok_assets, num_sites, num_assets - ok_assets)
        elif (self.datastore.parent and 'exposure' in
              OqParam.from_(self.datastore.parent.attrs).inputs):
            logging.info('Re-using the already imported exposure')
        else:  # no exposure
            self.sitecol = haz_sitecol

        # save mesh and asset collection
        self.save_mesh()
        if hasattr(self, 'assets_by_site'):
            self.assetcol = riskinput.build_asset_collection(
                self.assets_by_site, self.oqparam.time_event)
            # specific_assets must be a subset of the exposure assets
            spec = set(self.oqparam.specific_assets)
            unknown = spec - set(self.assetcol['asset_ref'])
            if unknown:
                raise ValueError('The specific asset(s) %s are not in the '
                                 'exposure' % ', '.join(unknown))
Example #34
0
def export_avg_losses_stats(ekey, dstore):
    """
    Export one CSV file of average losses per statistic
    (mean plus each configured quantile).

    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    :returns: list of the paths of the written files
    """
    oq = OqParam.from_(dstore.attrs)
    avg_losses = dstore[ekey[0]]
    assets = get_assets(dstore)
    stats = ['mean']
    stats.extend('quantile-%s' % q for q in oq.quantile_loss_curves)
    paths = []
    for idx, stat in enumerate(stats):
        # column idx of the array corresponds to the idx-th statistic
        dest = dstore.export_path('avg_losses-%s.csv' % stat)
        writers.write_csv(
            dest, compose_arrays(assets, avg_losses[:, idx]), fmt='%10.6E')
        paths.append(dest)
    return paths
Exemple #35
0
def export_lossmaps_xml_geojson(ekey, dstore):
    """
    Export mean/stddev loss maps, one file per (loss type, realization),
    in XML or GeoJSON depending on the export format in ekey[1].

    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    :returns: sorted list of the written file names
    """
    oq = OqParam.from_(dstore.attrs)
    # map each loss type to its measurement unit
    unit_by_lt = {}
    for ct in dstore['cost_types']:
        unit_by_lt[riskmodels.cost_type_to_loss_type(ct['name'])] = ct['unit']
    unit_by_lt['fatalities'] = 'people'
    rlzs = dstore['rlzs_assoc'].realizations
    avglosses = dstore[ekey[0]]
    riskmodel = dstore['riskmodel']
    assetcol = dstore['assetcol']
    sitemesh = dstore['sitemesh']
    L = len(riskmodel.loss_types)
    N, R = avglosses.shape
    if ekey[1] == 'geojson':
        writercls = risk_writers.LossMapGeoJSONWriter
    else:
        writercls = risk_writers.LossMapXMLWriter
    fnames = []
    for l, lt in enumerate(riskmodel.loss_types):
        lt_losses = avglosses[lt]
        for r in range(R):
            rlz = rlzs[r]
            # no suffix when there is a single (loss type, realization)
            suffix = ('' if L == 1 and R == 1
                      else '-gsimltp_%s_%s' % (rlz.uid, lt))
            fname = dstore.export_path(
                '%s%s.%s' % (ekey[0], suffix, ekey[1]))
            data = [LossMap(Location(sitemesh[ass['site_id']]),
                            ass['asset_ref'], stat['mean'], stat['stddev'])
                    for ass, stat in zip(assetcol, lt_losses[:, r])]
            writer = writercls(fname,
                               oq.investigation_time,
                               poe=None,
                               loss_type=lt,
                               gsim_tree_path=None,
                               unit=unit_by_lt[lt],
                               loss_category=None)
            # TODO: replace the category with the exposure category
            writer.serialize(data)
            fnames.append(fname)
    return sorted(fnames)
Exemple #36
0
    def test_missing_maximum_distance(self):
        # a classical calculation without maximum_distance must be rejected
        with self.assertRaises(ValueError):
            OqParam(
                calculation_mode='classical', inputs=fakeinputs,
                sites='0.1 0.2').validate()

        # a dict-valued maximum_distance keyed on an unknown tectonic
        # region type must be flagged as invalid
        oq = OqParam(
            calculation_mode='event_based', inputs=GST,
            intensity_measure_types_and_levels="{'PGA': [0.1, 0.2]}",
            intensity_measure_types='PGV', sites='0.1 0.2',
            reference_vs30_value='200',
            maximum_distance='{"wrong TRT": 200}')
        oq.inputs['source_model_logic_tree'] = 'something'

        oq._gsims_by_trt = {'Active Shallow Crust': []}
        self.assertFalse(oq.is_valid_maximum_distance())
        self.assertIn('setting the maximum_distance for wrong TRT', oq.error)

        # every TRT in the gsim logic tree needs a distance (or a default);
        # here 'Stable Continental Crust' has neither
        oq._gsims_by_trt = {'Active Shallow Crust': [],
                            'Stable Continental Crust': []}
        oq.maximum_distance = {'Active Shallow Crust': 200}
        self.assertFalse(oq.is_valid_maximum_distance())
        self.assertEqual('missing distance for Stable Continental Crust '
                         'and no default', oq.error)
Exemple #37
0
def _print_info(dstore, filtersources=True, weightsources=True):
    """Print a summary of the composite source model, the realization
    association and (optionally) per-source information."""
    assoc = dstore["rlzs_assoc"]
    oqparam = OqParam.from_(dstore.attrs)
    csm = dstore["composite_source_model"]
    sitecol = dstore["sitecol"]
    print(csm.get_info())
    print("See https://github.com/gem/oq-risklib/blob/master/doc/"
          "effective-realizations.rst for an explanation")
    print(assoc)
    if filtersources or weightsources:
        [info] = readinput.get_job_info(oqparam, csm, sitecol)
        info["n_sources"] = csm.get_num_sources()
        # 8 bytes per float in the (sites x levels x imts x rlzs) matrix
        nbytes = (info["n_sites"] * info["n_levels"] *
                  info["n_imts"] * len(assoc) * 8)
        for field in info.dtype.fields:
            if field == "input_weight" and not weightsources:
                continue  # suppressed by the flag
            print(field, info[field])
        print("curve_matrix_size", humansize(nbytes))
    if "num_ruptures" in dstore:
        print(datastore.view("rupture_collections", dstore))
Exemple #38
0
def export_hcurves_csv(ekey, dstore):
    """
    Write one .csv file per kind of hazard curve found in the datastore.

    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    :returns: sorted list of the written file names
    """
    oq = OqParam.from_(dstore.attrs)
    rlzs_assoc = dstore['rlzs_assoc']
    sitecol = dstore['sitecol']
    key = ekey[0]
    fnames = []
    for kind, curves in dstore[key].items():
        fname = hazard_curve_name(dstore, ekey, kind, rlzs_assoc,
                                  oq.number_of_logic_tree_samples)
        if key == 'uhs':
            # uniform hazard spectra use a dedicated exporter
            export_uhs_csv(ekey, fname, sitecol, curves)
        else:
            export_hazard_curves_csv(ekey, fname, sitecol, curves, oq.imtls)
        fnames.append(fname)
    return sorted(fnames)
Exemple #39
0
def get_hcurves_and_means(dstore):
    """
    Extract hcurves from the datastore and compute their means.

    :returns: curves_by_rlz, mean_curves
    """
    oq = OqParam.from_(dstore.attrs)
    hcurves = dstore['hcurves']
    realizations = dstore['rlzs_assoc'].realizations
    curves_by_rlz = dict(
        (rlz, hcurves['rlz-%03d' % rlz.ordinal]) for rlz in realizations)
    weights = [rlz.weight for rlz in realizations]
    n_sites = len(dstore['sitemesh'])
    mean_curves = zero_curves(n_sites, oq.imtls)
    # NOTE(review): the curves are passed in sorted-realization order;
    # presumably that order matches the weights order -- verify
    ordered_rlzs = sorted(curves_by_rlz)
    for imt in oq.imtls:
        mean_curves[imt] = scientific.mean_curve(
            [curves_by_rlz[rlz][imt] for rlz in ordered_rlzs], weights)
    return curves_by_rlz, mean_curves
Exemple #40
0
def export_agg_curve_stats(ekey, dstore):
    """
    Export aggregate loss curve statistics as XML, one file per
    (statistic, loss type, insured flag) combination.

    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    :returns: sorted list of the written file names
    """
    oq = OqParam.from_(dstore.attrs)
    quantiles = oq.quantile_loss_curves
    agg_curve = dstore[ekey[0]]
    fnames = []
    for ct in dstore['cost_types']:
        loss_type = ct['name']
        array = agg_curve[loss_type].value
        # second pass for insured losses, when enabled
        for ins in range(oq.insured_losses + 1):
            ins_tag = '_ins' if ins else ''
            for i, sname, qvalue in _gen_idx_sname_qvalue(quantiles):
                dest = dstore.export_path('agg_curve-%s-%s%s.%s' % (
                    sname, loss_type, ins_tag, ekey[1]))
                rec = array[i, ins]
                writer = risk_writers.AggregateLossCurveXMLWriter(
                    dest, oq.investigation_time, loss_type,
                    statistics=sname, quantile_value=qvalue,
                    unit=ct['unit'])
                writer.serialize(
                    AggCurve(rec['losses'], rec['poes'], rec['avg'], None))
                fnames.append(dest)
    return sorted(fnames)
Exemple #41
0
def export_disagg_xml(ekey, dstore):
    """
    Export the disaggregation matrices as NRML XML, one file per key
    in the 'disagg' group of the datastore.

    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    :returns: sorted list of the written file names
    """
    oq = OqParam.from_(dstore.attrs)
    rlzs = dstore['rlzs_assoc'].realizations
    group = dstore['disagg']
    fnames = []
    for key in group:
        attrs = group[key].attrs
        # NOTE(review): the matrix is stored pickled; this assumes the
        # datastore is trusted (pickle.loads on untrusted data is unsafe)
        matrix = pickle.loads(group[key].value)
        rlz = rlzs[attrs['rlzi']]
        imt, sa_period, sa_damping = from_string(attrs['imt'])
        lon, lat = attrs['location']
        fname = dstore.export_path(key + '.xml')
        # TODO: add poe=poe below
        writer = hazard_writers.DisaggXMLWriter(
            fname,
            investigation_time=oq.investigation_time,
            imt=imt,
            smlt_path='_'.join(rlz.sm_lt_path),
            gsimlt_path=rlz.gsim_rlz.uid,
            lon=lon,
            lat=lat,
            sa_period=sa_period,
            sa_damping=sa_damping,
            mag_bin_edges=attrs['mag_bin_edges'],
            dist_bin_edges=attrs['dist_bin_edges'],
            lon_bin_edges=attrs['lon_bin_edges'],
            lat_bin_edges=attrs['lat_bin_edges'],
            eps_bin_edges=attrs['eps_bin_edges'],
            tectonic_region_types=attrs['trts'],
        )
        writer.serialize([
            DisaggMatrix(attrs['poe'], attrs['iml'], labels, matrix[i])
            for i, labels in enumerate(disagg.pmf_map)])
        fnames.append(fname)
    return sorted(fnames)
Exemple #42
0
def export_hcurves_xml_json(ekey, dstore):
    """
    Export the hazard curves as XML or GeoJSON, one file per (kind, IMT).

    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    :returns: sorted list of the written file names
    """
    export_type = ekey[1]
    oq = OqParam.from_(dstore.attrs)
    sitemesh = dstore['sitemesh'].value
    rlzs_assoc = dstore['rlzs_assoc']
    hcurves = dstore[ekey[0]]
    if export_type == 'geojson':
        writercls = hazard_writers.HazardCurveGeoJSONWriter
    else:
        writercls = hazard_writers.HazardCurveXMLWriter
    # length of the '.<ext>' suffix to strip from the base name
    len_ext = len(export_type) + 1
    fnames = []
    for kind in hcurves:
        smlt_path = gsimlt_path = ''
        if kind.startswith('rlz-'):
            # a per-realization kind carries the logic tree paths
            rlz = rlzs_assoc.realizations[int(kind[4:])]
            smlt_path = '_'.join(rlz.sm_lt_path)
            gsimlt_path = rlz.gsim_rlz.uid
        curves = hcurves[kind]
        name = hazard_curve_name(dstore, ekey, kind, rlzs_assoc,
                                 oq.number_of_logic_tree_samples)
        for imt in oq.imtls:
            imtype, sa_period, sa_damping = from_string(imt)
            fname = name[:-len_ext] + '-' + imt + '.' + export_type
            data = [HazardCurve(Location(site), poes[imt])
                    for site, poes in zip(sitemesh, curves)]
            writer = writercls(fname,
                               investigation_time=oq.investigation_time,
                               imls=oq.imtls[imt],
                               imt=imtype,
                               sa_period=sa_period,
                               sa_damping=sa_damping,
                               smlt_path=smlt_path,
                               gsimlt_path=gsimlt_path)
            writer.serialize(data)
            fnames.append(fname)
    return sorted(fnames)
Exemple #43
0
def export_hcurves_csv(ekey, dstore):
    """
    Exports the hazard curves into several .csv files, one per kind.

    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    :returns: sorted list of the written file names
    """
    key, fmt = ekey
    oq = OqParam.from_(dstore.attrs)
    rlzs_assoc = dstore['rlzs_assoc']
    sitecol = dstore['sitecol']
    fnames = []
    group = dstore[key]
    for kind in group:
        hcurves = group[kind]
        fname = hazard_curve_name(
            dstore, ekey, kind, rlzs_assoc,
            oq.number_of_logic_tree_samples)
        if key == 'uhs':
            # uniform hazard spectra have their own CSV layout
            export_uhs_csv(ekey, fname, sitecol, hcurves)
        else:
            export_hazard_curves_csv(ekey, fname, sitecol, hcurves, oq.imtls)
        fnames.append(fname)
    return sorted(fnames)
Exemple #44
0
def export_lossmaps_xml_geojson(ekey, dstore):
    """
    Export per-asset mean/stddev loss maps as XML or GeoJSON, one file
    per (loss type, realization).

    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    :returns: sorted list of the written file names
    """
    oq = OqParam.from_(dstore.attrs)
    unit_by_lt = dict(
        (riskmodels.cost_type_to_loss_type(ct['name']), ct['unit'])
        for ct in dstore['cost_types'])
    unit_by_lt['fatalities'] = 'people'
    rlzs = dstore['rlzs_assoc'].realizations
    avglosses = dstore[ekey[0]]
    loss_types = dstore['riskmodel'].loss_types
    assetcol = dstore['assetcol']
    sitemesh = dstore['sitemesh']
    L = len(loss_types)
    N, R = avglosses.shape
    writercls = (risk_writers.LossMapGeoJSONWriter
                 if ekey[1] == 'geojson' else
                 risk_writers.LossMapXMLWriter)
    fnames = []
    for lt in loss_types:
        stats_by_rlz = avglosses[lt]
        for r in range(R):
            rlz = rlzs[r]
            # suffix only needed when there are multiple outputs
            suffix = ('-gsimltp_%s_%s' % (rlz.uid, lt)
                      if L > 1 or R > 1 else '')
            fname = dstore.export_path(
                '%s%s.%s' % (ekey[0], suffix, ekey[1]))
            data = []
            for ass, stat in zip(assetcol, stats_by_rlz[:, r]):
                data.append(
                    LossMap(Location(sitemesh[ass['site_id']]),
                            ass['asset_ref'], stat['mean'], stat['stddev']))
            # TODO: replace the category with the exposure category
            writercls(
                fname, oq.investigation_time, poe=None, loss_type=lt,
                gsim_tree_path=None, unit=unit_by_lt[lt],
                loss_category=None).serialize(data)
            fnames.append(fname)
    return sorted(fnames)
Exemple #45
0
def _print_info(dstore, filtersources=True, weightsources=True):
    """Print a summary of the composite source model stored in the
    datastore, plus optional per-source statistics."""
    assoc = dstore['rlzs_assoc']
    oqparam = OqParam.from_(dstore.attrs)
    csm = dstore['composite_source_model']
    sitecol = dstore['sitecol']
    print(csm.get_info())
    print('See https://github.com/gem/oq-risklib/blob/master/doc/'
          'effective-realizations.rst for an explanation')
    print(assoc)
    if filtersources or weightsources:
        [info] = readinput.get_job_info(oqparam, csm, sitecol)
        info['n_sources'] = csm.get_num_sources()
        # matrix of (sites x levels x imts x rlzs) doubles, 8 bytes each
        size = info['n_sites'] * info['n_levels'] * info['n_imts']
        size *= len(assoc) * 8
        for k in info.dtype.fields:
            # input_weight is printed only when weightsources is set
            if weightsources or k != 'input_weight':
                print(k, info[k])
        print('curve_matrix_size', humansize(size))
    if 'num_ruptures' in dstore:
        print(datastore.view('rupture_collections', dstore))
Exemple #46
0
def export_hmaps_xml_json(ekey, dstore):
    """
    Export the hazard maps as XML or GeoJSON, one file per
    (kind, IMT, poe) combination.

    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    :returns: sorted list of the written file names
    """
    export_type = ekey[1]
    oq = OqParam.from_(dstore.attrs)
    sitemesh = dstore['sitemesh'].value
    rlzs_assoc = dstore['rlzs_assoc']
    hmaps = dstore[ekey[0]]
    writercls = (hazard_writers.HazardMapGeoJSONWriter
                 if export_type == 'geojson'
                 else hazard_writers.HazardMapXMLWriter)
    fnames = []
    for kind in hmaps:
        if kind.startswith('rlz-'):
            # per-realization kind: recover the logic tree paths
            rlz = rlzs_assoc.realizations[int(kind[4:])]
            smlt_path = '_'.join(rlz.sm_lt_path)
            gsimlt_path = rlz.gsim_rlz.uid
        else:
            smlt_path = gsimlt_path = ''
        maps = hmaps[kind]
        for imt, poe in itertools.product(oq.imtls, oq.poes):
            fname = hazard_curve_name(
                dstore, ekey, '%s-%s-%s' % (kind, poe, imt), rlzs_assoc,
                oq.number_of_logic_tree_samples)
            data = [HazardMap(site[0], site[1], hmap['%s~%s' % (imt, poe)])
                    for site, hmap in zip(sitemesh, maps)]
            writer = writercls(fname,
                               investigation_time=oq.investigation_time,
                               imt=imt,
                               poe=poe,
                               smlt_path=smlt_path,
                               gsimlt_path=gsimlt_path)
            writer.serialize(data)
            fnames.append(fname)
    return sorted(fnames)
Exemple #47
0
def export_lossmaps_xml(ekey, dstore):
    """
    Export the per-asset average loss maps (mean and stddev) to NRML
    XML files, one per (loss type, realization).

    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    :returns: sorted list of the written file names
    """
    oq = OqParam.from_(dstore.attrs)
    unit_by_lt = {}
    for ct in dstore['cost_types']:
        unit_by_lt[riskmodels.cost_type_to_loss_type(ct['name'])] = ct['unit']
    unit_by_lt['fatalities'] = 'people'
    rlzs = dstore['rlzs_assoc'].realizations
    avglosses = dstore['avglosses']
    riskmodel = dstore['riskmodel']
    assetcol = dstore['assetcol']
    sitemesh = dstore['sitemesh']
    N, L, R = avglosses.shape
    single_output = (L == 1 and R == 1)
    fnames = []
    for l in range(L):
        lt = riskmodel.loss_types[l]
        for r in range(R):
            rlz = rlzs[r]
            suffix = ('' if single_output
                      else '-gsimltp_%s_%s' % (rlz.uid, lt))
            fname = os.path.join(
                dstore.export_dir, '%s%s.%s' % (ekey[0], suffix, ekey[1]))
            data = [LossMap(Location(sitemesh[ass['site_id']]),
                            ass['asset_ref'], stat['mean'], stat['stddev'])
                    for ass, stat in zip(assetcol, avglosses[:, l, r])]
            writer = risk_writers.LossMapXMLWriter(
                fname, oq.investigation_time, poe=None, loss_type=lt,
                gsim_tree_path=None, unit=unit_by_lt[lt],
                loss_category=None)
            # TODO: replace the category with the exposure category
            writer.serialize(data)
            fnames.append(fname)
    return sorted(fnames)
Exemple #48
0
def _gen_writers(dstore, writercls, root):
    """
    Yield (writer, (loss_type, index, insured_flag)) pairs, one per
    output file to be written, for the given export root (which must
    end in '-rlzs' or '-stats').
    """
    oq = OqParam.from_(dstore.attrs)
    rlzs = dstore['rlzs_assoc'].realizations
    cost_types = dstore['cost_types']
    L, R = len(cost_types), len(rlzs)
    for ct in cost_types:
        loss_type = riskmodels.cost_type_to_loss_type(ct['name'])
        for ins in range(oq.insured_losses + 1):
            ins_tag = '_ins' if ins else ''
            if root.endswith('-rlzs'):
                # one file per realization
                for rlz in rlzs:
                    suffix = ('' if L == 1 and R == 1
                              else '-gsimltp_%s_%s' % (rlz.uid, loss_type))
                    dest = dstore.export_path(
                        '%s%s%s.xml' % (root[:-5], suffix, ins_tag))
                    writer = writercls(dest, oq.investigation_time,
                                       loss_type, unit=ct['unit'],
                                       **get_paths(rlz))
                    yield writer, (loss_type, rlz.ordinal, ins)
            elif root.endswith('-stats'):
                # one file for the mean plus one per quantile
                pairs = [('mean', None)]
                pairs.extend(('quantile-%s' % q, q)
                             for q in oq.quantile_loss_curves)
                for ordinal, (statname, statvalue) in enumerate(pairs):
                    dest = dstore.export_path(
                        '%s-%s-%s%s.xml' %
                        (root[:-6], statname, loss_type, ins_tag))
                    writer = writercls(
                        dest, oq.investigation_time, loss_type,
                        statistics='mean' if ordinal == 0 else 'quantile',
                        quantile_value=statvalue, unit=ct['unit'])
                    yield writer, (loss_type, ordinal, ins)
Exemple #49
0
def main(what, report=False):
    """
    Give information about the passed keyword or filename.

    :param what: a keyword (e.g. 'calculators', 'gsims', 'imts', ...),
        a directory (with report=True), or a path ending in
        .xml/.ini/.zip
    :param report: flag; if set, build a report instead of just printing
        the parsed information
    """
    # prefer the processpool backend unless distribution is disabled
    if os.environ.get('OQ_DISTRIBUTE') not in ('no', 'processpool'):
        os.environ['OQ_DISTRIBUTE'] = 'processpool'
    if what == 'calculators':
        for calc in sorted(base.calculators):
            print(calc)
    elif what == 'gsims':
        for gs in gsim.get_available_gsims():
            print(gs)
    elif what == 'portable_gsims':
        for gs in gsim.get_portable_gsims():
            print(gs)
    elif what == 'imts':
        # IMTs are exposed as uppercase callables in the imt module
        for im in vars(imt).values():
            if inspect.isfunction(im) and is_upper(im):
                print(im.__name__)
    elif what == 'views':
        for name in sorted(view):
            print(name)
    elif what == 'exports':
        # group the (exporter, format) pairs by exporter name
        dic = groupby(export, operator.itemgetter(0),
                      lambda group: [r[1] for r in group])
        items = [(DISPLAY_NAME.get(exporter, '?'), exporter, formats)
                 for exporter, formats in dic.items()]
        n = 0
        for dispname, exporter, formats in sorted(items):
            print(dispname, '"%s"' % exporter, formats)
            n += len(formats)
        print('There are %d exporters defined.' % n)
    elif what == 'extracts':
        for key in extract:
            func = extract[key]
            # unwrap decorated or partial objects to show the real signature
            if hasattr(func, '__wrapped__'):
                fm = FunctionMaker(func.__wrapped__)
            elif hasattr(func, 'func'):  # for partial objects
                fm = FunctionMaker(func.func)
            else:
                fm = FunctionMaker(func)
            print('%s(%s)%s' % (fm.name, fm.signature, fm.doc))
    elif what == 'parameters':
        docs = OqParam.docs()
        # collect the names of the descriptor-based parameters
        names = set()
        for val in vars(OqParam).values():
            if hasattr(val, 'name'):
                names.add(val.name)
        params = sorted(names)
        for param in params:
            print(param)
            print(docs[param])
    elif what == 'mfds':
        for cls in gen_subclasses(BaseMFD):
            print(cls.__name__)
    elif what == 'venv':
        print(sys.prefix)
    elif what == 'sources':
        for cls in gen_subclasses(BaseSeismicSource):
            print(cls.__name__)
    elif what == 'consequences':
        known = scientific.KNOWN_CONSEQUENCES
        print('The following %d consequences are implemented:' % len(known))
        for cons in known:
            print(cons)
    elif os.path.isdir(what) and report:
        with Monitor('info', measuremem=True) as mon:
            with mock.patch.object(logging.root, 'info'):  # reduce logging
                do_build_reports(what)
        print(mon)
    elif what.endswith('.xml'):
        node = nrml.read(what)
        if node[0].tag.endswith('sourceModel'):
            print(source_model_info([node]))
        elif node[0].tag.endswith('logicTree'):
            # the branching-level wrapper element is optional in old files
            bset = node[0][0]
            if bset.tag.endswith("logicTreeBranchingLevel"):
                bset = bset[0]
            if bset.attrib['uncertaintyType'] == 'sourceModel':
                sm_nodes = []
                for smpath in logictree.collect_info(what).smpaths:
                    sm_nodes.append(nrml.read(smpath))
                print(source_model_info(sm_nodes))
            elif bset.attrib['uncertaintyType'] == 'gmpeModel':
                print(logictree.GsimLogicTree(what))
        else:
            print(node.to_str())
    elif what.endswith(('.ini', '.zip')):
        with Monitor('info', measuremem=True) as mon:
            if report:
                print('Generated', reportwriter.build_report(what))
            else:
                print(readinput.get_oqparam(what).json())
        # only show timing info for slow operations
        if mon.duration > 1:
            print(mon)
    elif what:
        print("No info for '%s'" % what)
Exemple #50
0
 def __init__(self, dstore):
     """Keep a reference to the datastore and build a reST-style title
     (description underlined with '=') from its description."""
     description = OqParam.from_(dstore.attrs).description
     self.dstore = dstore
     self.text = description + '\n' + '=' * len(description)
Exemple #51
0
def parse_config(source, hazard_calculation_id=None, hazard_output_id=None):
    """
    Parse a dictionary of parameters from an INI-style config file.

    :param source:
        File-like object containing the config parameters.
    :param hazard_calculation_id:
        The ID of a previous calculation (or None)
    :param hazard_output_id:
        The output of a previous job (or None)
    :returns:
        An :class:`openquake.commonlib.oqvalidation.OqParam` instance
        containing the validate and casted parameters/values parsed from
        the job.ini file as well as a subdictionary 'inputs' containing
        absolute paths to all of the files referenced in the job.ini, keyed by
        the parameter name.
    """
    cp = ConfigParser.ConfigParser()
    cp.readfp(source)

    # directory containing the config file we're parsing; computed once
    # (the original computed an equivalent value twice, shadowing it)
    base_path = os.path.dirname(
        os.path.join(os.path.abspath('.'), source.name))
    params = dict(base_path=base_path, inputs={},
                  hazard_calculation_id=hazard_calculation_id,
                  hazard_output_id=hazard_output_id)

    for sect in cp.sections():
        for key, value in cp.items(sect):
            if key == 'sites_csv' or key.endswith('_file'):
                # file-valued parameter: store the absolute path in inputs
                input_type = key[:-5]
                path = value if os.path.isabs(value) else os.path.join(
                    base_path, value)
                params['inputs'][input_type] = path
            else:
                params[key] = value

    # load source inputs (the paths are in the source_model_logic_tree)
    smlt = params['inputs'].get('source_model_logic_tree')
    if smlt:
        params['inputs']['source'] = [
            os.path.join(base_path, src_path)
            for src_path in _collect_source_model_paths(smlt)]

    # check for obsolete calculation_mode: risk calculations must use
    # the explicit *_risk modes
    is_risk = hazard_calculation_id or hazard_output_id
    cmode = params['calculation_mode']
    if is_risk and cmode in ('classical', 'event_based', 'scenario'):
        raise ValueError('Please change calculation_mode=%s into %s_risk '
                         'in the .ini file' % (cmode, cmode))

    oqparam = OqParam(**params)

    # define the parameter `intensity measure types and levels` always
    oqparam.intensity_measure_types_and_levels = get_imtls(oqparam)

    # remove the redundant parameter `intensity_measure_types`
    if hasattr(oqparam, 'intensity_measure_types'):
        delattr(oqparam, 'intensity_measure_types')

    return oqparam
 def __init__(self, dstore):
     """Keep references to the datastore and its job parameters and
     build a reST-style title from the calculation description."""
     self.dstore = dstore
     self.oq = oq = OqParam.from_(dstore.attrs)
     self.text = oq.description + '\n' + '=' * len(oq.description)
Exemple #53
0
 def __init__(self, dstore):
     """Keep references to the datastore and its job parameters; build
     a reST-style title from the description, followed by the number of
     sites in the calculation mesh."""
     self.dstore = dstore
     self.oq = oq = OqParam.from_(dstore.attrs)
     self.text = oq.description + '\n' + '=' * len(oq.description)
     self.text += '\n\nnum_sites = %d' % len(dstore['sitemesh'])
Exemple #54
0
def show(calc_id, key=None, rlzs=None):
    """
    Show the content of a datastore.

    :param calc_id: numeric calculation ID; if 0, show all calculations
    :param key: key of the datastore
    :param rlzs: flag; if given, print out the realizations in order
    """
    if calc_id == 0:  # show all
        if not os.path.exists(datastore.DATADIR):
            return
        rows = []
        for calc_id in datastore.get_calc_ids(datastore.DATADIR):
            try:
                ds = datastore.DataStore(calc_id, mode='r')
                oq = OqParam.from_(ds.attrs)
                cmode, descr = oq.calculation_mode, oq.description
            except Exception:
                # invalid datastore file, or missing calculation_mode
                # and description attributes, perhaps due to a manual kill.
                # NB: a bare `except:` here would also swallow
                # KeyboardInterrupt and then delete a valid datastore
                logging.warning('Removed invalid calculation %d', calc_id)
                os.remove(
                    os.path.join(datastore.DATADIR, 'calc_%s.hdf5' % calc_id))
                continue
            else:
                rows.append((calc_id, cmode, descr))
                ds.close()
        for row in sorted(rows, key=lambda row: row[0]):  # by calc_id
            print('#%d %s: %s' % row)
        return
    ds = datastore.DataStore(calc_id, mode='r')
    if key:
        # show a single key, either a view or a raw dataset
        if key in datastore.view:
            print(datastore.view(key, ds))
            return
        obj = ds[key]
        if hasattr(obj, 'value'):  # an array
            print(write_csv(io.StringIO(), obj.value))
        else:
            print(obj)
        return

    oq = OqParam.from_(ds.attrs)

    # this part is experimental
    if rlzs and 'hcurves' in ds:
        min_value = 0.01  # used in rmsep
        curves_by_rlz, mean_curves = get_hcurves_and_means(ds)
        dists = []
        for rlz, curves in curves_by_rlz.items():
            dist = sum(
                rmsep(mean_curves[imt], curves[imt], min_value)
                for imt in mean_curves.dtype.fields)
            dists.append((dist, rlz))
        print('Realizations in order of distance from the mean curves')
        for dist, rlz in sorted(dists):
            print('%s: rmsep=%s' % (rlz, dist))
    else:
        # print all keys with their sizes
        print(
            oq.calculation_mode, 'calculation (%r) saved in %s contains:' %
            (oq.description, ds.hdf5path))
        for key in ds:
            print(key, humansize(ds.getsize(key)))
Exemple #55
0
def parse_config(source, hazard_calculation_id=None, hazard_output_id=None):
    """
    Parse a dictionary of parameters from an INI-style config file.

    :param source:
        File-like object containing the config parameters.
    :param hazard_calculation_id:
        The ID of a previous calculation (or None)
    :param hazard_output_id:
        The output of a previous job (or None)
    :returns:
        An :class:`openquake.commonlib.oqvalidation.OqParam` instance
        containing the validate and casted parameters/values parsed from
        the job.ini file as well as a subdictionary 'inputs' containing
        absolute paths to all of the files referenced in the job.ini, keyed by
        the parameter name.
    """
    cp = ConfigParser.ConfigParser()
    cp.readfp(source)

    # directory containing the config file we're parsing; computed once
    # (the original computed an equivalent value twice, shadowing it)
    base_path = os.path.dirname(os.path.join(os.path.abspath('.'),
                                             source.name))
    params = dict(base_path=base_path,
                  inputs={},
                  hazard_calculation_id=hazard_calculation_id,
                  hazard_output_id=hazard_output_id)

    for sect in cp.sections():
        for key, value in cp.items(sect):
            if key == 'sites_csv':
                # Parse site coordinates from the csv file,
                # return a string 'lon1 lat1, lon2 lat2, ... , lonN latN'
                path = value if os.path.isabs(value) else os.path.join(
                    base_path, value)
                # use a context manager so the handle is always closed
                # (the original leaked it); NB: 'U' mode was removed in
                # Python 3.11, kept here for backward compatibility
                with open(path, 'U') as csvfile:
                    sites = csvfile.read().strip().replace(',', ' ')
                params['sites'] = sites.replace('\n', ',')
            elif key.endswith('_file'):
                input_type = key[:-5]
                path = value if os.path.isabs(value) else os.path.join(
                    base_path, value)
                params['inputs'][input_type] = path
            else:
                params[key] = value

    # load source inputs (the paths are in the source_model_logic_tree)
    smlt = params['inputs'].get('source_model_logic_tree')
    if smlt:
        params['inputs']['source'] = [
            os.path.join(base_path, src_path)
            for src_path in _collect_source_model_paths(smlt)
        ]

    # forbid obsolete calculation_mode values in risk calculations
    is_risk = hazard_calculation_id or hazard_output_id
    cmode = params['calculation_mode']
    if is_risk and cmode in ('classical', 'event_based', 'scenario'):
        raise ValueError('Please change calculation_mode=%s into %s_risk '
                         'in the .ini file' % (cmode, cmode))
    return OqParam(**params)