Example #1
def upgrade_file(path, multipoint):
    """Upgrade to the latest NRML version"""
    node0 = nrml.read(path, chatty=False)[0]
    shutil.copy(path, path + '.bak')  # make a backup of the original file
    tag = striptag(node0.tag)
    gml = True
    if tag == 'vulnerabilityModel':
        vf_dict, cat_dict = get_vulnerability_functions_04(path)
        # below I am converting into a NRML 0.5 vulnerabilityModel
        node0 = Node(
            'vulnerabilityModel', cat_dict,
            nodes=[obj_to_node(val) for val in vf_dict.values()])
        gml = False
    elif tag == 'fragilityModel':
        node0 = read_nrml.convert_fragility_model_04(
            nrml.read(path)[0], path)
        gml = False
    elif tag == 'sourceModel':
        node0 = nrml.read(path)[0]
        dic = groupby(node0.nodes, operator.itemgetter('tectonicRegion'))
        node0.nodes = [Node('sourceGroup',
                            dict(tectonicRegion=trt, name="group %s" % i),
                            nodes=srcs)
                       for i, (trt, srcs) in enumerate(dic.items(), 1)]
        if multipoint:
            sourceconverter.update_source_model(node0, path + '.bak')
    with open(path, 'wb') as f:
        nrml.write([node0], f, gml=gml)
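A minimal usage sketch for upgrade_file; the path is hypothetical and the call assumes the module imports above are available:

# hypothetical invocation: upgrades the file in place,
# converting point sources to multiPointSources as well
upgrade_file('source_model_04.xml', multipoint=True)
# the original file is preserved as source_model_04.xml.bak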
Example #2
    def test_ill_formed_rupture(self):
        rup_file = BytesIO(b'''\
<?xml version='1.0' encoding='utf-8'?>
<nrml xmlns:gml="http://www.opengis.net/gml"
      xmlns="http://openquake.org/xmlns/nrml/0.4">
    <simpleFaultRupture>
        <magnitude>7.65</magnitude>
        <rake>15.0</rake>
        <hypocenter lon="0.0" lat="91.0" depth="5.0"/>
        <simpleFaultGeometry>
                <gml:LineString>
                    <gml:posList>
                        -124.704 40.363
                        -124.977 41.214
                        -125.140 42.096
                    </gml:posList>
                </gml:LineString>
            <dip>50.0</dip>
            <upperSeismoDepth>12.5</upperSeismoDepth>
            <lowerSeismoDepth>19.5</lowerSeismoDepth>
        </simpleFaultGeometry>
    </simpleFaultRupture>
</nrml>
''')

        # at line 7 there is an invalid lat="91.0"
        with self.assertRaises(ValueError) as ctx:
            nrml.read(rup_file)
        self.assertIn('line 7', str(ctx.exception))
Example #3
    def test_no_nrml(self):
        fname = gettemp('''\
<?xml version="1.0" encoding="UTF-8"?>
<fragilityModel id="Ethiopia" assetCategory="buildings"
lossCategory="structural" />
''')
        with self.assertRaises(ValueError) as ctx:
            read(fname)
        self.assertIn('expected a node of kind nrml, got fragilityModel',
                      str(ctx.exception))
Example #4
 def test_alternative_mfds(self):
     converter = s.SourceConverter(
         investigation_time=1.,
         rupture_mesh_spacing=1,  # km
         complex_fault_mesh_spacing=5,  # km
         width_of_mfd_bin=0.1,  # for Truncated GR MFDs
         area_source_discretization=1.)
     grp_nodes = nrml.read(ALT_MFDS_SRC_MODEL).sourceModel.nodes
     [[sflt1, sflt2], [cplx1]] = map(converter.convert_node, grp_nodes)
     # Check the values
     # Arbitrary MFD
     assert_close(cplx1.mfd.magnitudes, [8.6, 8.8, 9.0])
     assert_close(cplx1.mfd.occurrence_rates, [0.0006, 0.0008, 0.0004])
     # Youngs & Coppersmith from characteristic rate
     self.assertAlmostEqual(sflt1.mfd.b_val, 1.0)
     self.assertAlmostEqual(sflt1.mfd.a_val, 3.3877843113)
     self.assertAlmostEqual(sflt1.mfd.char_mag, 7.0)
     self.assertAlmostEqual(sflt1.mfd.char_rate, 0.005)
     self.assertAlmostEqual(sflt1.mfd.min_mag, 5.0)
     # Youngs & Coppersmith from total moment rate
     self.assertAlmostEqual(sflt2.mfd.b_val, 1.0)
     self.assertAlmostEqual(sflt2.mfd.a_val, 5.0800, 3)
     self.assertAlmostEqual(sflt2.mfd.char_mag, 7.0)
     self.assertAlmostEqual(sflt2.mfd.char_rate, 0.24615, 5)
     self.assertAlmostEqual(sflt2.mfd.min_mag, 5.0)
Example #5
 def test_well_formed_ruptures(self):
     converter = s.RuptureConverter(rupture_mesh_spacing=1.5,
                                    complex_fault_mesh_spacing=1.5)
     for fname in (SIMPLE_FAULT_RUPTURE, COMPLEX_FAULT_RUPTURE,
                   SINGLE_PLANE_RUPTURE, MULTI_PLANES_RUPTURE):
         [node] = nrml.read(fname)
         converter.convert_node(node)
Example #6
 def test_simple(self):
     testfile = os.path.join(testdir, 'two-point-sources.xml')
     sm = nrml.read(testfile).sourceModel
     update_source_model(sm, testfile)
     with io.BytesIO() as f:
         nrml.write(sm, f)
         got = f.getvalue().decode('utf-8')
         self.assertEqual(got, expected)
Example #7
 def test_nonparametric_source_ok(self):
     converter = s.SourceConverter(
         investigation_time=50.,
         rupture_mesh_spacing=1,  # km
         complex_fault_mesh_spacing=1,  # km
         width_of_mfd_bin=1.,  # for Truncated GR MFDs
         area_source_discretization=1.)
     [np] = nrml.read(NONPARAMETRIC_SOURCE).sourceModel
     converter.convert_node(np)
Example #8
 def test_complex(self):
     testfile = os.path.normpath(os.path.join(
         testdir, '../../../qa_tests_data/classical/case_30/ssm/shallow/'
         'gridded_seismicity_source_4.xml'))
     sm = nrml.read(testfile).sourceModel
     update_source_model(sm, testfile)
     with io.BytesIO() as f:
         nrml.write(sm, f)
         got = f.getvalue().decode('utf-8')
         self.assertEqual(got, multipoint)
Example #9
def build_rupture_from_file(rupture_file, simple_mesh_spacing=1.0,
                            complex_mesh_spacing=1.0):
    """
    Parse a rupture in OpenQuake NRML 0.4 format and convert it to an
    instance of :class:`openquake.hazardlib.source.rupture.Rupture`
    """
    [rup_node] = nrml.read(rupture_file)
    conv = RuptureConverter(simple_mesh_spacing,
                            complex_mesh_spacing)
    return conv.convert_node(rup_node)
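A hedged usage sketch, assuming a NRML rupture file is at hand (the file
name is a placeholder):

# parse a rupture with the default 1.0 km mesh spacings
rup = build_rupture_from_file('simple_fault_rupture.xml')
print(rup.mag, rup.rake)  # hazardlib ruptures carry magnitude and rake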
Example #10
    def test_invalid(self):
        fname = gettemp('''\
<?xml version="1.0" encoding="UTF-8"?>
<nrml xmlns="http://openquake.org/xmlns/nrml/0.5">
  <fragilityModel id="Ethiopia" assetCategory="buildings"
        lossCategory="structural">
    <description>structural_vul_ethiopia</description>
    <limitStates> slight moderate extensive collapse</limitStates>
    <fragilityFunction id="CR/LFINF/H:1,2" format="continuous" shape="logncdf">
       <imls imt="SA" noDamageLimit="0.1" minIML="0.01" maxIML="1.2"/>
       <params ls="slight" mean="0.184422723" stddev="0.143988438"/>
       <params ls="moderate" mean="1.659007804" stddev="3.176361273"/>
       <params ls="extensive" mean="9.747745727" stddev="38.54171001"/>
       <params ls="collapse" mean="247.1792873" stddev="4014.774504"/>
     </fragilityFunction>
  </fragilityModel>
</nrml>''')
        with self.assertRaises(ValueError) as ctx:
            read(fname)
        self.assertIn('Could not convert imt->intensity_measure_type: '
                      "Invalid IMT: 'SA', line 8", str(ctx.exception))
Example #11
def fix(fname, outname=None):
    root = nrml.read(fname)
    xmlns = root['xmlns']
    if xmlns == u'http://openquake.org/xmlns/nrml/0.4':
        for src_node in root.sourceModel:
            fix_source_node(src_node)
    else:  # nrml/0.5+
        for src_grp in root.sourceModel:
            for src_node in src_grp:
                fix_source_node(src_node)
    with open(outname or fname, 'wb') as out:
        nrml.write([root.sourceModel], out, xmlns=xmlns)
Example #12
def convert_xml_hdf5(input_file, output_file):
    with hdf5.File(output_file, 'w') as out:
        inp = nrml.read(input_file)
        if inp['xmlns'].endswith('nrml/0.4'):  # old version
            d = os.path.dirname(input_file) or '.'
            raise ValueError('Please upgrade with `oq upgrade_nrml %s`' % d)
        elif inp['xmlns'].endswith('nrml/0.5'):  # current version
            sm = inp.sourceModel
        else:  # not a NRML
            raise ValueError('Unknown NRML: %s' % inp['xmlns'])
        out.save(node.node_to_dict(sm))
    return output_file
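A usage sketch for the converter above; both file names are placeholders
and the input is assumed to be a NRML 0.5 source model:

# writes the source model as nested dictionaries inside the HDF5 file
convert_xml_hdf5('source_model.xml', 'source_model.hdf5')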
Example #13
def reduce(fname, reduction_factor):
    """
    Produce a submodel from `fname` by sampling the nodes randomly.
    Supports source models, site models and exposure models. As a special
    case, it is also able to reduce .csv files by sampling the lines.
    This is a debugging utility to reduce large computations to small ones.
    """
    if fname.endswith('.csv'):
        with open(fname) as f:
            line = f.readline()  # read the first line
            if csv.Sniffer().has_header(line):
                header = line
                all_lines = f.readlines()
            else:
                header = None
                f.seek(0)
                all_lines = f.readlines()
        lines = general.random_filter(all_lines, reduction_factor)
        shutil.copy(fname, fname + '.bak')
        print('Copied the original file to %s.bak' % fname)
        _save_csv(fname, lines, header)
        print('Extracted %d lines out of %d' % (len(lines), len(all_lines)))
        return
    elif fname.endswith('.npy'):
        array = numpy.load(fname)
        shutil.copy(fname, fname + '.bak')
        print('Copied the original file to %s.bak' % fname)
        arr = numpy.array(general.random_filter(array, reduction_factor))
        numpy.save(fname, arr)
        print('Extracted %d rows out of %d' % (len(arr), len(array)))
        return
    node = nrml.read(fname)
    model = node[0]
    if model.tag.endswith('exposureModel'):
        total = len(model.assets)
        model.assets.nodes = general.random_filter(
            model.assets, reduction_factor)
        num_nodes = len(model.assets)
    elif model.tag.endswith('siteModel'):
        total = len(model)
        model.nodes = general.random_filter(model, reduction_factor)
        num_nodes = len(model)
    elif model.tag.endswith('sourceModel'):
        reduce_source_model(fname, reduction_factor)
        return
    elif model.tag.endswith('logicTree'):
        for smpath in logictree.collect_info(fname).smpaths:
            reduce_source_model(smpath, reduction_factor)
        return
    else:
        raise RuntimeError('Unknown model tag: %s' % model.tag)
    save_bak(fname, node, num_nodes, total)
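A sketch of how the utility might be invoked; the paths are placeholders
and each call rewrites the file in place, keeping a .bak copy:

reduce('exposure_model.xml', 0.1)  # keep roughly 10% of the assets
reduce('gmf_data.csv', 0.01)       # keep roughly 1% of the data lines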
Example #14
def reduce_source_model(fname, reduction_factor):
    node = nrml.read(fname)
    if node['xmlns'] == 'http://openquake.org/xmlns/nrml/0.5':
        total = sum(len(sg) for sg in node[0])
        num_nodes = 0
        for sg in node[0]:
            sg.nodes = general.random_filter(sg, reduction_factor)
            num_nodes += len(sg)
    else:  # nrml/0.4
        total = len(node[0].nodes)
        node[0].nodes = general.random_filter(node[0], reduction_factor)
        num_nodes = len(node[0].nodes)
    save_bak(fname, node, num_nodes, total)
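A sketch of a direct invocation; the file name is a placeholder and the
factor keeps about half of the sources:

reduce_source_model('source_model.xml', 0.5)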
Example #15
    def test_raises_useful_error_2(self):
        area_file = BytesIO(b"""\
<?xml version='1.0' encoding='utf-8'?>
<nrml xmlns:gml="http://www.opengis.net/gml"
      xmlns="http://openquake.org/xmlns/nrml/0.4">
    <sourceModel name="Some Source Model">
        <areaSource id="1" name="Quito" tectonicRegion="Active Shallow Crust">
            <areaGeometry>
                <gml:Polygon>
                    <gml:exterior>
                        <gml:LinearRing>
                            <gml:posList>
                             -122.5 37.5
                             -121.5 37.5
                             -121.5 38.5
                             -122.5 38.5
                            </gml:posList>
                        </gml:LinearRing>
                    </gml:exterior>
                </gml:Polygon>
                <upperSeismoDepth>0.0</upperSeismoDepth>
                <lowerSeismoDepth>10.0</lowerSeismoDepth>
            </areaGeometry>
            <magScaleRel>PeerMSR</magScaleRel>
            <ruptAspectRatio>1.5</ruptAspectRatio>
            <incrementalMFD minMag="6.55" binWidth="0.1">
                <occurRates>0.0010614989 8.8291627E-4 7.3437777E-4
                            6.108288E-4 5.080653E-4
                </occurRates>
            </incrementalMFD>
            <nodalPlanedist>
         <nodalPlane probability="0.3" strike="0.0" dip="90.0" rake="0.0" />
         <nodalPlane probability="0.7" strike="90.0" dip="45.0" rake="90.0" />
            </nodalPlanedist>
            <hypoDepthDist>
                <hypoDepth probability="0.5" depth="4.0" />
                <hypoDepth probability="0.5" depth="8.0" />
            </hypoDepthDist>
        </areaSource>

    </sourceModel>
</nrml>
""")
        [area] = nrml.read(area_file).sourceModel
        with self.assertRaises(AttributeError) as ctx:
            self.conv.convert_node(area)
        self.assertIn(
            "node areaSource: No subnode named 'nodalPlaneDist'"
            " found in 'areaSource', line 5 of", str(ctx.exception))
Example #16
def get_vulnerability_functions_04(fname):
    """
    Parse the vulnerability model in NRML 0.4 format.

    :param fname:
        path of the vulnerability file
    :returns:
        a pair: a dictionary (imt, taxonomy) -> vulnerability function,
        and a dictionary of categories
    """
    categories = dict(assetCategory=set(), lossCategory=set(),
                      vulnerabilitySetID=set())
    imts = set()
    taxonomies = set()
    vf_dict = {}  # imt, taxonomy -> vulnerability function
    for vset in nrml.read(fname).vulnerabilityModel:
        categories['assetCategory'].add(vset['assetCategory'])
        categories['lossCategory'].add(vset['lossCategory'])
        categories['vulnerabilitySetID'].add(vset['vulnerabilitySetID'])
        IML = vset.IML
        imt_str = IML['IMT']
        imls = ~IML
        imts.add(imt_str)
        for vfun in vset.getnodes('discreteVulnerability'):
            taxonomy = vfun['vulnerabilityFunctionID']
            if taxonomy in taxonomies:
                raise InvalidFile(
                    'Duplicated vulnerabilityFunctionID: %s: %s, line %d' %
                    (taxonomy, fname, vfun.lineno))
            taxonomies.add(taxonomy)
            with context(fname, vfun):
                loss_ratios = ~vfun.lossRatio
                coefficients = ~vfun.coefficientsVariation
            if len(loss_ratios) != len(imls):
                raise InvalidFile(
                    'There are %d loss ratios, but %d imls: %s, line %d' %
                    (len(loss_ratios), len(imls), fname,
                     vfun.lossRatio.lineno))
            if len(coefficients) != len(imls):
                raise InvalidFile(
                    'There are %d coefficients, but %d imls: %s, line %d' %
                    (len(coefficients), len(imls), fname,
                     vfun.coefficientsVariation.lineno))
            with context(fname, vfun):
                vf_dict[imt_str, taxonomy] = scientific.VulnerabilityFunction(
                    taxonomy, imt_str, imls, loss_ratios, coefficients,
                    vfun['probabilisticDistribution'])
    categories['id'] = '_'.join(sorted(categories['vulnerabilitySetID']))
    del categories['vulnerabilitySetID']
    return vf_dict, categories
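A hedged sketch of consuming the two return values (the file name is a
placeholder):

vf_dict, categories = get_vulnerability_functions_04('vulnerability_04.xml')
for (imt, taxonomy), vf in vf_dict.items():
    print(imt, taxonomy, vf)
print(categories['id'])  # joined from the vulnerabilitySetIDs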
Example #17
def get_rupture(oqparam):
    """
    Read the `rupture_model` file and build a hazardlib rupture

    :param oqparam:
        an :class:`openquake.commonlib.oqvalidation.OqParam` instance
    :returns:
        a hazardlib rupture
    """
    rup_model = oqparam.inputs['rupture_model']
    [rup_node] = nrml.read(rup_model)
    conv = sourceconverter.RuptureConverter(
        oqparam.rupture_mesh_spacing, oqparam.complex_fault_mesh_spacing)
    rup = conv.convert_node(rup_node)
    rup.tectonic_region_type = '*'  # there is no TRT for scenario ruptures
    rup.serial = oqparam.random_seed
    return rup
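A sketch of a plausible call site, assuming the OqParam instance comes from
a job.ini declaring a rupture_model input (the job file name is a
placeholder):

from openquake.commonlib import readinput

oqparam = readinput.get_oqparam('job.ini')  # hypothetical job file
rup = get_rupture(oqparam)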
Example #18
def get_site_model(oqparam):
    """
    Convert the NRML file into an array of site parameters.

    :param oqparam:
        an :class:`openquake.commonlib.oqvalidation.OqParam` instance
    :returns:
        an array with fields lon, lat, vs30, ...
    """
    req_site_params = get_gsim_lt(oqparam).req_site_params
    arrays = []
    for fname in oqparam.inputs['site_model']:
        if isinstance(fname, str) and fname.endswith('.csv'):
            sm = hdf5.read_csv(
                 fname, {None: float, 'vs30measured': bool}).array
            if 'site_id' in sm.dtype.names:
                raise InvalidFile('%s: you passed a sites.csv file instead of '
                                  'a site_model.csv file!' % fname)
            z = numpy.zeros(len(sm), sorted(sm.dtype.descr))
            for name in z.dtype.names:  # reorder the fields
                z[name] = sm[name]
            arrays.append(z)
            continue
        nodes = nrml.read(fname).siteModel
        params = [valid.site_param(node.attrib) for node in nodes]
        missing = req_site_params - set(params[0])
        if 'vs30measured' in missing:  # use a default of False
            missing -= {'vs30measured'}
            for param in params:
                param['vs30measured'] = False
        if 'backarc' in missing:  # use a default of False
            missing -= {'backarc'}
            for param in params:
                param['backarc'] = False
        if missing:
            raise InvalidFile('%s: missing parameter %s' %
                              (oqparam.inputs['site_model'],
                               ', '.join(missing)))
        # NB: the sorted in sorted(params[0]) is essential, otherwise there is
        # a heisenbug in scenario/test_case_4
        site_model_dt = numpy.dtype([(p, site.site_param_dt[p])
                                     for p in sorted(params[0])])
        sm = numpy.array([tuple(param[name] for name in site_model_dt.names)
                          for param in params], site_model_dt)
        arrays.append(sm)
    return numpy.concatenate(arrays)
Example #19
def get_input_files(oqparam, hazard=False):
    """
    :param oqparam: an OqParam instance
    :param hazard: if True, consider only the hazard files
    :returns: input path names in a specific order
    """
    fnames = []  # files entering the checksum
    for key in oqparam.inputs:
        fname = oqparam.inputs[key]
        if hazard and key not in ('site_model', 'source_model_logic_tree',
                                  'gsim_logic_tree', 'source'):
            continue
        # collect .hdf5 tables for the GSIMs, if any
        elif key == 'gsim_logic_tree':
            gsim_lt = get_gsim_lt(oqparam)
            for gsims in gsim_lt.values.values():
                for gsim in gsims:
                    table = getattr(gsim, 'GMPE_TABLE', None)
                    if table:
                        fnames.append(table)
            fnames.append(fname)
        elif key == 'source_model':  # UCERF
            f = oqparam.inputs['source_model']
            fnames.append(f)
            fname = nrml.read(f).sourceModel.UCERFSource['filename']
            fnames.append(os.path.join(os.path.dirname(f), fname))
        elif key == 'exposure':  # fname is a list
            for exp in asset.Exposure.read_headers(fname):
                fnames.extend(exp.datafiles)
            fnames.extend(fname)
        elif isinstance(fname, dict):
            fnames.extend(fname.values())
        elif isinstance(fname, list):
            for f in fname:
                if f == oqparam.input_dir:
                    raise InvalidFile('%s: there is an empty path in %s' %
                                      (oqparam.inputs['job_ini'], key))
            fnames.extend(fname)
        elif key == 'source_model_logic_tree':
            for smpaths in logictree.collect_info(fname).smpaths.values():
                fnames.extend(smpaths)
            fnames.append(fname)
        else:
            fnames.append(fname)
    return sorted(fnames)
Example #20
def tidy(fnames):
    """
    Reformat NRML files in a canonical form. That also means reducing the
    precision of the floats to a standard value. If the file is invalid,
    a clear error message is shown.
    """
    for fname in fnames:
        try:
            node = nrml.read(fname)
        except ValueError as err:
            print(err)
            return
        with open(fname + '.bak', 'wb') as f:
            f.write(open(fname, 'rb').read())
        with open(fname, 'wb') as f:
            # make sure the xmlns i.e. the NRML version is unchanged
            nrml.write(node.nodes, f, writers.FIVEDIGITS, xmlns=node['xmlns'])
        print('Reformatted %s, original left in %s.bak' % (fname, fname))
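A usage sketch; the file names are placeholders and each file is rewritten
in place with the original kept as a .bak copy:

tidy(['source_model.xml', 'exposure_model.xml'])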
Example #21
def reduce_source_model(smlt_file, source_ids, remove=True):
    """
    Extract sources from the composite source model
    """
    found = 0
    to_remove = set()
    for paths in logictree.collect_info(smlt_file).smpaths.values():
        for path in paths:
            logging.info('Reading %s', path)
            root = nrml.read(path)
            model = Node('sourceModel', root[0].attrib)
            origmodel = root[0]
            if root['xmlns'] == 'http://openquake.org/xmlns/nrml/0.4':
                for src_node in origmodel:
                    if src_node['id'] in source_ids:
                        model.nodes.append(src_node)
            else:  # nrml/0.5
                for src_group in origmodel:
                    sg = copy.copy(src_group)
                    sg.nodes = []
                    weights = src_group.get('srcs_weights')
                    if weights:
                        assert len(weights) == len(src_group.nodes)
                    else:
                        weights = [1] * len(src_group.nodes)
                    src_group['srcs_weights'] = reduced_weights = []
                    for src_node, weight in zip(src_group, weights):
                        if src_node['id'] in source_ids:
                            found += 1
                            sg.nodes.append(src_node)
                            reduced_weights.append(weight)
                    if sg.nodes:
                        model.nodes.append(sg)
            shutil.copy(path, path + '.bak')
            if model:
                with open(path, 'wb') as f:
                    nrml.write([model], f, xmlns=root['xmlns'])
            elif remove:  # remove the files completely reduced
                to_remove.add(path)
    if found:
        for path in to_remove:
            os.remove(path)
Example #22
def reduce_source_model(smlt_file, source_ids, remove=True):
    """
    Extract sources from the composite source model
    """
    found = 0
    to_remove = []
    for paths in logictree.collect_info(smlt_file).smpaths.values():
        for path in paths:
            logging.info('Reading %s', path)
            root = nrml.read(path)
            model = Node('sourceModel', root[0].attrib)
            origmodel = root[0]
            if root['xmlns'] == 'http://openquake.org/xmlns/nrml/0.4':
                for src_node in origmodel:
                    if src_node['id'] in source_ids:
                        model.nodes.append(src_node)
            else:  # nrml/0.5
                for src_group in origmodel:
                    sg = copy.copy(src_group)
                    sg.nodes = []
                    weights = src_group.get('srcs_weights')
                    if weights:
                        assert len(weights) == len(src_group.nodes)
                    else:
                        weights = [1] * len(src_group.nodes)
                    src_group['srcs_weights'] = reduced_weights = []
                    for src_node, weight in zip(src_group, weights):
                        if src_node['id'] in source_ids:
                            found += 1
                            sg.nodes.append(src_node)
                            reduced_weights.append(weight)
                    if sg.nodes:
                        model.nodes.append(sg)
            shutil.copy(path, path + '.bak')
            if model:
                with open(path, 'wb') as f:
                    nrml.write([model], f, xmlns=root['xmlns'])
            elif remove:  # remove the files completely reduced
                to_remove.append(path)
    if found:
        for path in to_remove:
            os.remove(path)
Example #23
def collect_files(gsim_lt_path):
    """
    Given a path to a gsim logic tree, collect all of the
    path names it contains (relevant for tabular/file-dependent GSIMs).
    """
    n = nrml.read(gsim_lt_path)
    try:
        blevels = n.logicTree
    except Exception:
        raise InvalidFile('%s is not a valid gsim_logic_tree file' %
                          gsim_lt_path)
    paths = set()
    for blevel in blevels:
        for bset in bsnodes(gsim_lt_path, blevel):
            assert bset['uncertaintyType'] == 'gmpeModel', bset
            for br in bset:
                with context(gsim_lt_path, br):
                    relpaths = rel_paths(br.uncertaintyModel.text)
                    paths.update(abs_paths(gsim_lt_path, relpaths))
    return sorted(paths)
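A usage sketch (the file name is a placeholder):

paths = collect_files('gmpe_logic_tree.xml')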
Example #24
def collect_source_model_paths(smlt):
    """
    Given a path to a source model logic tree or a file-like, collect all of
    the soft-linked path names to the source models it contains, yielding
    them one at a time.

    :param smlt: source model logic tree file
    """
    n = nrml.read(smlt)
    try:
        blevels = n.logicTree
    except AttributeError:
        raise InvalidFile('%s is not a valid source_model_logic_tree_file' %
                          smlt)
    for blevel in blevels:
        with node.context(smlt, blevel):
            for bset in blevel:
                for br in bset:
                    smfname = br.uncertaintyModel.text.strip()
                    if smfname:
                        yield smfname
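Since collect_source_model_paths is a generator, a caller would typically
materialize it (the file name is a placeholder):

sm_paths = list(collect_source_model_paths('source_model_logic_tree.xml'))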
Example #25
def info(calculators, gsims, views, exports, report, input_file=''):
    """
    Give information. You can pass the name of an available calculator,
    a job.ini file, or a zip archive with the input files.
    """
    logging.basicConfig(level=logging.INFO)
    if calculators:
        for calc in sorted(base.calculators):
            print(calc)
    if gsims:
        for gs in gsim.get_available_gsims():
            print(gs)
    if views:
        for name in sorted(view):
            print(name)
    if exports:
        dic = groupby(export, operator.itemgetter(0),
                      lambda group: [r[1] for r in group])
        n = 0
        for exporter, formats in dic.items():
            print(exporter, formats)
            n += len(formats)
        print('There are %d exporters defined.' % n)
    if os.path.isdir(input_file) and report:
        with Monitor('info', measuremem=True) as mon:
            with mock.patch.object(logging.root, 'info'):  # reduce logging
                do_build_reports(input_file)
        print(mon)
    elif input_file.endswith('.xml'):
        print(nrml.read(input_file).to_str())
    elif input_file.endswith(('.ini', '.zip')):
        with Monitor('info', measuremem=True) as mon:
            if report:
                print('Generated', reportwriter.build_report(input_file))
            else:
                print_csm_info(input_file)
        if mon.duration > 1:
            print(mon)
    elif input_file:
        print("No info for '%s'" % input_file)
Example #26
    def __init__(self, filename, seed=0, num_samples=0,
                 sampling_method='early_weights', test_mode=False):
        self.filename = filename
        self.basepath = os.path.dirname(filename)
        # NB: converting the random_seed into an integer is needed on Windows
        self.seed = int(seed)
        self.num_samples = num_samples
        self.sampling_method = sampling_method
        self.test_mode = test_mode
        self.branches = {}  # branch_id -> branch
        self.bsetdict = {}
        self.previous_branches = []
        self.tectonic_region_types = set()
        self.source_types = set()
        self.hdf5_files = set()
        self.root_branchset = None
        root = nrml.read(filename)
        try:
            tree = root.logicTree
        except AttributeError:
            raise LogicTreeError(
                root, self.filename, "missing logicTree node")
        self.shortener = {}
        self.branchsets = []
        self.parse_tree(tree)

        # determine if the logic tree is source specific
        dicts = list(self.bsetdict.values())[1:]
        if not dicts:
            self.is_source_specific = False
            return
        for dic in dicts:
            ats = dic.get('applyToSources')
            if not ats:
                self.is_source_specific = False
                return
            elif len(ats.split()) != 1:
                self.is_source_specific = False
                return
        self.is_source_specific = True
Example #27
def nrml_to_pdf(file_nrml, include_ids=False, verbose=False):
    '''
    Convert NRML logic tree into a PDF diagram. Output file name is same
    as input except with .pdf extension. An intermediate .tex file is
    generated. Lualatex must be installed and present on the system path.

    :param file_nrml: file name of NRML logic tree in XML format
    :param include_ids: include or omit node ids from diagram
    '''

    if verbose:
        print('Reading %s' % file_nrml)
    root = nrml.read(file_nrml)

    file_tex = file_nrml.replace('.xml', '') + '.tex'
    if verbose:
        print('Writing %s' % file_tex)
    with open(file_tex, 'w+') as f:
        with StreamingTexWriter(f, include_ids) as writer:
            writer.serialize(root)

    out_dir = 'build'
    file_pdf = file_tex.replace('.tex', '.pdf')
    build_pdf = os.path.join(out_dir, file_pdf)
    if verbose:
        print('Converting %s to %s' % (file_tex, build_pdf))
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    command = [
        'lualatex', '-output-directory=' + out_dir, '-interaction=nonstopmode',
        file_tex
    ]
    print('Executing: ' + ' '.join(command))
    subprocess.call(command)

    if verbose:
        print('Moving %s to %s' % (build_pdf, file_pdf))
    if os.path.exists(file_pdf):
        os.remove(file_pdf)
    os.rename(build_pdf, file_pdf)
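A usage sketch; lualatex must be installed and on the PATH, and the XML
file name is a placeholder:

nrml_to_pdf('source_model_logic_tree.xml', include_ids=True, verbose=True)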
Example #28
def reduce(fname, reduction_factor):
    """
    Produce a submodel from `fname` by sampling the nodes randomly.
    Supports source models, site models and exposure models. As a special
    case, it is also able to reduce .csv files by sampling the lines.
    This is a debugging utility to reduce large computations to small ones.
    """
    if fname.endswith('.csv'):
        with open(fname) as f:
            all_lines = f.readlines()
        lines = random_filter(all_lines, reduction_factor)
        shutil.copy(fname, fname + '.bak')
        print('Copied the original file to %s.bak' % fname)
        with open(fname, 'wb') as f:
            for line in lines:
                f.write(encode(line))
        print('Extracted %d lines out of %d' % (len(lines), len(all_lines)))
        return
    node = nrml.read(fname)
    model = node[0]
    if model.tag.endswith('exposureModel'):
        total = len(model.assets)
        model.assets.nodes = random_filter(model.assets, reduction_factor)
        num_nodes = len(model.assets)
    elif model.tag.endswith('siteModel'):
        total = len(model)
        model.nodes = random_filter(model, reduction_factor)
        num_nodes = len(model)
    elif model.tag.endswith('sourceModel'):
        total = len(model)
        model.nodes = random_filter(model, reduction_factor)
        num_nodes = len(model)
    else:
        raise RuntimeError('Unknown model tag: %s' % model.tag)
    shutil.copy(fname, fname + '.bak')
    print('Copied the original file to %s.bak' % fname)
    with open(fname, 'wb') as f:
        nrml.write([model], f, xmlns=node['xmlns'])
    print('Extracted %d nodes out of %d' % (num_nodes, total))
Example #29
def get_rupture(oqparam):
    """
    Read the `rupture_model` file and build a hazardlib rupture

    :param oqparam:
        an :class:`openquake.commonlib.oqvalidation.OqParam` instance
    :returns:
        a hazardlib rupture
    """
    rup_model = oqparam.inputs['rupture_model']
    if rup_model.endswith('.csv'):
        return rupture.from_array(hdf5.read_csv(rup_model))
    if rup_model.endswith('.xml'):
        [rup_node] = nrml.read(rup_model)
        conv = sourceconverter.RuptureConverter(
            oqparam.rupture_mesh_spacing, oqparam.complex_fault_mesh_spacing)
        rup = conv.convert_node(rup_node)
    else:
        raise ValueError('Unrecognized rupture model %s' % rup_model)
    rup.tectonic_region_type = '*'  # there is no TRT for scenario ruptures
    rup.rup_id = oqparam.ses_seed
    return rup
Example #30
 def read(self,
          nrml_file,
          validate=False,
          simple_fault_spacing=1.0,
          complex_mesh_spacing=5.0,
          mfd_spacing=0.1):
     """
     Build the source model from nrml format
     """
     self.source_file = nrml_file
     if validate:
         converter = SourceConverter(1.0, simple_fault_spacing,
                                     complex_mesh_spacing, mfd_spacing,
                                     10.0)
         converter.fname = nrml_file
     src_nodes = nrml.read(nrml_file).sourceModel
     sources = []
     for no, src_node in enumerate(src_nodes, 1):
         if validate:
             print("Validating Source %s" % src_node.attrib["id"])
             converter.convert_node(src_node)
         sources.append(src_node)
     return SourceModel(sources)
Example #31
def main(what, fnames, chatty=False, *, outdir='.'):
    """
    Convert source models into CSV files (or geopackages, if fiona is
    installed).
    """
    t0 = time.time()
    for fname in fnames:
        logging.info('Reading %s', fname)
        converter.fname = fname
        name, _ext = os.path.splitext(os.path.basename(fname))
        root = nrml.read(fname)
        srcs = collections.defaultdict(list)  # geom_index -> rows
        if 'nrml/0.4' in root['xmlns']:
            for srcnode in root.sourceModel:
                appendrow(converter.convert_node(srcnode), srcs, chatty)
        else:
            for srcgroup in root.sourceModel:
                trt = srcgroup['tectonicRegion']
                for srcnode in srcgroup:
                    srcnode['tectonicRegion'] = trt
                    appendrow(converter.convert_node(srcnode), srcs, chatty)
        if what == 'csv':
            for kind, rows in srcs.items():
                dest = os.path.join(outdir, '%s_%s.csv' % (name, kind))
                logging.info('Saving %d sources on %s', len(rows), dest)
                tups = []
                for row in rows:
                    tup = row[:-2] + (to_wkt(*row[-2:]),)
                    tups.append(tup)
                header = rows[0]._fields[:-2] + ('wkt',)
                write_csv(dest, tups, header=header)
        else:  # gpkg
            gpkg = GeoPackager(name + '.gpkg')
            for kind, rows in srcs.items():
                logging.info('Saving %d sources on layer %s', len(rows), kind)
                gpkg.save_layer(kind, rows)
    logging.info('Finished in %d seconds', time.time() - t0)
Example #32
def upgrade_nrml(directory, dry_run, multipoint):
    """
    Upgrade all the NRML files contained in the given directory to the latest
    NRML version. Works by walking all subdirectories.
    WARNING: there is no downgrade!
    """
    for cwd, dirs, files in os.walk(directory):
        for f in files:
            path = os.path.join(cwd, f)
            if f.endswith('.xml'):
                ip = iterparse(path, events=('start',))
                next(ip)  # read node zero
                try:
                    fulltag = next(ip)[1].tag  # tag of the first node
                    xmlns, tag = fulltag.split('}')
                except Exception:  # not a NRML file
                    xmlns, tag = '', ''
                if xmlns[1:] == NRML05:  # already upgraded
                    if 'sourceModel' in tag and multipoint:
                        print('upgrading to multiPointSources', path)
                        node0 = nrml.read(path)[0]
                        sourceconverter.update_source_model(node0, path)
                        with open(path, 'wb') as f:
                            nrml.write([node0], f, gml=True)
                elif 'nrml/0.4' in xmlns and (
                        'vulnerability' in tag or 'fragility' in tag or
                        'sourceModel' in tag):
                    if not dry_run:
                        print('Upgrading', path)
                        try:
                            upgrade_file(path, multipoint)
                        except Exception as exc:
                            print(exc)  # report the file before re-raising
                            raise
                    else:
                        print('Not upgrading', path)
Example #33
def _get_ebruptures(fname, conv=None, ses_seed=None):
    """
    :param fname: path to a rupture file (XML or CSV)
    :param conv: RuptureConverter instance, used for XML ruptures
    :param ses_seed: used for XML ruptures
    :returns: a list of one or more EBRuptures
    """
    if fname.endswith('.xml'):
        [rup_node] = nrml.read(fname)
        rup = conv.convert_node(rup_node)
        rup.tectonic_region_type = '*'  # no TRT for scenario ruptures
        rup.rup_id = ses_seed
        ebrs = [EBRupture(rup, 'NA', 0, id=rup.rup_id, scenario=True)]
        return ebrs

    assert fname.endswith('.csv'), fname
    aw = get_ruptures(fname)
    ebrs = []
    for i, rec in enumerate(aw.array):
        rupture = _get_rupture(rec, aw.geoms[i], aw.trts[rec['trt_smr']])
        ebr = EBRupture(rupture, rec['source_id'], rec['trt_smr'],
                        rec['n_occ'], rec['id'], rec['e0'])
        ebrs.append(ebr)
    return ebrs
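A sketch of the XML branch above; the converter mirrors the one in the
earlier rupture examples, and the file name and seed are placeholders:

conv = sourceconverter.RuptureConverter(
    rupture_mesh_spacing=2.0, complex_fault_mesh_spacing=2.0)
ebrs = _get_ebruptures('rupture.xml', conv=conv, ses_seed=42)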
Example #34
def main(directory, dry_run=False, multipoint=False):
    """
    Upgrade all the NRML files contained in the given directory to the latest
    NRML version. Works by walking all subdirectories.
    WARNING: there is no downgrade!
    """
    for cwd, dirs, files in os.walk(directory):
        for f in files:
            path = os.path.join(cwd, f)
            if f.endswith('.xml'):
                ip = iterparse(path, events=('start', ))
                next(ip)  # read node zero
                try:
                    fulltag = next(ip)[1].tag  # tag of the first node
                    xmlns, tag = fulltag.split('}')
                except Exception:  # not a NRML file
                    xmlns, tag = '', ''
                if xmlns[1:] == NRML05:  # already upgraded
                    if 'sourceModel' in tag and multipoint:
                        print('upgrading to multiPointSources', path)
                        node0 = nrml.read(path)[0]
                        sourceconverter.update_source_model(node0, path)
                        with open(path, 'wb') as f:
                            nrml.write([node0], f, gml=True)
                elif 'nrml/0.4' in xmlns and ('vulnerability' in tag
                                              or 'fragility' in tag
                                              or 'sourceModel' in tag):
                    if not dry_run:
                        print('Upgrading', path)
                        try:
                            upgrade_file(path, multipoint)
                        except Exception as exc:
                            print(exc)  # report the file before re-raising
                            raise
                    else:
                        print('Not upgrading', path)
Example #35
def get_input_files(oqparam, hazard=False):
    """
    :param oqparam: an OqParam instance
    :param hazard: if True, consider only the hazard files
    :returns: input path names in a specific order
    """
    fnames = set()  # files entering the checksum
    uri = oqparam.shakemap_uri
    if isinstance(uri, dict) and uri:
        # local files
        for key, val in uri.items():
            if key == 'fname' or key.endswith('_url'):
                val = val.replace('file://', '')
                fname = os.path.join(oqparam.base_path, val)
                if os.path.exists(fname):
                    uri[key] = fname
                    fnames.add(fname)
        # additional separate shapefiles
        if uri['kind'] == 'shapefile' and not uri['fname'].endswith('.zip'):
            fnames.update(get_shapefiles(os.path.dirname(fname)))

    for key in oqparam.inputs:
        fname = oqparam.inputs[key]
        if hazard and key not in ('source_model_logic_tree', 'gsim_logic_tree',
                                  'source'):
            continue
        # collect .hdf5 tables for the GSIMs, if any
        elif key == 'gsim_logic_tree':
            gsim_lt = get_gsim_lt(oqparam)
            for gsims in gsim_lt.values.values():
                for gsim in gsims:
                    for k, v in gsim.kwargs.items():
                        if k.endswith(('_file', '_table')):
                            fnames.add(v)
            fnames.add(fname)
        elif key == 'source_model':  # UCERF
            f = oqparam.inputs['source_model']
            fnames.add(f)
            fname = nrml.read(f).sourceModel.UCERFSource['filename']
            fnames.add(os.path.join(os.path.dirname(f), fname))
        elif key == 'exposure':  # fname is a list
            for exp in asset.Exposure.read_headers(fname):
                fnames.update(exp.datafiles)
            fnames.update(fname)
        elif isinstance(fname, dict):
            fnames.update(fname.values())
        elif isinstance(fname, list):
            for f in fname:
                if f == oqparam.input_dir:
                    raise InvalidFile('%s: there is an empty path in %s' %
                                      (oqparam.inputs['job_ini'], key))
            fnames.update(fname)
        elif key == 'source_model_logic_tree':
            args = (fname, oqparam.random_seed,
                    oqparam.number_of_logic_tree_samples,
                    oqparam.sampling_method)
            try:
                smlt = smlt_cache[args]
            except KeyError:
                smlt = smlt_cache[args] = logictree.SourceModelLogicTree(*args)
            fnames.update(smlt.hdf5_files)
            fnames.update(smlt.info.smpaths)
            fnames.add(fname)
        else:
            fnames.add(fname)
    return sorted(fnames)
Example #36
File: asset.py Project: gem/oq-engine
def _get_exposure(fname, stop=None):
    """
    :param fname:
        path of the XML file containing the exposure
    :param stop:
        node at which to stop parsing (or None)
    :returns:
        a pair (Exposure instance, list of asset nodes)
    """
    [exposure] = nrml.read(fname, stop=stop)
    if not exposure.tag.endswith('exposureModel'):
        raise InvalidFile('%s: expected exposureModel, got %s' %
                          (fname, exposure.tag))
    description = exposure.description
    try:
        conversions = exposure.conversions
    except AttributeError:
        conversions = Node('conversions', nodes=[Node('costTypes', [])])
    try:
        inslimit = conversions.insuranceLimit
    except AttributeError:
        inslimit = Node('insuranceLimit', text=True)
    try:
        deductible = conversions.deductible
    except AttributeError:
        deductible = Node('deductible', text=True)
    try:
        area = conversions.area
    except AttributeError:
        # NB: the area type cannot be an empty string because when sending
        # around the CostCalculator object we would run into this numpy bug
        # about pickling dictionaries with empty strings:
        # https://github.com/numpy/numpy/pull/5475
        area = Node('area', dict(type='?'))
    try:
        occupancy_periods = exposure.occupancyPeriods.text or ''
    except AttributeError:
        occupancy_periods = ''
    try:
        tagNames = exposure.tagNames
    except AttributeError:
        tagNames = Node('tagNames', text='')
    tagnames = ~tagNames or []
    if set(tagnames) & {'taxonomy', 'exposure', 'country'}:
        raise InvalidFile('taxonomy, exposure and country are reserved names; '
                          'you cannot use them in <tagNames>: %s' % fname)
    tagnames.insert(0, 'taxonomy')

    # read the cost types and make some check
    cost_types = []
    retrofitted = False
    for ct in conversions.costTypes:
        with context(fname, ct):
            ctname = ct['name']
            if ctname == 'structural' and 'retrofittedType' in ct.attrib:
                if ct['retrofittedType'] != ct['type']:
                    raise ValueError(
                        'The retrofittedType %s is different from the type'
                        '%s' % (ct['retrofittedType'], ct['type']))
                if ct['retrofittedUnit'] != ct['unit']:
                    raise ValueError(
                        'The retrofittedUnit %s is different from the unit'
                        '%s' % (ct['retrofittedUnit'], ct['unit']))
                retrofitted = True
            cost_types.append(
                (ctname, valid.cost_type_type(ct['type']), ct['unit']))
    if 'occupants' in cost_types:
        cost_types.append(('occupants', 'per_area', 'people'))
    cost_types.sort(key=operator.itemgetter(0))
    cost_types = numpy.array(cost_types, cost_type_dt)
    insurance_limit_is_absolute = il = inslimit.get('isAbsolute')
    deductible_is_absolute = de = deductible.get('isAbsolute')
    cc = CostCalculator(
        {}, {}, {},
        True if de is None else de,
        True if il is None else il,
        {name: i for i, name in enumerate(tagnames)},
    )
    for ct in cost_types:
        name = ct['name']  # structural, nonstructural, ...
        cc.cost_types[name] = ct['type']  # aggregated, per_asset, per_area
        cc.area_types[name] = area['type']
        cc.units[name] = ct['unit']
    assets = []
    asset_refs = []
    exp = Exposure(
        exposure['id'], exposure['category'],
        description.text, cost_types, occupancy_periods,
        insurance_limit_is_absolute, deductible_is_absolute, retrofitted,
        area.attrib, assets, asset_refs, cc, TagCollection(tagnames))
    assets_text = exposure.assets.text.strip()
    if assets_text:
        # the <assets> tag contains a list of file names
        dirname = os.path.dirname(fname)
        exp.datafiles = [os.path.join(dirname, f) for f in assets_text.split()]
    else:
        exp.datafiles = []
    return exp, exposure.assets
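A hedged sketch of calling the helper directly (the file name is a
placeholder):

exp, asset_nodes = _get_exposure('exposure_model.xml')
print(exp.cost_types)    # numpy array of (name, type, unit) records
print(len(asset_nodes))  # number of inline <asset> nodes, if any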
Example #37
File: info.py Project: jbyronar/oq-engine
def main(what, report=False):
    """
    Give information about the passed keyword or filename
    """
    if os.environ.get('OQ_DISTRIBUTE') not in ('no', 'processpool'):
        os.environ['OQ_DISTRIBUTE'] = 'processpool'
    if what == 'calculators':
        for calc in sorted(base.calculators):
            print(calc)
    elif what == 'gsims':
        for gs in gsim.get_available_gsims():
            print(gs)
    elif what == 'imts':
        for im in gen_subclasses(imt.IMT):
            print(im.__name__)
    elif what == 'views':
        for name in sorted(view):
            print(name)
    elif what == 'exports':
        dic = groupby(export, operator.itemgetter(0),
                      lambda group: [r[1] for r in group])
        n = 0
        for exporter, formats in dic.items():
            print(exporter, formats)
            n += len(formats)
        print('There are %d exporters defined.' % n)
    elif what == 'extracts':
        for key in extract:
            func = extract[key]
            if hasattr(func, '__wrapped__'):
                fm = FunctionMaker(func.__wrapped__)
            elif hasattr(func, 'func'):  # for partial objects
                fm = FunctionMaker(func.func)
            else:
                fm = FunctionMaker(func)
            print('%s(%s)%s' % (fm.name, fm.signature, fm.doc))
    elif what == 'parameters':
        params = []
        for val in vars(OqParam).values():
            if hasattr(val, 'name'):
                params.append(val)
        params.sort(key=lambda x: x.name)
        for param in params:
            print(param.name)
    elif what == 'mfds':
        for cls in gen_subclasses(BaseMFD):
            print(cls.__name__)
    elif what == 'sources':
        for cls in gen_subclasses(BaseSeismicSource):
            print(cls.__name__)
    elif os.path.isdir(what) and report:
        with Monitor('info', measuremem=True) as mon:
            with mock.patch.object(logging.root, 'info'):  # reduce logging
                do_build_reports(what)
        print(mon)
    elif what.endswith('.xml'):
        node = nrml.read(what)
        if node[0].tag.endswith('sourceModel'):
            print(source_model_info([node]))
        elif node[0].tag.endswith('logicTree'):
            sm_nodes = []
            for smpath in logictree.collect_info(what).smpaths:
                sm_nodes.append(nrml.read(smpath))
            print(source_model_info(sm_nodes))
        else:
            print(node.to_str())
    elif what.endswith(('.ini', '.zip')):
        with Monitor('info', measuremem=True) as mon:
            if report:
                print('Generated', reportwriter.build_report(what))
            else:
                print(readinput.get_oqparam(what).json())
        if mon.duration > 1:
            print(mon)
    elif what:
        print("No info for '%s'" % what)
Example #38
 def setUp(self):
     self.expected_source = nrml.read(
         os.path.join(BASE_DATA_PATH, "collapse_test_output.xml"))[0]
     self.src = self.expected_source[0][0]
Example #39
def get_source_models(oqparam, gsim_lt, source_model_lt, in_memory=True):
    """
    Build all the source models generated by the logic tree.

    :param oqparam:
        an :class:`openquake.commonlib.oqvalidation.OqParam` instance
    :param gsim_lt:
        a :class:`openquake.commonlib.logictree.GsimLogicTree` instance
    :param source_model_lt:
        a :class:`openquake.commonlib.logictree.SourceModelLogicTree` instance
    :param in_memory:
        if True, keep in memory the sources, else just collect the TRTs
    :returns:
        an iterator over :class:`openquake.commonlib.logictree.SourceModel`
        tuples
    """
    converter = sourceconverter.SourceConverter(
        oqparam.investigation_time, oqparam.rupture_mesh_spacing,
        oqparam.complex_fault_mesh_spacing, oqparam.width_of_mfd_bin,
        oqparam.area_source_discretization)
    psr = nrml.SourceModelParser(converter)

    # consider only the effective realizations
    for sm in source_model_lt.gen_source_models(gsim_lt):
        src_groups = []
        for name in sm.name.split():
            fname = possibly_gunzip(
                os.path.abspath(os.path.join(oqparam.base_path, name)))
            if in_memory:
                apply_unc = source_model_lt.make_apply_uncertainties(sm.path)
                try:
                    logging.info('Parsing %s', fname)
                    src_groups.extend(psr.parse_src_groups(fname, apply_unc))
                except ValueError as e:
                    if str(e) in ('Surface does not conform with Aki & '
                                  'Richards convention',
                                  'Edges points are not in the right order'):
                        raise InvalidFile('''\
        %s: %s. Probably you are using an obsolete model.
        In that case you can fix the file with the command
        python -m openquake.engine.tools.correct_complex_sources %s
        ''' % (fname, e, fname))
                    else:
                        raise
            else:  # just collect the TRT models
                smodel = nrml.read(fname).sourceModel
                if smodel[0].tag.endswith('sourceGroup'):  # NRML 0.5 format
                    for sg_node in smodel:
                        sg = sourceconverter.SourceGroup(
                            sg_node['tectonicRegion'])
                        sg.sources = sg_node.nodes
                        src_groups.append(sg)
                else:  # NRML 0.4 format: smodel is a list of source nodes
                    src_groups.extend(
                        sourceconverter.SourceGroup.collect(smodel))
        num_sources = sum(len(sg.sources) for sg in src_groups)
        sm.src_groups = src_groups
        trts = [mod.trt for mod in src_groups]
        source_model_lt.tectonic_region_types.update(trts)
        logging.info(
            'Processed source model %d with %d potential gsim path(s) and %d '
            'sources', sm.ordinal + 1, sm.num_gsim_paths, num_sources)

        gsim_file = oqparam.inputs.get('gsim_logic_tree')
        if gsim_file:  # check TRTs
            for src_group in src_groups:
                if src_group.trt not in gsim_lt.values:
                    raise ValueError(
                        "Found in %r a tectonic region type %r inconsistent "
                        "with the ones in %r" % (sm, src_group.trt, gsim_file))
        yield sm

    # log if some source file is being used more than once
    for fname, hits in psr.fname_hits.items():
        if hits > 1:
            logging.info('%s has been considered %d times', fname, hits)
Example #40
    for col_str in header:
        col = col_str.strip().split(':')
        n = len(col)
        if n == 1:  # default dtype and no shape
            col = [col[0], 'float32', '']
        elif n == 2:
            if castable_to_int(col[1]):  # default dtype and shape
                col = [col[0], 'float32', col[1]]
            else:  # dtype and no shape
                col = [col[0], col[1], '']
        elif n > 3:
            raise ValueError('Invalid column description: %s' % col_str)
        field = col[0]
        numpytype = col[1]
        shape = () if not col[2].strip() else (int(col[2]),)
        triples.append((field, numpytype, shape))
        fields.append(field)
    return fields, numpy.dtype(triples)


if __name__ == '__main__':  # pretty print of NRML files
    import sys
    import shutil
    from openquake.hazardlib import nrml
    nrmlfiles = sys.argv[1:]
    for fname in nrmlfiles:
        node = nrml.read(fname)
        shutil.copy(fname, fname + '.bak')
        with open(fname, 'wb') as out:
            nrml.write(list(node), out)
Example #41
def get_site_model(oqparam):
    """
    Convert the NRML file into an array of site parameters.

    :param oqparam:
        an :class:`openquake.commonlib.oqvalidation.OqParam` instance
    :returns:
        an array with fields lon, lat, vs30, ...
    """
    req_site_params = get_gsim_lt(oqparam).req_site_params
    if 'amplification' in oqparam.inputs:
        req_site_params.add('ampcode')
    arrays = []
    for fname in oqparam.inputs['site_model']:
        if isinstance(fname, str) and fname.endswith('.csv'):
            sm = hdf5.read_csv(fname, site.site_param_dt).array
            sm['lon'] = numpy.round(sm['lon'], 5)
            sm['lat'] = numpy.round(sm['lat'], 5)
            dupl = get_duplicates(sm, 'lon', 'lat')
            if dupl:
                raise InvalidFile(
                    'Found duplicate sites %s in %s' % (dupl, fname))
            if 'site_id' in sm.dtype.names:
                raise InvalidFile('%s: you passed a sites.csv file instead of '
                                  'a site_model.csv file!' % fname)
            z = numpy.zeros(len(sm), sorted(sm.dtype.descr))
            for name in z.dtype.names:  # reorder the fields
                z[name] = sm[name]
            arrays.append(z)
            continue
        nodes = nrml.read(fname).siteModel
        params = [valid.site_param(node.attrib) for node in nodes]
        missing = req_site_params - set(params[0])
        if 'vs30measured' in missing:  # use a default of False
            missing -= {'vs30measured'}
            for param in params:
                param['vs30measured'] = False
        if 'backarc' in missing:  # use a default of False
            missing -= {'backarc'}
            for param in params:
                param['backarc'] = False
        if 'ampcode' in missing:  # use a default of b''
            missing -= {'ampcode'}
            for param in params:
                param['ampcode'] = b''
        if missing:
            raise InvalidFile('%s: missing parameter %s' %
                              (oqparam.inputs['site_model'],
                               ', '.join(missing)))
        # NB: the sorted in sorted(params[0]) is essential, otherwise there is
        # a heisenbug in scenario/test_case_4
        site_model_dt = numpy.dtype([(p, site.site_param_dt[p])
                                     for p in sorted(params[0])])
        sm = numpy.array([tuple(param[name] for name in site_model_dt.names)
                          for param in params], site_model_dt)
        dupl = "\n".join(
            '%s %s' % loc for loc, n in countby(sm, 'lon', 'lat').items()
            if n > 1)
        if dupl:
            raise InvalidFile('There are duplicated sites in %s:\n%s' %
                              (fname, dupl))
        arrays.append(sm)
    return numpy.concatenate(arrays)
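
For reference, the CSV branch above accepts a file like the following sketch (the column set is illustrative, since the required site parameters depend on the GSIMs in use; duplicated lon/lat pairs raise InvalidFile):

from openquake.baselib import hdf5
from openquake.hazardlib import site

# site_model.csv (placeholder path) containing, for instance:
#   lon,lat,vs30,z1pt0,z2pt5
#   9.15000,45.16667,760.0,100.0,1.0
#   9.15333,45.12200,400.0,250.0,2.0
sm = hdf5.read_csv('site_model.csv', site.site_param_dt).array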
Example #42
    def test_nrml(self):
        # can read and write a NRML file converted into a Node object
        xmlfile = io.BytesIO(b"""\
<?xml version='1.0' encoding='utf-8'?>
<nrml xmlns="http://openquake.org/xmlns/nrml/0.4"
      xmlns:gml="http://www.opengis.net/gml">
  <exposureModel
      id="my_exposure_model_for_population"
      category="population"
      taxonomySource="fake population datasource">

    <description>
      Sample population
    </description>

    <assets>
      <asset id="asset_01" number="7" taxonomy="IT-PV">
          <location lon="9.15000" lat="45.16667" />
      </asset>

      <asset id="asset_02" number="7" taxonomy="IT-CE">
          <location lon="9.15333" lat="45.12200" />
      </asset>
    </assets>
  </exposureModel>
</nrml>
""")
        root = read(xmlfile)

        tag, version = get_tag_version(root[0])
        self.assertEqual(tag, 'exposureModel')
        self.assertEqual(version, 'nrml/0.4')

        outfile = io.BytesIO()
        node_to_xml(root, outfile, {})

        expected = b"""\
<?xml version="1.0" encoding="utf-8"?>
<nrml
xmlns="http://openquake.org/xmlns/nrml/0.4"
xmlns:gml="http://www.opengis.net/gml"
>
    <exposureModel
    category="population"
    id="my_exposure_model_for_population"
    taxonomySource="fake population datasource"
    >
        <description>
            Sample population
        </description>
        <assets>
            <asset
            id="asset_01"
            number="7.000000000E+00"
            taxonomy="IT-PV"
            >
                <location lat="4.516667000E+01" lon="9.150000000E+00"/>
            </asset>
            <asset
            id="asset_02"
            number="7.000000000E+00"
            taxonomy="IT-CE"
            >
                <location lat="4.512200000E+01" lon="9.153330000E+00"/>
            </asset>
        </assets>
    </exposureModel>
</nrml>
"""
        self.assertEqual(outfile.getvalue(), expected)
Example #43
def _get_exposure(fname, stop=None):
    """
    :param fname:
        path of the XML file containing the exposure
    :param stop:
        node at which to stop parsing (or None)
    :returns:
        a pair (Exposure instance, list of asset nodes)
    """
    [exposure] = nrml.read(fname, stop=stop)
    if not exposure.tag.endswith('exposureModel'):
        raise InvalidFile('%s: expected exposureModel, got %s' %
                          (fname, exposure.tag))
    description = exposure.description
    try:
        conversions = exposure.conversions
    except AttributeError:
        conversions = Node('conversions', nodes=[Node('costTypes', [])])
    try:
        area = conversions.area
    except AttributeError:
        # NB: the area type cannot be an empty string because when sending
        # around the CostCalculator object we would run into this numpy bug
        # about pickling dictionaries with empty strings:
        # https://github.com/numpy/numpy/pull/5475
        area = Node('area', dict(type='?'))
    try:
        occupancy_periods = exposure.occupancyPeriods.text or ''
    except AttributeError:
        occupancy_periods = ''
    try:
        tagNames = exposure.tagNames
    except AttributeError:
        tagNames = Node('tagNames', text='')
    tagnames = ['id'] + (~tagNames or [])
    if set(tagnames) & {'taxonomy', 'exposure', 'country'}:
        raise InvalidFile('taxonomy, exposure and country are reserved '
                          'names: you cannot use them in <tagNames>: %s'
                          % fname)
    tagnames.insert(0, 'taxonomy')

    # read the cost types and make some checks
    cost_types = []
    retrofitted = False
    for ct in conversions.costTypes:
        with context(fname, ct):
            ctname = ct['name']
            if ctname == 'structural' and 'retrofittedType' in ct.attrib:
                if ct['retrofittedType'] != ct['type']:
                    raise ValueError(
                        'The retrofittedType %s is different from the '
                        'type %s' % (ct['retrofittedType'], ct['type']))
                if ct['retrofittedUnit'] != ct['unit']:
                    raise ValueError(
                        'The retrofittedUnit %s is different from the '
                        'unit %s' % (ct['retrofittedUnit'], ct['unit']))
                retrofitted = True
            cost_types.append(
                (ctname, valid.cost_type_type(ct['type']), ct['unit']))
    if 'occupants' in cost_types:
        cost_types.append(('occupants', 'per_area', 'people'))
    cost_types.sort(key=operator.itemgetter(0))
    cost_types = numpy.array(cost_types, cost_type_dt)
    cc = CostCalculator({}, {}, {},
                        {name: i
                         for i, name in enumerate(tagnames)})
    for ct in cost_types:
        name = ct['name']  # structural, nonstructural, ...
        cc.cost_types[name] = ct['type']  # aggregated, per_asset, per_area
        cc.area_types[name] = area['type']
        cc.units[name] = ct['unit']
    exp = Exposure(exposure['id'], exposure['category'], description.text,
                   cost_types, occupancy_periods, retrofitted, area.attrib, [],
                   [], cc, TagCollection(tagnames))
    assets_text = exposure.assets.text.strip()
    if assets_text:
        # the <assets> tag contains a list of file names
        dirname = os.path.dirname(fname)
        exp.datafiles = [os.path.join(dirname, f) for f in assets_text.split()]
    else:
        exp.datafiles = []
    return exp, exposure.assets
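
A minimal exposure document this parser should accept, as a sketch (ids and values are illustrative; <conversions> and <tagNames> may be omitted thanks to the try/except blocks, and a non-empty <assets> text is interpreted as a list of CSV file names):

from openquake.baselib.general import gettemp

fname = gettemp('''<?xml version="1.0" encoding="utf-8"?>
<nrml xmlns="http://openquake.org/xmlns/nrml/0.5">
  <exposureModel id="ex1" category="buildings" taxonomySource="GEM">
    <description>Sample exposure</description>
    <conversions>
      <costTypes>
        <costType name="structural" type="per_asset" unit="USD"/>
      </costTypes>
    </conversions>
    <assets>assets.csv</assets>
  </exposureModel>
</nrml>''')
exp, assets = _get_exposure(fname)  # assets.csv is resolved relative to fname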
Example #44
def _get_exposure(fname, stop=None):
    """
    :param fname:
        path of the XML file containing the exposure
    :param stop:
        node at which to stop parsing (or None)
    :returns:
        a pair (Exposure instance, list of asset nodes)
    """
    [exposure] = nrml.read(fname, stop=stop)
    if not exposure.tag.endswith('exposureModel'):
        raise InvalidFile('%s: expected exposureModel, got %s' %
                          (fname, exposure.tag))
    description = exposure.description
    try:
        conversions = exposure.conversions
    except AttributeError:
        conversions = Node('conversions', nodes=[Node('costTypes', [])])
    try:
        inslimit = conversions.insuranceLimit
    except AttributeError:
        inslimit = Node('insuranceLimit', text=True)
    try:
        deductible = conversions.deductible
    except AttributeError:
        deductible = Node('deductible', text=True)
    try:
        area = conversions.area
    except AttributeError:
        # NB: the area type cannot be an empty string because when sending
        # around the CostCalculator object we would run into this numpy bug
        # about pickling dictionaries with empty strings:
        # https://github.com/numpy/numpy/pull/5475
        area = Node('area', dict(type='?'))
    try:
        occupancy_periods = exposure.occupancyPeriods.text or ''
    except AttributeError:
        occupancy_periods = ''
    try:
        tagNames = exposure.tagNames
    except AttributeError:
        tagNames = Node('tagNames', text='')
    tagnames = ~tagNames or []
    tagnames.insert(0, 'taxonomy')

    # read the cost types and make some checks
    cost_types = []
    retrofitted = False
    for ct in conversions.costTypes:
        with context(fname, ct):
            ctname = ct['name']
            if ctname == 'structural' and 'retrofittedType' in ct.attrib:
                if ct['retrofittedType'] != ct['type']:
                    raise ValueError(
                        'The retrofittedType %s is different from the '
                        'type %s' % (ct['retrofittedType'], ct['type']))
                if ct['retrofittedUnit'] != ct['unit']:
                    raise ValueError(
                        'The retrofittedUnit %s is different from the '
                        'unit %s' % (ct['retrofittedUnit'], ct['unit']))
                retrofitted = True
            cost_types.append(
                (ctname, valid.cost_type_type(ct['type']), ct['unit']))
    if 'occupants' in cost_types:
        cost_types.append(('occupants', 'per_area', 'people'))
    cost_types.sort(key=operator.itemgetter(0))
    cost_types = numpy.array(cost_types, cost_type_dt)
    insurance_limit_is_absolute = il = inslimit.get('isAbsolute')
    deductible_is_absolute = de = deductible.get('isAbsolute')
    cc = CostCalculator(
        {},
        {},
        {},
        True if de is None else de,
        True if il is None else il,
        {name: i
         for i, name in enumerate(tagnames)},
    )
    for ct in cost_types:
        name = ct['name']  # structural, nonstructural, ...
        cc.cost_types[name] = ct['type']  # aggregated, per_asset, per_area
        cc.area_types[name] = area['type']
        cc.units[name] = ct['unit']
    assets = []
    asset_refs = []
    exp = Exposure(exposure['id'], exposure['category'], description.text,
                   cost_types, occupancy_periods, insurance_limit_is_absolute,
                   deductible_is_absolute, retrofitted, area.attrib, assets,
                   asset_refs, cc, TagCollection(tagnames))
    return exp, exposure.assets
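
Relative to the previous example, this variant also reads the insurance attributes; they come from a conversions fragment like this sketch (isAbsolute defaults to True when the node or the attribute is missing):

# <conversions>
#   <deductible isAbsolute="false"/>
#   <insuranceLimit isAbsolute="true"/>
#   <costTypes>...</costTypes>
# </conversions>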
Example #45
def info(calculators, gsims, views, exports, extracts, report, input_file=''):
    """
    Give information. You can pass the name of an available calculator,
    a job.ini file, or a zip archive with the input files.
    """
    if calculators:
        for calc in sorted(base.calculators):
            print(calc)
    if gsims:
        for gs in gsim.get_available_gsims():
            print(gs)
    if views:
        for name in sorted(view):
            print(name)
    if exports:
        dic = groupby(export, operator.itemgetter(0),
                      lambda group: [r[1] for r in group])
        n = 0
        for exporter, formats in dic.items():
            print(exporter, formats)
            n += len(formats)
        print('There are %d exporters defined.' % n)
    if extracts:
        for key in extract:
            func = extract[key]
            if hasattr(func, '__wrapped__'):
                fm = FunctionMaker(func.__wrapped__)
            else:
                fm = FunctionMaker(func)
            print('%s(%s)%s' % (fm.name, fm.signature, fm.doc))
    if os.path.isdir(input_file) and report:
        with Monitor('info', measuremem=True) as mon:
            with mock.patch.object(logging.root, 'info'):  # reduce logging
                do_build_reports(input_file)
        print(mon)
    elif input_file.endswith('.xml'):
        node = nrml.read(input_file)
        if node[0].tag.endswith('sourceModel'):
            if node['xmlns'].endswith('nrml/0.4'):
                raise InvalidFile(
                    '%s is in NRML 0.4 format, please run the following '
                    'command:\noq upgrade_nrml %s' %
                    (input_file, os.path.dirname(input_file) or '.'))
            print(source_model_info([node[0]]))
        elif node[0].tag.endswith('logicTree'):
            nodes = [
                nrml.read(sm_path)[0]
                for sm_path in logictree.collect_info(input_file).smpaths
            ]
            print(source_model_info(nodes))
        else:
            print(node.to_str())
    elif input_file.endswith(('.ini', '.zip')):
        with Monitor('info', measuremem=True) as mon:
            if report:
                print('Generated', reportwriter.build_report(input_file))
            else:
                print_csm_info(input_file)
        if mon.duration > 1:
            print(mon)
    elif input_file:
        print("No info for '%s'" % input_file)
Example #46
def reduce_source_model(smlt_file, source_ids, remove=True):
    """
    Extract sources from the composite source model.

    :param smlt_file: path to a source model logic tree file
    :param source_ids: dictionary source_id -> records (src_id, code)
    :param remove: if True, remove sm.xml files containing no sources
    :returns: the number of sources satisfying the filter vs the total
    """
    if isinstance(source_ids, dict):  # in oq reduce_sm
        def ok(src_node):
            code = tag2code[re.search(r'\}(\w\w)', src_node.tag).group(1)]
            arr = source_ids.get(src_node['id'])
            if arr is None:
                return False
            return (arr['code'] == code).any()
    else:  # list of source IDs, in extract_source
        def ok(src_node):
            return src_node['id'] in source_ids

    good, total = 0, 0
    to_remove = set()
    for paths in logictree.collect_info(smlt_file).smpaths.values():
        for path in paths:
            logging.info('Reading %s', path)
            root = nrml.read(path)
            model = Node('sourceModel', root[0].attrib)
            origmodel = root[0]
            if root['xmlns'] == 'http://openquake.org/xmlns/nrml/0.4':
                for src_node in origmodel:
                    total += 1
                    if ok(src_node):
                        good += 1
                        model.nodes.append(src_node)
            else:  # nrml/0.5
                for src_group in origmodel:
                    sg = copy.copy(src_group)
                    sg.nodes = []
                    weights = src_group.get('srcs_weights')
                    if weights:
                        assert len(weights) == len(src_group.nodes)
                    else:
                        weights = [1] * len(src_group.nodes)
                    src_group['srcs_weights'] = reduced_weights = []
                    for src_node, weight in zip(src_group, weights):
                        total += 1
                        if ok(src_node):
                            good += 1
                            sg.nodes.append(src_node)
                            reduced_weights.append(weight)
                    if sg.nodes:
                        model.nodes.append(sg)
            shutil.copy(path, path + '.bak')
            if model:
                with open(path, 'wb') as f:
                    nrml.write([model], f, xmlns=root['xmlns'])
            elif remove:  # remove the files completely reduced
                to_remove.add(path)
    if good:
        for path in to_remove:
            os.remove(path)
    return good, total
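
Usage sketch (the file name and source ids are placeholders; with a plain list of ids the second ok filter is used, and .bak copies of the reduced files are kept):

good, total = reduce_source_model('ssmLT.xml', ['src_A', 'src_B'],
                                  remove=False)
print('%d of %d sources kept' % (good, total))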
Example #47
# this is simple and without error checking for the moment
def read_array(fname, sep=','):
    r"""
    Convert a CSV file without header into a numpy array of floats.

    >>> from openquake.baselib.general import writetmp
    >>> print(read_array(writetmp('.1 .2, .3 .4, .5 .6\n')))
    [[[ 0.1  0.2]
      [ 0.3  0.4]
      [ 0.5  0.6]]]
    """
    with open(fname) as f:
        records = []
        for line in f:
            row = line.split(sep)
            record = [list(map(float, col.split())) for col in row]
            records.append(record)
        return numpy.array(records)


if __name__ == '__main__':  # pretty print of NRML files
    import sys
    import shutil
    from openquake.hazardlib import nrml
    nrmlfiles = sys.argv[1:]
    for fname in nrmlfiles:
        node = nrml.read(fname)
        shutil.copy(fname, fname + '.bak')
        with open(fname, 'wb') as out:
            nrml.write(list(node), out)
Example #49
def expo2csv(job_ini):
    """
    Convert an exposure in XML format into CSV format
    """
    oq = readinput.get_oqparam(job_ini)
    exposure = readinput.get_exposure(oq)
    rows = []
    header = ['id', 'lon', 'lat', 'number']
    area = exposure.area['type'] != '?'
    if area:
        header.append('area')
    for costname in exposure.cost_types['name']:
        if costname != 'occupants':
            header.append(costname)
            if exposure.deductible_is_absolute is not None:
                header.append(costname + '-deductible')
            if exposure.insurance_limit_is_absolute is not None:
                header.append(costname + '-insured_limit')
    if exposure.retrofitted:
        header.append('retrofitted')
    header.extend(exposure.occupancy_periods)
    header.extend(exposure.tagcol.tagnames)
    for asset, asset_ref in zip(exposure.assets, exposure.asset_refs):
        row = [asset_ref.decode('utf8'), asset.location[0], asset.location[1],
               asset.number]
        if area:
            row.append(asset.area)
        for costname in exposure.cost_types['name']:
            if costname != 'occupants':
                row.append(asset.values[costname])
                if exposure.deductible_is_absolute is not None:
                    row.append(asset.deductibles[costname])
                if exposure.insurance_limit_is_absolute is not None:
                    row.append(asset.insurance_limits[costname])
        if exposure.retrofitted:
            row.append(asset._retrofitted)
        for time_event in exposure.occupancy_periods:
            row.append(asset.values['occupants_' + time_event])
        for tagname, tagidx in zip(exposure.tagcol.tagnames, asset.tagidxs):
            tags = getattr(exposure.tagcol, tagname)
            row.append(tags[tagidx])
        rows.append(row)

    with performance.Monitor('expo2csv') as mon:
        # save exposure data as csv
        csvname = oq.inputs['exposure'].replace('.xml', '.csv')
        print('Saving %s' % csvname)
        with codecs.open(csvname, 'wb', encoding='utf8') as f:
            writer = csv.writer(f)
            writer.writerow(header)
            for row in rows:
                writer.writerow(row)

        # save exposure header as xml
        head = nrml.read(oq.inputs['exposure'], stop='assets')
        xmlname = oq.inputs['exposure'].replace('.xml', '-header.xml')
        print('Saving %s' % xmlname)
        head[0].assets.text = os.path.basename(csvname)
        with open(xmlname, 'wb') as f:
            nrml.write(head, f)
    print(mon)
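
Usage sketch ('job.ini' is a placeholder): assuming the job file points to exposure.xml, this writes exposure.csv and an exposure-header.xml whose <assets> text references the CSV.

expo2csv('job.ini')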
Example #50
def get_source_models(oqparam, gsim_lt, source_model_lt, in_memory=True):
    """
    Build all the source models generated by the logic tree.

    :param oqparam:
        an :class:`openquake.commonlib.oqvalidation.OqParam` instance
    :param gsim_lt:
        a :class:`openquake.commonlib.logictree.GsimLogicTree` instance
    :param source_model_lt:
        a :class:`openquake.commonlib.logictree.SourceModelLogicTree` instance
    :param in_memory:
        if True, keep in memory the sources, else just collect the TRTs
    :returns:
        an iterator over :class:`openquake.commonlib.logictree.LtSourceModel`
        tuples
    """
    converter = sourceconverter.SourceConverter(
        oqparam.investigation_time, oqparam.rupture_mesh_spacing,
        oqparam.complex_fault_mesh_spacing, oqparam.width_of_mfd_bin,
        oqparam.area_source_discretization)
    psr = nrml.SourceModelParser(converter)

    # consider only the effective realizations
    smlt_dir = os.path.dirname(source_model_lt.filename)
    for sm in source_model_lt.gen_source_models(gsim_lt):
        src_groups = []
        for name in sm.names.split():
            fname = os.path.abspath(os.path.join(smlt_dir, name))
            if in_memory:
                apply_unc = source_model_lt.make_apply_uncertainties(sm.path)
                logging.info('Reading %s', fname)
                src_groups.extend(psr.parse_src_groups(fname, apply_unc))
            else:  # just collect the TRT models
                smodel = nrml.read(fname).sourceModel
                if smodel[0].tag.endswith('sourceGroup'):  # NRML 0.5 format
                    for sg_node in smodel:
                        sg = sourceconverter.SourceGroup(
                            sg_node['tectonicRegion'])
                        sg.sources = sg_node.nodes
                        src_groups.append(sg)
                else:  # NRML 0.4 format: smodel is a list of source nodes
                    src_groups.extend(
                        sourceconverter.SourceGroup.collect(smodel))
        num_sources = sum(len(sg.sources) for sg in src_groups)
        sm.src_groups = src_groups
        trts = [mod.trt for mod in src_groups]
        source_model_lt.tectonic_region_types.update(trts)
        logging.info(
            'Processed source model %d with %d potential gsim path(s) and %d '
            'sources', sm.ordinal + 1, sm.num_gsim_paths, num_sources)

        gsim_file = oqparam.inputs.get('gsim_logic_tree')
        if gsim_file:  # check TRTs
            for src_group in src_groups:
                if src_group.trt not in gsim_lt.values:
                    raise ValueError(
                        "Found in %r a tectonic region type %r inconsistent "
                        "with the ones in %r" % (sm, src_group.trt, gsim_file))
        yield sm

    # check investigation_time
    psr.check_nonparametric_sources(oqparam.investigation_time)

    # log if some source file is being used more than once
    dupl = 0
    for fname, hits in psr.fname_hits.items():
        if hits > 1:
            logging.info('%s has been considered %d times', fname, hits)
            if not psr.changed_sources:
                dupl += hits
    if dupl and not oqparam.optimize_same_id_sources:
        logging.warning('You are doing redundant calculations: please make '
                        'sure that different sources have different IDs and '
                        'set optimize_same_id_sources=true in your .ini file')
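
Usage sketch: get_source_models is a generator, so it must be consumed for the parsing and the final checks to run (oqparam, gsim_lt and source_model_lt stand for objects built elsewhere from the job configuration):

for sm in get_source_models(oqparam, gsim_lt, source_model_lt,
                            in_memory=False):
    print(sm.names, len(sm.src_groups))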
Example #51
def import_exposure_file(nrml_file):
    """
    Import exposure from a NRML file
    """
    return import_exposure_model(read(nrml_file).exposureModel, nrml_file)
Example #52
File: info.py Project: pheresi/oq-engine
def main(what, report=False):
    """
    Give information about the passed keyword or filename
    """
    if os.environ.get('OQ_DISTRIBUTE') not in ('no', 'processpool'):
        os.environ['OQ_DISTRIBUTE'] = 'processpool'
    if what == 'calculators':
        for calc in sorted(base.calculators):
            print(calc)
    elif what == 'gsims':
        for gs in gsim.get_available_gsims():
            print(gs)
    elif what == 'portable_gsims':
        for gs in gsim.get_portable_gsims():
            print(gs)
    elif what == 'imts':
        for im in vars(imt).values():
            if inspect.isfunction(im) and is_upper(im):
                print(im.__name__)
    elif what == 'views':
        for name in sorted(view):
            print(name)
    elif what == 'exports':
        dic = groupby(export, operator.itemgetter(0),
                      lambda group: [r[1] for r in group])
        items = [(DISPLAY_NAME.get(exporter, '?'), exporter, formats)
                 for exporter, formats in dic.items()]
        n = 0
        for dispname, exporter, formats in sorted(items):
            print(dispname, '"%s"' % exporter, formats)
            n += len(formats)
        print('There are %d exporters defined.' % n)
    elif what == 'extracts':
        for key in extract:
            func = extract[key]
            if hasattr(func, '__wrapped__'):
                fm = FunctionMaker(func.__wrapped__)
            elif hasattr(func, 'func'):  # for partial objects
                fm = FunctionMaker(func.func)
            else:
                fm = FunctionMaker(func)
            print('%s(%s)%s' % (fm.name, fm.signature, fm.doc))
    elif what == 'parameters':
        docs = OqParam.docs()
        names = set()
        for val in vars(OqParam).values():
            if hasattr(val, 'name'):
                names.add(val.name)
        params = sorted(names)
        for param in params:
            print(param)
            print(docs[param])
    elif what == 'mfds':
        for cls in gen_subclasses(BaseMFD):
            print(cls.__name__)
    elif what == 'venv':
        print(sys.prefix)
    elif what == 'sources':
        for cls in gen_subclasses(BaseSeismicSource):
            print(cls.__name__)
    elif what == 'consequences':
        known = scientific.KNOWN_CONSEQUENCES
        print('The following %d consequences are implemented:' % len(known))
        for cons in known:
            print(cons)
    elif os.path.isdir(what) and report:
        with Monitor('info', measuremem=True) as mon:
            with mock.patch.object(logging.root, 'info'):  # reduce logging
                do_build_reports(what)
        print(mon)
    elif what.endswith('.xml'):
        node = nrml.read(what)
        if node[0].tag.endswith('sourceModel'):
            print(source_model_info([node]))
        elif node[0].tag.endswith('logicTree'):
            bset = node[0][0]
            if bset.tag.endswith("logicTreeBranchingLevel"):
                bset = bset[0]
            if bset.attrib['uncertaintyType'] == 'sourceModel':
                sm_nodes = []
                for smpath in logictree.collect_info(what).smpaths:
                    sm_nodes.append(nrml.read(smpath))
                print(source_model_info(sm_nodes))
            elif bset.attrib['uncertaintyType'] == 'gmpeModel':
                print(logictree.GsimLogicTree(what))
        else:
            print(node.to_str())
    elif what.endswith(('.ini', '.zip')):
        with Monitor('info', measuremem=True) as mon:
            if report:
                print('Generated', reportwriter.build_report(what))
            else:
                print(readinput.get_oqparam(what).json())
        if mon.duration > 1:
            print(mon)
    elif what:
        print("No info for '%s'" % what)
Example #53
def info(calculators, gsims, views, exports, extracts, parameters,
         report, input_file=''):
    """
    Give information. You can pass the name of an available calculator,
    a job.ini file, or a zip archive with the input files.
    """
    if calculators:
        for calc in sorted(base.calculators):
            print(calc)
    if gsims:
        for gs in gsim.get_available_gsims():
            print(gs)
    if views:
        for name in sorted(view):
            print(name)
    if exports:
        dic = groupby(export, operator.itemgetter(0),
                      lambda group: [r[1] for r in group])
        n = 0
        for exporter, formats in dic.items():
            print(exporter, formats)
            n += len(formats)
        print('There are %d exporters defined.' % n)
    if extracts:
        for key in extract:
            func = extract[key]
            if hasattr(func, '__wrapped__'):
                fm = FunctionMaker(func.__wrapped__)
            else:
                fm = FunctionMaker(func)
            print('%s(%s)%s' % (fm.name, fm.signature, fm.doc))
    if parameters:
        params = []
        for val in vars(OqParam).values():
            if hasattr(val, 'name'):
                params.append(val)
        params.sort(key=lambda x: x.name)
        for param in params:
            print(param.name)
    if os.path.isdir(input_file) and report:
        with Monitor('info', measuremem=True) as mon:
            with mock.patch.object(logging.root, 'info'):  # reduce logging
                do_build_reports(input_file)
        print(mon)
    elif input_file.endswith('.xml'):
        node = nrml.read(input_file)
        if node[0].tag.endswith('sourceModel'):
            if node['xmlns'].endswith('nrml/0.4'):
                raise InvalidFile(
                    '%s is in NRML 0.4 format, please run the following '
                    'command:\noq upgrade_nrml %s' % (
                        input_file, os.path.dirname(input_file) or '.'))
            print(source_model_info([node[0]]))
        elif node[0].tag.endswith('logicTree'):
            nodes = [nrml.read(sm_path)[0]
                     for sm_path in logictree.collect_info(input_file).smpaths]
            print(source_model_info(nodes))
        else:
            print(node.to_str())
    elif input_file.endswith(('.ini', '.zip')):
        with Monitor('info', measuremem=True) as mon:
            if report:
                print('Generated', reportwriter.build_report(input_file))
            else:
                print_csm_info(input_file)
        if mon.duration > 1:
            print(mon)
    elif input_file:
        print("No info for '%s'" % input_file)
Example #54
def _get_exposure(fname, ok_cost_types, stop=None):
    """
    :param fname:
        path of the XML file containing the exposure
    :param ok_cost_types:
        a set of cost types (as strings)
    :param stop:
        node at which to stop parsing (or None)
    :returns:
        a pair (Exposure instance, list of asset nodes)
    """
    [exposure] = nrml.read(fname, stop=stop)
    if not exposure.tag.endswith('exposureModel'):
        raise InvalidFile('%s: expected exposureModel, got %s' %
                          (fname, exposure.tag))
    description = exposure.description
    try:
        conversions = exposure.conversions
    except AttributeError:
        conversions = Node('conversions', nodes=[Node('costTypes', [])])
    try:
        inslimit = conversions.insuranceLimit
    except AttributeError:
        inslimit = Node('insuranceLimit', text=True)
    try:
        deductible = conversions.deductible
    except AttributeError:
        deductible = Node('deductible', text=True)
    try:
        area = conversions.area
    except AttributeError:
        # NB: the area type cannot be an empty string because when sending
        # around the CostCalculator object we would run into this numpy bug
        # about pickling dictionaries with empty strings:
        # https://github.com/numpy/numpy/pull/5475
        area = Node('area', dict(type='?'))

    # read the cost types and make some checks
    cost_types = []
    for ct in conversions.costTypes:
        if ct['name'] in ok_cost_types:
            with context(fname, ct):
                cost_types.append(
                    (ct['name'], valid.cost_type_type(ct['type']), ct['unit']))
    if 'occupants' in ok_cost_types:
        cost_types.append(('occupants', 'per_area', 'people'))
    cost_types.sort(key=operator.itemgetter(0))
    cost_types = numpy.array(cost_types, cost_type_dt)
    insurance_limit_is_absolute = inslimit.attrib.get('isAbsolute', True)
    deductible_is_absolute = deductible.attrib.get('isAbsolute', True)
    time_events = set()
    cc = riskmodels.CostCalculator({}, {}, {}, deductible_is_absolute,
                                   insurance_limit_is_absolute)
    for ct in cost_types:
        name = ct['name']  # structural, nonstructural, ...
        cc.cost_types[name] = ct['type']  # aggregated, per_asset, per_area
        cc.area_types[name] = area['type']
        cc.units[name] = ct['unit']
    exp = Exposure(exposure['id'], exposure['category'], ~description,
                   cost_types, time_events, insurance_limit_is_absolute,
                   deductible_is_absolute, area.attrib, [], set(), [], cc)
    return exp, exposure.assets