Example #1
def check_export(output_id, target):
    """
    Call export and check that the exported file is valid
    """
    out_file = core.export(output_id, target, 'xml')
    nrml.read(out_file)
    return out_file
Example #2
    def test_ill_formed_rupture(self):
        rup_file = BytesIO(b'''\
<?xml version='1.0' encoding='utf-8'?>
<nrml xmlns:gml="http://www.opengis.net/gml"
      xmlns="http://openquake.org/xmlns/nrml/0.4">
    <simpleFaultRupture>
        <magnitude>7.65</magnitude>
        <rake>15.0</rake>
        <hypocenter lon="0.0" lat="0.0" depth="-5.0"/>
        <simpleFaultGeometry>
                <gml:LineString>
                    <gml:posList>
                        -124.704 40.363
                        -124.977 41.214
                        -125.140 42.096
                    </gml:posList>
                </gml:LineString>
            <dip>50.0</dip>
            <upperSeismoDepth>12.5</upperSeismoDepth>
            <lowerSeismoDepth>19.5</lowerSeismoDepth>
        </simpleFaultGeometry>
    </simpleFaultRupture>
</nrml>
''')

        # at line 7 there is an invalid depth="-5.0"
        with self.assertRaises(ValueError) as ctx:
            nrml.read(rup_file)
        self.assertIn('line 7', str(ctx.exception))
Example #3
def upgrade_file(path):
    """Upgrade to the latest NRML version"""
    node0 = nrml.read(path, chatty=False)[0]
    shutil.copy(path, path + '.bak')  # make a backup of the original file
    tag = striptag(node0.tag)
    gml = True
    if tag == 'vulnerabilityModel':
        vf_dict, cat_dict = get_vulnerability_functions_04(path)
        # below I am converting into a NRML 0.5 vulnerabilityModel
        node0 = Node(
            'vulnerabilityModel', cat_dict,
            nodes=list(map(riskmodels.obj_to_node, vf_dict.values())))
        gml = False
    elif tag == 'fragilityModel':
        node0 = riskmodels.convert_fragility_model_04(
            nrml.read(path)[0], path)
        gml = False
    elif tag == 'sourceModel':
        node0 = nrml.read(path)[0]
        dic = groupby(node0.nodes, operator.itemgetter('tectonicRegion'))
        node0.nodes = [Node('sourceGroup',
                            dict(tectonicRegion=trt, name="group %s" % i),
                            nodes=srcs)
                       for i, (trt, srcs) in enumerate(dic.items(), 1)]
    with open(path, 'w') as f:
        nrml.write([node0], f, gml=gml)
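A minimal invocation sketch (the file name is hypothetical): the function rewrites the file in place and keeps the original as a .bak copy.
# hypothetical NRML 0.4 input; rewritten in place as NRML 0.5
upgrade_file('vulnerability_model_04.xml')
# the original content is preserved in 'vulnerability_model_04.xml.bak'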
Example #5
def validate_nrml(request):
    """
    Leverage oq-risklib to check whether a given XML text is valid NRML

    :param request:
        a `django.http.HttpRequest` object containing the mandatory
        parameter 'xml_text': the text of the XML to be validated as NRML

    :returns: a JSON object, containing:
        * 'valid': a boolean indicating if the provided text is a valid NRML
        * 'error_msg': the error message, if any error was found
                       (None otherwise)
        * 'error_line': line of the given XML where the error was found
                        (None if no error was found or if it was not a
                        validation error)
    """
    xml_text = request.POST.get('xml_text')
    if not xml_text:
        return HttpResponseBadRequest(
            'Please provide the "xml_text" parameter')
    xml_file = writetmp(xml_text, suffix='.xml')
    try:
        nrml.read(xml_file)
    except etree.ParseError as exc:
        return _make_response(error_msg=exc.message.message,
                              error_line=exc.message.lineno,
                              valid=False)
    except Exception as exc:
        # get the exception message
        exc_msg = exc.args[0]
        if isinstance(exc_msg, bytes):
            exc_msg = exc_msg.decode('utf-8')   # make it a unicode object
        elif isinstance(exc_msg, unicode):
            pass
        else:
            # if it is another kind of object, it is not obvious a priori how
            # to extract the error line from it
            return _make_response(
                error_msg=unicode(exc_msg), error_line=None, valid=False)
        # if the line is not mentioned, the whole message is taken
        error_msg = exc_msg.split(', line')[0]
        # check if the exc_msg contains a line number indication
        search_match = re.search(r'line \d+', exc_msg)
        if search_match:
            error_line = int(search_match.group(0).split()[1])
        else:
            error_line = None
        return _make_response(
            error_msg=error_msg, error_line=error_line, valid=False)
    else:
        return _make_response(error_msg=None, error_line=None, valid=True)
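The helper _make_response is not shown on this page; a plausible sketch consistent with the docstring above (the name and JSON layout are assumptions, not the actual implementation) could be:
import json
from django.http import HttpResponse

def _make_response(error_msg, error_line, valid):
    # hypothetical helper matching the calls above: wrap the validation
    # outcome into the JSON object described in the docstring
    return HttpResponse(content=json.dumps(dict(error_msg=error_msg,
                                                error_line=error_line,
                                                valid=valid)),
                        content_type='application/json')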
Example #6
def parse_nrml_hazard_map(nrml_hazard_map):
    """
    Reads the NRML file and returns the metadata as a dictionary and the value
    as a numpy array of [lon, lat, IML]
    """
    node_set = read(nrml_hazard_map).hazardMap
    metadata = {
        "imt": node_set.attrib["IMT"],
        "investigation_time": float(node_set.attrib["investigationTime"])}
    for option, name in OPTIONAL_PATHS:
        if name in node_set.attrib:
            metadata[option] = node_set.attrib[name]
        else:
            metadata[option] = None
    if "SA" in metadata["imt"]:
        imt_str = node_set.attrib['IMT']
        m = re.search(r'.*\((\d*\.\d*)\).*', imt_str)
        period = m.group(1)
        metadata["sa_period"] = period
        # TODO need to fix this after the damping will be added again to
        # nrml
        # metadata['sa_damping'] = node_set.attrib['saDamping']
        metadata['sa_damping'] = '5'
    values = []
    for node in node_set.nodes:
        values.append([float(node.attrib["lon"]),
                       float(node.attrib["lat"]),
                       float(node.attrib["iml"])])
    values = numpy.array(values)
    return metadata, values
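A usage sketch with a hypothetical file name; the function returns the metadata dictionary and a numpy array with one [lon, lat, IML] row per node:
metadata, values = parse_nrml_hazard_map('hazard_map.xml')  # hypothetical path
print(metadata['imt'], metadata['investigation_time'])
print(values.shape)  # (number of nodes, 3)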
Example #7
    def test_well_formed_ruptures(self):
        converter = s.RuptureConverter(rupture_mesh_spacing=1.5,
                                       complex_fault_mesh_spacing=1.5)
        for fname in (SIMPLE_FAULT_RUPTURE, COMPLEX_FAULT_RUPTURE,
                      SINGLE_PLANE_RUPTURE, MULTI_PLANES_RUPTURE):
            [node] = nrml.read(fname)
            converter.convert_node(node)
Example #8
    def test_alternative_mfds(self):
        converter = s.SourceConverter(
            investigation_time=1.,
            rupture_mesh_spacing=1,  # km
            complex_fault_mesh_spacing=5,  # km
            width_of_mfd_bin=0.1,  # for Truncated GR MFDs
            area_source_discretization=1.)
        grp_nodes = nrml.read(ALT_MFDS_SRC_MODEL).sourceModel.nodes
        [[sflt1, sflt2], [cplx1]] = map(converter.convert_node, grp_nodes)
        # Check the values
        # Arbitrary MFD
        assert_close(cplx1.mfd.magnitudes, [8.6, 8.8, 9.0])
        assert_close(cplx1.mfd.occurrence_rates, [0.0006, 0.0008, 0.0004])
        # Youngs & Coppersmith from characteristic rate
        self.assertAlmostEqual(sflt1.mfd.b_val, 1.0)
        self.assertAlmostEqual(sflt1.mfd.a_val, 3.3877843113)
        self.assertAlmostEqual(sflt1.mfd.char_mag, 7.0)
        self.assertAlmostEqual(sflt1.mfd.char_rate, 0.005)
        self.assertAlmostEqual(sflt1.mfd.min_mag, 5.0)
        # Youngs & Coppersmith from total moment rate
        self.assertAlmostEqual(sflt2.mfd.b_val, 1.0)
        self.assertAlmostEqual(sflt2.mfd.a_val, 5.0800, 3)
        self.assertAlmostEqual(sflt2.mfd.char_mag, 7.0)
        self.assertAlmostEqual(sflt2.mfd.char_rate, 0.24615, 5)
        self.assertAlmostEqual(sflt2.mfd.min_mag, 5.0)
Example #9
def _info(name, filtersources, weightsources):
    if name in base.calculators:
        print(textwrap.dedent(base.calculators[name].__doc__.strip()))
    elif name == 'gsims':
        for gs in gsim.get_available_gsims():
            print(gs)
    elif name.endswith('.xml'):
        print(nrml.read(name).to_str())
    elif name.endswith(('.ini', '.zip')):
        oqparam = readinput.get_oqparam(name)
        if 'exposure' in oqparam.inputs:
            expo = readinput.get_exposure(oqparam)
            sitecol, assets_by_site = readinput.get_sitecol_assets(
                oqparam, expo)
        elif filtersources or weightsources:
            sitecol = readinput.get_site_collection(oqparam)
        else:
            sitecol = None
        if 'source_model_logic_tree' in oqparam.inputs:
            print('Reading the source model...')
            if weightsources:
                sp = source.SourceFilterWeighter
            elif filtersources:
                sp = source.SourceFilter
            else:
                sp = source.BaseSourceProcessor  # do nothing
            csm = readinput.get_composite_source_model(oqparam, sitecol, sp)
            assoc = csm.get_rlzs_assoc()
            dstore = datastore.Fake(vars(oqparam),
                                    rlzs_assoc=assoc,
                                    composite_source_model=csm,
                                    sitecol=sitecol)
            _print_info(dstore, filtersources, weightsources)
    else:
        print("No info for '%s'" % name)
Example #10
def get_hcurves_from_nrml(oqparam, fname):
    """
    :param oqparam:
        an :class:`openquake.commonlib.oqvalidation.OqParam` instance
    :param fname:
        an XML file containing hazard curves
    :returns:
        sitecol, curve array
    """
    hcurves_by_imt = {}
    oqparam.hazard_imtls = imtls = collections.OrderedDict()
    for hcurves in nrml.read(fname):
        imt = hcurves['IMT']
        oqparam.investigation_time = hcurves['investigationTime']
        if imt == 'SA':
            imt += '(%s)' % hcurves['saPeriod']
        imtls[imt] = ~hcurves.IMLs
        data = sorted((~node.Point.pos, ~node.poEs) for node in hcurves[1:])
        hcurves_by_imt[imt] = numpy.array([d[1] for d in data])
    n = len(hcurves_by_imt[imt])
    curves = zero_curves(n, imtls)
    for imt in imtls:
        curves[imt] = hcurves_by_imt[imt]
    lons, lats = [], []
    for xy, poes in data:
        lons.append(xy[0])
        lats.append(xy[1])
    mesh = geo.Mesh(numpy.array(lons), numpy.array(lats))
    sitecol = get_site_collection(oqparam, mesh)
    return sitecol, curves
Example #11
def upgrade_file(path):
    """Upgrade to the latest NRML version"""
    node0 = nrml.read(path, chatty=False)[0]
    shutil.copy(path, path + '.bak')  # make a backup of the original file
    tag = striptag(node0.tag)
    if tag == 'vulnerabilityModel':
        vf_dict, cat_dict = get_vulnerability_functions_04(path)
        # below I am converting into a NRML 0.5 vulnerabilityModel
        node0 = LiteralNode(
            'vulnerabilityModel', cat_dict,
            nodes=list(map(riskmodels.obj_to_node, list(vf_dict.values()))))
    elif tag == 'fragilityModel':
        node0 = riskmodels.convert_fragility_model_04(
            nrml.read(path)[0], path)
    with open(path, 'w') as f:
        nrml.write([node0], f)
Example #12
def get_vulnerability_functions(fname):
    """
    :param fname:
        path of the vulnerability file
    :returns:
        a dictionary imt, taxonomy -> vulnerability function
    """
    # NB: the vulnerabilitySetID is not a unique ID!
    # it is legitimate to have several vulnerability sets with the same ID
    # the IMTs can also be duplicated and with different levels, each
    # vulnerability function in a set will get its own levels
    imts = set()
    taxonomies = set()
    vf_dict = {}  # imt, taxonomy -> vulnerability function
    node = nrml.read(fname)
    if node['xmlns'] == 'http://openquake.org/xmlns/nrml/0.5':
        vmodel = node[0]
        for vfun in vmodel[1:]:  # the first node is the description
            imt = vfun.imls['imt']
            imls = numpy.array(~vfun.imls)
            taxonomy = vfun['id']
            loss_ratios, probs = [], []
            for probabilities in vfun[1:]:
                loss_ratios.append(probabilities['lr'])
                probs.append(valid.probabilities(~probabilities))
            probs = numpy.array(probs)
            assert probs.shape == (len(loss_ratios), len(imls))
            vf_dict[imt, taxonomy] = scientific.VulnerabilityFunctionWithPMF(
                taxonomy, imt, imls, numpy.array(loss_ratios), probs)
        return vf_dict
    # otherwise, read the old format (NRML 0.4)
    for vset in read_nodes(fname, filter_vset,
                           nodefactory['vulnerabilityModel']):
        imt_str, imls, min_iml, max_iml, imlUnit = ~vset.IML
        imts.add(imt_str)
        for vfun in vset.getnodes('discreteVulnerability'):
            taxonomy = vfun['vulnerabilityFunctionID']
            if taxonomy in taxonomies:
                raise InvalidFile(
                    'Duplicated vulnerabilityFunctionID: %s: %s, line %d' %
                    (taxonomy, fname, vfun.lineno))
            taxonomies.add(taxonomy)
            with context(fname, vfun):
                loss_ratios = ~vfun.lossRatio
                coefficients = ~vfun.coefficientsVariation
            if len(loss_ratios) != len(imls):
                raise InvalidFile(
                    'There are %d loss ratios, but %d imls: %s, line %d' %
                    (len(loss_ratios), len(imls), fname,
                     vfun.lossRatio.lineno))
            if len(coefficients) != len(imls):
                raise InvalidFile(
                    'There are %d coefficients, but %d imls: %s, line %d' %
                    (len(coefficients), len(imls), fname,
                     vfun.coefficientsVariation.lineno))
            with context(fname, vfun):
                vf_dict[imt_str, taxonomy] = scientific.VulnerabilityFunction(
                    taxonomy, imt_str, imls, loss_ratios, coefficients,
                    vfun['probabilisticDistribution'])
    return vf_dict
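A sketch of how the returned dictionary might be consumed (the path is hypothetical):
vf_dict = get_vulnerability_functions('vulnerability_model.xml')  # hypothetical
for (imt, taxonomy), vfunc in sorted(vf_dict.items()):
    print(imt, taxonomy, vfunc)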
Example #13
def info(calculators, gsims, views, exports, report, input_file=''):
    """
    Give information. You can pass the name of an available calculator,
    a job.ini file, or a zip archive with the input files.
    """
    logging.basicConfig(level=logging.INFO)
    if calculators:
        for calc in sorted(base.calculators):
            print(calc)
    if gsims:
        for gs in gsim.get_available_gsims():
            print(gs)
    if views:
        for name in sorted(datastore.view):
            print(name)
    if exports:
        dic = groupby(export, operator.itemgetter(0),
                      lambda group: [r[1] for r in group])
        n = 0
        for exporter, formats in dic.items():
            print(exporter, formats)
            n += len(formats)
        print('There are %d exporters defined.' % n)
    if input_file.endswith('.xml'):
        print(nrml.read(input_file).to_str())
    elif input_file.endswith(('.ini', '.zip')):
        with Monitor('info', measuremem=True) as mon:
            if report:
                print('Generated', reportwriter.build_report(input_file))
            else:
                print_csm_info(input_file)
        if mon.duration > 1:
            print(mon)
    elif input_file:
        print("No info for '%s'" % input_file)
Example #15
def event_set_to_rupture_xmls(input_ses, output_dir):
    """
    Parses the entire event set to a set of files
    """
    if os.path.exists(output_dir):
        raise IOError("Output directory %s already exists" % output_dir)
    else:
        os.mkdir(output_dir)
    nodeset = nrml.read(input_ses, chatty=False)
    for sesc in nodeset:
        sesc_dir = os.path.join(
            output_dir, 
            "smltp_{:s}".format(sesc["sourceModelTreePath"]))
        os.mkdir(sesc_dir)
        for i, ses in enumerate(sesc):
            ses_dir = os.path.join(sesc_dir, "ses_{:s}".format(str(ses["id"])))
            os.mkdir(ses_dir)
            for rupture in ses:
                print "Parsing event %s" % rupture["id"]
                if hasattr(rupture, "planarSurface"):
                    rupture_node = parse_planar_surface(rupture)
                elif hasattr(rupture, "mesh"):
                    rupture_node = parse_mesh_surface(rupture)
                rup_id = rupture["id"].replace("=", "_")
                filename = os.path.join(ses_dir,
                                        rup_id.replace("|", "_") + ".xml")
                with open(filename, "w") as f:
                    nrml.write([rupture_node], f, "%s")
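A minimal invocation sketch with hypothetical paths; note that the function raises IOError if the output directory already exists:
# one XML file per rupture is written under ruptures_out/smltp_*/ses_*/
event_set_to_rupture_xmls('ses_collection.xml', 'ruptures_out')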
Example #16
def _get_exposure(fname, ok_cost_types, stop=None):
    """
    :param fname:
        path of the XML file containing the exposure
    :param ok_cost_types:
        a set of cost types (as strings)
    :param stop:
        node at which to stop parsing (or None)
    :returns:
        a triple (Exposure instance, list of asset nodes, cost calculator)
    """
    [exposure] = nrml.read(fname, stop=stop)
    description = exposure.description
    try:
        conversions = exposure.conversions
    except NameError:
        conversions = Node('conversions', nodes=[Node('costTypes', [])])
    try:
        inslimit = conversions.insuranceLimit
    except NameError:
        inslimit = Node('insuranceLimit', text=True)
    try:
        deductible = conversions.deductible
    except NameError:
        deductible = Node('deductible', text=True)
    try:
        area = conversions.area
    except NameError:
        # NB: the area type cannot be an empty string because when sending
        # around the CostCalculator object we would run into this numpy bug
        # about pickling dictionaries with empty strings:
        # https://github.com/numpy/numpy/pull/5475
        area = Node('area', dict(type='?'))

    # read the cost types and make some checks
    cost_types = []
    for ct in conversions.costTypes:
        if ct['name'] in ok_cost_types:
            with context(fname, ct):
                cost_types.append(
                    (ct['name'], valid.cost_type_type(ct['type']), ct['unit']))
    if 'occupants' in ok_cost_types:
        cost_types.append(('occupants', 'per_area', 'people'))
    cost_types.sort(key=operator.itemgetter(0))
    time_events = set()
    exp = Exposure(
        exposure['id'], exposure['category'],
        ~description, numpy.array(cost_types, cost_type_dt), time_events,
        inslimit.attrib.get('isAbsolute', True),
        deductible.attrib.get('isAbsolute', True),
        area.attrib, [], set(), [])
    cc = riskmodels.CostCalculator(
        {}, {}, {},
        exp.deductible_is_absolute, exp.insurance_limit_is_absolute)
    for ct in exp.cost_types:
        name = ct['name']  # structural, nonstructural, ...
        cc.cost_types[name] = ct['type']  # aggregated, per_asset, per_area
        cc.area_types[name] = exp.area['type']
        cc.units[name] = ct['unit']
    return exp, exposure.assets, cc
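A call sketch (the file name and cost-type set are hypothetical); the function returns the Exposure instance, the asset nodes and the cost calculator:
exp, assets, cc = _get_exposure('exposure_model.xml', {'structural'})
print(len(assets))  # number of asset nodes in the exposure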
Example #17
def read_hazard_curves(filename):
    """
    Reads the hazard curves from the NRML file and sorts the results
    into a dictionary of hazard curves information
    """
    node_set = read(filename)[0]
    hazard_curves = {
        "imt": node_set.attrib["IMT"],
        "investigation_time": node_set["investigationTime"],
        "imls": ~node_set.nodes[0]}
    for option, name in OPTIONAL_PATHS:
        if name in node_set.attrib:
            hazard_curves[option] = node_set.attrib[name]
        else:
            hazard_curves[option] = None
    n_curves = len(node_set.nodes) - 1
    locations = []
    poes = []
    for hc_node in node_set.nodes[1:]:
        # Get location info
        locations.append(~hc_node.nodes[0].nodes[0])
        # Get PoEs
        poes.append(hc_node.nodes[1].text)
    hazard_curves["curves"] = numpy.column_stack([numpy.array(locations),
                                                  numpy.array(poes)])
    return hazard_curves
Example #18
def get_hcurves_from_nrml(oqparam, fname):
    """
    :param oqparam:
        an :class:`openquake.commonlib.oqvalidation.OqParam` instance
    :param fname:
        an XML file containing hazard curves
    :returns:
        sitecol, curve array
    """
    hcurves_by_imt = {}
    oqparam.hazard_imtls = imtls = {}
    for hcurves in nrml.read(fname):
        imt = hcurves['IMT']
        oqparam.investigation_time = hcurves['investigationTime']
        if imt == 'SA':
            imt += '(%s)' % hcurves['saPeriod']
        imtls[imt] = ~hcurves.IMLs
        data = []
        for node in hcurves[1:]:
            xy = ~node.Point.pos
            poes = ~node.poEs
            data.append((xy, poes))
        data.sort()
        hcurves_by_imt[imt] = numpy.array([d[1] for d in data])
    n = len(hcurves_by_imt[imt])
    curves = zero_curves(n, imtls)
    for imt in imtls:
        curves[imt] = hcurves_by_imt[imt]
    lons, lats = [], []
    for xy, poes in data:
        lons.append(xy[0])
        lats.append(xy[1])
    mesh = geo.Mesh(numpy.array(lons), numpy.array(lats))
    sitecol = get_site_collection(oqparam, mesh)
    return sitecol, curves
Example #19
def xml_to_csv(input_xml, output_csv):
    """
    Parses the site model from an input xml file to a headed csv file
    """
    # Read in from XML
    sites = nrml.read(input_xml).siteModel
    fid = open(output_csv, "w")
    print("longitude,latitude,vs30,vs30Type,z1pt0,z2pt5,backarc", file=fid)
    for site in sites:
        if "backarc" in site.attrib:
            if ast.literal_eval(site.attrib["backarc"]):
                site.attrib["backarc"] = 1
            else:
                site.attrib["backarc"] = 0

        else:
            site.attrib["backarc"] = 0

        if site["vs30Type"] == "measured":
            vs30_type = 1
        else:
            vs30_type = 0
        print(",".join(
            [
                str(site["lon"]),
                str(site["lat"]),
                str(site["vs30"]),
                str(vs30_type),
                str(site["z1pt0"]),
                str(site["z2pt5"]),
                str(site["backarc"]),
            ]
        ), file=fid)
    fid.close()
Example #20
    def convert_from_nrml(self, fname):
        """
        Populate the underlying archive with CSV files extracted from the
        given XML file.
        """
        assert fname.endswith('.xml'), fname
        prefix = os.path.basename(fname)[:-4]
        return self.convert_from_node(nrml.read(fname)[0], prefix)
Example #21
def parse_sesc_file(file_name):
    """
    Parse NRML 0.4 SES collection file.
    """
    
    element = read(file_name, chatty=False)[0]
    return parse_ses_collection(element)
Example #22
    def test_nonparametric_source_ok(self):
        converter = s.SourceConverter(
            investigation_time=50.,
            rupture_mesh_spacing=1,  # km
            complex_fault_mesh_spacing=1,  # km
            width_of_mfd_bin=1.,  # for Truncated GR MFDs
            area_source_discretization=1.)
        [np] = nrml.read(NONPARAMETRIC_SOURCE).sourceModel
        converter.convert_node(np)
Example #23
def get_site_model(oqparam):
    """
    Convert the NRML file into an iterator over 6-tuples of the form
    (z1pt0, z2pt5, measured, vs30, lon, lat)

    :param oqparam:
        an :class:`openquake.commonlib.oqvalidation.OqParam` instance
    """
    for node in nrml.read(oqparam.inputs['site_model']).siteModel:
        yield valid.site_param(**node.attrib)
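A consumption sketch, assuming oqparam is an OqParam instance whose inputs['site_model'] points at a siteModel XML file:
for site_param in get_site_model(oqparam):
    print(site_param)  # a validated 6-tuple, one per <site> node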
Example #24
def upgrade_file(path):
    """Upgrade to the latest NRML version"""
    node0 = nrml.read(path, chatty=False)[0]
    shutil.copy(path, path + '.bak')  # make a backup of the original file
    if striptag(node0.tag) == 'vulnerabilityModel':
        vf_dict, cat_dict = get_vulnerability_functions_04(path)
        node0 = LiteralNode(
            'vulnerabilityModel', cat_dict,
            nodes=list(map(riskmodels.obj_to_node, list(vf_dict.values()))))
    with open(path, 'w') as f:
        nrml.write([node0], f)
Example #25
def get_rupture(oqparam):
    """
    Returns a hazardlib rupture by reading the `rupture_model` file.

    :param oqparam:
        an :class:`openquake.commonlib.oqvalidation.OqParam` instance
    """
    rup_model = oqparam.inputs['rupture_model']
    [rup_node] = nrml.read(rup_model)
    conv = sourceconverter.RuptureConverter(
        oqparam.rupture_mesh_spacing, oqparam.complex_fault_mesh_spacing)
    return conv.convert_node(rup_node)
Example #26
    def test_invalid(self):
        fname = writetmp('''\
<?xml version="1.0" encoding="UTF-8"?>
<nrml xmlns="http://openquake.org/xmlns/nrml/0.5">
  <fragilityModel id="Ethiopia" assetCategory="buildings"
        lossCategory="structural">
    <description>structural_vul_ethiopia</description>
    <limitStates> slight moderate extensive collapse</limitStates>
    <fragilityFunction id="CR/LFINF/H:1,2" format="continuous" shape="logncdf">
       <imls imt="SA" noDamageLimit="0.1" minIML="0.01" maxIML="1.2"/>
       <params ls="slight" mean="0.184422723" stddev="0.143988438"/>
       <params ls="moderate" mean="1.659007804" stddev="3.176361273"/>
       <params ls="extensive" mean="9.747745727" stddev="38.54171001"/>
       <params ls="collapse" mean="247.1792873" stddev="4014.774504"/>
     </fragilityFunction>
  </fragilityModel>
</nrml>''')
        with self.assertRaises(ValueError) as ctx:
            read(fname)
        self.assertIn('Could not convert imt->intensity_measure_type: '
                      "Invalid IMT: 'SA', line 8", str(ctx.exception))
Example #27
    def test_invalid(self):
        fname = writetmp('''\
<?xml version="1.0" encoding="UTF-8"?>
<nrml xmlns="http://openquake.org/xmlns/nrml/0.5">
  <fragilityModel id="Ethiopia" assetCategory="buildings"
        lossCategory="structural">
    <description>structural_vul_ethiopia</description>
    <limitStates> slight moderate extensive collapse</limitStates>
    <fragilityFunction id="CR/LFINF/H:1,2" format="continuous" shape="logncdf">
       <imls imt="SA" noDamageLimit="0.1" minIML="0.01" maxIML="1.2"/>
       <params ls="slight" mean="0.184422723" stddev="0.143988438"/>
       <params ls="moderate" mean="1.659007804" stddev="3.176361273"/>
       <params ls="extensive" mean="9.747745727" stddev="38.54171001"/>
       <params ls="collapse" mean="247.1792873" stddev="4014.774504"/>
     </fragilityFunction>
  </fragilityModel>
</nrml>''')
        with self.assertRaises(ValueError) as ctx:
            read(fname)
        self.assertIn(
            'Could not convert imt->intensity_measure_type: '
            "Invalid IMT: 'SA', line 8 of", str(ctx.exception))
Example #28
def parse_gmfc_file(file_name):
    """
    Parse NRML 0.4 GMF collection file.
    """
    node_set = read(file_name)[0]
    gmfss = []
    gmfc = GmfCollection(node_set["sourceModelTreePath"],
                         node_set["gsimTreePath"],
                         None)
    for gmf_set in node_set:
        gmfss.append(parse_gmf_set(gmf_set))
    gmfc.gmfss = gmfss
    return gmfc
Example #29
def get_rupture(oqparam):
    """
    Returns a hazardlib rupture by reading the `rupture_model` file.

    :param oqparam:
        an :class:`openquake.commonlib.oqvalidation.OqParam` instance
    """
    rup_model = oqparam.inputs['rupture_model']
    [rup_node] = nrml.read(rup_model)
    conv = sourceconverter.RuptureConverter(
        oqparam.rupture_mesh_spacing, oqparam.complex_fault_mesh_spacing)
    rup = conv.convert_node(rup_node)
    rup.tectonic_region_type = '*'  # there is no TRT for scenario ruptures
    return rup
Example #30
def get_scenario_from_nrml(oqparam, fname):
    """
    :param oqparam:
        an :class:`openquake.commonlib.oqvalidation.OqParam` instance
    :param fname:
        the NRML file containing the GMFs
    :returns:
        a triple (sitecol, etags, gmf array)
    """
    if not oqparam.imtls:
        oqparam.set_risk_imtls(get_risk_models(oqparam))
    imts = sorted(oqparam.imtls)
    num_imts = len(imts)
    imt_dt = numpy.dtype([(imt, F32) for imt in imts])
    gmfset = nrml.read(fname).gmfCollection.gmfSet
    etags, sitecounts = _extract_etags_sitecounts(gmfset)
    oqparam.sites = sorted(sitecounts)
    site_idx = {lonlat: i for i, lonlat in enumerate(oqparam.sites)}
    oqparam.number_of_ground_motion_fields = num_events = len(etags)
    sitecol = get_site_collection(oqparam)
    num_sites = len(oqparam.sites)
    gmf_by_imt = numpy.zeros((num_events, num_sites), imt_dt)
    counts = collections.Counter()
    for i, gmf in enumerate(gmfset):
        if len(gmf) != num_sites:  # there must be one node per site
            raise InvalidFile('Expected %d sites, got %d nodes in %s, line %d'
                              % (num_sites, len(gmf), fname, gmf.lineno))
        counts[gmf['ruptureId']] += 1
        imt = gmf['IMT']
        if imt == 'SA':
            imt = 'SA(%s)' % gmf['saPeriod']
        for node in gmf:
            sid = site_idx[node['lon'], node['lat']]
            gmf_by_imt[imt][i % num_events, sid] = node['gmv']

    for etag, count in sorted(counts.items()):
        if count < num_imts:
            raise InvalidFile("Found a missing etag '%s' in %s" %
                              (etag, fname))
        elif count > num_imts:
            raise InvalidFile("Found a duplicated etag '%s' in %s" %
                              (etag, fname))
    expected_gmvs_per_site = num_imts * len(etags)
    for lonlat, counts in sitecounts.items():
        if counts != expected_gmvs_per_site:
            raise InvalidFile(
                '%s: expected %d gmvs at location %s, found %d' %
                (fname, expected_gmvs_per_site, lonlat, counts))
    return sitecol, etags, gmf_by_imt.T
Example #31
def get_vulnerability_functions_04(fname):
    """
    Parse the vulnerability model in NRML 0.4 format.

    :param fname:
        path of the vulnerability file
    :returns:
        a dictionary imt, taxonomy -> vulnerability function + vset
    """
    categories = dict(assetCategory=set(), lossCategory=set(),
                      vulnerabilitySetID=set())
    imts = set()
    taxonomies = set()
    vf_dict = {}  # imt, taxonomy -> vulnerability function
    for vset in nrml.read(fname).vulnerabilityModel:
        categories['assetCategory'].add(vset['assetCategory'])
        categories['lossCategory'].add(vset['lossCategory'])
        categories['vulnerabilitySetID'].add(vset['vulnerabilitySetID'])
        IML = vset.IML
        imt_str = IML['IMT']
        imls = ~IML
        imts.add(imt_str)
        for vfun in vset.getnodes('discreteVulnerability'):
            taxonomy = vfun['vulnerabilityFunctionID']
            if taxonomy in taxonomies:
                raise InvalidFile(
                    'Duplicated vulnerabilityFunctionID: %s: %s, line %d' %
                    (taxonomy, fname, vfun.lineno))
            taxonomies.add(taxonomy)
            with context(fname, vfun):
                loss_ratios = ~vfun.lossRatio
                coefficients = ~vfun.coefficientsVariation
            if len(loss_ratios) != len(imls):
                raise InvalidFile(
                    'There are %d loss ratios, but %d imls: %s, line %d' %
                    (len(loss_ratios), len(imls), fname,
                     vfun.lossRatio.lineno))
            if len(coefficients) != len(imls):
                raise InvalidFile(
                    'There are %d coefficients, but %d imls: %s, line %d' %
                    (len(coefficients), len(imls), fname,
                     vfun.coefficientsVariation.lineno))
            with context(fname, vfun):
                vf_dict[imt_str, taxonomy] = scientific.VulnerabilityFunction(
                    taxonomy, imt_str, imls, loss_ratios, coefficients,
                    vfun['probabilisticDistribution'])
    categories['id'] = '_'.join(sorted(categories['vulnerabilitySetID']))
    del categories['vulnerabilitySetID']
    return vf_dict, categories
Example #32
    def test_raises_useful_error_2(self):
        area_file = BytesIO(b"""\
<?xml version='1.0' encoding='utf-8'?>
<nrml xmlns:gml="http://www.opengis.net/gml"
      xmlns="http://openquake.org/xmlns/nrml/0.4">
    <sourceModel name="Some Source Model">
        <areaSource id="1" name="Quito" tectonicRegion="Active Shallow Crust">
            <areaGeometry>
                <gml:Polygon>
                    <gml:exterior>
                        <gml:LinearRing>
                            <gml:posList>
                             -122.5 37.5
                             -121.5 37.5
                             -121.5 38.5
                             -122.5 38.5
                            </gml:posList>
                        </gml:LinearRing>
                    </gml:exterior>
                </gml:Polygon>
                <upperSeismoDepth>0.0</upperSeismoDepth>
                <lowerSeismoDepth>10.0</lowerSeismoDepth>
            </areaGeometry>
            <magScaleRel>PeerMSR</magScaleRel>
            <ruptAspectRatio>1.5</ruptAspectRatio>
            <incrementalMFD minMag="6.55" binWidth="0.1">
                <occurRates>0.0010614989 8.8291627E-4 7.3437777E-4
                            6.108288E-4 5.080653E-4
                </occurRates>
            </incrementalMFD>
            <nodalPlanedist>
         <nodalPlane probability="0.3" strike="0.0" dip="90.0" rake="0.0" />
         <nodalPlane probability="0.7" strike="90.0" dip="45.0" rake="90.0" />
            </nodalPlanedist>
            <hypoDepthDist>
                <hypoDepth probability="0.5" depth="4.0" />
                <hypoDepth probability="0.5" depth="8.0" />
            </hypoDepthDist>
        </areaSource>

    </sourceModel>
</nrml>
""")
        [area] = nrml.read(area_file).sourceModel
        with self.assertRaises(NameError) as ctx:
            self.parser.converter.convert_node(area)
        self.assertIn(
            "node areaSource: No subnode named 'nodalPlaneDist'"
            " found in 'areaSource', line 5 of", str(ctx.exception))
Example #33
def collect_source_model_paths(smlt):
    """
    Given a path to a source model logic tree (or a file-like object),
    yield the soft-linked path names of the source models it contains.

    :param smlt: source model logic tree file
    """
    for blevel in nrml.read(smlt).logicTree:
        with node.context(smlt, blevel):
            for bset in blevel:
                for br in bset:
                    smfname = br.uncertaintyModel.text
                    if smfname:
                        yield smfname
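A usage sketch with a hypothetical logic tree file name:
for sm_path in collect_source_model_paths('source_model_logic_tree.xml'):
    print(sm_path)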
Example #34
def check(fname, pprint):
    """
    Check the validity of NRML files and .ini files.
    Optionally, displays NRML files in indented format.
    """
    if fname.endswith('.xml'):
        node = nrml.read(fname)
        if pprint:
            print(node.to_str())
    elif fname.endswith('.ini'):
        logging.basicConfig(level=logging.INFO)
        oqparam = readinput.get_oqparam(fname)
        calculators.calculators(oqparam).pre_execute()
        if pprint:
            print(oqparam)
Example #35
def get_scenario_from_nrml(oqparam, fname):
    """
    :param oqparam:
        an :class:`openquake.commonlib.oqvalidation.OqParam` instance
    :param fname:
        the NRML file containing the GMFs
    :returns:
        a triple (sitecol, etags, gmf array)
    """
    if not oqparam.imtls:
        oqparam.set_risk_imtls(get_risk_models(oqparam))
    imts = list(oqparam.imtls)
    num_imts = len(imts)
    imt_dt = numpy.dtype([(str(imt), F32) for imt in imts])
    gmfset = nrml.read(fname).gmfCollection.gmfSet
    etags, sitecounts = _extract_etags_sitecounts(gmfset)
    oqparam.sites = sorted(sitecounts)
    site_idx = {lonlat: i for i, lonlat in enumerate(oqparam.sites)}
    oqparam.number_of_ground_motion_fields = num_events = len(etags)
    sitecol = get_site_collection(oqparam)
    num_sites = len(oqparam.sites)
    gmf_by_imt = numpy.zeros((num_events, num_sites), imt_dt)
    counts = collections.Counter()
    for i, gmf in enumerate(gmfset):
        if len(gmf) != num_sites:  # there must be one node per site
            raise InvalidFile(
                'Expected %d sites, got %d nodes in %s, line %d' %
                (num_sites, len(gmf), fname, gmf.lineno))
        counts[gmf['ruptureId']] += 1
        imt = gmf['IMT']
        if imt == 'SA':
            imt = 'SA(%s)' % gmf['saPeriod']
        for node in gmf:
            sid = site_idx[node['lon'], node['lat']]
            gmf_by_imt[imt][i % num_events, sid] = node['gmv']

    for etag, count in counts.items():
        if count < num_imts:
            raise InvalidFile('Found a missing etag %r in %s' % (etag, fname))
        elif count > num_imts:
            raise InvalidFile('Found a duplicated etag %r in %s' %
                              (etag, fname))
    expected_gmvs_per_site = num_imts * len(etags)
    for lonlat, counts in sitecounts.items():
        if counts != expected_gmvs_per_site:
            raise InvalidFile('%s: expected %d gmvs at location %s, found %d' %
                              (fname, expected_gmvs_per_site, lonlat, counts))
    return sitecol, etags, gmf_by_imt.T
Example #36
def get_scenario_from_nrml(oqparam, fname):
    """
    :param oqparam:
        an :class:`openquake.commonlib.oqvalidation.OqParam` instance
    :param fname:
        the NRML file containing the GMFs
    :returns:
        a triple (sitecol, rupture_tags, gmf array)
    """
    if not oqparam.imtls:
        oqparam.set_risk_imtls(get_risk_models(oqparam))
    imts = list(oqparam.imtls)
    imt_dt = numpy.dtype([(imt, float) for imt in imts])
    gmfset = nrml.read(fname).gmfCollection.gmfSet
    tags, oqparam.sites = _extract_tags_sites(gmfset)
    oqparam.number_of_ground_motion_fields = num_events = len(tags)
    sitecol = get_site_collection(oqparam)
    num_sites = len(oqparam.sites)
    gmf_by_imt = numpy.zeros((num_events, num_sites), imt_dt)
    num_imts = len(imts)
    counts = collections.Counter()
    for i, gmf in enumerate(gmfset):
        if len(gmf) != num_sites:  # there must be one node per site
            raise InvalidFile('Expected %d sites, got %d in %s, line %d' % (
                num_sites, len(gmf), fname, gmf.lineno))
        counts[gmf['ruptureId']] += 1
        imt = gmf['IMT']
        if imt == 'SA':
            imt = 'SA(%s)' % gmf['saPeriod']
        for site_idx, lon, lat, node in zip(
                range(num_sites), sitecol.lons, sitecol.lats, gmf):
            if (node['lon'], node['lat']) != (lon, lat):
                raise InvalidFile('The site mesh is not ordered in %s, line %d'
                                  % (fname, node.lineno))
            try:
                gmf_by_imt[imt][i % num_events, site_idx] = node['gmv']
            except IndexError:
                raise InvalidFile('Something wrong in %s, line %d' %
                                  (fname, node.lineno))
    for tag, count in counts.items():
        if count < num_imts:
            raise InvalidFile('Found a missing tag %r in %s' %
                              (tag, fname))
        elif count > num_imts:
            raise InvalidFile('Found a duplicated tag %r in %s' %
                              (tag, fname))
    return sitecol, tags, gmf_by_imt.T
Example #37
def tidy(fnames):
    """
    Reformat a NRML file in a canonical form. That also means reducing the
    precision of the floats to a standard value. If the file is invalid,
    a clear error message is shown.
    """
    for fname in fnames:
        try:
            nodes = nrml.read(fname).nodes
        except ValueError as err:
            print(err)
            return
        with open(fname + '.bak', 'w') as f:
            f.write(open(fname).read())
        with open(fname, 'w') as f:
            nrml.write(nodes, f)
        print('Reformatted %s, original left in %s.bak' % (fname, fname))
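An invocation sketch with a hypothetical file name; the original file is kept as a .bak copy:
tidy(['source_model.xml'])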
Example #38
def _info(name, filtersources, weightsources):
    if name in base.calculators:
        print(textwrap.dedent(base.calculators[name].__doc__.strip()))
    elif name == 'gsims':
        for gs in gsim.get_available_gsims():
            print(gs)
    elif name.endswith('.xml'):
        print(nrml.read(name).to_str())
    elif name.endswith(('.ini', '.zip')):
        oqparam = readinput.get_oqparam(name)
        if 'exposure' in oqparam.inputs:
            expo = readinput.get_exposure(oqparam)
            sitecol, assets_by_site = readinput.get_sitecol_assets(
                oqparam, expo)
        elif filtersources or weightsources:
            sitecol, assets_by_site = readinput.get_site_collection(
                oqparam), []
        else:
            sitecol, assets_by_site = None, []
        if 'source_model_logic_tree' in oqparam.inputs:
            print('Reading the source model...')
            if weightsources:
                sp = source.SourceFilterWeighter
            elif filtersources:
                sp = source.SourceFilter
            else:
                sp = source.BaseSourceProcessor  # do nothing
            csm = readinput.get_composite_source_model(oqparam, sitecol, sp)
            assoc = csm.get_rlzs_assoc()
            dstore = datastore.Fake(vars(oqparam),
                                    rlzs_assoc=assoc,
                                    composite_source_model=csm,
                                    sitecol=sitecol)
            _print_info(dstore, filtersources, weightsources)
        if len(assets_by_site):
            assetcol = riskinput.build_asset_collection(assets_by_site)
            dic = groupby(assetcol, operator.attrgetter('taxonomy'))
            for taxo, num in dic.items():
                print('taxonomy #%d, %d assets' % (taxo, num))
            print('total assets = %d' % len(assetcol))
    else:
        print("No info for '%s'" % name)
Example #39
def reduce(fname, reduction_factor):
    """
    Produce a submodel from `fname` by sampling the nodes randomly.
    Supports source models, site models and exposure models. As a special
    case, it is also able to reduce .csv files by sampling the lines.
    This is a debugging utility to reduce large computations to small ones.
    """
    if fname.endswith('.csv'):
        with open(fname) as f:
            all_lines = f.readlines()
        lines = random_filter(all_lines, reduction_factor)
        shutil.copy(fname, fname + '.bak')
        print('Copied the original file in %s.bak' % fname)
        with open(fname, 'w') as f:
            for line in lines:
                f.write(line)
        print('Extracted %d lines out of %d' % (len(lines), len(all_lines)))
        return
    model, = nrml.read(fname)
    if model.tag.endswith('exposureModel'):
        total = len(model.assets)
        model.assets.nodes = random_filter(model.assets, reduction_factor)
        num_nodes = len(model.assets)
    elif model.tag.endswith('siteModel'):
        total = len(model)
        model.nodes = random_filter(model, reduction_factor)
        num_nodes = len(model)
    elif model.tag.endswith('sourceModel'):
        total = len(model)
        model.nodes = random_filter(model, reduction_factor)
        num_nodes = len(model)
    else:
        raise RuntimeError('Unknown model tag: %s' % model.tag)
    shutil.copy(fname, fname + '.bak')
    print('Copied the original file in %s.bak' % fname)
    with open(fname, 'w') as f:
        nrml.write([model], f)
    print('Extracted %d nodes out of %d' % (num_nodes, total))
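An invocation sketch with a hypothetical file name, keeping roughly 10% of the nodes:
reduce('exposure_model.xml', 0.1)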
Example #40
def info(name, report=False):
    """
    Give information. You can pass the name of an available calculator,
    a job.ini file, or a zip archive with the input files.
    """
    logging.basicConfig(level=logging.INFO)
    if name in base.calculators:
        print(textwrap.dedent(base.calculators[name].__doc__.strip()))
    elif name == 'gsims':
        for gs in gsim.get_available_gsims():
            print(gs)
    elif name.endswith('.xml'):
        print(nrml.read(name).to_str())
    elif name.endswith(('.ini', '.zip')):
        with Monitor('info', measuremem=True) as mon:
            if report:
                print('Generated', reportwriter.build_report(name))
            else:
                print_csm_info(name)
        if mon.duration > 1:
            print(mon)
    else:
        print("No info for '%s'" % name)
Example #41
    def test_nrml(self):
        # can read and write a NRML file converted into a Node object
        xmlfile = io.BytesIO(b"""\
<?xml version='1.0' encoding='utf-8'?>
<nrml xmlns="http://openquake.org/xmlns/nrml/0.4"
      xmlns:gml="http://www.opengis.net/gml">
  <exposureModel
      id="my_exposure_model_for_population"
      category="population"
      taxonomySource="fake population datasource">

    <description>
      Sample population
    </description>

    <assets>
      <asset id="asset_01" number="7" taxonomy="IT-PV">
          <location lon="9.15000" lat="45.16667" />
      </asset>

      <asset id="asset_02" number="7" taxonomy="IT-CE">
          <location lon="9.15333" lat="45.12200" />
      </asset>
    </assets>
  </exposureModel>
</nrml>
""")
        root = read(xmlfile)

        tag, version = get_tag_version(root[0])
        self.assertEqual(tag, 'exposureModel')
        self.assertEqual(version, 'nrml/0.4')

        outfile = io.BytesIO()
        node_to_xml(root, outfile, {})
        self.assertEqual(
            outfile.getvalue().decode('utf-8'), """\
<?xml version="1.0" encoding="utf-8"?>
<nrml
xmlns="http://openquake.org/xmlns/nrml/0.4"
xmlns:gml="http://www.opengis.net/gml"
>
    <exposureModel
    category="population"
    id="my_exposure_model_for_population"
    taxonomySource="fake population datasource"
    >
        <description>
            Sample population
        </description>
        <assets>
            <asset
            id="asset_01"
            number="7.000000000E+00"
            taxonomy="IT-PV"
            >
                <location lat="4.516667000E+01" lon="9.150000000E+00"/>
            </asset>
            <asset
            id="asset_02"
            number="7.000000000E+00"
            taxonomy="IT-CE"
            >
                <location lat="4.512200000E+01" lon="9.153330000E+00"/>
            </asset>
        </assets>
    </exposureModel>
</nrml>
""")
Example #42
def get_vulnerability_functions(fname):
    """
    :param fname:
        path of the vulnerability file
    :returns:
        a dictionary imt, taxonomy -> vulnerability function
    """
    # NB: the IMTs can be duplicated and with different levels, each
    # vulnerability function in a set will get its own levels
    imts = set()
    taxonomies = set()
    vf_dict = {}  # imt, taxonomy -> vulnerability function
    node = nrml.read(fname)
    if node['xmlns'] == nrml.NRML05:
        vmodel = node[0]
        for vfun in vmodel.getnodes('vulnerabilityFunction'):
            with context(fname, vfun):
                imt = vfun.imls['imt']
                imls = numpy.array(~vfun.imls)
                taxonomy = vfun['id']
            if taxonomy in taxonomies:
                raise InvalidFile(
                    'Duplicated vulnerabilityFunctionID: %s: %s, line %d' %
                    (taxonomy, fname, vfun.lineno))
            if vfun['dist'] == 'PM':
                loss_ratios, probs = [], []
                for probabilities in vfun[1:]:
                    loss_ratios.append(probabilities['lr'])
                    probs.append(valid.probabilities(~probabilities))
                probs = numpy.array(probs)
                assert probs.shape == (len(loss_ratios), len(imls))
                vf_dict[imt,
                        taxonomy] = (scientific.VulnerabilityFunctionWithPMF(
                            taxonomy, imt, imls, numpy.array(loss_ratios),
                            probs))
            else:
                with context(fname, vfun):
                    loss_ratios = ~vfun.meanLRs
                    coefficients = ~vfun.covLRs
                if len(loss_ratios) != len(imls):
                    raise InvalidFile(
                        'There are %d loss ratios, but %d imls: %s, line %d' %
                        (len(loss_ratios), len(imls), fname,
                         vfun.meanLRs.lineno))
                if len(coefficients) != len(imls):
                    raise InvalidFile(
                        'There are %d coefficients, but %d imls: %s, '
                        'line %d' % (len(coefficients), len(imls), fname,
                                     vfun.covLRs.lineno))
                with context(fname, vfun):
                    vf_dict[imt, taxonomy] = scientific.VulnerabilityFunction(
                        taxonomy, imt, imls, loss_ratios, coefficients,
                        vfun['dist'])
        return vf_dict
    # otherwise, read the old format (NRML 0.4)
    for vset in read_nodes(fname, filter_vset,
                           nodefactory['vulnerabilityModel']):
        imt_str, imls, min_iml, max_iml, imlUnit = ~vset.IML
        imts.add(imt_str)
        for vfun in vset.getnodes('discreteVulnerability'):
            taxonomy = vfun['vulnerabilityFunctionID']
            if taxonomy in taxonomies:
                raise InvalidFile(
                    'Duplicated vulnerabilityFunctionID: %s: %s, line %d' %
                    (taxonomy, fname, vfun.lineno))
            taxonomies.add(taxonomy)
            with context(fname, vfun):
                loss_ratios = ~vfun.lossRatio
                coefficients = ~vfun.coefficientsVariation
            if len(loss_ratios) != len(imls):
                raise InvalidFile(
                    'There are %d loss ratios, but %d imls: %s, line %d' %
                    (len(loss_ratios), len(imls), fname,
                     vfun.lossRatio.lineno))
            if len(coefficients) != len(imls):
                raise InvalidFile(
                    'There are %d coefficients, but %d imls: %s, line %d' %
                    (len(coefficients), len(imls), fname,
                     vfun.coefficientsVariation.lineno))
            with context(fname, vfun):
                vf_dict[imt_str, taxonomy] = scientific.VulnerabilityFunction(
                    taxonomy, imt_str, imls, loss_ratios, coefficients,
                    vfun['probabilisticDistribution'])
    return vf_dict
Example #43
def import_hazard_curves(fileobj):
    """
    Parse the file with the hazard curves and import it into the tables
    hazard_curve and hazard_curve_data. It also creates a new output record,
    unrelated to a job.

    :param fileobj:
        a file-like object associated to an XML file
    :returns:
        the generated :class:`openquake.engine.db.models.Output` object
        and the generated :class:`openquake.engine.db.models.OqJob` object.
    """
    fname = fileobj.name
    hazcurves = nrml.read(fileobj).hazardCurves
    imt = imt_str = hazcurves['IMT']
    if imt == 'SA':
        imt_str += '(%s)' % hazcurves['saPeriod']
    imls = ~hazcurves.IMLs
    hc_nodes = hazcurves[1:]

    curs = connections['job_init'].cursor().cursor  # DB API cursor
    job = engine.create_job()
    job.save_params(
        dict(base_path=os.path.dirname(fname),
             intensity_measure_types_and_levels={imt_str: imls},
             description='HazardCurve importer, file %s' %
             os.path.basename(fname),
             calculation_mode='classical'))

    out = models.Output.objects.create(display_name='Imported from %r' % fname,
                                       output_type='hazard_curve',
                                       oq_job=job)

    haz_curve = models.HazardCurve.objects.create(
        investigation_time=hazcurves['investigationTime'],
        imt=imt,
        imls=imls,
        quantile=hazcurves.attrib.get('quantileValue'),
        statistics=hazcurves.attrib.get('statistics'),
        sa_damping=hazcurves.attrib.get('saDamping'),
        sa_period=hazcurves.attrib.get('saPeriod'),
        output=out)
    hazard_curve_id = str(haz_curve.id)

    # convert the XML into a tab-separated StringIO
    f = StringIO()
    for node in hc_nodes:
        x, y = ~node.Point.pos
        poes = ~node.poEs
        poes = '{%s}' % str(poes)[1:-1]
        f.write('\t'.join(
            [hazard_curve_id, poes,
             'SRID=4326;POINT(%s %s)' % (x, y)]) + '\n')
    f.seek(0)  # rewind before the COPY FROM
    # import the file-like object with a COPY FROM
    try:
        curs.copy_expert(
            'copy hzrdr.hazard_curve_data (hazard_curve_id, poes, location) '
            'from stdin', f)
    except:
        curs.connection.rollback()
        raise
    else:
        curs.connection.commit()
    finally:
        f.close()
    job.save()
    return out
Example #44
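    # NB: this example is only the tail of a CSV-writing helper: the
    # enclosing function signature (with dest, data, sep, fmt, autoheader)
    # is not shown on this page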
    if autoheader:
        all_fields = [col.split(':', 1)[0].split('-') for col in autoheader]
        for record in data:
            row = []
            for fields in all_fields:
                row.append(extract_from(record, fields))
            dest.write(
                sep.join(scientificformat(col, fmt) for col in row) + u'\n')
    else:
        for row in data:
            dest.write(
                sep.join(scientificformat(col, fmt) for col in row) + u'\n')
    if hasattr(dest, 'getvalue'):
        return dest.getvalue()[:-1]  # a newline is strangely added
    else:
        dest.close()
    return dest.name


if __name__ == '__main__':  # pretty print of NRML files
    import sys
    import shutil
    from openquake.commonlib import nrml
    nrmlfiles = sys.argv[1:]
    for fname in nrmlfiles:
        node = nrml.read(fname)
        shutil.copy(fname, fname + '.bak')
        with open(fname, 'w') as out:
            nrml.write(list(node), out)