Beispiel #1
0
def write_source_model(dest, sources_or_groups, name=None,
                       investigation_time=None):
    """
    Writes a source model to XML.

    :param dest:
        Destination path
    :param sources_or_groups:
        Source model in different formats: a SourceModel instance, a
        list of SourceGroup instances, or a plain list of sources
    :param name:
        Name of the source model (if missing, extracted from the filename)
    :param investigation_time:
        Investigation time stored as an attribute of the sourceModel
        node; ignored when a SourceModel instance is passed, since that
        carries its own
    :returns:
        the list of file names written: the XML file, plus a companion
        ``.hdf5`` file when the groups carry extra array data
    """
    # normalize the three accepted input formats into (groups, attrs)
    if isinstance(sources_or_groups, nrml.SourceModel):
        groups = sources_or_groups.src_groups
        attrs = dict(name=sources_or_groups.name,
                     investigation_time=sources_or_groups.investigation_time)
    elif isinstance(sources_or_groups[0], sourceconverter.SourceGroup):
        groups = sources_or_groups
        attrs = dict(investigation_time=investigation_time)
    else:  # passed a list of sources
        srcs_by_trt = groupby(
            sources_or_groups, operator.attrgetter('tectonic_region_type'))
        groups = [sourceconverter.SourceGroup(trt, srcs_by_trt[trt])
                  for trt in srcs_by_trt]
        attrs = dict(investigation_time=investigation_time)
    if name or 'name' not in attrs:
        attrs['name'] = name or os.path.splitext(os.path.basename(dest))[0]
    if attrs['investigation_time'] is None:
        del attrs['investigation_time']
    nodes = list(map(obj_to_node, groups))
    ddict = extract_ddict(groups)
    if ddict:
        # remove duplicate content from nodes
        for grp_node in nodes:
            for src_node in grp_node:
                src_node.nodes = []
        # save HDF5 file
        dest5 = os.path.splitext(dest)[0] + '.hdf5'
        with hdf5.File(dest5, 'w') as h:
            for src_id, dic in ddict.items():
                for k, v in dic.items():
                    key = '%s/%s' % (src_id, k)
                    if isinstance(v, numpy.ndarray):
                        # create the (gzip-compressed) dataset first,
                        # then fill it in place
                        h.create_dataset(key, v.shape, v.dtype,
                                         compression='gzip',
                                         compression_opts=9)
                        h[key][:] = v
                    else:
                        h[key] = v

    source_model = Node("sourceModel", attrs, nodes=nodes)
    with open(dest, 'wb') as f:
        nrml.write([source_model], f, '%s')
    if ddict:
        return [dest, dest5]
    else:
        return [dest]
Beispiel #2
0
def kite_surface_node(profiles):
    """
    Build a ``kiteSurface`` Node out of a set of profiles.

    :param profiles: a list of lists of points
    :returns: a Node of kind kiteSurface
    """
    surface = Node('kiteSurface')
    for prof in profiles:
        surface.append(profile_node(prof))
    return surface
Beispiel #3
0
 def dmg_dist_per_asset_node(self, data):
     """
     Build a `dmgDistPerAsset` node out of damage records.

     :param data: a sequence of records with attributes .exposure_data,
                  .mean and .stddev
     :returns: a `dmgDistPerAsset` node
     """
     root = Node('dmgDistPerAsset', nodes=[self.dmg_states])
     by_site = groupby(data, lambda rec: rec.exposure_data.site)
     for site in by_site:
         ddnode = Node('DDNode', nodes=[self.point_node(site)])
         by_asset = groupby(
             by_site[site], lambda rec: rec.exposure_data.asset_ref,
             lambda recs: [(rec.mean, rec.stddev) for rec in recs])
         for ref, pairs in by_asset.items():
             means, stddevs = zip(*pairs)
             ddnode.append(self.asset_node(ref, means, stddevs))
         root.append(ddnode)
     return root
Beispiel #4
0
def build_truncated_gr_mfd(mfd):
    """
    Convert a truncated Gutenberg-Richter MFD into a Node.

    :param mfd:
        MFD as instance of :class:
        `openquake.hazardlib.mfd.truncated_gr.TruncatedGRMFD`
    :returns:
        Instance of :class:`openquake.baselib.node.Node`
    """
    # a slip_rate attribute marks the slip-rate flavour of the MFD,
    # which carries no aValue but slipRate/rigidity instead
    if hasattr(mfd, 'slip_rate'):
        attrs = {"bValue": mfd.b_val, "slipRate": mfd.slip_rate,
                 "rigidity": mfd.rigidity,
                 "minMag": mfd.min_mag, "maxMag": mfd.max_mag}
    else:
        attrs = {"aValue": mfd.a_val, "bValue": mfd.b_val,
                 "minMag": mfd.min_mag, "maxMag": mfd.max_mag}
    return Node("truncGutenbergRichterMFD", attrs)
Beispiel #5
0
def build_source_model_node(source_model):
    """
    Convert a source model object into a `sourceModel` Node, copying
    the name, investigation_time and start_time attributes when truthy.
    """
    attrs = {}
    for attr in ('name', 'investigation_time', 'start_time'):
        value = getattr(source_model, attr)
        if value:
            attrs[attr] = value
    children = [obj_to_node(grp) for grp in source_model.src_groups]
    return Node('sourceModel', attrs, nodes=children)
Beispiel #6
0
 def asset_node(self, asset_ref, means, stddevs):
     """
     Build an `asset` node for the given reference and statistics.

     :param asset_ref: asset reference string
     :param means: array of means, one per damage state
     :param stddevs: array of stddevs, one per damage state
     :returns: an `asset` node
     """
     attrs = dict(assetRef=asset_ref)
     children = self.damage_nodes(means, stddevs)
     return Node('asset', attrs, nodes=children)
def df_to_tree(tree_df, validate=True, omit=None, sub=None):
    '''
    Convert a logic-tree :class:`pandas.DataFrame` into a tree of
    :class:`openquake.baselib.node.Node` objects which can then be
    written to a file using :func:`openquake.hazardlib.nrml.write`.
    '''
    root = Node('logicTree', {'logicTreeID': 'lt1'}, None)

    for idx, row in tree_df.iterrows():
        level_node = Node('logicTreeBranchingLevel',
                          {'branchingLevelID': 'bl%d' % (idx + 1)}, None)

        bs_attr = {
            'branchSetID': 'bs%d' % (idx + 1),
            'uncertaintyType': row['uncertaintyType']
        }
        # copy over every applyTo* column unless it is the 'all' wildcard
        for col in row.keys():
            if 'applyTo' in col and row[col] != 'all':
                bs_attr[col] = row[col]

        weights = (row['uncertaintyWeight']
                   if 'uncertaintyWeight' in row.keys() else None)

        mw = models_with_weights(row['uncertaintyType'],
                                 row['uncertaintyModel'],
                                 weights,
                                 bs_attr['branchSetID'],
                                 validate=validate,
                                 omit=omit,
                                 sub=sub)

        # levels with no surviving models are dropped entirely
        if not mw:
            continue

        add_branch_set(level_node, bs_attr, mw)
        root.append(level_node)

    return root
Beispiel #8
0
def build_nodal_plane_dist(npd):
    """
    Returns the nodal plane distribution as a Node instance

    :param npd:
        Nodal plane distribution as instance of :class:
        `openquake.hazardlib.pmf.PMF`
    :returns:
        Instance of :class:`openquake.baselib.node.Node`
    """
    npds = []
    dist = []
    # FIX: the original reused the name `npd` for the loop variable,
    # shadowing the PMF argument; renamed to `plane` for clarity
    for prob, plane in npd.data:
        dist.append((prob, (plane.dip, plane.strike, plane.rake)))
        nodal_plane = Node(
            "nodalPlane", {"dip": plane.dip, "probability": prob,
                           "strike": plane.strike, "rake": plane.rake})
        npds.append(nodal_plane)
    # collapse duplicated (dip, strike, rake) entries in the distribution
    sourceconverter.fix_dupl(dist)
    return Node("nodalPlaneDist", nodes=npds)
Beispiel #9
0
    def test_zero_node(self):
        s = BytesIO()
        node = Node('zero', {}, 0)
        with StreamingXMLWriter(s) as writer:
            writer.serialize(node)
        self.assertEqual(s.getvalue(), b'''\
<?xml version="1.0" encoding="utf-8"?>
<zero>
    0
</zero>
''')
Beispiel #10
0
def build_section(section):
    """
    Convert a FaultSection instance into a Node.

    :param section:
        A FaultSection instance
    :returns:
        Instance of :class:`openquake.baselib.node.Node`
    """
    surface_node = obj_to_node(section.surface)
    return Node("section", {'id': section.sec_id}, nodes=[surface_node])
Beispiel #11
0
def build_multi_mfd(mfd):
    """
    Convert a MultiMFD into a Node.

    :param mfd:
        MFD as instance of :class:
        `openquake.hazardlib.mfd.multi_mfd.MultiMFD`
    :returns:
        Instance of :class:`openquake.baselib.node.Node`
    """
    out = Node("multiMFD", dict(kind=mfd.kind, size=mfd.size))
    for key in sorted(mfd.kwargs):
        vals = mfd.kwargs[key]
        if key in ('magnitudes', 'occurRates'):
            # flatten the list of lists into a single flat list
            vals = [v for sub in vals for v in sub]
        out.append(Node(key, text=vals))
    if 'occurRates' in mfd.kwargs:
        # record the original sublist lengths so the flat occurRates
        # can be reconstructed on read
        lengths = [len(rates) for rates in mfd.kwargs['occurRates']]
        out.append(Node('lengths', text=lengths))
    return out
Beispiel #12
0
def build_site(site):
    """
    Convert a site object into a `site` Node.

    :param site: an object with attributes longitude, latitude, vs30,
        measured, z1pt0, z2pt5 and backarc
    :returns: a Node of kind `site`
    """
    return Node(
        'site', {
            'lon': site.longitude,
            'lat': site.latitude,
            'vs30': site.vs30,
            'vs30Type': site.measured,
            'z1pt0': site.z1pt0,
            # BUG FIX: was `param.z2pt5`, a NameError since `param` is
            # not defined in this scope; the value comes from `site`
            'z2pt5': site.z2pt5,
            'backarc': site.backarc
        })
 def write(self, destination, source_model, name=None):
     """
     Exports to NRML

     :param destination: path of the output XML file; removed first if
         it already exists
     :param source_model: object with a .sources attribute, a sequence
         of sources subscriptable by 'tectonicRegion'
     :param name: optional name, stored on the source model and used as
         the "name" attribute of the output sourceModel node
     """
     if os.path.exists(destination):
         os.remove(destination)
     self.destination = destination
     if name:
         source_model.name = name
     # NOTE(review): when `name` is None the sourceModel node gets a
     # None "name" attribute — confirm that is intended
     output_source_model = Node("sourceModel", {"name": name})
     # itemgetter implies the sources are subscriptable by key
     # (presumably Node-like objects) — verify against callers
     dic = groupby(source_model.sources,
                   operator.itemgetter('tectonicRegion'))
     for i, (trt, srcs) in enumerate(dic.items(), 1):
         output_source_model.append(
             Node('sourceGroup',
                  {'tectonicRegion': trt, 'name': 'group %d' % i},
                  nodes=srcs))
     print("Exporting Source Model to %s" % self.destination)
     with open(self.destination, "wb") as f:
         nrml.write([output_source_model], f, "%s")
Beispiel #14
0
 def damage_nodes(self, means, stddevs):
     """
     Build one `damage` node per damage state.

     :param means: array of means, one per damage state
     :param stddevs: array of stddevs, one per damage state
     :returns: a list of `damage` nodes
     """
     return [
         Node('damage',
              dict(ds=state.dmg_state, mean=mean, stddev=stddev))
         for state, mean, stddev in zip(self.damage_states, means, stddevs)]
def add_branch_set(branching_level, branch_set_attr, models_weights):
    '''
    Append a branch set, built from (model, weight) pairs, to a
    branching level node.
    '''
    branch_set = Node('logicTreeBranchSet', branch_set_attr, None)
    # derive a numeric index from the branch set ID, e.g. 'bs12' -> 12;
    # fall back to 999 when the ID carries no digits
    digits = re.sub('[^0-9]', '', branch_set_attr['branchSetID'])
    branch_index = int(digits) if digits else 999

    for pos, (model, weight) in enumerate(models_weights, 1):
        branch = Node('logicTreeBranch',
                      {'branchID': 'b%dm%d' % (branch_index, pos)}, None)
        branch.append(Node('uncertaintyModel', {}, model))
        branch.append(Node('uncertaintyWeight', {}, weight))
        branch_set.append(branch)

    branching_level.append(branch_set)
Beispiel #16
0
def export_site_model(ekey, dstore):
    """
    Export the site collection in the datastore as a site_model.xml
    file, de-duplicating identical site records.
    """
    dest = dstore.export_path('site_model.xml')
    root = Node('siteModel')
    hdffields = 'lons lats vs30 vs30measured z1pt0 z2pt5 '.split()
    xmlfields = 'lon lat vs30 vs30Type z1pt0 z2pt5'.split()
    # de-duplicate and sort the site records
    records = sorted({tuple(rec[f] for f in hdffields)
                      for rec in dstore['sitecol'].array})
    for rec in records:
        site_node = Node('site')
        for i, (hdffield, xmlfield) in enumerate(zip(hdffields, xmlfields)):
            if hdffield == 'vs30measured':
                # boolean flag becomes the vs30Type string
                site_node[xmlfield] = 'measured' if rec[i] else 'inferred'
            else:
                site_node[xmlfield] = rec[i]
        root.append(site_node)
    with open(dest, 'wb') as out:
        nrml.write([root], out)
    return [dest]
Beispiel #17
0
def combine_pt_sources(point_source_list,
                       filename,
                       name,
                       nrml_version='04',
                       id_location_flag='location'):
    """Method for combining lists of point sources and summing the
    rates of co-located points.

    Sources are joined based on id_location_flag, which can be 'id',
    'location' or None. Setting it to None means all points are simply
    added together and no check for co-located points is undertaken.

    :param point_source_list: list of source models, each a list of
        point sources; the merged rates accumulate on the first model
    :param filename: path of the output NRML file (written only when
        nrml_version == '04')
    :param name: name attribute of the output sourceModel node
    :param nrml_version: version flag; only '04' is handled here
    :param id_location_flag: 'id', 'location' or None (see above)
    :returns: the combined list of point sources
    """
    # Get ids
    combined_pt_sources = []
    if id_location_flag is not None:
        for pt in point_source_list[0]:
            for source_model in point_source_list[1:]:
                #         print source_model
                for pt_source in source_model:
                    if id_location_flag == 'id':
                        if pt_source.source_id == pt.source_id:
                            new_rates = merge_rates(pt, pt_source)
                            pt.mfd.modify_set_mfd(pt.mfd.min_mag,
                                                  pt.mfd.bin_width,
                                                  list(new_rates))
                            # NOTE(review): removing from source_model
                            # while iterating it skips the following
                            # element — confirm matches cannot be
                            # adjacent in the list
                            source_model.remove(pt_source)
                    elif id_location_flag == 'location':
                        # Check if location and nodal planes are the same
                        if pt_source.location.x == pt.location.x and \
                                pt_source.location.y == pt.location.y:
                            if pt_source.nodal_plane_distribution.data == pt.nodal_plane_distribution.data:
                                new_rates = merge_rates(pt, pt_source)
                                pt.mfd.modify_set_mfd(pt.mfd.min_mag,
                                                      pt.mfd.bin_width,
                                                      list(new_rates))
                                source_model.remove(pt_source)

    # once all overlapping point sources have been merged, add all to list
    # This should work as we have added rates to the first source model as
    # we have gone and removed sources in the same locations from the other
    # source mode lists
    for source_model in point_source_list:
        for pt in source_model:
            combined_pt_sources.append(pt)

    if nrml_version == '04':
        #        for source in combined_pt_sources:
        #            source_list.append(source)
        #            id_index = max(id_index, source.source_id)
        nodes = list(map(obj_to_node, sorted(combined_pt_sources)))
        source_model = Node("sourceModel", {"name": name}, nodes=nodes)
        with open(filename, 'wb') as f:
            nrml.write([source_model], f, '%s', xmlns=NAMESPACE)
    return combined_pt_sources
def complex_fault_geometry_from_shp(shape, record):
    """
    Build a complexFaultGeometry Node from a shapefile shape/record pair.

    :param shape: a shapefile shape with .parts (edge start indices),
        .points (lon/lat pairs) and .z (depths)
    :param record: a shapefile record whose "sourcetype" must be
        "complexFaultSource"
    :returns: a Node of kind complexFaultGeometry with faultTopEdge,
        intermediateEdge* and faultBottomEdge children
    """
    assert record["sourcetype"] == "complexFaultSource"
    # BUG FIX: copy .parts before appending the sentinel; the original
    # appended to shape.parts itself, mutating the caller's shape and
    # corrupting the edge indices on any subsequent use of that shape
    breakers = list(shape.parts)
    breakers.append(len(shape.z))
    indices = [range(breakers[i], breakers[i + 1])
               for i in range(0, len(breakers) - 1)]
    edges = []
    for iloc, idx in enumerate(indices):
        geom = []
        for j in idx:
            # flat posList: lon, lat, depth for every vertex of the edge
            geom.extend([shape.points[j][0], shape.points[j][1],
                         shape.z[j]])
        poslist_node = Node("posList", text=geom)
        linestring_node = Node("LineString", nodes=[poslist_node])
        if iloc == 0:
            # Fault top edge
            edges.append(Node("faultTopEdge", nodes=[linestring_node]))
        elif iloc == (len(indices) - 1):
            # Fault bottom edge
            edges.append(Node("faultBottomEdge",
                              nodes=[linestring_node]))
        else:
            edges.append(Node("intermediateEdge",
                              nodes=[linestring_node]))
    return Node("complexFaultGeometry", nodes=edges)
    def add_branch(self, node):
        '''
        Serialize a logic-tree node (and recursively its children) as a
        branch, deriving a human-readable branch name from the node's
        tag and attributes.

        :param node: a Node-like object with .tag, .attrib, .text and
            child nodes
        '''

        tag = strip_fqtag(node.tag)
        attrib = deepcopy(node.attrib)
        # pop whichever attribute key matches 'id' (exact key may vary)
        node_id = attrib.pop(get_dict_key_match(attrib, 'id'), None)

        if tag == 'logicTreeBranchSet':
            attrib.pop('uncertaintyType', None)
            keys = node.attrib.keys()
            # choose the branch name from the most specific applyTo*
            # attribute present, escaping spaces for LaTeX
            if 'applyToTectonicRegionType' in keys:
                name = attrib.pop('applyToTectonicRegionType')
                name = name.replace(' ', r'\\ ')
            elif 'applyToSourceType' in keys:
                name = attrib.pop('applyToSourceType')
            elif 'applyToSources' in keys:
                apply_to = attrib.pop('applyToSources')
                if os.path.isfile(apply_to):
                    name = r'each\\ source'
                else:
                    name = r'sources:\\ ' + apply_to
            elif 'applyToBranches' in keys:
                name = r'branches:\\ ' + attrib.pop('applyToBranches')
            else:
                name = ''
        elif tag == 'logicTree':
            # label the root with the uncertaintyType of the first branch
            name = r'%s\\ %s' % (node[0][0]['uncertaintyType'], tag)
        elif 'omitted' in tag:
            name = tag
        else:
            # NOTE(review): '\t' is a TAB escape here, so this emits
            # "<TAB>exttt{...}"; probably meant r'\texttt{%s}' — confirm
            name = '\texttt{%s}' % tag

        if self.include_ids:
            name = '%s: %s' % (node_id, name)

        self.start_branch(name, attrib)
        if node.text and node.text.strip():
            print('Ignoring node "%s" text "%s"' % (name, node.text.strip()))
        if len(node) > self.max_branches:
            # too many children: keep the first and last max_branches/2
            # and insert an ellipsis pseudo-node in between
            print('Too many (%d) nodes in %s, '
                  'abbreviating TEX to first & last %d' %
                  (len(node), tag, self.max_branches / 2))
            n_omitted = len(node) - self.max_branches
            ellipsis_text = str(n_omitted) + r' branches\\ omitted'
            ellipsis_node = Node(ellipsis_text, {}, None)
            nodes = node[:int(self.max_branches/2)] + [ellipsis_node] + \
                node[-int(self.max_branches/2):]
        else:
            nodes = node
        for subnode in nodes:
            self.serialize(subnode)
        self.end_branch()
Beispiel #20
0
 def dmg_dist_per_taxonomy_node(self, data):
     """
     Build a `dmgDistPerTaxonomy` node out of damage records.

     :param data: a sequence of records with attributes .taxonomy,
                  .mean and .stddev
     :returns: a `dmgDistPerTaxonomy` node
     """
     root = Node('dmgDistPerTaxonomy', nodes=[self.dmg_states])
     by_taxonomy = groupby(data, operator.attrgetter('taxonomy'))
     for taxonomy, rows in by_taxonomy.items():
         means = [row.mean for row in rows]
         stddevs = [row.stddev for row in rows]
         root.append(self.dd_node_taxo(taxonomy, means, stddevs))
     return root
Beispiel #21
0
 def _read_csv(self, csvnames, dirname):
     """
     Read asset records from CSV files, validating every header first.

     :param csvnames: names of csv files, space separated
     :param dirname: the directory where the csv files are
     :yields: asset nodes
     :raises InvalidFile: if a header has duplicated or unexpected fields
     """
     expected_header = self._csv_header()
     fnames = [os.path.join(dirname, f) for f in csvnames.split()]
     # first pass: validate the header of every file before reading data
     for fname in fnames:
         with open(fname, encoding='utf-8') as f:
             fields = next(csv.reader(f))
             header = set(fields)
             if len(header) < len(fields):
                 # NOTE(review): the message interpolates the
                 # de-duplicated set, so the duplicated field name
                 # itself is not shown — confirm if intended
                 raise InvalidFile(
                     '%s: The header %s contains a duplicated field' %
                     (fname, header))
             elif expected_header - header:
                 raise InvalidFile(
                     'Unexpected header in %s\nExpected: %s\nGot: %s' %
                     (fname, sorted(expected_header), sorted(header)))
     occupancy_periods = self.occupancy_periods.split()
     # second pass: build and yield one asset node per CSV row
     for fname in fnames:
         with open(fname, encoding='utf-8') as f:
             for i, dic in enumerate(csv.DictReader(f), 1):
                 asset = Node('asset', lineno=i)
                 with context(fname, asset):
                     asset['id'] = dic['id']
                     asset['number'] = valid.positivefloat(dic['number'])
                     asset['taxonomy'] = dic['taxonomy']
                     if 'area' in dic:  # optional attribute
                         asset['area'] = dic['area']
                     loc = Node(
                         'location',
                         dict(lon=valid.longitude(dic['lon']),
                              lat=valid.latitude(dic['lat'])))
                     costs = Node('costs')
                     for cost in self.cost_types['name']:
                         a = dict(type=cost, value=dic[cost])
                         costs.append(Node('cost', a))
                     occupancies = Node('occupancies')
                     for period in occupancy_periods:
                         a = dict(occupants=float(dic[period]),
                                  period=period)
                         occupancies.append(Node('occupancy', a))
                     tags = Node('tags')
                     for tagname in self.tagcol.tagnames:
                         if tagname != 'taxonomy':
                             tags.attrib[tagname] = dic[tagname]
                     asset.nodes.extend([loc, costs, occupancies, tags])
                     # log progress every 100k assets
                     if i % 100000 == 0:
                         logging.info('Read %d assets', i)
                 yield asset
Beispiel #22
0
def get_site_collection(site_model_file,
                        sites,
                        site_model_params=None,
                        filename=None):
    """
    Returns a SiteCollection instance by looking at the points and the
    site model defined by the configuration parameters.

    :param site_model_file:
        path to the site_model nrml file
    :param sites:
        locations of the hazard points
    :param site_model_params:
        object with a method .get_closest returning the closest site
        model parameters
    :param filename:
        path to output nrml file where new site model will be written,
        if this parameter is specified.
    """
    if site_model_params is None:
        # read the parameters directly from their file
        site_model_params = geo.geodetic.GeographicObjects(
            get_site_model(site_model_file))
    sitecol = []
    for pt in sites:
        # NB: the mesh, when read from the datastore, is a 32 bit array;
        # however, the underlying C library expects 64 bit floats, thus
        # we have to cast float(pt.longitude), float(pt.latitude);
        # we should change the geodetic speedups instead
        param, dist = site_model_params.get_closest(
            float(pt.longitude), float(pt.latitude))
        if dist >= MAX_SITE_MODEL_DISTANCE:
            # BUG FIX: this was a Python 2 print statement, which is a
            # SyntaxError under Python 3
            print('WARNING:The site parameter associated to %s came from a '
                  'distance of %d km!' % (pt, dist))
        sitecol.append(
            site.Site(pt, param.vs30, param.measured, param.z1pt0,
                      param.z2pt5, param.backarc))
    if filename is not None:
        site_nodes = list(map(obj_to_node, sitecol))
        site_model = Node("siteModel", nodes=site_nodes)
        with open(filename, 'wb') as f:
            # BUG FIX: nrml.write expects a *list* of root nodes (as the
            # commented-out line in the original correctly showed)
            nrml.write([site_model], f, '%s', xmlns=NAMESPACE)
    return site.SiteCollection(sitecol)