Example #1
def upgrade_file(path, multipoint):
    """Upgrade to the latest NRML version"""
    node0 = nrml.read(path, chatty=False)[0]
    shutil.copy(path, path + '.bak')  # make a backup of the original file
    tag = striptag(node0.tag)
    gml = True
    if tag == 'vulnerabilityModel':
        vf_dict, cat_dict = get_vulnerability_functions_04(path)
        # convert into a NRML 0.5 vulnerabilityModel
        node0 = Node(
            'vulnerabilityModel', cat_dict,
            nodes=[obj_to_node(val) for val in vf_dict.values()])
        gml = False
    elif tag == 'fragilityModel':
        node0 = read_nrml.convert_fragility_model_04(
            nrml.read(path)[0], path)
        gml = False
    elif tag == 'sourceModel':
        node0 = nrml.read(path)[0]
        dic = groupby(node0.nodes, operator.itemgetter('tectonicRegion'))
        node0.nodes = [Node('sourceGroup',
                            dict(tectonicRegion=trt, name="group %s" % i),
                            nodes=srcs)
                       for i, (trt, srcs) in enumerate(dic.items(), 1)]
        if multipoint:
            sourceconverter.update_source_model(node0, path + '.bak')
    with open(path, 'wb') as f:
        nrml.write([node0], f, gml=gml)
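A minimal driver for the function above might look like the following; 'model.xml' is a hypothetical path to an existing NRML 0.4 file, and the function's module-level imports are assumed:

# Hypothetical invocation; requires an existing NRML 0.4 file.
upgrade_file('model.xml', multipoint=True)
# The original is preserved as model.xml.bak and the file is
# rewritten in place as NRML 0.5.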
Example #2
def write_source_model(dest, sources_or_groups, name=None,
                       investigation_time=None):
    """
    Writes a source model to XML.

    :param dest:
        Destination path
    :param sources_or_groups:
        A SourceModel instance, a list of SourceGroups, or a list of sources
    :param name:
        Name of the source model (if missing, extracted from the filename)
    """
    if isinstance(sources_or_groups, nrml.SourceModel):
        with open(dest, 'wb') as f:
            nrml.write([obj_to_node(sources_or_groups)], f, '%s')
        return
    if isinstance(sources_or_groups[0], sourceconverter.SourceGroup):
        groups = sources_or_groups
    else:  # passed a list of sources
        srcs_by_trt = groupby(
            sources_or_groups, operator.attrgetter('tectonic_region_type'))
        groups = [sourceconverter.SourceGroup(trt, srcs_by_trt[trt])
                  for trt in srcs_by_trt]
    name = name or os.path.splitext(os.path.basename(dest))[0]
    nodes = list(map(obj_to_node, sorted(groups)))
    attrs = {"name": name}
    if investigation_time is not None:
        attrs['investigation_time'] = investigation_time
    source_model = Node("sourceModel", attrs, nodes=nodes)
    with open(dest, 'wb') as f:
        nrml.write([source_model], f, '%s')
    return dest
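The TRT grouping above comes from openquake.baselib.general.groupby; a plain-Python sketch of the same idea, for illustration only:

from collections import OrderedDict

def groupby_trt(sources):
    # Bucket sources by tectonic_region_type, preserving first-seen
    # order, as the writer above does via groupby().
    out = OrderedDict()
    for src in sources:
        out.setdefault(src.tectonic_region_type, []).append(src)
    return out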
Example #3
    def serialize(self, data):
        """
        Write a sequence of uniform hazard spectra to the specified file.

        :param data:
            Iterable of UHS data. Each datum must be an object with the
            following attributes:

            * imls: A sequence of Intensity Measure Levels
            * location: An object representing the location of the curve; must
              have `x` and `y` to represent lon and lat, respectively.
        """
        gml_ns = nrml.SERIALIZE_NS_MAP['gml']

        with open(self.dest, 'wb') as fh:
            root = et.Element('nrml')

            uh_spectra = et.SubElement(root, 'uniformHazardSpectra')

            _set_metadata(uh_spectra, self.metadata, _ATTR_MAP)

            periods_elem = et.SubElement(uh_spectra, 'periods')
            periods_elem.text = ' '.join([str(x)
                                          for x in self.metadata['periods']])

            for uhs in data:
                uhs_elem = et.SubElement(uh_spectra, 'uhs')
                gml_point = et.SubElement(uhs_elem, '{%s}Point' % gml_ns)
                gml_pos = et.SubElement(gml_point, '{%s}pos' % gml_ns)
                gml_pos.text = '%s %s' % (uhs.location.x, uhs.location.y)
                imls_elem = et.SubElement(uhs_elem, 'IMLs')
                imls_elem.text = ' '.join(['%10.7E' % x for x in uhs.imls])

            nrml.write(list(root), fh)
Example #4
    def test_simple(self):
        testfile = os.path.join(testdir, 'two-point-sources.xml')
        sm = nrml.read(testfile).sourceModel
        update_source_model(sm, testfile)
        with io.BytesIO() as f:
            nrml.write(sm, f)
            got = f.getvalue().decode('utf-8')
            self.assertEqual(got, expected)
Example #5
    def serialize(self, data):
        """
        :param data:

            A sequence of data where each datum has the following attributes:

            * matrix: N-dimensional numpy array containing the disaggregation
              histogram.
            * dim_labels: A list of strings which label the dimensions of a
              given histogram. For example, for a Magnitude-Distance-Epsilon
              histogram, we would expect `dim_labels` to be
              ``['Mag', 'Dist', 'Eps']``.
            * poe: The disaggregation Probability of Exceedance level for which
              these results were produced.
            * iml: Intensity measure level, interpolated from the source hazard
              curve at the given ``poe``.
        """

        with open(self.dest, 'wb') as fh, floatformat('%.6E'):
            root = et.Element('nrml')

            diss_matrices = et.SubElement(root, 'disaggMatrices')

            _set_metadata(diss_matrices, self.metadata, _ATTR_MAP)

            transform = lambda val: ', '.join(map(scientificformat, val))
            _set_metadata(diss_matrices, self.metadata, self.BIN_EDGE_ATTR_MAP,
                          transform=transform)

            for result in data:
                diss_matrix = et.SubElement(diss_matrices, 'disaggMatrix')

                # Check that we have bin edges defined for each dimension label
                # (mag, dist, lon, lat, eps, TRT)
                for label in result.dim_labels:
                    bin_edge_attr = self.DIM_LABEL_TO_BIN_EDGE_MAP.get(label)
                    assert self.metadata.get(bin_edge_attr) is not None, (
                        "Writer is missing '%s' metadata" % bin_edge_attr
                    )

                result_type = ','.join(result.dim_labels)
                diss_matrix.set('type', result_type)

                dims = ','.join(str(x) for x in result.matrix.shape)
                diss_matrix.set('dims', dims)

                diss_matrix.set('poE', scientificformat(result.poe))
                diss_matrix.set('iml', scientificformat(result.iml))

                for idxs, value in numpy.ndenumerate(result.matrix):
                    prob = et.SubElement(diss_matrix, 'prob')

                    index = ','.join([str(x) for x in idxs])
                    prob.set('index', index)
                    prob.set('value', scientificformat(value))

            nrml.write(list(root), fh)
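numpy.ndenumerate is what linearizes the N-dimensional matrix into (index, value) pairs for the prob nodes; a standalone illustration:

import numpy

matrix = numpy.arange(6).reshape(2, 3)
for idxs, value in numpy.ndenumerate(matrix):
    # idxs is a tuple like (0, 2); the writer joins it into "0,2"
    print(','.join(str(x) for x in idxs), value)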
Example #6
    def test_complex(self):
        testfile = os.path.normpath(os.path.join(
            testdir, '../../../qa_tests_data/classical/case_30/ssm/shallow/'
            'gridded_seismicity_source_4.xml'))
        sm = nrml.read(testfile).sourceModel
        update_source_model(sm, testfile)
        with io.BytesIO() as f:
            nrml.write(sm, f)
            got = f.getvalue().decode('utf-8')
            self.assertEqual(got, multipoint)
Example #7
def fix(fname, outname=None):
    root = nrml.read(fname)
    xmlns = root['xmlns']
    if xmlns == u'http://openquake.org/xmlns/nrml/0.4':
        for src_node in root.sourceModel:
            fix_source_node(src_node)
    else:  # nrml/0.5+
        for src_grp in root.sourceModel:
            for src_node in src_grp:
                fix_source_node(src_node)
    with open(outname or fname, 'wb') as out:
        nrml.write([root.sourceModel], out, xmlns=xmlns)
Example #8
    def serialize(self, data):
        """
        Write a sequence of hazard curves to the specified file.

        :param data:
            Iterable of hazard curve data. Each datum must be an object with
            the following attributes:

            * poes: A list of probability of exceedance values (floats).
            * location: An object representing the location of the curve; must
              have `x` and `y` to represent lon and lat, respectively.
        """
        with open(self.dest, 'wb') as fh:
            root = et.Element('nrml')
            self.add_hazard_curves(root, self.metadata, data)
            nrml.write(list(root), fh)
Example #9
    def serialize(self, data):
        """
        Write a sequence of hazard curves to the specified file.

        :param data:
            Iterable of hazard curve data. Each datum must be an object with
            the following attributes:

            * poes: A list of probability of exceedance values (floats).
            * location: An object representing the location of the curve; must
              have `x` and `y` to represent lon and lat, respectively.
        """
        with open(self.dest, 'wb') as fh:
            root = et.Element('nrml')
            self.add_hazard_curves(root, self.metadata, data)
            nrml.write(list(root), fh)
Example #10
def write_source_model(dest, groups, name=None):
    """
    Writes a source model to XML.

    :param str dest:
        Destination path
    :param list groups:
        Source model as list of SourceGroups
    :param str name:
        Name of the source model (if missing, extracted from the filename)
    """
    name = name or os.path.splitext(os.path.basename(dest))[0]
    nodes = list(map(obj_to_node, sorted(groups)))
    source_model = Node("sourceModel", {"name": name}, nodes=nodes)
    with open(dest, 'wb') as f:
        nrml.write([source_model], f, '%s')
    return dest
Example #11
def write_site_model(dest, sites, name=None):
    """
    Writes a site model to XML.

    :param str dest:
        Destination path
    :param SiteCollection sites:
        Site collection object of class SiteCollection
    :param str name:
        Name of the site model (if missing, extracted from the filename)
    """
    name = name or os.path.splitext(os.path.basename(dest))[0]
    nodes = list(map(obj_to_node, sorted(sites)))
    source_model = Node("siteModel", {"name": name}, nodes=nodes)
    with open(dest, 'wb') as f:
        nrml.write([source_model], f, '%s')
    return dest
Example #12
    def serialize(self, data, fmt='%10.7E'):
        """
        Serialize a collection of ground motion fields to XML.

        :param data:
            An iterable of "GMF set" objects.
            Each "GMF set" object should:

            * have an `investigation_time` attribute
            * have a `stochastic_event_set_id` attribute
            * be iterable, yielding a sequence of "GMF" objects

            Each "GMF" object should:

            * have an `imt` attribute
            * have an `sa_period` attribute (only if `imt` is 'SA')
            * have an `sa_damping` attribute (only if `imt` is 'SA')
            * have an `event_id` attribute (to indicate which rupture
              contributed to this gmf)
            * be iterable, yielding a sequence of "GMF node" objects

            Each "GMF node" object should have:

            * a `gmv` attribute (to indicate the ground motion value)
            * `lon` and `lat` attributes (to indicate the geographical location
              of the ground motion field)
        """
        gmf_set_nodes = []
        for gmf_set in data:
            gmf_set_node = Node('gmfSet')
            if gmf_set.investigation_time:
                gmf_set_node['investigationTime'] = str(
                    gmf_set.investigation_time)
            gmf_set_node['stochasticEventSetId'] = str(
                gmf_set.stochastic_event_set_id)
            gmf_set_node.nodes = gen_gmfs(gmf_set)
            gmf_set_nodes.append(gmf_set_node)

        gmf_container = Node('gmfCollection')
        gmf_container[SM_TREE_PATH] = self.sm_lt_path
        gmf_container[GSIM_TREE_PATH] = self.gsim_lt_path
        gmf_container.nodes = gmf_set_nodes

        with open(self.dest, 'wb') as dest:
            nrml.write([gmf_container], dest, fmt)
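The duck-typed contract in the docstring can be satisfied with plain objects; a hypothetical sketch (these names are illustrative, not engine classes):

from types import SimpleNamespace

class GMF(list):
    # An iterable of GMF nodes carrying the attributes the writer expects.
    def __init__(self, imt, event_id, nodes):
        super().__init__(nodes)
        self.imt, self.event_id = imt, event_id

node = SimpleNamespace(gmv=0.123, lon=9.15, lat=45.17)
gmf = GMF('PGA', event_id=1, nodes=[node])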
Example #13
    def serialize(self, data, fmt='%10.7E'):
        """
        Serialize a collection of ground motion fields to XML.

        :param data:
            An iterable of "GMF set" objects.
            Each "GMF set" object should:

            * have an `investigation_time` attribute
            * have a `stochastic_event_set_id` attribute
            * be iterable, yielding a sequence of "GMF" objects

            Each "GMF" object should:

            * have an `imt` attribute
            * have an `sa_period` attribute (only if `imt` is 'SA')
            * have an `sa_damping` attribute (only if `imt` is 'SA')
            * have an `event_id` attribute (to indicate which rupture
              contributed to this gmf)
            * be iterable, yielding a sequence of "GMF node" objects

            Each "GMF node" object should have:

            * a `gmv` attribute (to indicate the ground motion value)
            * `lon` and `lat` attributes (to indicate the geographical location
              of the ground motion field)
        """
        gmf_set_nodes = []
        for gmf_set in data:
            gmf_set_node = Node('gmfSet')
            if gmf_set.investigation_time:
                gmf_set_node['investigationTime'] = str(
                    gmf_set.investigation_time)
            gmf_set_node['stochasticEventSetId'] = str(
                gmf_set.stochastic_event_set_id)
            gmf_set_node.nodes = gen_gmfs(gmf_set)
            gmf_set_nodes.append(gmf_set_node)

        gmf_container = Node('gmfCollection')
        gmf_container[SM_TREE_PATH] = self.sm_lt_path
        gmf_container[GSIM_TREE_PATH] = self.gsim_lt_path
        gmf_container.nodes = gmf_set_nodes

        with open(self.dest, 'wb') as dest:
            nrml.write([gmf_container], dest, fmt)
Example #14
def tidy(fnames):
    """
    Reformat an NRML file in a canonical form. That also means reducing the
    precision of the floats to a standard value. If the file is invalid,
    a clear error message is shown.
    """
    for fname in fnames:
        try:
            node = nrml.read(fname)
        except ValueError as err:
            print(err)
            return
        with open(fname + '.bak', 'wb') as f:
            f.write(open(fname, 'rb').read())
        with open(fname, 'wb') as f:
            # make sure the xmlns, i.e. the NRML version, is unchanged
            nrml.write(node.nodes, f, writers.FIVEDIGITS, xmlns=node['xmlns'])
        print('Reformatted %s, original left in %s.bak' % (fname, fname))
Example #15
def main(fnames):
    """
    Reformat an NRML file in a canonical form. That also means reducing the
    precision of the floats to a standard value. If the file is invalid,
    a clear error message is shown.
    """
    for fname in fnames:
        try:
            node = nrml.read(fname)
        except ValueError as err:
            print(err)
            return
        with open(fname + '.bak', 'wb') as f:
            f.write(open(fname, 'rb').read())
        with open(fname, 'wb') as f:
            # make sure the xmlns, i.e. the NRML version, is unchanged
            nrml.write(node.nodes, f, writers.FIVEDIGITS, xmlns=node['xmlns'])
        print('Reformatted %s, original left in %s.bak' % (fname, fname))
Example #16
def export_maxloss_ruptures(ekey, dstore):
    """
    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    """
    oq = dstore['oqparam']
    rlzs_by_gsim = dstore['csm_info'].get_rlzs_by_gsim_grp()
    num_ses = oq.ses_per_logic_tree_path
    fnames = []
    for loss_type in oq.loss_dt().names:
        ebr = getters.get_maxloss_rupture(dstore, loss_type)
        root = hazard_writers.rupture_to_element(
            ebr.export(rlzs_by_gsim[ebr.grp_id], num_ses))
        dest = dstore.export_path('rupture-%s.xml' % loss_type)
        with open(dest, 'wb') as fh:
            nrml.write(list(root), fh)
        fnames.append(dest)
    return fnames
Example #17
File: risk.py Project: gem/oq-engine
def export_maxloss_ruptures(ekey, dstore):
    """
    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    """
    oq = dstore['oqparam']
    mesh = get_mesh(dstore['sitecol'])
    rlzs_by_gsim = dstore['csm_info'].get_rlzs_by_gsim_grp()
    num_ses = oq.ses_per_logic_tree_path
    fnames = []
    for loss_type in oq.loss_dt().names:
        ebr = getters.get_maxloss_rupture(dstore, loss_type)
        root = hazard_writers.rupture_to_element(
            ebr.export(mesh, rlzs_by_gsim[ebr.grp_id], num_ses))
        dest = dstore.export_path('rupture-%s.xml' % loss_type)
        with open(dest, 'wb') as fh:
            nrml.write(list(root), fh)
        fnames.append(dest)
    return fnames
Example #18
def reduce(fname, reduction_factor):
    """
    Produce a submodel from `fname` by sampling the nodes randomly.
    Supports source models, site models and exposure models. As a special
    case, it is also able to reduce .csv files by sampling the lines.
    This is a debugging utility to reduce large computations to small ones.
    """
    if fname.endswith('.csv'):
        with open(fname) as f:
            all_lines = f.readlines()
        lines = random_filter(all_lines, reduction_factor)
        shutil.copy(fname, fname + '.bak')
        print('Copied the original file in %s.bak' % fname)
        with open(fname, 'wb') as f:
            for line in lines:
                f.write(encode(line))
        print('Extracted %d lines out of %d' % (len(lines), len(all_lines)))
        return
    node = nrml.read(fname)
    model = node[0]
    if model.tag.endswith('exposureModel'):
        total = len(model.assets)
        model.assets.nodes = random_filter(model.assets, reduction_factor)
        num_nodes = len(model.assets)
    elif model.tag.endswith('siteModel'):
        total = len(model)
        model.nodes = random_filter(model, reduction_factor)
        num_nodes = len(model)
    elif model.tag.endswith('sourceModel'):
        if node['xmlns'] != 'http://openquake.org/xmlns/nrml/0.5':
            raise InvalidFile('%s: not NRML0.5' % fname)
        total = sum(len(sg) for sg in model)
        num_nodes = 0
        for sg in model:
            sg.nodes = random_filter(sg, reduction_factor)
            num_nodes += len(sg)
    else:
        raise RuntimeError('Unknown model tag: %s' % model.tag)
    shutil.copy(fname, fname + '.bak')
    print('Copied the original file in %s.bak' % fname)
    with open(fname, 'wb') as f:
        nrml.write([model], f, xmlns=node['xmlns'])
    print('Extracted %d nodes out of %d' % (num_nodes, total))
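random_filter comes from openquake.baselib.general and keeps each item with probability equal to the reduction factor; a minimal equivalent, under that assumption:

import random

def random_filter(objects, reduction_factor, seed=42):
    # Keep each object with probability `reduction_factor`; the fixed
    # seed makes repeated reductions reproducible.
    rnd = random.Random(seed)
    return [obj for obj in objects if rnd.random() <= reduction_factor]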
Example #19
    def serialize(self, data):
        """
        Serialize hazard map data to XML.

        See :meth:`HazardMapWriter.serialize` for details about the expected
        input.
        """
        with open(self.dest, 'wb') as fh:
            root = et.Element('nrml')
            hazard_map = et.SubElement(root, 'hazardMap')
            _set_metadata(hazard_map, self.metadata, _ATTR_MAP)

            for lon, lat, iml in data:
                node = et.SubElement(hazard_map, 'node')
                node.set('lon', str(lon))
                node.set('lat', str(lat))
                node.set('iml', str(iml))

            nrml.write(list(root), fh)
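The expected data is simply an iterable of (lon, lat, iml) triples; the node-building pattern, standalone with the standard library:

import xml.etree.ElementTree as et

root = et.Element('nrml')
hazard_map = et.SubElement(root, 'hazardMap')
for lon, lat, iml in [(9.15, 45.17, 0.25), (9.20, 45.17, 0.23)]:
    node = et.SubElement(hazard_map, 'node')
    node.set('lon', str(lon))
    node.set('lat', str(lat))
    node.set('iml', str(iml))
print(et.tostring(root).decode())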
Example #20
    def serialize(self, data):
        """
        Serialize hazard map data to XML.

        See :meth:`HazardMapWriter.serialize` for details about the expected
        input.
        """
        with open(self.dest, 'wb') as fh:
            root = et.Element('nrml')
            hazard_map = et.SubElement(root, 'hazardMap')
            _set_metadata(hazard_map, self.metadata, _ATTR_MAP)

            for lon, lat, iml in data:
                node = et.SubElement(hazard_map, 'node')
                node.set('lon', str(lon))
                node.set('lat', str(lat))
                node.set('iml', str(iml))

            nrml.write(list(root), fh)
Example #21
def reduce_source_model(smlt_file, source_ids, remove=True):
    """
    Extract sources from the composite source model
    """
    found = 0
    to_remove = []
    for paths in logictree.collect_info(smlt_file).smpaths.values():
        for path in paths:
            logging.info('Reading %s', path)
            root = nrml.read(path)
            model = Node('sourceModel', root[0].attrib)
            origmodel = root[0]
            if root['xmlns'] == 'http://openquake.org/xmlns/nrml/0.4':
                for src_node in origmodel:
                    if src_node['id'] in source_ids:
                        model.nodes.append(src_node)
            else:  # nrml/0.5
                for src_group in origmodel:
                    sg = copy.copy(src_group)
                    sg.nodes = []
                    weights = src_group.get('srcs_weights')
                    if weights:
                        assert len(weights) == len(src_group.nodes)
                    else:
                        weights = [1] * len(src_group.nodes)
                    src_group['srcs_weights'] = reduced_weights = []
                    for src_node, weight in zip(src_group, weights):
                        if src_node['id'] in source_ids:
                            found += 1
                            sg.nodes.append(src_node)
                            reduced_weights.append(weight)
                    if sg.nodes:
                        model.nodes.append(sg)
            shutil.copy(path, path + '.bak')
            if model:
                with open(path, 'wb') as f:
                    nrml.write([model], f, xmlns=root['xmlns'])
            elif remove:  # remove the files completely reduced
                to_remove.append(path)
    if found:
        for path in to_remove:
            os.remove(path)
Example #22
def reduce_source_model(smlt_file, source_ids, remove=True):
    """
    Extract sources from the composite source model
    """
    found = 0
    to_remove = set()
    for paths in logictree.collect_info(smlt_file).smpaths.values():
        for path in paths:
            logging.info('Reading %s', path)
            root = nrml.read(path)
            model = Node('sourceModel', root[0].attrib)
            origmodel = root[0]
            if root['xmlns'] == 'http://openquake.org/xmlns/nrml/0.4':
                for src_node in origmodel:
                    if src_node['id'] in source_ids:
                        model.nodes.append(src_node)
            else:  # nrml/0.5
                for src_group in origmodel:
                    sg = copy.copy(src_group)
                    sg.nodes = []
                    weights = src_group.get('srcs_weights')
                    if weights:
                        assert len(weights) == len(src_group.nodes)
                    else:
                        weights = [1] * len(src_group.nodes)
                    src_group['srcs_weights'] = reduced_weights = []
                    for src_node, weight in zip(src_group, weights):
                        if src_node['id'] in source_ids:
                            found += 1
                            sg.nodes.append(src_node)
                            reduced_weights.append(weight)
                    if sg.nodes:
                        model.nodes.append(sg)
            shutil.copy(path, path + '.bak')
            if model:
                with open(path, 'wb') as f:
                    nrml.write([model], f, xmlns=root['xmlns'])
            elif remove:  # remove the files completely reduced
                to_remove.add(path)
    if found:
        for path in to_remove:
            os.remove(path)
Example #23
    def write(self, destination, source_model, name=None):
        """
        Exports to NRML
        """
        if os.path.exists(destination):
            os.remove(destination)
        self.destination = destination
        if name:
            source_model.name = name
        output_source_model = Node("sourceModel", {"name": name})
        dic = groupby(source_model.sources,
                      operator.itemgetter('tectonicRegion'))
        for i, (trt, srcs) in enumerate(dic.items(), 1):
            output_source_model.append(
                Node('sourceGroup',
                     {'tectonicRegion': trt, 'name': 'group %d' % i},
                     nodes=srcs))
        print("Exporting Source Model to %s" % self.destination)
        with open(self.destination, "wb") as f:
            nrml.write([output_source_model], f, "%s")
Example #24
def export_site_model(ekey, dstore):
    dest = dstore.export_path('site_model.xml')
    site_model_node = Node('siteModel')
    hdffields = 'lons lats vs30 vs30measured z1pt0 z2pt5 '.split()
    xmlfields = 'lon lat vs30 vs30Type z1pt0 z2pt5'.split()
    recs = [tuple(rec[f] for f in hdffields)
            for rec in dstore['sitecol'].array]
    unique_recs = sorted(set(recs))
    for rec in unique_recs:
        n = Node('site')
        for f, hdffield in enumerate(hdffields):
            xmlfield = xmlfields[f]
            if hdffield == 'vs30measured':
                value = 'measured' if rec[f] else 'inferred'
            else:
                value = rec[f]
            n[xmlfield] = value
        site_model_node.append(n)
    with open(dest, 'wb') as f:
        nrml.write([site_model_node], f)
    return [dest]
Example #25
def export_site_model(ekey, dstore):
    dest = dstore.export_path('site_model.xml')
    site_model_node = Node('siteModel')
    hdf2xml = dict(lons='lon', lats='lat', depths='depth',
                   vs30measured='vs30Type')
    for rec in dstore['sitecol'].array:
        n = Node('site')
        for hdffield in rec.dtype.names:
            if hdffield == 'sids':  # skip
                continue
            elif hdffield == 'depth' and rec[hdffield] == 0:
                continue
            xmlfield = hdf2xml.get(hdffield, hdffield)
            if hdffield == 'vs30measured':
                value = 'measured' if rec[hdffield] else 'inferred'
            else:
                value = rec[hdffield]
            n[xmlfield] = value
        site_model_node.append(n)
    with open(dest, 'wb') as f:
        nrml.write([site_model_node], f)
    return [dest]
Example #26
def renumber_sm(smlt_file):
    """
    Renumber the sources belonging to the same source model, even if split
    in multiple files, to avoid duplicated source IDs. NB: it changes the
    XML files in place, without making a backup, so be careful.
    """
    logging.basicConfig(level=logging.INFO)
    smpaths = logictree.collect_info(smlt_file).smpaths
    smap = parallel.Starmap(read_sm, [(path, ) for path in smpaths])
    smodel, srcs = {}, []
    for sm, fname, sources in smap:
        smodel[fname] = sm
        srcs.extend(sources)
    parallel.Starmap.shutdown()
    dic = general.groupby(srcs, operator.attrgetter('value'))
    n = 1
    for sources in dic.values():
        for src in sources:
            src.node['id'] = str(n)
        n += 1
    for fname, root in smodel.items():
        logging.info('Saving %s', fname)
        with open(fname, 'wb') as f:
            nrml.write(root, f, xmlns=root['xmlns'])
Example #27
def upgrade_nrml(directory, dry_run, multipoint):
    """
    Upgrade all the NRML files contained in the given directory to the latest
    NRML version. Works by walking all subdirectories.
    WARNING: there is no downgrade!
    """
    for cwd, dirs, files in os.walk(directory):
        for f in files:
            path = os.path.join(cwd, f)
            if f.endswith('.xml'):
                ip = iterparse(path, events=('start',))
                next(ip)  # read node zero
                try:
                    fulltag = next(ip)[1].tag  # tag of the first node
                    xmlns, tag = fulltag.split('}')
                except Exception:  # not a NRML file
                    xmlns, tag = '', ''
                if xmlns[1:] == NRML05:  # already upgraded
                    if 'sourceModel' in tag and multipoint:
                        print('upgrading to multiPointSources', path)
                        node0 = nrml.read(path)[0]
                        sourceconverter.update_source_model(node0, path)
                        with open(path, 'wb') as f:
                            nrml.write([node0], f, gml=True)
                elif 'nrml/0.4' in xmlns and (
                        'vulnerability' in tag or 'fragility' in tag or
                        'sourceModel' in tag):
                    if not dry_run:
                        print('Upgrading', path)
                        try:
                            upgrade_file(path, multipoint)
                        except Exception as exc:
                            print(exc)
                            raise
                    else:
                        print('Not upgrading', path)
Example #28
def main(directory, dry_run=False, multipoint=False):
    """
    Upgrade all the NRML files contained in the given directory to the latest
    NRML version. Works by walking all subdirectories.
    WARNING: there is no downgrade!
    """
    for cwd, dirs, files in os.walk(directory):
        for f in files:
            path = os.path.join(cwd, f)
            if f.endswith('.xml'):
                ip = iterparse(path, events=('start', ))
                next(ip)  # read node zero
                try:
                    fulltag = next(ip)[1].tag  # tag of the first node
                    xmlns, tag = fulltag.split('}')
                except Exception:  # not a NRML file
                    xmlns, tag = '', ''
                if xmlns[1:] == NRML05:  # already upgraded
                    if 'sourceModel' in tag and multipoint:
                        print('upgrading to multiPointSources', path)
                        node0 = nrml.read(path)[0]
                        sourceconverter.update_source_model(node0, path)
                        with open(path, 'wb') as f:
                            nrml.write([node0], f, gml=True)
                elif 'nrml/0.4' in xmlns and ('vulnerability' in tag
                                              or 'fragility' in tag
                                              or 'sourceModel' in tag):
                    if not dry_run:
                        print('Upgrading', path)
                        try:
                            upgrade_file(path, multipoint)
                        except Exception as exc:
                            print(exc)
                            raise
                    else:
                        print('Not upgrading', path)
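The namespace sniffing above relies on iterparse emitting 'start' events lazily, so only the head of each file is read; the mechanics in isolation:

import io
from xml.etree.ElementTree import iterparse

buf = io.BytesIO(b'<nrml xmlns="http://openquake.org/xmlns/nrml/0.5">'
                 b'<sourceModel/></nrml>')
ip = iterparse(buf, events=('start',))
event, elem = next(ip)            # start event for the root element
xmlns, tag = elem.tag.split('}')  # '{http://...' and the local tag
print(xmlns[1:], tag)             # http://openquake.org/xmlns/nrml/0.5 nrml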
Example #29
def write_source_model(dest,
                       sources_or_groups,
                       name=None,
                       investigation_time=None):
    """
    Writes a source model to XML.

    :param dest:
        Destination path
    :param sources_or_groups:
        A SourceModel instance, a list of SourceGroups, or a list of sources
    :param name:
        Name of the source model (if missing, extracted from the filename)
    """
    if isinstance(sources_or_groups, nrml.SourceModel):
        with open(dest, 'wb') as f:
            nrml.write([obj_to_node(sources_or_groups)], f, '%s')
        return
    if isinstance(sources_or_groups[0], sourceconverter.SourceGroup):
        groups = sources_or_groups
    else:  # passed a list of sources
        srcs_by_trt = groupby(sources_or_groups,
                              operator.attrgetter('tectonic_region_type'))
        groups = [
            sourceconverter.SourceGroup(trt, srcs_by_trt[trt])
            for trt in srcs_by_trt
        ]
    name = name or os.path.splitext(os.path.basename(dest))[0]
    nodes = list(map(obj_to_node, sorted(groups)))
    attrs = {"name": name}
    if investigation_time is not None:
        attrs['investigation_time'] = investigation_time
    source_model = Node("sourceModel", attrs, nodes=nodes)
    with open(dest, 'wb') as f:
        nrml.write([source_model], f, '%s')
    return dest
Example #30
def run_smoothing(grid_lims, config, catalogue, completeness_table, map_config,
                  run, overwrite=True):
    """Run all the smoothing
    :params config:
        Dictionary of configuration parameters.
        For more info see helmstetter_werner_2012 code 
        and docs.
    """

    completeness_string = 'comp'
    for ym in completeness_table:
        completeness_string += '_%i_%.1f' % (ym[0], ym[1])
    smoother_filename = "Australia_Adaptive_K%i_b%.3f_mmin%.1f_%s.csv" % (
        config['k'], config['bvalue'], config['mmin'], completeness_string)

    filename = smoother_filename[:-4] + '.xml'
    if os.path.exists(filename) and not overwrite:
        print('%s already created, not overwriting!' % filename)
        return

    smoother = h_w.HelmstetterEtAl2007(grid_lims,
                                       config,
                                       catalogue,
                                       storage_file=("Aus1_tmp2%.3f_%s.hdf5" %
                                                     (config['bvalue'], run)))
    smoother._get_catalogue_completeness_weights(completeness_table)
    smoother.build_distance_arrays()
    smoother.build_catalogue_2_grid_array()
    # Exhaustive smoothing
    exhaustive = False
    if exhaustive:
        params, poiss_llh = smoother.exhaustive_smoothing(
            np.arange(2, 10, 1), np.arange(1.0e-6, 1.0e-5, 2.0e-6))
        print(params, poiss_llh)
        smoother.config["k"] = params[0]
        smoother.config["r_min"] = params[1]
    #print 'Exiting now, re-run using optimised parameters'
    #sys.exit()
    d_i = smoother.optimise_bandwidths()
    smoother.run_smoothing(config["r_min"], d_i)
    data = np.column_stack([smoother.grid, smoother.rates])
    np.savetxt(
        smoother_filename,
        data,
        #               np.column_stack([smoother.grid, smoother.rates]),
        delimiter=",",
        fmt=["%.4f", "%.4f", "%.8e"],
        header="longitude,latitude,rate")

    # Creating a basemap - input a configuration and (if desired) a title
    title = 'Smoothed seismicity rate for learning \nperiod %i %i, K=%i, Mmin=%.1f' % (
        config['learning_start'], config['learning_end'], smoother.config['k'],
        smoother.config['mmin'])
    basemap1 = HMTKBaseMap(map_config, title)
    basemap1.m.drawmeridians(
        np.arange(map_config['min_lon'], map_config['max_lon'], 5))
    basemap1.m.drawparallels(
        np.arange(map_config['min_lat'], map_config['max_lat'], 5))
    # Adding the smoothed grid to the basemap
    sym = (2., 3., 'cx')
    x, y = basemap1.m(smoother.grid[:, 0], smoother.grid[:, 1])
    if smoother.config['mmin'] == 3.5:
        vmax = -1.0
    elif smoother.config['mmin'] == 4.0:
        vmax = -2.5
    else:
        vmax = -1.0
    basemap1.m.scatter(x,
                       y,
                       marker='s',
                       c=np.log10(smoother.rates),
                       cmap=plt.cm.coolwarm,
                       zorder=10,
                       lw=0,
                       vmin=-7.0,
                       vmax=vmax)
    basemap1.m.drawcoastlines(linewidth=1, zorder=50)  # Add coastline on top
    #basemap1.m.drawmeridians(np.arange(llat, ulat, 5))
    #basemap1.m.drawparallels(np.arange(llon, ulon, 5))
    plt.colorbar(label='Log10(Smoothed rate per cell)')
    #plt.colorbar()#label='log10(Smoothed rate per cell)')
    plt.legend()
    #basemap1.m.scatter(x, y, marker = 's', c = smoother.data[:,4], cmap = plt.cm.coolwarm, zorder=10)
    #basemap1.m.scatter([150],[22], marker='o')
    #basemap1.fig.show()

    #(smoother.data[0], smoother.data[1])
    #basemap1.add_catalogue(catalogue_depth_clean, erlay=False)
    figname = smoother_filename[:-4] + '_smoothed_rates_map.png'
    plt.savefig(figname)

    source_list = []
    #i=0
    min_mag = 4.5
    max_mag = 7.2
    # Read in data again to solve number formatting issue in smoother.data
    # For some reason it just returns 0 for all a values
    #data = np.genfromtxt(smoother_filename, delimiter = ',', skip_header = 1)

    tom = PoissonTOM(
        50)  # Dummy temporal occurrence model for building pt sources
    msr = Leonard2014_SCR()
    for j in range(len(data[:, 2])):
        identifier = 'ASS' + str(j) + '_' + str(run)
        name = 'Helmstetter' + str(j) + '_' + str(run)
        point = Point(data[j, 0], data[j, 1], 10)
        rate = data[j, 2]
        # Convert rate to a value
        aval = np.log10(rate) + config['bvalue'] * config["mmin"]

        mfd = TruncatedGRMFD(min_mag, max_mag, 0.1, aval, config['bvalue'])
        hypo_depth_dist = PMF([(0.5, 10.0), (0.25, 5.0), (0.25, 15.0)])
        nodal_plane_dist = PMF([(0.3, NodalPlane(0, 30, 90)),
                                (0.2, NodalPlane(90, 30, 90)),
                                (0.3, NodalPlane(180, 30, 90)),
                                (0.2, NodalPlane(270, 30, 90))])
        point_source = PointSource(identifier, name, 'Non_cratonic', mfd, 2,
                                   msr, 2.0, tom, 0.1, 20.0, point,
                                   nodal_plane_dist, hypo_depth_dist)
        source_list.append(point_source)

    mod_name = "Australia_Adaptive_K%i_b%.3f" % (smoother.config['k'],
                                                 smoother.config['bvalue'])
    nodes = list(map(obj_to_node, sorted(source_list)))
    source_model = Node("sourceModel", {"name": name}, nodes=nodes)
    with open(filename, 'wb') as f:
        nrml.write([source_model], f, '%s', xmlns=NAMESPACE)
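The a-value conversion above follows from the Gutenberg-Richter relation log10 N(m >= mmin) = a - b*mmin, hence a = log10(rate) + b*mmin; a quick numeric sanity check with made-up numbers:

import numpy as np

rate, bvalue, mmin = 0.02, 1.0, 3.0   # hypothetical values
aval = np.log10(rate) + bvalue * mmin
# back-substituting recovers the input rate above mmin
assert np.isclose(10 ** (aval - bvalue * mmin), rate)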
Example #31
def expo2csv(job_ini):
    """
    Convert an exposure in XML format into CSV format
    """
    oq = readinput.get_oqparam(job_ini)
    exposure = readinput.get_exposure(oq)
    rows = []
    header = ['id', 'lon', 'lat', 'number']
    area = exposure.area['type'] != '?'
    if area:
        header.append('area')
    for costname in exposure.cost_types['name']:
        if costname != 'occupants':
            header.append(costname)
            if exposure.deductible_is_absolute is not None:
                header.append(costname + '-deductible')
            if exposure.insurance_limit_is_absolute is not None:
                header.append(costname + '-insured_limit')
    if exposure.retrofitted:
        header.append('retrofitted')
    header.extend(exposure.occupancy_periods)
    header.extend(exposure.tagcol.tagnames)
    for asset, asset_ref in zip(exposure.assets, exposure.asset_refs):
        row = [asset_ref.decode('utf8'), asset.location[0], asset.location[1],
               asset.number]
        if area:
            row.append(asset.area)
        for costname in exposure.cost_types['name']:
            if costname != 'occupants':
                row.append(asset.values[costname])
                if exposure.deductible_is_absolute is not None:
                    row.append(asset.deductibles[costname])
                if exposure.insurance_limit_is_absolute is not None:
                    row.append(asset.insurance_limits[costname])
        if exposure.retrofitted:
            row.append(asset._retrofitted)
        for time_event in exposure.occupancy_periods:
            row.append(asset.values['occupants_' + time_event])
        for tagname, tagidx in zip(exposure.tagcol.tagnames, asset.tagidxs):
            tags = getattr(exposure.tagcol, tagname)
            row.append(tags[tagidx])
        rows.append(row)

    with performance.Monitor('expo2csv') as mon:
        # save exposure data as csv
        csvname = oq.inputs['exposure'].replace('.xml', '.csv')
        print('Saving %s' % csvname)
        with codecs.open(csvname, 'wb', encoding='utf8') as f:
            writer = csv.writer(f)
            writer.writerow(header)
            for row in rows:
                writer.writerow(row)

        # save exposure header as xml
        head = nrml.read(oq.inputs['exposure'], stop='assets')
        xmlname = oq.inputs['exposure'].replace('.xml', '-header.xml')
        print('Saving %s' % xmlname)
        head[0].assets.text = os.path.basename(csvname)
        with open(xmlname, 'wb') as f:
            nrml.write(head, f)
    print(mon)
Example #32
    def serialize(self, data):
        """
        Serialize a collection of loss curves.

        :param data:
            An iterable of loss curve objects. Each object should:

            * define an attribute `location`, which is itself an object
              defining two attributes, `x` containing the longitude value
              and `y` containing the latitude value.
            * define an attribute `asset_ref`, which contains the unique
              identifier of the asset related to the loss curve.
            * define an attribute `poes`, which is a list of floats
              describing the probabilities of exceedance.
            * define an attribute `losses`, which is a list of floats
              describing the losses.
            * define an attribute `loss_ratios`, which is a list of floats
              describing the loss ratios.
            * define an attribute `average_loss`, which is a float
              describing the average loss associated to the loss curve
            * define an attribute `stddev_loss`, which is a float
              describing the standard deviation of losses if the loss curve
              has been computed with an event based approach. Otherwise,
              it is None

            All attributes must be defined, except for `loss_ratios` that
            can be `None` since it is optional in the schema.

            Also, `poes`, `losses` and `loss_ratios` values must be indexed
            coherently, i.e.: the loss (and optionally loss ratio) at index
            zero is related to the probability of exceedance at the same
            index.
        """

        _assert_valid_input(data)

        with open(self._dest, 'wb') as output:
            root = et.Element("nrml")

            for curve in data:
                if self._loss_curves is None:
                    self._create_loss_curves_elem(root)

                loss_curve = et.SubElement(self._loss_curves, "lossCurve")

                _append_location(loss_curve, curve.location)
                loss_curve.set("assetRef", curve.asset_ref)

                poes = et.SubElement(loss_curve, "poEs")
                poes.text = " ".join(FIVEDIGITS % p for p in curve.poes
                                     if notnan(p))

                losses = et.SubElement(loss_curve, "losses")
                losses.text = " ".join(FIVEDIGITS % p for p in curve.losses
                                       if notnan(p))

                if curve.loss_ratios is not None:
                    loss_ratios = et.SubElement(loss_curve, "lossRatios")

                    loss_ratios.text = " ".join(
                        ['%.3f' % p for p in curve.loss_ratios if notnan(p)])

                losses = et.SubElement(loss_curve, "averageLoss")
                losses.text = FIVEDIGITS % curve.average_loss

                if curve.stddev_loss is not None:
                    losses = et.SubElement(loss_curve, "stdDevLoss")
                    losses.text = FIVEDIGITS % curve.stddev_loss

            nrml.write(list(root), output)
Example #33
    for col_str in header:
        col = col_str.strip().split(':')
        n = len(col)
        if n == 1:  # default dtype and no shape
            col = [col[0], 'float32', '']
        elif n == 2:
            if castable_to_int(col[1]):  # default dtype and shape
                col = [col[0], 'float32', col[1]]
            else:  # dtype and no shape
                col = [col[0], col[1], '']
        elif n > 3:
            raise ValueError('Invalid column description: %s' % col_str)
        field = col[0]
        numpytype = col[1]
        shape = () if not col[2].strip() else (int(col[2]),)
        triples.append((field, numpytype, shape))
        fields.append(field)
    return fields, numpy.dtype(triples)
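For reference, the (field, dtype, shape) triples built above feed numpy's structured-dtype constructor directly; a standalone check with made-up columns:

import numpy

dt = numpy.dtype([('lon', 'float32', ()), ('gmv', 'float32', (3,))])
arr = numpy.zeros(2, dt)   # two records; 'gmv' holds 3 floats each
print(arr['gmv'].shape)    # (2, 3)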


if __name__ == '__main__':  # pretty print of NRML files
    import sys
    import shutil
    from openquake.hazardlib import nrml
    nrmlfiles = sys.argv[1:]
    for fname in nrmlfiles:
        node = nrml.read(fname)
        shutil.copy(fname, fname + '.bak')
        with open(fname, 'w') as out:
            nrml.write(list(node), out)
Example #34
    def serialize(self, data):
        """
        Serialize an aggregation loss curve.

        :param data:
            An object representing an aggregate loss curve. This object should:

            * define an attribute `poes`, which is a list of floats
              describing the probabilities of exceedance.
            * define an attribute `losses`, which is a list of floats
              describing the losses.
            * define an attribute `average_loss`, which is a float
              describing the average loss associated to the loss curve
            * define an attribute `stddev_loss`, which is a float
              describing the standard deviation of losses if the loss curve
              has been computed with an event based approach. Otherwise, it
              is None

            Also, `poes`, `losses` values must be indexed coherently,
            i.e.: the loss at index zero is related to the probability
            of exceedance at the same index.
        """

        if data is None:
            raise ValueError("You can not serialize an empty document")

        with open(self._dest, 'wb') as output:
            root = et.Element("nrml")

            aggregate_loss_curve = et.SubElement(root, "aggregateLossCurve")

            aggregate_loss_curve.set("investigationTime",
                                     str(self._investigation_time))

            aggregate_loss_curve.set("riskInvestigationTime",
                                     str(self._risk_investigation_time))

            if self._source_model_tree_path is not None:
                aggregate_loss_curve.set("sourceModelTreePath",
                                         str(self._source_model_tree_path))

            if self._gsim_tree_path is not None:
                aggregate_loss_curve.set("gsimTreePath",
                                         str(self._gsim_tree_path))

            if self._statistics is not None:
                aggregate_loss_curve.set("statistics", str(self._statistics))

            if self._quantile_value is not None:
                aggregate_loss_curve.set("quantileValue",
                                         str(self._quantile_value))

            if self._unit is not None:
                aggregate_loss_curve.set("unit", str(self._unit))

            aggregate_loss_curve.set("lossType", self._loss_type)

            poes = et.SubElement(aggregate_loss_curve, "poEs")
            poes.text = " ".join(FIVEDIGITS % p for p in data.poes)

            losses = et.SubElement(aggregate_loss_curve, "losses")
            losses.text = " ".join([FIVEDIGITS % p for p in data.losses])

            losses = et.SubElement(aggregate_loss_curve, "averageLoss")
            losses.text = FIVEDIGITS % data.average_loss

            if data.stddev_loss is not None:
                losses = et.SubElement(aggregate_loss_curve, "stdDevLoss")
                losses.text = FIVEDIGITS % data.stddev_loss

            nrml.write(list(root), output)
Example #35
def reduce_source_model(smlt_file, source_ids, remove=True):
    """
    Extract sources from the composite source model.

    :param smlt_file: path to a source model logic tree file
    :param source_ids: dictionary source_id -> records (src_id, code)
    :param remove: if True, remove sm.xml files containing no sources
    :returns: the number of sources satisfying the filter vs the total
    """
    if isinstance(source_ids, dict):  # in oq reduce_sm
        def ok(src_node):
            code = tag2code[re.search(r'\}(\w\w)', src_node.tag).group(1)]
            arr = source_ids.get(src_node['id'])
            if arr is None:
                return False
            return (arr['code'] == code).any()
    else:  # list of source IDs, in extract_source
        def ok(src_node):
            return src_node['id'] in source_ids

    good, total = 0, 0
    to_remove = set()
    for paths in logictree.collect_info(smlt_file).smpaths.values():
        for path in paths:
            logging.info('Reading %s', path)
            root = nrml.read(path)
            model = Node('sourceModel', root[0].attrib)
            origmodel = root[0]
            if root['xmlns'] == 'http://openquake.org/xmlns/nrml/0.4':
                for src_node in origmodel:
                    total += 1
                    if ok(src_node):
                        good += 1
                        model.nodes.append(src_node)
            else:  # nrml/0.5
                for src_group in origmodel:
                    sg = copy.copy(src_group)
                    sg.nodes = []
                    weights = src_group.get('srcs_weights')
                    if weights:
                        assert len(weights) == len(src_group.nodes)
                    else:
                        weights = [1] * len(src_group.nodes)
                    src_group['srcs_weights'] = reduced_weights = []
                    for src_node, weight in zip(src_group, weights):
                        total += 1
                        if ok(src_node):
                            good += 1
                            sg.nodes.append(src_node)
                            reduced_weights.append(weight)
                    if sg.nodes:
                        model.nodes.append(sg)
            shutil.copy(path, path + '.bak')
            if model:
                with open(path, 'wb') as f:
                    nrml.write([model], f, xmlns=root['xmlns'])
            elif remove:  # remove the files completely reduced
                to_remove.add(path)
    if good:
        for path in to_remove:
            os.remove(path)
    return good, total
Example #36
# this is simple and without error checking for the moment
def read_array(fname, sep=','):
    r"""
    Convert a CSV file without header into a numpy array of floats.

    >>> from openquake.baselib.general import writetmp
    >>> print(read_array(writetmp('.1 .2, .3 .4, .5 .6\n')))
    [[[ 0.1  0.2]
      [ 0.3  0.4]
      [ 0.5  0.6]]]
    """
    with open(fname) as f:
        records = []
        for line in f:
            row = line.split(sep)
            record = [list(map(float, col.split())) for col in row]
            records.append(record)
        return numpy.array(records)


if __name__ == '__main__':  # pretty print of NRML files
    import sys
    import shutil
    from openquake.hazardlib import nrml
    nrmlfiles = sys.argv[1:]
    for fname in nrmlfiles:
        node = nrml.read(fname)
        shutil.copy(fname, fname + '.bak')
        with open(fname, 'w') as out:
            nrml.write(list(node), out)
Example #37
    point_source = PointSource(identifier, name, 'Non_cratonic', mfd, 2, msr,
                               2.0, tom, 0.1, 20.0, point, nodal_plane_dist,
                               hypo_depth_dist)
    source_list.append(point_source)
#    i+=1
#    if j==1000:
#        break

filename = "smoothed_frankel_50_3_mmin_%.1f_b%.3f_0.1.xml" % (
    completeness_table_a[0][-1], bvalue)
mod_name = 'smoothed_frankel_50_3_mmin_%.1f_b%.3f_0.1' % (
    completeness_table_a[0][-1], bvalue)
nodes = list(map(obj_to_node, sorted(source_list)))
source_model = Node("sourceModel", {"name": name}, nodes=nodes)
with open(filename, 'wb') as f:
    nrml.write([source_model], f, '%s', xmlns=NAMESPACE)

#source_model = mtkSourceModel(identifier=0, name='Frankel_50_3',
#                              sources = source_list)
#source_model.serialise_to_nrml('smoothed_frankel_50_3_mmin_%.1f_b%.3f_0.1_full.xml' % (completeness_table_a[0][-1], bvalue))

# In[ ]:

# Map configuration
#llon, ulon, llat, ulat = source.catalogue.get_bounding_box()
#map_config = {'min_lon': np.floor(llon), 'max_lon': np.ceil(ulon),
#              'min_lat': np.floor(llat), 'max_lat': np.ceil(ulat), 'resolution':'c'}
#map_config = {'min_lon': np.floor(105), 'max_lon': np.ceil(155),
#              'min_lat': np.floor(-45), 'max_lat': np.ceil(-9), 'resolution':'c'}
# Creating a basemap - input a configuration and (if desired) a title
#basemap1 = HMTKBaseMap(map_config, 'Smoothed seismicity rate')
Example #38
def write_source_model(dest,
                       sources_or_groups,
                       name=None,
                       investigation_time=None):
    """
    Writes a source model to XML.

    :param dest:
        Destination path
    :param sources_or_groups:
        A SourceModel instance, a list of SourceGroups, or a list of sources
    :param name:
        Name of the source model (if missing, extracted from the filename)
    """
    if isinstance(sources_or_groups, nrml.SourceModel):
        groups = sources_or_groups.src_groups
        attrs = dict(name=sources_or_groups.name,
                     investigation_time=sources_or_groups.investigation_time)
    elif isinstance(sources_or_groups[0], sourceconverter.SourceGroup):
        groups = sources_or_groups
        attrs = dict(investigation_time=investigation_time)
    else:  # passed a list of sources
        srcs_by_trt = groupby(sources_or_groups,
                              operator.attrgetter('tectonic_region_type'))
        groups = [
            sourceconverter.SourceGroup(trt, srcs_by_trt[trt])
            for trt in srcs_by_trt
        ]
        attrs = dict(investigation_time=investigation_time)
    if name or 'name' not in attrs:
        attrs['name'] = name or os.path.splitext(os.path.basename(dest))[0]
    if attrs['investigation_time'] is None:
        del attrs['investigation_time']
    nodes = list(map(obj_to_node, groups))
    ddict = extract_ddict(groups)
    if ddict:
        # remove duplicate content from nodes
        for grp_node in nodes:
            for src_node in grp_node:
                if src_node["id"] in ddict:
                    src_node.nodes = []
        # save HDF5 file
        dest5 = os.path.splitext(dest)[0] + '.hdf5'
        with hdf5.File(dest5, 'w') as h:
            for src_id, dic in ddict.items():
                for k, v in dic.items():
                    key = '%s/%s' % (src_id, k)
                    if isinstance(v, numpy.ndarray):
                        h.create_dataset(key,
                                         v.shape,
                                         v.dtype,
                                         compression='gzip',
                                         compression_opts=9)
                        h[key][:] = v
                    else:
                        h[key] = v

    source_model = Node("sourceModel", attrs, nodes=nodes)
    with open(dest, 'wb') as f:
        nrml.write([source_model], f, '%s')
    if ddict:
        return [dest, dest5]
    else:
        return [dest]
Example #39
def save_bak(fname, node, num_nodes, total):
    shutil.copy(fname, fname + '.bak')
    print('Copied the original file in %s.bak' % fname)
    with open(fname, 'wb') as f:
        nrml.write(node, f, xmlns=node['xmlns'])
    print('Extracted %d nodes out of %d' % (num_nodes, total))
Example #40
def run_smoothing(grid_lims,
                  smoothing_config,
                  catalogue,
                  completeness_table,
                  map_config,
                  run,
                  overwrite=True):
    """Run all the smoothing
    """
    ystart = completeness_table[-1][0]
    yend = catalogue.end_year
    catalogue_comp = deepcopy(catalogue)
    # Ensuring that catalogue is cleaned of earthquakes outside of
    # completeness period
    index = catalogue_comp.data['year'] >= ystart
    catalogue_comp.purge_catalogue(index)

    completeness_string = 'comp'
    for ym in completeness_table:
        completeness_string += '_%i_%.1f' % (ym[0], ym[1])
    smoother_filename = 'Australia_Fixed_%i_%i_b%.3f_mmin_%.1f_0.1%s.csv' % (
        smoothing_config["BandWidth"], smoothing_config["Length_Limit"],
        bvalue, completeness_table[0][1], completeness_string)
    filename = smoother_filename[:-4] + '.xml'
    if os.path.exists(filename) and not overwrite:
        print('%s already created, not overwriting!' % filename)
        return
    smoother = SmoothedSeismicity(
        [105., 160., 0.1, -47., -5, 0.1, 0., 20., 20.],
        bvalue=smoothing_config['bvalue'])
    print('Running smoothing')
    smoothed_grid = smoother.run_analysis(
        catalogue_comp,
        smoothing_config,
        completeness_table=completeness_table)

    smoother.write_to_csv(smoother_filename)

    from openquake.hazardlib.nrml import SourceModelParser, write, NAMESPACE
    from openquake.baselib.node import Node
    from openquake.hazardlib import nrml
    from openquake.hazardlib.sourcewriter import obj_to_node
    # Build nrml input file of point sources
    source_list = []
    #i=0
    min_mag = 4.5
    max_mag = 7.8
    bval = bvalue  # just define as 1 for time being
    # Read the data back in to work around a number formatting issue in
    # smoother.data (for some reason it returns 0 for all a-values)
    try:
        data = np.genfromtxt(smoother_filename, delimiter=',', skip_header=1)
    except ValueError:
        print('Something wrong with file %s' % smoother_filename)
        sys.exit()
    tom = PoissonTOM(50)  # dummy temporal occurrence model for building pt sources
    msr = Leonard2014_SCR()
    for j in range(len(data[:, 4])):
        #    print smoother.data[j,:]
        identifier = 'FSS' + str(j) + '_' + str(run)
        name = 'Frankel' + str(j) + '_' + str(run)
        point = Point(data[j, 0], data[j, 1], data[j, 2])
        annual_rate = data[j, 4] / (yend - ystart + 1)
        aval = np.log10(annual_rate) + smoothing_config[
            'bvalue'] * completeness_table[0][1]
        mfd = TruncatedGRMFD(min_mag, max_mag, 0.1, aval, bval)
        hypo_depth_dist = PMF([(0.5, 10.0), (0.25, 5.0), (0.25, 15.0)])
        nodal_plane_dist = PMF([(0.3, NodalPlane(0, 30, 90)),
                                (0.2, NodalPlane(90, 30, 90)),
                                (0.3, NodalPlane(180, 30, 90)),
                                (0.2, NodalPlane(270, 30, 90))])
        point_source = PointSource(identifier, name, 'Non_cratonic', mfd, 2,
                                   msr, 2.0, tom, 0.1, 20.0, point,
                                   nodal_plane_dist, hypo_depth_dist)
        source_list.append(point_source)

    nodes = list(map(obj_to_node, sorted(source_list)))
    source_model = Node("sourceModel", {"name": name}, nodes=nodes)
    with open(filename, 'wb') as f:
        nrml.write([source_model], f, '%s', xmlns=NAMESPACE)

    # Creating a basemap - input a configuration and (if desired) a title
    title = 'Smoothed seismicity rate for learning \nperiod %i 2017, Mmin = %.1f' % (
        completeness_table[0][0], completeness_table[0][1])
    basemap1 = HMTKBaseMap(map_config, 'Smoothed seismicity rate')
    # Adding the smoothed grid to the basemap
    sym = (2., 3., 'cx')
    x, y = basemap1.m(smoother.data[:, 0], smoother.data[:, 1])
    basemap1.m.scatter(x,
                       y,
                       marker='s',
                       c=np.log10(smoother.data[:, 4]),
                       cmap=plt.cm.coolwarm,
                       zorder=10,
                       lw=0,
                       vmin=-6.5,
                       vmax=1.5)
    basemap1.m.drawcoastlines(linewidth=1, zorder=50)  # Add coastline on top
    # meridians are lines of constant longitude, parallels of constant latitude
    basemap1.m.drawmeridians(
        np.arange(map_config['min_lon'], map_config['max_lon'], 5))
    basemap1.m.drawparallels(
        np.arange(map_config['min_lat'], map_config['max_lat'], 5))
    plt.colorbar(label='log10(Smoothed rate per cell)')
    plt.legend()
    figname = smoother_filename[:-4] + '_smoothed_rates_map.png'
    plt.savefig(figname)
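A hedged sketch of the configuration inputs run_smoothing expects, inferred only from the keys the function reads above; all values are illustrative:

# Hedged sketch, not part of the original example
smoothing_config = {'BandWidth': 50,     # kernel bandwidth (km)
                    'Length_Limit': 3,   # kernel truncation limit
                    'bvalue': 1.0}       # Gutenberg-Richter b-value
map_config = {'min_lon': 105., 'max_lon': 160.,
              'min_lat': -47., 'max_lat': -5.}
completeness_table = [[1990., 3.0], [1960., 5.0]]  # (year, Mmin) rows
# run_smoothing(grid_lims, smoothing_config, catalogue,
#               completeness_table, map_config, run='01')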
Example #41
def pt2fault_distance(pt_sources,
                      fault_sources,
                      min_distance=5.0,
                      filename='source_model.xml',
                      buffer_distance=100.,
                      nrml_version='04',
                      name=None):
    """Calculate distances from a pt source rupture plane
    to the fault sources to then reduce Mmax on events that are 
    within a certain distance
    :param pt_sources:
        list of PointSource objects
    :param fault_sources:
        List of FaultSource objects
    :param min_distance:
        Minimum distance (km) within which we want a point source 
        rupture to be from a fault.
    :param filename:
        Name of output nrml file for revised pt source model
    :param buffer_distance:
        Km, initial filter to only process pts within this
        distance from the fault
    """

    if name is None:
        name = filename[:-4] + '_geom_filtered'
    id_index = 0  # We need to re-number all sources to avoid duplicate ids
    # Extract the points of the fault source mesh
    fault_lons = []
    fault_lats = []
    fault_depths = []
    for fault in fault_sources:
        whole_fault_surface = SimpleFaultSurface.from_fault_data(
            fault.fault_trace, fault.upper_seismogenic_depth,
            fault.lower_seismogenic_depth, fault.dip,
            fault.rupture_mesh_spacing)
        fault_lons.append(whole_fault_surface.mesh.lons.flatten())
        fault_lats.append(whole_fault_surface.mesh.lats.flatten())
        fault_depths.append(whole_fault_surface.mesh.depths.flatten())
    fault_lons = np.concatenate(fault_lons)
    fault_lats = np.concatenate(fault_lats)
    fault_depths = np.concatenate(fault_depths)
    min_fault_lon = np.min(fault_lons)
    max_fault_lon = np.max(fault_lons)
    min_fault_lat = np.min(fault_lats)
    max_fault_lat = np.max(fault_lats)

    # Generate ruptures for point sources
    minimum_distance_list = []
    revised_point_sources = {
        'Cratonic': [],
        'Non_cratonic': [],
        'Extended': [],
        'Banda': []
    }
    for pt in pt_sources:
        print('Looping over point sources')
        # For speeding things up filter based on initial distances
        # to find points very far from or very close to a fault
        mfd_type = type(pt.mfd).__name__
        pt_depths = []
        for probs, depths in pt.hypocenter_distribution.data:
            pt_depths.append(depths)
        np_probs = []
        np_list = []
        for prob, nodal_plane in pt.nodal_plane_distribution.data:
            np_probs.append(prob)
            np_list.append(nodal_plane)
        centroid_distances = []
        for pt_depth in pt_depths:
            centroid_distances.append(
                distance(pt.location.longitude, pt.location.latitude, pt_depth,
                         fault_lons, fault_lats, fault_depths))
        centroid_distances = np.array(centroid_distances).flatten()
        #      print 'Minimum distance', min(centroid_distances)
        #      print 'Maximum distance', max(centroid_distances)
        if (min(centroid_distances)) > buffer_distance:
            # Keep point as is; it is not within buffer distance of any fault
            revised_point_sources[pt.tectonic_region_type].append(pt)
            continue
        if (min(centroid_distances)) < min_distance:
            # Discard point source as it is too close to a fault
            print('Discarding point source, too close to a fault')
            continue
        rupture_mags = []
        rupture_lons = []
        rupture_lats = []
        rupture_depths = []
        rupture_strikes = []
        rupture_dips = []
        ruptures = pt.iter_ruptures()
        for rupture in ruptures:
            rupture_mags.append(rupture.mag)
            rupture_lons.append(rupture.surface.corner_lons)
            rupture_lats.append(rupture.surface.corner_lats)
            rupture_depths.append(rupture.surface.corner_depths)
            rupture_strikes.append(rupture.surface.strike)
            rupture_dips.append(rupture.surface.dip)
        rupture_mags = np.array(rupture_mags).flatten()
        # make the same length as the corners
        rupture_mags = np.repeat(rupture_mags, 4)
        rupture_strikes = np.repeat(rupture_strikes, 4)
        rupture_dips = np.repeat(rupture_dips, 4)
        rupture_lons = np.array(rupture_lons).flatten()
        rupture_lats = np.array(rupture_lats).flatten()
        rupture_depths = np.array(rupture_depths).flatten()
        print('Doing meshgrid')
        lons1, lons2 = np.meshgrid(fault_lons, rupture_lons)
        lats1, lats2 = np.meshgrid(fault_lats, rupture_lats)
        depths1, depths2 = np.meshgrid(fault_depths, rupture_depths)

        # Calculate distances from the point to all faults
        print('Distance calculations')
        distances = distance(lons1, lats1, depths1, lons2, lats2, depths2)
        closest_distance_to_faults = np.min(distances)
        print('Shortest pt to fault distance is', closest_distance_to_faults)
        minimum_distance_list.append(closest_distance_to_faults)

        # Find where the distance is less than the threshold min_distance
        too_close_lons = lons2[np.where(distances < min_distance)]
        too_close_lats = lats2[np.where(distances < min_distance)]
        if too_close_lons.size > 0:
            lon_indices = np.where(np.in1d(rupture_lons, too_close_lons))[0]
            lat_indices = np.where(np.in1d(rupture_lats, too_close_lats))[0]
            too_close_mags = rupture_mags[np.intersect1d(
                lon_indices, lat_indices)]
            too_close_strikes = rupture_strikes[np.intersect1d(
                lon_indices, lat_indices)]
            too_close_dips = rupture_dips[np.intersect1d(
                lon_indices, lat_indices)]
            #    print 'Magnitudes of rupture close to fault', too_close_mags
            #    print 'Strikes of rupture close to fault', too_close_strikes
            #    print 'Dips of rupture close to fault', too_close_dips
            unique_strikes = np.unique(rupture_strikes)
            unique_dips = np.unique(rupture_dips)
            src_name_index = 0
            for prob, nodal_plane in pt.nodal_plane_distribution.data:
                id_index += 1
                src_name_index += 1
                # We are now splitting the source into many with different
                # combinations of Mmaxs and nodal planes
                new_pt = copy.deepcopy(pt)
                new_pt.source_id = "%i" % id_index
                new_pt.name = new_pt.name + ("_%i" % src_name_index)
                new_np = NodalPlane(nodal_plane.strike, nodal_plane.dip,
                                    nodal_plane.rake)
                # weight of the nodal plane is 1 as we are making a
                # separate source
                new_np_distribution = PMF([(1.0, new_np)])
                # Calculate new rates based on probability of original nodal plane
                new_pt.nodal_plane_distribution = new_np_distribution
                if mfd_type == 'TruncatedGRMFD':
                    b_val = pt.mfd.b_val
                    # rescale the a-value in log space
                    a_val = np.log10(np.power(10, pt.mfd.a_val) *
                                     prob)  #*area_src_weight))
                    new_pt.mfd.modify_set_ab(a_val, b_val)
                elif mfd_type == 'EvenlyDiscretizedMFD':
                    mag_bins, rates = zip(
                        *pt.mfd.get_annual_occurrence_rates())
                    mag_bins = np.array(mag_bins)
                    rates = np.array(rates)
                    new_rates = rates * prob  #*area_src_weight)
                    new_pt.mfd.modify_set_mfd(new_pt.mfd.min_mag,
                                              new_pt.mfd.bin_width,
                                              list(new_rates))
                else:
                    msg = ('Weighting method for mfd type %s not yet '
                           'defined' % mfd_type)
                    raise NotImplementedError(msg)
                pair_index = np.where(
                    np.logical_and(too_close_strikes == nodal_plane.strike,
                                   too_close_dips == nodal_plane.dip))
                # Deal with intersecting cases
                if len(pair_index[0]) > 0:
                    intersecting_magnitudes = too_close_mags[pair_index]
                    minimum_magnitude_intersecting_fault = min(
                        intersecting_magnitudes)
                    if minimum_magnitude_intersecting_fault >= \
                            (pt.mfd.min_mag + pt.mfd.bin_width):
                        new_mmax = minimum_magnitude_intersecting_fault - \
                                pt.mfd.bin_width
                        if mfd_type == 'TruncatedGRMFD':
                            new_pt.mfd.max_mag = new_mmax
                        if mfd_type == 'EvenlyDiscretizedMFD':
                            # NB: trimmed_rates is computed but never applied
                            trimmed_rates = new_rates[np.where(
                                mag_bins <= new_mmax)]
                    else:
                        print('Minimum magnitude intersects fault, discarding source')
                        continue

                else:
                    pass
                # Append revised source for given nodal plane distribution to
                # list of revised sources
                print('Appending revised source')
                revised_point_sources[pt.tectonic_region_type].append(new_pt)
        else:
            id_index += 1
            pt.source_id = "%i" % id_index
            print('Appending original source')
            revised_point_sources[pt.tectonic_region_type].append(pt)
    if len(minimum_distance_list) > 0:
        print('Overall minimum distance (km):', min(minimum_distance_list))

    # Write pts to source model on their own
    source_model_file = filename
    print('Writing to source model file %s' % source_model_file)
    if nrml_version == '04':
        source_list = []
        for trt, sources in revised_point_sources.items():
            for source in sources:
                source_list.append(source)
        nodes = list(map(obj_to_node, sorted(source_list)))
        source_model = Node("sourceModel", {"name": name}, nodes=nodes)
        with open(source_model_file, 'wb') as f:
            nrml.write([source_model], f, '%s', xmlns=NAMESPACE)
    elif nrml_version == '05':
        source_group_list = []
        grp_id = 0
        for trt, sources in revised_point_sources.items():
            source_group = SourceGroup(trt, sources=sources, id=grp_id)
            grp_id += 1
            source_group_list.append(source_group)
        write_source_model(source_model_file, source_group_list, name=name)
    else:
        print('Warning: nrml version not recognised, xml not created')

    # Write pts to source model with faults
    source_model_file = filename[:-4] + '_inc_faults.xml'
    name = name + '_inc_faults'
    write_combined_faults_points(revised_point_sources,
                                 fault_sources,
                                 source_model_file,
                                 name,
                                 nrml_version='04')
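A hedged usage sketch for pt2fault_distance; pt_sources and fault_sources are assumed to come from readers such as read_pt_source (used later in this document) and a corresponding fault-source parser:

# Hedged usage sketch, not part of the original example
# pt_sources = read_pt_source('point_sources.xml')
# fault_sources = [...]  # list of simple/complex fault sources
pt2fault_distance(pt_sources, fault_sources,
                  min_distance=5.0,      # discard points closer than this (km)
                  buffer_distance=100.,  # pre-filter radius (km)
                  filename='revised_points.xml',
                  nrml_version='04')
# writes revised_points.xml and revised_points_inc_faults.xml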
Example #42
def write_combined_faults_points(point_sources,
                                 fault_sources,
                                 filename,
                                 name,
                                 nrml_version='04'):
    """Write pts and fault sources to file
    :param point_sources:
       list without trt or dict with trt key of point sources
    """
    print('Writing to source model file %s' % filename)
    ps_id_index = 1
    fs_id_index = 1
    if nrml_version == '04':
        if isinstance(point_sources, dict):
            source_list = []
            for trt, sources in point_sources.items():
                for source in sources:
                    source.source_id = 'PS_%i' % ps_id_index
                    source_list.append(source)
                    ps_id_index += 1
#                    id_index = max(id_index, source.source_id)
        elif isinstance(point_sources, list):
            source_list = point_sources
            for source in source_list:
                # renumber in place; do not append to the list being iterated
                source.source_id = 'PS_%i' % ps_id_index
                ps_id_index += 1
        for fault_source in fault_sources:
            #            id_index += 1
            fault_source.source_id = "FS_%i" % fs_id_index
            fs_id_index += 1
            source_list.append(fault_source)
        nodes = list(map(obj_to_node, sorted(source_list)))
        source_model = Node("sourceModel", {"name": name}, nodes=nodes)
        with open(filename, 'wb') as f:
            nrml.write([source_model], f, '%s', xmlns=NAMESPACE)
    elif nrml_version == '05':
        if isinstance(point_sources, dict):
            source_group_list = []
            grp_id = 0
            id_index = 0
            for trt, sources in point_sources.items():
                for source in sources:
                    # assumes numeric source ids
                    id_index = max(id_index, int(source.source_id))
            for trt, sources in point_sources.items():
                for fault_source in fault_sources:
                    if fault_source.tectonic_region_type == trt:
                        id_index += 1
                        fault_source.source_id = "%i" % id_index
                        sources.append(fault_source)
                source_group = SourceGroup(trt, sources=sources, id=grp_id)
                grp_id += 1
                source_group_list.append(source_group)
            write_source_model(filename, source_group_list, name=name)
        elif isinstance(point_sources, list):
            msg = 'Method not yet implemented for nrml version 0.5'
            raise NotImplementedError(msg)
    else:
        print('Warning: nrml version not recognised, xml not created')
Example #43
    def serialize(self, data, investigation_time):
        """
        Serialize a collection of stochastic event sets to XML.

        :param data:
            A dictionary src_group_id -> list of
            :class:`openquake.commonlib.calc.Rupture` objects.
            Each Rupture should have the following attributes:

            * `rupid`
            * `events_by_ses`
            * `magnitude`
            * `strike`
            * `dip`
            * `rake`
            * `tectonic_region_type`
            * `is_from_fault_source` (a `bool`)
            * `is_multi_surface` (a `bool`)
            * `lons`
            * `lats`
            * `depths`

            If `is_from_fault_source` is `True`, the rupture originated from a
            simple or complex fault source. In this case, `lons`, `lats`, and
            `depths` should all be 2D arrays (of uniform shape). These
            coordinate triples represent nodes of the rupture mesh.

            If `is_from_fault_source` is `False`, the rupture originated from a
            point or area source. In this case, the rupture is represented by a
            quadrilateral planar surface. This planar surface is defined by 3D
            vertices. In this case, the rupture should have the following
            attributes:

            * `top_left_corner`
            * `top_right_corner`
            * `bottom_right_corner`
            * `bottom_left_corner`

            Each of these should be a triple of `lon`, `lat`, `depth`.

            If `is_multi_surface` is `True`, the rupture originated from a
            multi-surface source. In this case, `lons`, `lats`, and `depths`
            should have uniform length. The length should be a multiple of 4,
            where each segment of 4 represents the corner points of a planar
            surface in the following order:

            * top left
            * top right
            * bottom left
            * bottom right

            Each of these should be a triple of `lon`, `lat`, `depth`.

        :param investigation_time:
            Investigation time parameter specified in the job.ini
        """
        with open(self.dest, 'wb') as fh:
            root = et.Element('nrml')
            ses_container = et.SubElement(root, 'ruptureCollection')
            ses_container.set('investigationTime', str(investigation_time))
            for grp_id in sorted(data):
                attrs = dict(
                    id=grp_id,
                    tectonicRegion=data[grp_id][0].tectonic_region_type)
                sg = et.SubElement(ses_container, 'ruptureGroup', attrs)
                for rupture in data[grp_id]:
                    rupture_to_element(rupture, sg)
            nrml.write(list(root), fh)
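A hedged sketch of a planar-rupture datum satisfying the docstring above, built with types.SimpleNamespace purely for illustration (the real objects are openquake.commonlib.calc.Rupture instances):

# Hedged sketch, not part of the original example
from types import SimpleNamespace

rup = SimpleNamespace(
    rupid=1, events_by_ses={}, magnitude=5.5,
    strike=0., dip=30., rake=90.,
    tectonic_region_type='Active Shallow Crust',
    is_from_fault_source=False, is_multi_surface=False,
    lons=None, lats=None, depths=None,
    # planar-surface corners as (lon, lat, depth) triples
    top_left_corner=(0.0, 0.1, 5.0), top_right_corner=(0.1, 0.1, 5.0),
    bottom_right_corner=(0.1, 0.0, 15.0), bottom_left_corner=(0.0, 0.0, 15.0))
# writer.serialize({0: [rup]}, investigation_time=50.)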
Example #45
def fix_source_node(node):
    if node.tag.endswith('complexFaultSource'):
        geom = node.complexFaultGeometry
        top = geom.faultTopEdge
        intermediate = [edge for edge in geom.getnodes('intermediateEdge')]
        bottom = geom.faultBottomEdge
        edges = list(map(make_edge, [top] + intermediate + [bottom]))
        try:
            ComplexFaultSurface.from_fault_data(edges, mesh_spacing=4.)
        except ValueError as excp:
            if AKI_RICH_ERR_MSG in str(excp):
                print(excp)
                print('Reverting edges ...')
                reverse(geom.faultTopEdge)
                reverse(geom.faultBottomEdge)
            elif WRONG_ORDER_ERR_MSG in str(excp):
                print(excp)
                print('Reverting bottom edge ...')
                reverse(geom.faultBottomEdge)
            else:
                raise

if __name__ == '__main__':
    fname = sys.argv[1]
    src_model = node_from_xml(fname).sourceModel
    for src_node in src_model:
        fix_source_node(src_node)
    with open(fname, 'wb') as f:
        nrml.write([src_model], f, xmlns=nrml.NAMESPACE)
Example #46
def combine_ss_models(filename_stem,
                      domains_shp,
                      params,
                      lt,
                      bval_key,
                      output_dir='./',
                      nrml_version='04',
                      weight=1.):  #, id_base = 'ASS'):
    """ Combine smoothed seismicity models based on tectonic region types
    :params filename_stem:
        String for the start of the xml filename for the source model,
        assuming generic components (non generic are inferred, 
        e.g. bvalue and completeness model)
    :params domains_shp:
        shapefile defining tectonic domain regions
    :params params:
        list of dicts containing parameters derivded from the shapefile
     :bval_key
         key for the dicts in params  as we are merging by 
         bvalues  (best, lower, upper)
    :params lt:
        LogicTree object containing relevant values and weights for Mmax
    :params outfile:
        output nrml formatted file
    """

    dsf = shapefile.Reader(domains_shp)
    dom_shapes = dsf.shapes()
    # Get indices of the relevant fields
    for i, f in enumerate(dsf.fields):
        if f[0] == 'CODE':
            code_index = i - 1
        if f[0] == 'TRT':
            trt_index = i - 1

    hypo_depth_dist_nc = PMF([(0.5, 10.0), (0.25, 5.0), (0.25, 15.0)])
    hypo_depth_dist_c = PMF([(0.5, 5.0), (0.25, 2.5), (0.25, 10.0)])
    hypo_depth_dist_ex = hypo_depth_dist_c
    hypo_depth_dict = {
        'Cratonic': hypo_depth_dist_c,
        'Non_cratonic': hypo_depth_dist_nc,
        'Extended': hypo_depth_dist_ex
    }
    # FIXME! - Temporary solution until nodal plane logic tree
    # info can be read directly from shapefile attributes
    nodal_plane_dist = PMF([(0.3, NodalPlane(0, 30, 90)),
                            (0.2, NodalPlane(90, 30, 90)),
                            (0.3, NodalPlane(180, 30, 90)),
                            (0.2, NodalPlane(270, 30, 90))])

    merged_pts = []

    # Get mmax values and weights
    mmaxs = {}
    mmaxs_w = {}
    for dom in params:
        print('Processing source %s' % dom['CODE'])
        print(dom['TRT'])
        if dom['TRT'] == 'NCratonic':
            dom['TRT'] = 'Non_cratonic'
        # For the moment, only consider regions within Australia
        if dom['TRT'] == 'Active' or dom['TRT'] == 'Interface' or \
                dom['TRT'] == 'Oceanic' or \
                dom['TRT'] == 'Intraslab' or dom['CODE'] == 'NECS' or \
                dom['CODE'] == 'NWO':
            print('Source %s not on continental Australia, skipping'
                  % dom['CODE'])
            continue
        elif dom['TRT'] == 'Cratonic':
            if dom['DOMAIN'] == 1:
                mmax_values, mmax_weights = lt.get_weights('Mmax', 'Archean')
            else:
                mmax_values, mmax_weights = lt.get_weights(
                    'Mmax', 'Proterozoic')


#        elif dom['TRT'] == 'Active':
#            print 'MMax logic tree not yet defined for active crust, using extended crust'
#            mmax_values, mmax_weights = lt.get_weights('Mmax', 'Extended')
        else:
            mmax_values, mmax_weights = lt.get_weights('Mmax', dom['TRT'])
        mmax_values = [float(i) for i in mmax_values]
        mmax_weights = [float(i) for i in mmax_weights]
        print(mmax_values)
        print(mmax_weights)
        mmaxs[dom['CODE']] = mmax_values
        mmaxs_w[dom['CODE']] = mmax_weights

        pt_ids = []
        #for trt, filename in filedict.iteritems():
        #    print trt
        completeness_string = 'comp'
        for ym in dom['COMPLETENESS']:
            completeness_string += '_%i_%.1f' % (ym[0], ym[1])
        mmin = dom['COMPLETENESS'][0][1]
        filename = "%s_b%.3f_mmin%.1f_%s.xml" % (filename_stem, dom[bval_key],
                                                 mmin, completeness_string)
        print('Parsing %s' % filename)

        # TA kluge - hardwire jdg547 path
        jdgpath = '/short/w84/NSHA18/sandpit/jdg547/NSHA2018/source_models/smoothed_seismicity/'

        print(filename)

        # Only keep points within domain
        pts = read_pt_source(filename)

        #        shapes = np.where(trt_types
        for shape in dsf.shapeRecords():
            #            print code_index
            print(shape.record[code_index])
            if shape.record[code_index] == dom['CODE']:
                # Check for undefined depths (-999 values)
                if dom['DEP_BEST'] < 0:
                    print('Setting best depth to 10 km')
                    dom['DEP_BEST'] = 10
                if dom['DEP_UPPER'] < 0:
                    print('Setting upper depth to 5 km')
                    dom['DEP_UPPER'] = 5
                if dom['DEP_LOWER'] < 0:
                    print('Setting lower depth to 15 km')
                    dom['DEP_LOWER'] = 15
                hypo_depth_dist = PMF([(0.5, dom['DEP_BEST']),
                                       (0.25, dom['DEP_LOWER']),
                                       (0.25, dom['DEP_UPPER'])])
                # Define nodal planes as thrusts except for special cases
                str1 = dom['SHMAX'] + 90.
                str2 = dom['SHMAX'] + 270.
                str3 = dom['SHMAX'] + dom['SHMAX_SIG'] + 90.
                str4 = dom['SHMAX'] + dom['SHMAX_SIG'] + 270.
                str5 = dom['SHMAX'] - dom['SHMAX_SIG'] + 90.
                str6 = dom['SHMAX'] - dom['SHMAX_SIG'] + 270.
                strikes = [str1, str2, str3, str4, str5, str6]
                for i, strike in enumerate(strikes):
                    if strike >= 360:
                        strikes[i] = strike - 360
        #           if strikes[i] >=360:
        #               strikes[i]=strikes[i]-360
                # Per-domain nodal plane distribution (currently computed but
                # not applied; see the FIXME above)
                nodal_plane_dist_dom = PMF([
                    (0.34, NodalPlane(strikes[0], 30, 90)),
                    (0.34, NodalPlane(strikes[1], 30, 90)),
                    (0.08, NodalPlane(strikes[2], 30, 90)),
                    (0.08, NodalPlane(strikes[3], 30, 90)),
                    (0.08, NodalPlane(strikes[4], 30, 90)),
                    (0.08, NodalPlane(strikes[5], 30, 90))])
                if dom['CODE'] == 'WARM' or dom['CODE'] == 'WAPM':
                    print('Define special case for WARM')
                    nodal_plane_dist_dom = PMF([
                        (0.75, NodalPlane(45, 90, 0)),
                        (0.125, NodalPlane(strikes[0], 30, 90)),
                        (0.125, NodalPlane(strikes[1], 30, 90))
                    ])
                if dom['CODE'] == 'FMLR':
                    print('Define special case for FMLR, 0.5 thrust, 0.5 SS')
                    nodal_plane_dist_dom = PMF([
                        (0.17, NodalPlane(strikes[0], 30, 90)),
                        (0.17, NodalPlane(strikes[1], 30, 90)),
                        (0.04, NodalPlane(strikes[2], 30, 90)),
                        (0.04, NodalPlane(strikes[3], 30, 90)),
                        (0.04, NodalPlane(strikes[4], 30, 90)),
                        (0.04, NodalPlane(strikes[5], 30, 90)),
                        (0.17, NodalPlane(strikes[0], 90, 0)),
                        (0.17, NodalPlane(strikes[1], 90, 0)),
                        (0.04, NodalPlane(strikes[2], 90, 0)),
                        (0.04, NodalPlane(strikes[3], 90, 0)),
                        (0.04, NodalPlane(strikes[4], 90, 0)),
                        (0.04, NodalPlane(strikes[5], 90, 0))
                    ])
                dom_poly = Polygon(shape.shape.points)
                for pt in pts:
                    pt_loc = Point(pt.location.x, pt.location.y)
                    if pt_loc.within(dom_poly):
                        pt.tectonic_region_type = dom['TRT']
                        pt.nodal_plane_distribution = nodal_plane_dist  # FIXME! update based on data extracted from shapefile
                        pt.hypocenter_distribution = hypo_depth_dist
                        pt.rupture_aspect_ratio = 2
                        mfd = pt.mfd
                        new_mfd = gr2inc_mmax(mfd, mmaxs[dom['CODE']],
                                              mmaxs_w[dom['CODE']], weight)
                        pt.mfd = new_mfd
                        if pt.source_id in pt_ids:
                            print('Point source %s already exists!'
                                  % pt.source_id)
                            print('Skipping this source for trt %s'
                                  % dom['TRT'])
                        else:
                            merged_pts.append(pt)
                            pt_ids.append(pt.source_id)

    outfile = "%s_%s.xml" % (filename_stem, bval_key)
    outfile = os.path.join(output_dir, outfile)
    name = os.path.splitext(outfile)[0]  # strip the .xml extension
    if nrml_version == '04':
        nodes = list(map(obj_to_node, sorted(merged_pts)))
        source_model = Node("sourceModel", {"name": name}, nodes=nodes)
        with open(outfile, 'wb') as f:
            nrml.write([source_model], f, '%s', xmlns=NAMESPACE)
    return outfile
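A hedged sketch of a single entry of `params`, inferred only from the keys combine_ss_models reads above; all values are illustrative:

# Hedged sketch, not part of the original example
dom = {'CODE': 'SEOZ',          # domain code, must match the shapefile
       'TRT': 'Non_cratonic',   # tectonic region type
       'DOMAIN': 2,             # for Cratonic, 1 selects the Archean Mmax branch
       'COMPLETENESS': [[1990., 3.0], [1960., 5.0]],  # (year, Mmin) rows
       'DEP_BEST': 10., 'DEP_UPPER': 5., 'DEP_LOWER': 15.,  # depths (km)
       'SHMAX': 100., 'SHMAX_SIG': 15.,  # stress azimuth and its sigma
       'BVAL_BEST': 1.0}        # looked up via bval_key, e.g. 'BVAL_BEST'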