Example 1
def upgrade_file(path, multipoint):
    """Upgrade to the latest NRML version"""
    node0 = nrml.read(path, chatty=False)[0]
    shutil.copy(path, path + '.bak')  # make a backup of the original file
    tag = striptag(node0.tag)
    gml = True
    if tag == 'vulnerabilityModel':
        vf_dict, cat_dict = get_vulnerability_functions_04(path)
        # below I am converting into a NRML 0.5 vulnerabilityModel
        node0 = Node(
            'vulnerabilityModel', cat_dict,
            nodes=[obj_to_node(val) for val in vf_dict.values()])
        gml = False
    elif tag == 'fragilityModel':
        node0 = read_nrml.convert_fragility_model_04(
            nrml.read(path)[0], path)
        gml = False
    elif tag == 'sourceModel':
        node0 = nrml.read(path)[0]
        dic = groupby(node0.nodes, operator.itemgetter('tectonicRegion'))
        node0.nodes = [Node('sourceGroup',
                            dict(tectonicRegion=trt, name="group %s" % i),
                            nodes=srcs)
                       for i, (trt, srcs) in enumerate(dic.items(), 1)]
        if multipoint:
            sourceconverter.update_source_model(node0, path + '.bak')
    with open(path, 'wb') as f:
        nrml.write([node0], f, gml=gml)
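A hypothetical invocation (the path is illustrative): the original file is kept as a .bak copy and the file itself is rewritten in place as NRML 0.5.

upgrade_file('source_model.xml', multipoint=True)  # hypothetical path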
Example 2
def mfds2multimfd(mfds):
    """
    Convert a list of MFD nodes into a single MultiMFD node
    """
    _, kind = mfds[0].tag.split('}')
    node = Node('multiMFD', dict(kind=kind))
    lengths = None
    for field in mfd.multi_mfd.ASSOC[kind][1:]:
        alias = mfd.multi_mfd.ALIAS.get(field, field)
        if field in ('magnitudes', 'occurRates'):
            data = [~getattr(m, field) for m in mfds]
            lengths = [len(d) for d in data]
            data = sum(data, [])  # the list has to be flat
        else:
            try:
                data = [m[alias] for m in mfds]
            except KeyError:
                if alias == 'binWidth':
                    # missing binWidth in GR MFDs is ok
                    continue
                else:
                    raise
        node.append(Node(field, text=data))
    if lengths:  # this is the last field if present
        node.append(Node('lengths', text=lengths))
    return node
Example 3
 def surface_nodes(self):
     """
     A single element list containing a planarSurface node
     """
     node = Node('planarSurface')
     for name, lon, lat, depth in zip(
             'topLeft topRight bottomLeft bottomRight'.split(),
             self.corner_lons, self.corner_lats, self.corner_depths):
         node.append(Node(name, dict(lon=lon, lat=lat, depth=depth)))
     return [node]
Example 4
def build_slip_list_node(slip_list):
    """
    :param slip_list:
       an array of shape (N, 2) with columns (slip, weight)
    :returns:
        a slipList node containing N slip nodes
    """
    sliplist = Node('slipList', {})
    for row in slip_list:
        sliplist.append(
            Node('slip', dict(weight=row[1]), row[0]))
    return sliplist
Example 5
def build_hypo_list_node(hypo_list):
    """
    :param hypo_list:
       an array of shape (N, 3) with columns (alongStrike, downDip, weight)
    :returns:
        a hypoList node containing N hypo nodes
    """
    hypolist = Node('hypoList', {})
    for row in hypo_list:
        n = Node(
            'hypo', dict(alongStrike=row[0], downDip=row[1], weight=row[2]))
        hypolist.append(n)
    return hypolist
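A minimal usage sketch for the two builders above; the arrays are illustrative, and only numpy plus the Node class already used in these examples are assumed.

import numpy

slips = numpy.array([[0.0, 0.7], [90.0, 0.3]])  # columns (slip, weight)
hypos = numpy.array([[0.25, 0.25, 0.5],
                     [0.75, 0.75, 0.5]])  # (alongStrike, downDip, weight)
slip_node = build_slip_list_node(slips)  # slipList with two slip children
hypo_node = build_hypo_list_node(hypos)  # hypoList with two hypo children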
Example 6
    def serialize(self, data, fmt='%10.7E'):
        """
        Serialize a collection of ground motion fields to XML.

        :param data:
            An iterable of "GMF set" objects.
            Each "GMF set" object should:

            * have an `investigation_time` attribute
            * have a `stochastic_event_set_id` attribute
            * be iterable, yielding a sequence of "GMF" objects

            Each "GMF" object should:

            * have an `imt` attribute
            * have an `sa_period` attribute (only if `imt` is 'SA')
            * have an `sa_damping` attribute (only if `imt` is 'SA')
            * have an `event_id` attribute (to indicate which rupture
              contributed to this gmf)
            * be iterable, yielding a sequence of "GMF node" objects

            Each "GMF node" object should have:

            * a `gmv` attribute (to indicate the ground motion value)
            * `lon` and `lat` attributes (to indicate the geographical location
              of the ground motion field)
        """
        gmf_set_nodes = []
        for gmf_set in data:
            gmf_set_node = Node('gmfSet')
            if gmf_set.investigation_time:
                gmf_set_node['investigationTime'] = str(
                    gmf_set.investigation_time)
            gmf_set_node['stochasticEventSetId'] = str(
                gmf_set.stochastic_event_set_id)
            gmf_set_node.nodes = gen_gmfs(gmf_set)
            gmf_set_nodes.append(gmf_set_node)

        gmf_container = Node('gmfCollection')
        gmf_container[SM_TREE_PATH] = self.sm_lt_path
        gmf_container[GSIM_TREE_PATH] = self.gsim_lt_path
        gmf_container.nodes = gmf_set_nodes

        with open(self.dest, 'wb') as dest:
            nrml.write([gmf_container], dest, fmt)
Example 7
def gen_gmfs(gmf_set):
    """
    Generate GMF nodes from a gmf_set.

    :param gmf_set: a sequence of GMF objects with attributes
        imt, sa_period, sa_damping, event_id and containing a list
        of GMF nodes with attributes gmv and location. The nodes
        are sorted by lon/lat.
    """
    for gmf in gmf_set:
        gmf_node = Node('gmf')
        gmf_node['IMT'] = gmf.imt
        if gmf.imt == 'SA':
            gmf_node['saPeriod'] = str(gmf.sa_period)
            gmf_node['saDamping'] = str(gmf.sa_damping)
        gmf_node['ruptureId'] = gmf.event_id
        sorted_nodes = sorted(gmf)
        gmf_node.nodes = (
            Node('node', dict(gmv=n.gmv, lon=n.location.x, lat=n.location.y))
            for n in sorted_nodes)
        yield gmf_node
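Since gen_gmfs only duck-types its input, a small stand-in is enough to exercise it; every class and attribute name below is illustrative, not part of the library. The __lt__ method supplies the lon/lat ordering that sorted(gmf) relies on.

class FakeLocation:
    def __init__(self, x, y):
        self.x, self.y = x, y

class FakeGmfNode:  # stand-in for a "GMF node"
    def __init__(self, gmv, lon, lat):
        self.gmv = gmv
        self.location = FakeLocation(lon, lat)

    def __lt__(self, other):  # lets sorted(gmf) order the nodes by lon/lat
        return ((self.location.x, self.location.y) <
                (other.location.x, other.location.y))

class FakeGmf(list):  # an iterable of GMF nodes with imt/event_id attributes
    imt = 'PGA'
    event_id = 'rup-1'

gmf = FakeGmf([FakeGmfNode(0.2, 10.1, 45.1), FakeGmfNode(0.1, 10.0, 45.0)])
for gmf_node in gen_gmfs([gmf]):
    print(gmf_node['IMT'], gmf_node['ruptureId'])  # PGA rup-1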
Example 8
def build_multi_mfd(mfd):
    """
    Converts a MultiMFD instance into a Node

    :param mfd:
        MFD as instance of
        :class:`openquake.hazardlib.mfd.multi_mfd.MultiMFD`
    :returns:
        Instance of :class:`openquake.baselib.node.Node`
    """
    node = Node("multiMFD", dict(kind=mfd.kind))
    for name in sorted(mfd.kwargs):
        values = mfd.kwargs[name]
        if name in ('magnitudes', 'occurRates'):
            values = sum(values, [])
        node.append(Node(name, text=values))
    if 'occurRates' in mfd.kwargs:
        lengths = [len(rates) for rates in mfd.kwargs['occurRates']]
        node.append(Node('lengths', text=lengths))
    return node
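A sketch with a stand-in object: build_multi_mfd only reads the kind and kwargs attributes, so a real MultiMFD instance is not required to see how occurRates is flattened and paired with a lengths node. All names below are illustrative.

class FakeMultiMFD:  # stand-in exposing only what build_multi_mfd reads
    kind = 'incrementalMFD'
    kwargs = {'min_mag': [4.5, 5.0],
              'bin_width': [0.1, 0.1],
              'occurRates': [[0.1, 0.05], [0.2]]}

node = build_multi_mfd(FakeMultiMFD())
# occurRates is flattened to [0.1, 0.05, 0.2] and a lengths node [2, 1] added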
Example 9
 def _read_csv(self):
     """
     :yields: asset nodes
     """
     expected_header = self._csv_header()
     for fname in self.datafiles:
         with open(fname, encoding='utf-8') as f:
             fields = next(csv.reader(f))
             header = set(fields)
             if len(header) < len(fields):
                 raise InvalidFile(
                     '%s: The header %s contains a duplicated field' %
                     (fname, header))
             elif expected_header - header - {'exposure', 'country'}:
                 raise InvalidFile(
                     'Unexpected header in %s\nExpected: %s\nGot: %s' %
                     (fname, sorted(expected_header), sorted(header)))
     occupancy_periods = self.occupancy_periods.split()
     for fname in self.datafiles:
         with open(fname, encoding='utf-8') as f:
             for i, dic in enumerate(csv.DictReader(f), 1):
                 asset = Node('asset', lineno=i)
                 with context(fname, asset):
                     asset['id'] = dic['id']
                     asset['number'] = valid.positivefloat(dic['number'])
                     asset['taxonomy'] = dic['taxonomy']
                     if 'area' in dic:  # optional attribute
                         asset['area'] = dic['area']
                     loc = Node('location',
                                dict(lon=valid.longitude(dic['lon']),
                                     lat=valid.latitude(dic['lat'])))
                     costs = Node('costs')
                     for cost in self.cost_types['name']:
                         a = dict(type=cost, value=dic[cost])
                         if 'retrofitted' in dic:
                             a['retrofitted'] = dic['retrofitted']
                         costs.append(Node('cost', a))
                     occupancies = Node('occupancies')
                     for period in occupancy_periods:
                         a = dict(occupants=float(dic[period]),
                                  period=period)
                         occupancies.append(Node('occupancy', a))
                     tags = Node('tags')
                     for tagname in self.tagcol.tagnames:
                         if tagname not in (
                                 'taxonomy', 'exposure', 'country'):
                             tags.attrib[tagname] = dic[tagname]
                     asset.nodes.extend([loc, costs, occupancies, tags])
                 yield asset
Example 10
def complex_fault_node(edges):
    """
    :param edges: a list of lists of points
    :returns: a Node of kind complexFaultGeometry
    """
    node = Node('complexFaultGeometry')
    node.append(edge_node('faultTopEdge', edges[0]))
    for edge in edges[1:-1]:
        node.append(edge_node('intermediateEdge', edge))
    node.append(edge_node('faultBottomEdge', edges[-1]))
    return node
Example 11
def simple_fault_node(fault_trace, dip, upper_depth, lower_depth):
    """
    :param fault_trace: an object with an attribute .points
    :param dip: dip parameter
    :param upper_depth: upper seismogenic depth
    :param lower_depth: lower seismogenic depth
    :returns: a Node of kind simpleFaultGeometry
    """
    node = Node('simpleFaultGeometry')
    line = []
    for p in fault_trace.points:
        line.append(p.longitude)
        line.append(p.latitude)
    node.append(Node('gml:LineString', nodes=[Node('gml:posList', {}, line)]))
    node.append(Node('dip', {}, dip))
    node.append(Node('upperSeismoDepth', {}, upper_depth))
    node.append(Node('lowerSeismoDepth', {}, lower_depth))
    return node
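A minimal usage sketch: the namedtuples stand in for whatever objects expose a .points sequence of items with .longitude/.latitude attributes.

import collections

Point = collections.namedtuple('Point', 'longitude latitude')
Trace = collections.namedtuple('Trace', 'points')

trace = Trace(points=[Point(10.0, 45.0), Point(10.5, 45.2)])
geom = simple_fault_node(trace, dip=60.0, upper_depth=0.0, lower_depth=15.0)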
Example 12
def convert_fragility_model_04(node, fname, fmcounter=itertools.count(1)):
    """
    :param node:
        an :class:`openquake.commonlib.node.Node` in NRML 0.4
    :param fname:
        path of the fragility file
    :returns:
        an :class:`openquake.commonlib.node.Node` in NRML 0.5
    """
    convert_type = {"lognormal": "logncdf"}
    new = Node('fragilityModel',
               dict(assetCategory='building',
                    lossCategory='structural',
                    id='fm_%d_converted_from_NRML_04' %
                    next(fmcounter)))
    with context(fname, node):
        fmt = node['format']
        descr = ~node.description
        limit_states = ~node.limitStates
    new.append(Node('description', {}, descr))
    new.append((Node('limitStates', {}, ' '.join(limit_states))))
    for ffs in node[2:]:
        IML = ffs.IML
        # NB: noDamageLimit = None is different than zero
        nodamage = ffs.attrib.get('noDamageLimit')
        ff = Node('fragilityFunction', {'format': fmt})
        ff['id'] = ~ffs.taxonomy
        ff['shape'] = convert_type[ffs.attrib.get('type', 'lognormal')]
        if fmt == 'continuous':
            with context(fname, IML):
                attr = dict(imt=IML['IMT'],
                            minIML=IML['minIML'],
                            maxIML=IML['maxIML'])
                if nodamage is not None:
                    attr['noDamageLimit'] = nodamage
                ff.append(Node('imls', attr))
            for ffc in ffs[2:]:
                with context(fname, ffc):
                    ls = ffc['ls']
                    param = ffc.params
                with context(fname, param):
                    m, s = param['mean'], param['stddev']
                ff.append(Node('params', dict(ls=ls, mean=m, stddev=s)))
        else:  # discrete
            with context(fname, IML):
                imls = ' '.join(map(str, (~IML)[1]))
                attr = dict(imt=IML['IMT'])
            if nodamage is not None:
                attr['noDamageLimit'] = nodamage
            ff.append(Node('imls', attr, imls))
            for ffd in ffs[2:]:
                ls = ffd['ls']
                with context(fname, ffd):
                    poes = ' '.join(map(str, ~ffd.poEs))
                ff.append(Node('poes', dict(ls=ls), poes))
        new.append(ff)
    return new
Example 13
    def _add_asset(self, idx, asset_node, param):
        values = {}
        deductibles = {}
        insurance_limits = {}
        retrofitteds = {}
        asset_id = asset_node['id'].encode('utf8')
        with context(param['fname'], asset_node):
            self.asset_refs.append(asset_id)
            taxonomy = asset_node['taxonomy']
            if 'damage' in param['calculation_mode']:
                # calculators of 'damage' kind require the 'number'
                # if it is missing a KeyError is raised
                number = asset_node['number']
            else:
                # some calculators ignore the 'number' attribute;
                # if it is missing it is considered 1, since we are going
                # to multiply by it
                try:
                    number = asset_node['number']
                except KeyError:
                    number = 1
                else:
                    if 'occupants' in param['all_cost_types']:
                        values['occupants_None'] = number
            location = asset_node.location['lon'], asset_node.location['lat']
            if param['region'] and not geometry.Point(*location).within(
                    param['region']):
                param['out_of_region'] += 1
                return
            tagnode = getattr(asset_node, 'tags', None)
            dic = {} if tagnode is None else tagnode.attrib.copy()
            with context(param['fname'], tagnode):
                dic['taxonomy'] = taxonomy
                idxs = self.tagcol.add_tags(dic)
        try:
            costs = asset_node.costs
        except AttributeError:
            costs = Node('costs', [])
        try:
            occupancies = asset_node.occupancies
        except AttributeError:
            occupancies = Node('occupancies', [])
        for cost in costs:
            with context(param['fname'], cost):
                cost_type = cost['type']
                if cost_type in param['relevant_cost_types']:
                    values[cost_type] = cost['value']
                    retrovalue = cost.get('retrofitted')
                    if retrovalue is not None:
                        retrofitteds[cost_type] = retrovalue
                    if param['insured_losses']:
                        deductibles[cost_type] = cost['deductible']
                        insurance_limits[cost_type] = cost['insuranceLimit']

        # check we are not missing a cost type
        missing = param['relevant_cost_types'] - set(values)
        if missing and missing <= param['ignore_missing_costs']:
            logging.warn('Ignoring asset %s, missing cost type(s): %s',
                         asset_id, ', '.join(missing))
            for cost_type in missing:
                values[cost_type] = None
        elif missing and 'damage' not in param['calculation_mode']:
            # missing the costs is okay for damage calculators
            with context(param['fname'], asset_node):
                raise ValueError("Invalid Exposure. "
                                 "Missing cost %s for asset %s" %
                                 (missing, asset_id))
        tot_occupants = 0
        for occupancy in occupancies:
            with context(param['fname'], occupancy):
                occupants = 'occupants_%s' % occupancy['period']
                values[occupants] = occupancy['occupants']
                tot_occupants += values[occupants]
        if occupancies:  # store average occupants
            values['occupants_None'] = tot_occupants / len(occupancies)
        area = float(asset_node.get('area', 1))
        ass = asset.Asset(idx, idxs, number, location, values, area,
                          deductibles, insurance_limits, retrofitteds,
                          self.cost_calculator)
        self.assets.append(ass)
Example 14
def _get_exposure(fname, stop=None):
    """
    :param fname:
        path of the XML file containing the exposure
    :param stop:
        node at which to stop parsing (or None)
    :returns:
        a pair (Exposure instance, list of asset nodes)
    """
    [exposure] = nrml.read(fname, stop=stop)
    if not exposure.tag.endswith('exposureModel'):
        raise InvalidFile('%s: expected exposureModel, got %s' %
                          (fname, exposure.tag))
    description = exposure.description
    try:
        conversions = exposure.conversions
    except AttributeError:
        conversions = Node('conversions', nodes=[Node('costTypes', [])])
    try:
        area = conversions.area
    except AttributeError:
        # NB: the area type cannot be an empty string because when sending
        # around the CostCalculator object we would run into this numpy bug
        # about pickling dictionaries with empty strings:
        # https://github.com/numpy/numpy/pull/5475
        area = Node('area', dict(type='?'))
    try:
        occupancy_periods = exposure.occupancyPeriods.text or ''
    except AttributeError:
        occupancy_periods = ''
    try:
        tagNames = exposure.tagNames
    except AttributeError:
        tagNames = Node('tagNames', text='')
    tagnames = ['id'] + (~tagNames or [])
    if set(tagnames) & {'taxonomy', 'exposure', 'country'}:
        raise InvalidFile('taxonomy, exposure and country are reserved names; '
                          'you cannot use them in <tagNames>: %s' % fname)
    tagnames.insert(0, 'taxonomy')

    # read the cost types and make some checks
    cost_types = []
    retrofitted = False
    for ct in conversions.costTypes:
        with context(fname, ct):
            ctname = ct['name']
            if ctname == 'structural' and 'retrofittedType' in ct.attrib:
                if ct['retrofittedType'] != ct['type']:
                    raise ValueError(
                        'The retrofittedType %s is different from the type '
                        '%s' % (ct['retrofittedType'], ct['type']))
                if ct['retrofittedUnit'] != ct['unit']:
                    raise ValueError(
                        'The retrofittedUnit %s is different from the unit '
                        '%s' % (ct['retrofittedUnit'], ct['unit']))
                retrofitted = True
            cost_types.append(
                (ctname, valid.cost_type_type(ct['type']), ct['unit']))
    if 'occupants' in cost_types:
        cost_types.append(('occupants', 'per_area', 'people'))
    cost_types.sort(key=operator.itemgetter(0))
    cost_types = numpy.array(cost_types, cost_type_dt)
    cc = CostCalculator({}, {}, {},
                        {name: i
                         for i, name in enumerate(tagnames)})
    for ct in cost_types:
        name = ct['name']  # structural, nonstructural, ...
        cc.cost_types[name] = ct['type']  # aggregated, per_asset, per_area
        cc.area_types[name] = area['type']
        cc.units[name] = ct['unit']
    exp = Exposure(exposure['id'], exposure['category'], description.text,
                   cost_types, occupancy_periods, retrofitted, area.attrib, [],
                   [], cc, TagCollection(tagnames))
    assets_text = exposure.assets.text.strip()
    if assets_text:
        # the <assets> tag contains a list of file names
        dirname = os.path.dirname(fname)
        exp.datafiles = [os.path.join(dirname, f) for f in assets_text.split()]
    else:
        exp.datafiles = []
    return exp, exposure.assets
Example 15
def _pointsources2multipoints(srcs, i):
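    # merge pointSources that share the same key computed by get_key
    # (msr, rar, usd, lsd, hd, npd) into a single multiPointSource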
    allsources = []
    for key, sources in groupby(srcs, get_key).items():
        if len(sources) == 1:  # there is a single source
            allsources.extend(sources)
            continue
        msr, rar, usd, lsd, hd, npd = key
        mfds = [src[3] for src in sources]
        points = []
        for src in sources:
            points.extend(~src.pointGeometry.Point.pos)
        geom = Node('multiPointGeometry')
        geom.append(Node('gml:posList', text=points))
        geom.append(Node('upperSeismoDepth', text=usd))
        geom.append(Node('lowerSeismoDepth', text=lsd))
        node = Node(
            'multiPointSource',
            dict(id='mps-%d' % i, name='multiPointSource-%d' % i),
            nodes=[geom])
        node.append(Node("magScaleRel", text=msr))
        node.append(Node("ruptAspectRatio", text=rar))
        node.append(mfds2multimfd(mfds))
        node.append(Node('nodalPlaneDist', nodes=[
            Node('nodalPlane', dict(probability=prob, rake=rake,
                                    strike=strike, dip=dip))
            for prob, rake, strike, dip in npd]))
        node.append(Node('hypoDepthDist', nodes=[
            Node('hypoDepth', dict(depth=depth, probability=prob))
            for prob, depth in hd]))
        allsources.append(node)
        i += 1
    return i, allsources
Example 16
 def _read_csv(self, csvnames, dirname):
     """
     :param csvnames: names of csv files, space separated
     :param dirname: the directory where the csv files are
     :yields: asset nodes
     """
     expected_header = self._csv_header()
     fnames = [os.path.join(dirname, f) for f in csvnames.split()]
     for fname in fnames:
         with open(fname) as f:
             header = set(next(csv.reader(f)))
             if expected_header - header:
                 raise InvalidFile(
                     'Unexpected header in %s\nExpected: %s\nGot: %s' %
                     (fname, expected_header, header))
     for fname in fnames:
         with open(fname) as f:
             for i, dic in enumerate(csv.DictReader(f), 1):
                 asset = Node('asset', lineno=i)
                 with context(fname, asset):
                     asset['id'] = dic['id']
                     asset['number'] = float(dic['number'])
                     asset['taxonomy'] = dic['taxonomy']
                     if 'area' in dic:  # optional attribute
                         asset['area'] = dic['area']
                     loc = Node(
                         'location',
                         dict(lon=valid.longitude(dic['lon']),
                              lat=valid.latitude(dic['lat'])))
                     costs = Node('costs')
                     for cost in self.cost_types['name']:
                         a = dict(type=cost, value=dic[cost])
                         costs.append(Node('cost', a))
                     occupancies = Node('occupancies')
                     for period in self.occupancy_periods:
                         a = dict(occupants=dic[period], period=period)
                         occupancies.append(Node('occupancy', a))
                     tags = Node('tags')
                     for tagname in self.tagcol.tagnames:
                         if tagname != 'taxonomy':
                             tags[tagname] = dic[tagname]
                     asset.nodes.extend([loc, costs, occupancies, tags])
                     if i % 100000 == 0:
                         logging.info('Read %d assets', i)
                 yield asset
Example 17
def combine_ss_models(filedict,
                      domains_shp,
                      lt,
                      outfile,
                      nrml_version='04',
                      weight=1.):  #, id_base = 'ASS'):
    """ Combine smoothed seismicity models based on tectonic region types
    :params filedict:
        dict of form filedict[trt] = filename specifying input file for that region
    :params domains_shp:
        shapefile defining tectonic domain regions
    :params lt:
        LogicTree object containing relevant values and weights for Mmax
    :params outfile:
        output nrml formatted file
    """
    print('Getting tectonic region type from %s' % domains_shp)
    driver = ogr.GetDriverByName("ESRI Shapefile")
    data_source = driver.Open(domains_shp, 0)
    dsf = data_source.GetLayer()
    trt_types = []
    for feature in dsf:
        trt_types.append(feature.GetField('TRT'))
    dsf = shapefile.Reader(domains_shp)
    dom_shapes = dsf.shapes()

    hypo_depth_dist_nc = PMF([(0.5, 10.0), (0.25, 5.0), (0.25, 15.0)])
    hypo_depth_dist_c = PMF([(0.5, 5.0), (0.25, 2.5), (0.25, 10.0)])
    hypo_depth_dist_ex = hypo_depth_dist_c
    hypo_depth_dict = {
        'Cratonic': hypo_depth_dist_c,
        'Non_cratonic': hypo_depth_dist_nc,
        'Extended': hypo_depth_dist_ex
    }
    nodal_plane_dist = PMF([(0.3, NodalPlane(0, 30, 90)),
                            (0.2, NodalPlane(90, 30, 90)),
                            (0.3, NodalPlane(180, 30, 90)),
                            (0.2, NodalPlane(270, 30, 90))])

    merged_pts = []

    # Get mmax values and weights
    mmaxs = {}
    mmaxs_w = {}
    for trt, filename in filedict.items():
        if trt == 'Cratonic':
            mmax_values, mmax_weights = lt.get_weights('Mmax', 'Proterozoic')
        else:
            mmax_values, mmax_weights = lt.get_weights('Mmax', trt)
        mmax_values = [float(i) for i in mmax_values]
        mmax_weights = [float(i) for i in mmax_weights]
        print(mmax_values)
        print(mmax_weights)
        mmaxs[trt] = mmax_values
        mmaxs_w[trt] = mmax_weights

    pt_ids = []
    for trt, filename in filedict.items():
        print(trt)
        print('Parsing %s' % filename)
        # Only keep points within domain
        pts = read_pt_source(filename)
        #        shapes = np.where(trt_types
        for zone_trt, dom_shape in zip(trt_types, dom_shapes):
            print(zone_trt)
            print(dom_shape)
            if zone_trt == trt:
                print('TRT %s, processing shape %s' % (zone_trt, dom_shape))
                dom_poly = Polygon(dom_shape.points)
                for pt in pts:
                    pt_loc = Point(pt.location.x, pt.location.y)
                    if pt_loc.within(dom_poly):
                        pt.tectonic_region_type = zone_trt
                        pt.nodal_plane_distribution = nodal_plane_dist
                        pt.hypocenter_distribution = hypo_depth_dict[zone_trt]
                        pt.rupture_aspect_ratio = 2
                        mfd = pt.mfd
                        new_mfd = gr2inc_mmax(mfd, mmaxs[trt], mmaxs_w[trt],
                                              weight)
                        pt.mfd = new_mfd
                        if pt.source_id in pt_ids:
                            print('Point source %s already exists!'
                                  % pt.source_id)
                            print('Skipping this source for trt %s' % zone_trt)
                        else:
                            merged_pts.append(pt)
                            pt_ids.append(pt.source_id)

    name = outfile[:-4]  # strip '.xml' (rstrip would strip characters, not the suffix)
    if nrml_version == '04':
        nodes = list(map(obj_to_node, sorted(merged_pts)))
        source_model = Node("sourceModel", {"name": name}, nodes=nodes)
        with open(outfile, 'wb') as f:
            nrml.write([source_model], f, '%s', xmlns=NAMESPACE)
Example 18
def _pointsources2multipoints(srcs, i):
    # converts pointSources with the same hpdist, npdist and msr into a
    # single multiPointSource.
    allsources = []
    for (hd, npd, msr), sources in groupby(srcs, dists).items():
        if len(sources) == 1:  # there is a single source
            allsources.extend(sources)
            continue
        mfds = [src[3] for src in sources]
        points = []
        usd = []
        lsd = []
        rar = []
        for src in sources:
            pg = src.pointGeometry
            points.extend(~pg.Point.pos)
            usd.append(~pg.upperSeismoDepth)
            lsd.append(~pg.lowerSeismoDepth)
            rar.append(~src.ruptAspectRatio)
        geom = Node('multiPointGeometry')
        geom.append(Node('gml:posList', text=points))
        geom.append(Node('upperSeismoDepth', text=collapse(usd)))
        geom.append(Node('lowerSeismoDepth', text=collapse(lsd)))
        node = Node(
            'multiPointSource',
            dict(id='mps-%d' % i, name='multiPointSource-%d' % i),
            nodes=[geom])
        node.append(Node("magScaleRel", text=collapse(msr)))
        node.append(Node("ruptAspectRatio", text=collapse(rar)))
        node.append(mfds2multimfd(mfds))
        node.append(Node('nodalPlaneDist', nodes=[
            Node('nodalPlane', dict(probability=prob, rake=rake,
                                    strike=strike, dip=dip))
            for prob, rake, strike, dip in npd]))
        node.append(Node('hypoDepthDist', nodes=[
            Node('hypoDepth', dict(depth=depth, probability=prob))
            for prob, depth in hd]))
        allsources.append(node)
        i += 1
    return i, allsources
Example 19
def _pointsources2multipoints(srcs, i):
    # converts pointSources with the same hpdist, npdist and msr into a
    # single multiPointSource.
    allsources = []
    for (hd, npd, msr), sources in groupby(srcs, dists).items():
        if len(sources) == 1:  # there is a single source
            allsources.extend(sources)
            continue
        mfds = [src[3] for src in sources]
        points = []
        usd = []
        lsd = []
        rar = []
        for src in sources:
            pg = src.pointGeometry
            points.extend(~pg.Point.pos)
            usd.append(~pg.upperSeismoDepth)
            lsd.append(~pg.lowerSeismoDepth)
            rar.append(~src.ruptAspectRatio)
        geom = Node('multiPointGeometry')
        geom.append(Node('gml:posList', text=points))
        geom.append(Node('upperSeismoDepth', text=collapse(usd)))
        geom.append(Node('lowerSeismoDepth', text=collapse(lsd)))
        node = Node('multiPointSource',
                    dict(id='mps-%d' % i, name='multiPointSource-%d' % i),
                    nodes=[geom])
        node.append(Node("magScaleRel", text=collapse(msr)))
        node.append(Node("ruptAspectRatio", text=collapse(rar)))
        node.append(mfds2multimfd(mfds))
        node.append(
            Node('nodalPlaneDist',
                 nodes=[
                     Node(
                         'nodalPlane',
                         dict(probability=prob,
                              rake=rake,
                              strike=strike,
                              dip=dip)) for prob, rake, strike, dip in npd
                 ]))
        node.append(
            Node('hypoDepthDist',
                 nodes=[
                     Node('hypoDepth', dict(depth=depth, probability=prob))
                     for prob, depth in hd
                 ]))
        allsources.append(node)
        i += 1
    return i, allsources
Example 20
def weighted_pt_source(pt_sources,
                       weights,
                       name,
                       filename=None,
                       nrml_version='04'):
    """Scales rates by weights for collapsing logic trees
    :param pt_sources:
        list of PointSource objects
    :param weights:
        dict containing weights for each tectonic
        region type, e.g. weights[trt] = 0.2
    :param filename:
        path to output file; if provided the sources will
        be written to nrml format as defined by
        nrml_version
    :param nrml_version:
        version of nrml schema to use
    :returns weighted_pt_sources:
        list of PointSource objects with activity rates
        scaled by weights
    """
    weighted_point_sources = []
    for pt in pt_sources:
        # Copy sources to avoid messing with original data
        new_pt = copy.deepcopy(pt)
        mfd_type = type(pt.mfd).__name__
        trt = pt.tectonic_region_type
        weight = weights[trt]
        if mfd_type == 'TruncatedGRMFD':
            b_val = pt.mfd.b_val
            # rescale the a-value in log space
            a_val = np.log10(np.power(10, pt.mfd.a_val) * weight)
            new_pt.mfd.modify_set_ab(a_val, b_val)
        elif mfd_type == 'EvenlyDiscretizedMFD':
            mag_bins, rates = zip(*pt.mfd.get_annual_occurrence_rates())
            mag_bins = np.array(mag_bins)
            rates = np.array(rates)
            new_rates = rates * weight
            new_pt.mfd.modify_set_mfd(new_pt.mfd.min_mag, new_pt.mfd.bin_width,
                                      list(new_rates))
        else:
            msg = 'Weighting method for mfd type %s not yet defined' % mfd_type
            raise ValueError(msg)
        weighted_point_sources.append(new_pt)
    # Now write out
    if filename is not None:
        source_model_file = filename
        print('Writing to source model file %s' % source_model_file)
        if nrml_version == '04':
            #            source_list = []
            #            for source in weighted_point_sources:
            #                source_list.append(source)
            nodes = list(map(obj_to_node, sorted(weighted_point_sources)))
            source_model = Node("sourceModel", {"name": name}, nodes=nodes)
            with open(source_model_file, 'wb') as f:
                nrml.write([source_model], f, '%s', xmlns=NAMESPACE)
        elif nrml_version == '05':
            msg = 'Method not yet implemented for nrml version 0.5'
            raise NotImplementedError(msg)


#            source_group_list = []
#            id = 0
#            for trt, sources in weighted_sources.iteritems():
#                source_group = SourceGroup(trt, sources = sources, id=id)
#                id +=1
#                source_group_list.append(source_group)
#            write_source_model(nrml_pt_file, source_group_list,
#                               name = name)
        else:
            print('Warning: nrml version not specified, xml not created')
    return weighted_point_sources
Example 21
def write_source_model(dest,
                       sources_or_groups,
                       name=None,
                       investigation_time=None):
    """
    Writes a source model to XML.

    :param dest:
        Destination path
    :param sources_or_groups:
        Source model in different formats
    :param name:
        Name of the source model (if missing, extracted from the filename)
    :param investigation_time:
        Investigation time to store in the source model (may be None)
    """
    if isinstance(sources_or_groups, nrml.SourceModel):
        groups = sources_or_groups.src_groups
        attrs = dict(name=sources_or_groups.name,
                     investigation_time=sources_or_groups.investigation_time)
    elif isinstance(sources_or_groups[0], sourceconverter.SourceGroup):
        groups = sources_or_groups
        attrs = dict(investigation_time=investigation_time)
    else:  # passed a list of sources
        srcs_by_trt = groupby(sources_or_groups,
                              operator.attrgetter('tectonic_region_type'))
        groups = [
            sourceconverter.SourceGroup(trt, srcs_by_trt[trt])
            for trt in srcs_by_trt
        ]
        attrs = dict(investigation_time=investigation_time)
    if name or 'name' not in attrs:
        attrs['name'] = name or os.path.splitext(os.path.basename(dest))[0]
    if attrs['investigation_time'] is None:
        del attrs['investigation_time']
    nodes = list(map(obj_to_node, groups))
    ddict = extract_ddict(groups)
    if ddict:
        # remove duplicate content from nodes
        for grp_node in nodes:
            for src_node in grp_node:
                if src_node["id"] in ddict:
                    src_node.nodes = []
        # save HDF5 file
        dest5 = os.path.splitext(dest)[0] + '.hdf5'
        with hdf5.File(dest5, 'w') as h:
            for src_id, dic in ddict.items():
                for k, v in dic.items():
                    key = '%s/%s' % (src_id, k)
                    if isinstance(v, numpy.ndarray):
                        h.create_dataset(key,
                                         v.shape,
                                         v.dtype,
                                         compression='gzip',
                                         compression_opts=9)
                        h[key][:] = v
                    else:
                        h[key] = v

    source_model = Node("sourceModel", attrs, nodes=nodes)
    with open(dest, 'wb') as f:
        nrml.write([source_model], f, '%s')
    if ddict:
        return [dest, dest5]
    else:
        return [dest]
Example 22
def build_source_model(csm):
    nodes = [obj_to_node(sg) for sg in csm.src_groups]
    return Node('compositeSourceModel', {}, nodes=nodes)
Example 23
    identifier = 'ASS' + str(j)
    name = 'Helmstetter' + str(j)
    point = Point(data[j, 0], data[j, 1], 10)
    rate = data[j,2]
    # Convert rate to a GR a-value
    aval = np.log10(rate) + bvalue * config["mmin"]

    mfd = TruncatedGRMFD(min_mag, max_mag, 0.1, aval, bvalue)
    hypo_depth_dist = PMF([(0.5, 10.0),
                          (0.25, 5.0),
                          (0.25, 15.0)])
    nodal_plane_dist = PMF([(0.3, NodalPlane(0, 30, 90)),
                            (0.2, NodalPlane(90, 30, 90)),
                            (0.3, NodalPlane(180, 30, 90)),
                            (0.2, NodalPlane(270, 30, 90))])
    point_source = PointSource(identifier, name, 'Non_cratonic',
                               mfd, 2, msr,
                               2.0, tom, 0.1, 20.0, point,
                               nodal_plane_dist, hypo_depth_dist)
    source_list.append(point_source)

filename = "Australia_Adaptive_K%i_b%.3f_mmin%.1f.xml" % (
    smoother.config['k'], smoother.config['bvalue'], smoother.config['mmin'])
mod_name = "Australia_Adaptive_K%i_b%.3f" % (
    smoother.config['k'], smoother.config['bvalue'])
nodes = list(map(obj_to_node, sorted(source_list)))
source_model = Node("sourceModel", {"name": mod_name}, nodes=nodes)
with open(filename, 'wb') as f:
    nrml.write([source_model], f, '%s', xmlns=NAMESPACE)


Example 24
def build_site_model(site):
    return Node('site', get_site_attributes(site))
Example 25
def area2pt_source(area_source_file,
                   sources=None,
                   investigation_time=50,
                   rupture_mesh_spacing=10.,
                   width_of_mfd_bin=0.1,
                   area_source_discretisation=10.,
                   filename=None,
                   nrml_version='04',
                   name=None):
    """Calls OpenQuake parsers to read area source model
    from source_mode.xml type file, convert to point sources
    and write to a new nrml source model file.
    :params area_source_file:
        nrml format file of the area source
    :params area_source_discretisation:
        Grid size (km) for the area source discretisation,
        which defines the distance between resulting point
        sources.
    """
    if sources is None:
        sources = nrml2sourcelist(
            area_source_file,
            investigation_time=investigation_time,
            rupture_mesh_spacing=rupture_mesh_spacing,
            width_of_mfd_bin=width_of_mfd_bin,
            area_source_discretisation=area_source_discretisation)
    if name is None:
        name = '%s_points' % filename
    new_pt_sources = {}
    for source in sources:
        pt_sources = area_to_point_sources(source)
        for pt in pt_sources:
            pt.source_id = pt.source_id.replace(':', '')
            pt.name = pt.name.replace(':', '_')
            try:
                new_pt_sources[pt.tectonic_region_type].append(pt)
            except KeyError:
                new_pt_sources[pt.tectonic_region_type] = [pt]
        # print [method for method in dir(pt) if callable(getattr(pt, method))]
        #  print [attribute for attribute in dir(pt)]
    nrml_pt_file = area_source_file[:-4] + '_pts.xml'
    source_group_list = []
    id = 0
    for trt, sources in new_pt_sources.items():
        source_group = SourceGroup(trt, sources=sources, id=id)
        id += 1
        source_group_list.append(source_group)
    if filename is not None:
        if nrml_version == '04':
            source_list = []
            for trt, sources in new_pt_sources.items():
                for source in sources:
                    source_list.append(source)
            nodes = list(map(obj_to_node, sorted(source_list)))
            source_model = Node("sourceModel", {"name": name}, nodes=nodes)
            with open(nrml_pt_file, 'wb') as f:
                nrml.write([source_model], f, '%s', xmlns=NAMESPACE)
        # This will write version 0.5
        elif nrml_version == '05':
            write_source_model(nrml_pt_file, source_group_list, name=filename)
        else:
            print('Warning: nrml version not specified, xml not created')
    return source_group_list
Example 26
def get_exposure(oqparam):
    """
    Read the full exposure in memory and build a list of
    :class:`openquake.risklib.riskmodels.Asset` instances.

    :param oqparam:
        an :class:`openquake.commonlib.oqvalidation.OqParam` instance
    :returns:
        an :class:`Exposure` instance
    """
    out_of_region = 0
    if oqparam.region_constraint:
        region = wkt.loads(oqparam.region_constraint)
    else:
        region = None
    all_cost_types = set(oqparam.all_cost_types)
    fname = oqparam.inputs['exposure']
    exposure, assets_node = _get_exposure(fname, all_cost_types)
    relevant_cost_types = all_cost_types - set(['occupants'])
    asset_refs = set()
    ignore_missing_costs = set(oqparam.ignore_missing_costs)

    for idx, asset in enumerate(assets_node):
        values = {}
        deductibles = {}
        insurance_limits = {}
        retrofitteds = {}
        with context(fname, asset):
            asset_id = asset['id'].encode('utf8')
            if asset_id in asset_refs:
                raise read_nrml.DuplicatedID(asset_id)
            asset_refs.add(asset_id)
            exposure.asset_refs.append(asset_id)
            taxonomy = asset['taxonomy']
            if 'damage' in oqparam.calculation_mode:
                # calculators of 'damage' kind require the 'number'
                # if it is missing a KeyError is raised
                number = asset.attrib['number']
            else:
                # some calculators ignore the 'number' attribute;
                # if it is missing it is considered 1, since we are going
                # to multiply by it
                try:
                    number = asset['number']
                except KeyError:
                    number = 1
                else:
                    if 'occupants' in all_cost_types:
                        values['occupants_None'] = number
            location = asset.location['lon'], asset.location['lat']
            if region and not geometry.Point(*location).within(region):
                out_of_region += 1
                continue
        try:
            costs = asset.costs
        except AttributeError:
            costs = Node('costs', [])
        try:
            occupancies = asset.occupancies
        except AttributeError:
            occupancies = Node('occupancies', [])
        for cost in costs:
            with context(fname, cost):
                cost_type = cost['type']
                if cost_type in relevant_cost_types:
                    values[cost_type] = cost['value']
                    retrovalue = cost.attrib.get('retrofitted')
                    if retrovalue is not None:
                        retrofitteds[cost_type] = retrovalue
                    if oqparam.insured_losses:
                        deductibles[cost_type] = cost['deductible']
                        insurance_limits[cost_type] = cost['insuranceLimit']

        # check we are not missing a cost type
        missing = relevant_cost_types - set(values)
        if missing and missing <= ignore_missing_costs:
            logging.warn('Ignoring asset %s, missing cost type(s): %s',
                         asset_id, ', '.join(missing))
            for cost_type in missing:
                values[cost_type] = None
        elif missing and 'damage' not in oqparam.calculation_mode:
            # missing the costs is okay for damage calculators
            with context(fname, asset):
                raise ValueError("Invalid Exposure. "
                                 "Missing cost %s for asset %s" %
                                 (missing, asset_id))
        tot_occupants = 0
        for occupancy in occupancies:
            with context(fname, occupancy):
                exposure.time_events.add(occupancy['period'])
                occupants = 'occupants_%s' % occupancy['period']
                values[occupants] = occupancy['occupants']
                tot_occupants += values[occupants]
        if occupancies:  # store average occupants
            values['occupants_None'] = tot_occupants / len(occupancies)
        area = float(asset.attrib.get('area', 1))
        ass = riskmodels.Asset(idx, taxonomy, number, location, values, area,
                               deductibles, insurance_limits, retrofitteds,
                               exposure.cost_calculator)
        exposure.assets.append(ass)
        exposure.taxonomies.add(taxonomy)
    if region:
        logging.info(
            'Read %d assets within the region_constraint '
            'and discarded %d assets outside the region', len(exposure.assets),
            out_of_region)
        if len(exposure.assets) == 0:
            raise RuntimeError('Could not find any asset within the region!')

    # sanity check
    values = any(len(ass.values) + ass.number for ass in exposure.assets)
    assert values, 'Could not find any value??'
    return exposure
Example 27
def combine_ss_models(filename_stem, domains_shp, params,lt, bval_key, output_dir='./',
                      nrml_version = '04', weight=1.):#, id_base = 'ASS'):
    """ Combine smoothed seismicity models based on tectonic region types
    :params filename_stem:
        String for the start of the xml filename for the source model,
        assuming generic components (non generic are inferred,
        e.g. bvalue and completeness model)
    :params domains_shp:
        shapefile defining tectonic domain regions
    :params params:
        list of dicts containing parameters derived from the shapefile
    :params bval_key:
        key for the dicts in params, as we are merging by
        bvalues (best, lower, upper)
    :params lt:
        LogicTree object containing relevant values and weights for Mmax
    :params output_dir:
        directory where the output nrml formatted file is written
    """

    dsf = shapefile.Reader(domains_shp)
    dom_shapes = dsf.shapes()
    # Get indices of relevant fields
    for i, f in enumerate(dsf.fields):
        if f[0] == 'CODE':
            code_index = i - 1
        if f[0] == 'TRT':
            trt_index = i - 1

    hypo_depth_dist_nc = PMF([(0.5, 10.0),
                              (0.25, 5.0),
                              (0.25, 15.0)])
    hypo_depth_dist_c = PMF([(0.5, 5.0),
                             (0.25, 2.5),
                             (0.25, 10.0)])
    hypo_depth_dist_ex = hypo_depth_dist_c
    hypo_depth_dict = {'Cratonic': hypo_depth_dist_c,
                       'Non_cratonic': hypo_depth_dist_nc,
                       'Extended': hypo_depth_dist_ex}
    # FIXME! - Temporary solution until nodal plan logic tree
    # info can be read directly from shapefile attributes
    nodal_plane_dist = PMF([(0.3, NodalPlane(0, 30, 90)),
                            (0.2, NodalPlane(90, 30, 90)),
                            (0.3, NodalPlane(180, 30, 90)),
                            (0.2, NodalPlane(270, 30, 90))])

    merged_pts = []
    pt_ids = []

    # Get mmax values and weights
    mmaxs = {}
    mmaxs_w = {}
    for dom in params:
        print('Processing source %s' % dom['CODE'])
        print(dom['TRT'])
        if dom['TRT'] == 'NCratonic' or dom['TRT'] == 'Extended':
            dom['TRT'] = 'Non_cratonic'
        # For the moment, only consider regions within Australia
        if dom['TRT'] == 'Active' or dom['TRT'] == 'Interface' or \
                dom['TRT'] == 'Oceanic' or \
                dom['TRT'] == 'Intraslab' or dom['CODE'] == 'NECS' or \
                dom['CODE'] == 'NWO':
            print('Source %s not on continental Australia, skipping'
                  % dom['CODE'])
            continue
        elif dom['TRT'] == 'Cratonic':
            if dom['DOMAIN'] == 1:
                mmax_values, mmax_weights = lt.get_weights('Mmax', 'Archean')
            else:
                mmax_values, mmax_weights = lt.get_weights('Mmax', 'Proterozoic')
#        elif dom['TRT'] == 'Active':
#            print 'MMax logic tree not yet defined for active crust, using extended crust'
#            mmax_values, mmax_weights = lt.get_weights('Mmax', 'Extended')
        else:
            mmax_values, mmax_weights = lt.get_weights('Mmax', dom['TRT'])
        mmax_values = [float(i) for i in mmax_values]
        mmax_weights = [float(i) for i in mmax_weights]
        print(mmax_values)
        print(mmax_weights)
        mmaxs[dom['CODE']] = mmax_values
        mmaxs_w[dom['CODE']] = mmax_weights

        #pt_ids = []
    #for trt, filename in filedict.iteritems():
    #    print trt
        completeness_table = np.array([dom['COMPLETENESS'][0]])
        completeness_string = 'comp'
        for ym in completeness_table:
            completeness_string += '_%i_%.1f' % (ym[0], ym[1])
        mmin = dom['COMPLETENESS'][0][1]
        filename = "%s_b%.3f_mmin_%.1f_0.1%s.xml" % (
            filename_stem, dom[bval_key], mmin,
            completeness_string)
        print('Parsing %s' % filename)
        # Only keep points within domain
        pts = read_pt_source(filename)
        #shapes = np.where(trt_types
        for shape in dsf.shapeRecords():
            print(shape.record[code_index])
            if shape.record[code_index] == dom['CODE']:
                # Check for undefined depths  (-999 values)
                if dom['DEP_BEST'] < 0:
                    print('Setting best depth to 10 km')
                    dom['DEP_BEST'] = 10
                if dom['DEP_UPPER'] < 0:
                    print('Setting upper depth to 5 km')
                    dom['DEP_UPPER'] = 5
                if dom['DEP_LOWER'] < 0:
                    print('Setting lower depth to 15 km')
                    dom['DEP_LOWER'] = 15
                hypo_depth_dist = PMF([(0.5, dom['DEP_BEST']),
                                       (0.25, dom['DEP_LOWER']),
                                       (0.25, dom['DEP_UPPER'])])
                # Define nodal planes as thrusts except for special cases
                str1 = dom['SHMAX'] + 90.
                str2 = dom['SHMAX'] + 270.
                str3 = dom['SHMAX'] + dom['SHMAX_SIG'] + 90.
                str4 = dom['SHMAX'] + dom['SHMAX_SIG'] + 270.
                str5 = dom['SHMAX'] - dom['SHMAX_SIG'] + 90.
                str6 = dom['SHMAX'] - dom['SHMAX_SIG'] + 270.
                strikes = [str1, str2, str3, str4, str5, str6]
                for i, strike in enumerate(strikes):
                    if strike >= 360:
                        strikes[i] = strike - 360
                nodal_plane_dist = PMF([(0.34, NodalPlane(strikes[0], 30, 90)),
                                        (0.34, NodalPlane(strikes[1], 30, 90)),
                                        (0.08, NodalPlane(strikes[2], 30, 90)),
                                        (0.08, NodalPlane(strikes[3], 30, 90)),
                                        (0.08, NodalPlane(strikes[4], 30, 90)),
                                        (0.08, NodalPlane(strikes[5], 30, 90))])
                if dom['CODE'] == 'WARM' or dom['CODE'] == 'WAPM':
                    print('Defining special case for %s' % dom['CODE'])
                    nodal_plane_dist = PMF([(0.75, NodalPlane(45, 90, 0)),
                                            (0.125, NodalPlane(strikes[0], 30, 90)),
                                            (0.125, NodalPlane(strikes[1], 30, 90))])
                if dom['CODE'] == 'FMLR':
                    print('Defining special case for FMLR: '
                          '0.5 thrust, 0.5 strike-slip')
                    nodal_plane_dist = PMF([(0.17, NodalPlane(strikes[0], 30, 90)),
                                            (0.17, NodalPlane(strikes[1], 30, 90)),
                                            (0.04, NodalPlane(strikes[2], 30, 90)),
                                            (0.04, NodalPlane(strikes[3], 30, 90)),
                                            (0.04, NodalPlane(strikes[4], 30, 90)),
                                            (0.04, NodalPlane(strikes[5], 30, 90)),
                                            (0.17, NodalPlane(strikes[0], 90, 0)),
                                            (0.17, NodalPlane(strikes[1], 90, 0)),
                                            (0.04, NodalPlane(strikes[2], 90, 0)),
                                            (0.04, NodalPlane(strikes[3], 90, 0)),
                                            (0.04, NodalPlane(strikes[4], 90, 0)),
                                            (0.04, NodalPlane(strikes[5], 90, 0))])
                dom_poly = Polygon(shape.shape.points)
                for pt in pts:
                    pt_loc = Point(pt.location.x, pt.location.y)
                    if pt_loc.within(dom_poly):
                        pt.tectonic_region_type = dom['GMM_TRT']
                        # FIXME: update based on data extracted from shapefile
                        pt.nodal_plane_distribution = nodal_plane_dist
                        pt.hypocenter_distribution = hypo_depth_dist
                        pt.rupture_aspect_ratio = 2
                        mfd = pt.mfd
                        new_mfd = gr2inc_mmax(mfd, mmaxs[dom['CODE']],
                                              mmaxs_w[dom['CODE']], weight)
                        pt.mfd = new_mfd
                        if pt.source_id in pt_ids:
                            print('Point source %s already exists!'
                                  % pt.source_id)
                            print('Skipping this source for trt %s'
                                  % dom['TRT'])
                        else:
                            merged_pts.append(pt)
                            pt_ids.append(pt.source_id)
    outfile = "%s_%s.xml" % (
            filename_stem, bval_key)
    outfile = os.path.join(output_dir, outfile)
    name = outfile.rstrip('.xml')
    if nrml_version == '04':
        nodes = list(map(obj_to_node, sorted(merged_pts)))
        source_model = Node("sourceModel", {"name": name}, nodes=nodes)
        with open(outfile, 'wb') as f:
            nrml.write([source_model], f, '%s', xmlns=NAMESPACE)
    return outfile
Example n. 31
def _get_exposure(fname, ok_cost_types, stop=None):
    """
    :param fname:
        path of the XML file containing the exposure
    :param ok_cost_types:
        a set of cost types (as strings)
    :param stop:
        node at which to stop parsing (or None)
    :returns:
        a pair (Exposure instance, list of asset nodes)
    """
    [exposure] = nrml.read(fname, stop=stop)
    if not exposure.tag.endswith('exposureModel'):
        raise InvalidFile('%s: expected exposureModel, got %s' %
                          (fname, exposure.tag))
    description = exposure.description
    try:
        conversions = exposure.conversions
    except AttributeError:
        conversions = Node('conversions', nodes=[Node('costTypes', [])])
    try:
        inslimit = conversions.insuranceLimit
    except AttributeError:
        inslimit = Node('insuranceLimit', text=True)
    try:
        deductible = conversions.deductible
    except AttributeError:
        deductible = Node('deductible', text=True)
    try:
        area = conversions.area
    except AttributeError:
        # NB: the area type cannot be an empty string because when sending
        # around the CostCalculator object we would run into this numpy bug
        # about pickling dictionaries with empty strings:
        # https://github.com/numpy/numpy/pull/5475
        area = Node('area', dict(type='?'))
    try:
        occupancy_periods = ~exposure.occupancyPeriods or ''
    except AttributeError:
        occupancy_periods = 'day night transit'
    try:
        tagNames = exposure.tagNames
    except AttributeError:
        tagNames = Node('tagNames', text='')
    tagnames = ~tagNames or []
    tagnames.insert(0, 'taxonomy')

    # read the cost types and make some checks
    cost_types = []
    for ct in conversions.costTypes:
        if not ok_cost_types or ct['name'] in ok_cost_types:
            with context(fname, ct):
                cost_types.append(
                    (ct['name'], valid.cost_type_type(ct['type']), ct['unit']))
    if 'occupants' in ok_cost_types:
        cost_types.append(('occupants', 'per_area', 'people'))
    cost_types.sort(key=operator.itemgetter(0))
    cost_types = numpy.array(cost_types, cost_type_dt)
    insurance_limit_is_absolute = inslimit.get('isAbsolute', True)
    deductible_is_absolute = deductible.get('isAbsolute', True)
    tagi = {name: i for i, name in enumerate(tagnames)}
    cc = asset.CostCalculator({}, {}, {}, deductible_is_absolute,
                              insurance_limit_is_absolute, tagi)
    for ct in cost_types:
        name = ct['name']  # structural, nonstructural, ...
        cc.cost_types[name] = ct['type']  # aggregated, per_asset, per_area
        cc.area_types[name] = area['type']
        cc.units[name] = ct['unit']
    assets = []
    asset_refs = []
    exp = Exposure(exposure['id'],
                   exposure['category'], ~description, cost_types,
                   occupancy_periods.split(), insurance_limit_is_absolute,
                   deductible_is_absolute, area.attrib, assets, asset_refs, cc,
                   asset.TagCollection(tagnames))
    return exp, exposure.assets
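The exposure readers in this collection lean on the ~ operator of the Node
class, which returns the (possibly validated) text content of a leaf node,
and on dict-style attribute access with a default. A minimal sketch,
assuming a recent openquake where the class lives in openquake.baselib.node
(older releases exposed it from openquake.commonlib.node):

from openquake.baselib.node import Node

tags = Node('tagNames', text='state county')
print(~tags)  # the text content of the leaf node
limit = Node('insuranceLimit', text=True)
print(limit.get('isAbsolute', True))  # attribute lookup with a default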
Example n. 32
def write_combined_faults_points(point_sources, fault_sources,
                                 filename, name, area_sources=None,
                                 nrml_version='04'):
    """Write point, area and fault sources to a single source model file
    :param point_sources:
       list (without trt) or dict (keyed by trt) of point sources
    """
    print('Writing to source model file %s' % filename)
    ps_id_index = 1
    fs_id_index = 1
    if nrml_version == '04':
        if isinstance(point_sources, dict):
            source_list = []
            for trt, sources in point_sources.items():
                for source in sources:
                    source.source_id = 'PS_%i' % ps_id_index
                    source_list.append(source)
                    ps_id_index += 1
        elif isinstance(point_sources, list):
            source_list = copy.deepcopy(point_sources)
            for source in source_list:
                source.source_id = 'PS_%i' % ps_id_index
                ps_id_index += 1
        for fault_source in fault_sources:
            fault_source.source_id = "FS_%i" % fs_id_index
            fs_id_index += 1
            source_list.append(fault_source)
        if area_sources is not None:
            for area_source in area_sources:
                source_list.append(area_source)
        nodes = list(map(obj_to_node, sorted(source_list)))
        source_model = Node("sourceModel", {"name": name}, nodes=nodes)
        with open(filename, 'wb') as f:
            nrml.write([source_model], f, '%s', xmlns=NAMESPACE)
    elif nrml_version == '05':
        if isinstance(point_sources, dict):
            source_group_list = []
            group_id = 0
            id_index = 0  # track the highest existing source id
            for trt, sources in point_sources.items():
                for source in sources:
                    # assumes numeric source ids
                    id_index = max(id_index, int(source.source_id))
            for trt, sources in point_sources.items():
                for fault_source in fault_sources:
                    if fault_source.tectonic_region_type == trt:
                        id_index += 1
                        fault_source.source_id = "%i" % id_index
                        sources.append(fault_source)
                if area_sources is not None:
                    for area_source in area_sources:
                        if area_source.tectonic_region_type == trt:
                            sources.append(area_source)
                source_group = SourceGroup(trt, sources=sources, id=group_id)
                group_id += 1
                source_group_list.append(source_group)
            write_source_model(filename, source_group_list, name=name)
        elif isinstance(point_sources, list):
            raise NotImplementedError(
                'Method not yet implemented for NRML version 0.5')
    else:
        print('Warning: NRML version not specified, XML not created')
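A hedged usage sketch for the writer above; the inputs would normally come
from readers such as read_pt_source in the neighbouring examples, so the
variables below are hypothetical placeholders:

pt_srcs = {'Cratonic': cratonic_pts, 'Non_cratonic': non_cratonic_pts}
write_combined_faults_points(pt_srcs, fault_srcs, 'combined_model.xml',
                             'combined model', area_sources=area_srcs,
                             nrml_version='04')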
Example n. 33
def convert_fragility_model_04(node, fname, fmcounter=itertools.count(1)):
    """
    :param node:
        an :class:`openquake.commonlib.node.Node` in NRML 0.4
    :param fname:
        path of the fragility file
    :returns:
        an :class:`openquake.commonlib.node.Node` in NRML 0.5
    """
    convert_type = {"lognormal": "logncdf"}
    new = Node('fragilityModel',
               dict(assetCategory='building',
                    lossCategory='structural',
                    id='fm_%d_converted_from_NRML_04' %
                    next(fmcounter)))
    with context(fname, node):
        fmt = node['format']
        descr = ~node.description
        limit_states = ~node.limitStates
    new.append(Node('description', {}, descr))
    new.append(Node('limitStates', {}, ' '.join(limit_states)))
    for ffs in node[2:]:
        IML = ffs.IML
        # NB: noDamageLimit = None is different from zero
        nodamage = ffs.attrib.get('noDamageLimit')
        ff = Node('fragilityFunction', {'format': fmt})
        ff['id'] = ~ffs.taxonomy
        ff['shape'] = convert_type[ffs.attrib.get('type', 'lognormal')]
        if fmt == 'continuous':
            with context(fname, IML):
                attr = dict(imt=IML['IMT'],
                            minIML=IML['minIML'],
                            maxIML=IML['maxIML'])
                if nodamage is not None:
                    attr['noDamageLimit'] = nodamage
                ff.append(Node('imls', attr))
            for ffc in ffs[2:]:
                with context(fname, ffc):
                    ls = ffc['ls']
                    param = ffc.params
                with context(fname, param):
                    m, s = param['mean'], param['stddev']
                ff.append(Node('params', dict(ls=ls, mean=m, stddev=s)))
        else:  # discrete
            with context(fname, IML):
                imls = ' '.join(map(str, (~IML)[1]))
                attr = dict(imt=IML['IMT'])
            if nodamage is not None:
                attr['noDamageLimit'] = nodamage
            ff.append(Node('imls', attr, imls))
            for ffd in ffs[2:]:
                ls = ffd['ls']
                with context(fname, ffd):
                    poes = ' '.join(map(str, ~ffd.poEs))
                ff.append(Node('poes', dict(ls=ls), poes))
        new.append(ff)
    return new
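A minimal round-trip sketch for the converter, mirroring the way
upgrade_file uses it: read the NRML 0.4 fragility model, convert it and
write it back as NRML 0.5 ('fragility_04.xml' is a hypothetical path):

[fmodel_04] = nrml.read('fragility_04.xml')
fmodel_05 = convert_fragility_model_04(fmodel_04, 'fragility_04.xml')
with open('fragility_05.xml', 'wb') as f:
    nrml.write([fmodel_05], f)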
Example n. 34
def _get_exposure(fname, stop=None):
    """
    :param fname:
        path of the XML file containing the exposure
    :param stop:
        node at which to stop parsing (or None)
    :returns:
        a pair (Exposure instance, list of asset nodes)
    """
    [exposure] = nrml.read(fname, stop=stop)
    if not exposure.tag.endswith('exposureModel'):
        raise InvalidFile('%s: expected exposureModel, got %s' %
                          (fname, exposure.tag))
    description = exposure.description
    try:
        conversions = exposure.conversions
    except AttributeError:
        conversions = Node('conversions', nodes=[Node('costTypes', [])])
    try:
        inslimit = conversions.insuranceLimit
    except AttributeError:
        inslimit = Node('insuranceLimit', text=True)
    try:
        deductible = conversions.deductible
    except AttributeError:
        deductible = Node('deductible', text=True)
    try:
        area = conversions.area
    except AttributeError:
        # NB: the area type cannot be an empty string because when sending
        # around the CostCalculator object we would run into this numpy bug
        # about pickling dictionaries with empty strings:
        # https://github.com/numpy/numpy/pull/5475
        area = Node('area', dict(type='?'))
    try:
        occupancy_periods = exposure.occupancyPeriods.text or ''
    except AttributeError:
        occupancy_periods = ''
    try:
        tagNames = exposure.tagNames
    except AttributeError:
        tagNames = Node('tagNames', text='')
    tagnames = ~tagNames or []
    if set(tagnames) & {'taxonomy', 'exposure', 'country'}:
        raise InvalidFile('taxonomy, exposure and country are reserved '
                          'names; you cannot use them in <tagNames>: %s'
                          % fname)
    tagnames.insert(0, 'taxonomy')

    # read the cost types and make some checks
    cost_types = []
    retrofitted = False
    for ct in conversions.costTypes:
        with context(fname, ct):
            ctname = ct['name']
            if ctname == 'structural' and 'retrofittedType' in ct.attrib:
                if ct['retrofittedType'] != ct['type']:
                    raise ValueError(
                        'The retrofittedType %s is different from the type '
                        '%s' % (ct['retrofittedType'], ct['type']))
                if ct['retrofittedUnit'] != ct['unit']:
                    raise ValueError(
                        'The retrofittedUnit %s is different from the unit '
                        '%s' % (ct['retrofittedUnit'], ct['unit']))
                retrofitted = True
            cost_types.append(
                (ctname, valid.cost_type_type(ct['type']), ct['unit']))
    if 'occupants' in cost_types:
        cost_types.append(('occupants', 'per_area', 'people'))
    cost_types.sort(key=operator.itemgetter(0))
    cost_types = numpy.array(cost_types, cost_type_dt)
    insurance_limit_is_absolute = il = inslimit.get('isAbsolute')
    deductible_is_absolute = de = deductible.get('isAbsolute')
    cc = CostCalculator(
        {}, {}, {},
        True if de is None else de,
        True if il is None else il,
        {name: i for i, name in enumerate(tagnames)},
    )
    for ct in cost_types:
        name = ct['name']  # structural, nonstructural, ...
        cc.cost_types[name] = ct['type']  # aggregated, per_asset, per_area
        cc.area_types[name] = area['type']
        cc.units[name] = ct['unit']
    assets = []
    asset_refs = []
    exp = Exposure(
        exposure['id'], exposure['category'],
        description.text, cost_types, occupancy_periods,
        insurance_limit_is_absolute, deductible_is_absolute, retrofitted,
        area.attrib, assets, asset_refs, cc, TagCollection(tagnames))
    assets_text = exposure.assets.text.strip()
    if assets_text:
        # the <assets> tag contains a list of file names
        dirname = os.path.dirname(fname)
        exp.datafiles = [os.path.join(dirname, f) for f in assets_text.split()]
    else:
        exp.datafiles = []
    return exp, exposure.assets
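A hedged usage sketch; 'exposure_model.xml' is a hypothetical path. When
the <assets> tag contains a list of file names, the assets themselves live
in the CSV files collected in exp.datafiles:

exp, asset_nodes = _get_exposure('exposure_model.xml')
print(exp.cost_types['name'], exp.cost_types['unit'])
print(exp.datafiles or 'assets are inline in the XML')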
Example n. 35
def pt2fault_distance(pt_sources, fault_sources, min_distance=5.0,
                      filename='source_model.xml',
                      buffer_distance=100., nrml_version='04',
                      name=None):
    """Calculate distances from a pt source rupture plane
    to the fault sources to then reduce Mmax on events that are 
    within a certain distance
    :param pt_sources:
        list of PointSource objects
    :param fault_sources:
        List of FaultSource objects
    :param min_distance:
        Minimum distance (km) within which we want a point source 
        rupture to be from a fault.
    :param filename:
        Name of output nrml file for revised pt source model
    :param buffer_distance:
        Initial filter (km): only points within this distance of a
        fault are processed in detail
    :param nrml_version:
        NRML version of the output file ('04' or '05')
    :param name:
        Name of the output source model (defaults to the filename stem)
    """

    if name is None:
        name = filename[:-4] + '_geom_filtered'
    id_index = 0 # We need to re-number all sources to avoid duplicate ids
    # Extract the points of the fault source mesh
    fault_lons = []
    fault_lats = []
    fault_depths = []
    for fault in fault_sources:
        whole_fault_surface = SimpleFaultSurface.from_fault_data(
            fault.fault_trace, fault.upper_seismogenic_depth,
            fault.lower_seismogenic_depth, fault.dip, 
            fault.rupture_mesh_spacing)
        fault_lons.append(whole_fault_surface.mesh.lons.flatten())
        fault_lats.append(whole_fault_surface.mesh.lats.flatten())
        fault_depths.append(whole_fault_surface.mesh.depths.flatten())
    fault_lons = np.concatenate(fault_lons)
    fault_lats = np.concatenate(fault_lats)
    fault_depths = np.concatenate(fault_depths)
    min_fault_lon = np.min(fault_lons)
    max_fault_lon = np.max(fault_lons)
    min_fault_lat = np.min(fault_lats)
    max_fault_lat = np.max(fault_lats)

    # Generate ruptures for point sources
    minimum_distance_list = []
    revised_point_sources = {'Cratonic': [], 'Non_cratonic': [], 
                             'Extended': [], 'Subduction': []}
    print('Looping over point sources')
    for pt in pt_sources:
        # For speeding things up filter based on initial distances
        # to find points very far from or very close to a fault
        mfd_type = type(pt.mfd).__name__
        pt_depths = []
        for probs, depths in pt.hypocenter_distribution.data:
            pt_depths.append(depths)
        np_probs = []
        np_list = []
        for prob, nodal_plane in pt.nodal_plane_distribution.data:
            np_probs.append(prob)
            np_list.append(nodal_plane)
        centroid_distances = []
        for pt_depth in pt_depths:
            centroid_distances.append(distance(
                pt.location.longitude, pt.location.latitude,
                pt_depth, fault_lons, fault_lats, fault_depths))
        centroid_distances = np.array(centroid_distances).flatten()
        if min(centroid_distances) > buffer_distance:
            # Keep the point as is; it is not within the buffer
            # distance of any fault
            revised_point_sources[pt.tectonic_region_type].append(pt)
            continue
        if min(centroid_distances) < min_distance:
            # Discard point source as too close to a fault
            print('Discarding point source, too close to a fault')
            continue
        rupture_mags = []
        rupture_lons = []
        rupture_lats = []
        rupture_depths = []
        rupture_strikes = []
        rupture_dips = []
        ruptures = pt.iter_ruptures()
        for rupture in ruptures:
            rupture_mags.append(rupture.mag)
            rupture_lons.append(rupture.surface.corner_lons)
            rupture_lats.append(rupture.surface.corner_lats)
            rupture_depths.append(rupture.surface.corner_depths)
            rupture_strikes.append(rupture.surface.strike)
            rupture_dips.append(rupture.surface.dip)
        rupture_mags = np.array(rupture_mags).flatten()
        # make the same length as the corners
        rupture_mags = np.repeat(rupture_mags, 4)
        rupture_strikes = np.repeat(rupture_strikes, 4)
        rupture_dips = np.repeat(rupture_dips, 4)
        rupture_lons = np.array(rupture_lons).flatten()
        rupture_lats = np.array(rupture_lats).flatten()
        rupture_depths = np.array(rupture_depths).flatten()
        print('Doing meshgrid')
        lons1,lons2 = np.meshgrid(fault_lons, rupture_lons)
        lats1,lats2 = np.meshgrid(fault_lats, rupture_lats)
        depths1, depths2 = np.meshgrid(fault_depths, rupture_depths)

        # Calculate distances from the point to all fault mesh points
        print('Distance calculations')
        distances = distance(lons1, lats1, depths1, lons2, lats2, depths2)
        closest_distance_to_faults = np.min(distances)
        print('Shortest pt to fault distance is', closest_distance_to_faults)
        minimum_distance_list.append(closest_distance_to_faults)

        # Find where the distance is less than the threshold min_distance
        too_close_lons = lons2[np.where(distances < min_distance)]
        too_close_lats = lats2[np.where(distances < min_distance)]
        if too_close_lons.size > 0:
            lon_indices = np.where(np.in1d(rupture_lons, too_close_lons))[0]
            lat_indices = np.where(np.in1d(rupture_lats, too_close_lats))[0]
            too_close_mags = rupture_mags[np.intersect1d(
                    lon_indices, lat_indices)]
            too_close_strikes = rupture_strikes[np.intersect1d(
                    lon_indices, lat_indices)]
            too_close_dips = rupture_dips[np.intersect1d(
                    lon_indices, lat_indices)]
            unique_strikes = np.unique(rupture_strikes)
            unique_dips = np.unique(rupture_dips)
            src_name_index = 0
            for prob, nodal_plane in pt.nodal_plane_distribution.data:
                id_index += 1
                src_name_index += 1
                # We are now splitting the source into many with different
                # combinations of Mmaxs and nodal planes
                new_pt = copy.deepcopy(pt)
                new_pt.source_id = "%i" % id_index
                new_pt.name = new_pt.name + ("_%i" % src_name_index)
                new_np = NodalPlane(nodal_plane.strike, nodal_plane.dip,
                                    nodal_plane.rake)
                # weight of the nodal plane is 1 as we are making a
                # separate source
                new_np_distribution = PMF([(1.0, new_np)])
                # Calculate new rates based on the probability of the
                # original nodal plane
                new_pt.nodal_plane_distribution = new_np_distribution
                if mfd_type == 'TruncatedGRMFD':
                    b_val = pt.mfd.b_val
                    # rescale the a value in log space
                    a_val = np.log10(np.power(10, pt.mfd.a_val) * prob)
                    new_pt.mfd.modify_set_ab(a_val, b_val)
                elif mfd_type == 'EvenlyDiscretizedMFD':
                    mag_bins, rates = zip(*pt.mfd.get_annual_occurrence_rates())
                    mag_bins = np.array(mag_bins)
                    rates = np.array(rates)
                    new_rates = rates * prob
                    new_pt.mfd.modify_set_mfd(new_pt.mfd.min_mag,
                                              new_pt.mfd.bin_width,
                                              list(new_rates))
                else:
                    raise NotImplementedError(
                        'Weighting method for mfd type %s not yet defined'
                        % mfd_type)
                pair_index = np.where(
                    np.logical_and(too_close_strikes == nodal_plane.strike,
                                   too_close_dips == nodal_plane.dip))
                # Deal with intersecting cases
                if len(pair_index[0]) > 0:
                    intersecting_magnitudes = too_close_mags[pair_index]
                    minimum_magnitude_intersecting_fault = min(
                        intersecting_magnitudes)
                    if minimum_magnitude_intersecting_fault >= \
                            (pt.mfd.min_mag + pt.mfd.bin_width):
                        new_mmax = minimum_magnitude_intersecting_fault - \
                            pt.mfd.bin_width
                        if mfd_type == 'TruncatedGRMFD':
                            new_pt.mfd.max_mag = new_mmax
                        if mfd_type == 'EvenlyDiscretizedMFD':
                            # apply the rates trimmed at the new Mmax
                            trimmed_rates = new_rates[
                                np.where(mag_bins <= new_mmax)]
                            new_pt.mfd.modify_set_mfd(
                                new_pt.mfd.min_mag, new_pt.mfd.bin_width,
                                list(trimmed_rates))
                    else:
                        print('Minimum magnitude intersects fault, '
                              'discarding source')
                        continue
                # Append the revised source for this nodal plane
                # distribution to the list of revised sources
                print('Appending revised source')
                revised_point_sources[pt.tectonic_region_type].append(new_pt)
        else:
            id_index += 1
            pt.source_id = "%i" % id_index
            print('Appending original source')
            revised_point_sources[pt.tectonic_region_type].append(pt)
    if len(minimum_distance_list) > 0:
        print('Overall minimum distance (km):', min(minimum_distance_list))

    # Write the points to a source model on their own
    source_model_file = filename
    print('Writing to source model file %s' % source_model_file)
    if nrml_version == '04':
        source_list = []
        for trt, sources in revised_point_sources.items():
            for source in sources:
                source_list.append(source)
        nodes = list(map(obj_to_node, sorted(source_list)))
        source_model = Node("sourceModel", {"name": name}, nodes=nodes)
        with open(source_model_file, 'wb') as f:
            nrml.write([source_model], f, '%s', xmlns=NAMESPACE)
    elif nrml_version == '05':
        source_group_list = []
        group_id = 0
        for trt, sources in revised_point_sources.items():
            source_group = SourceGroup(trt, sources=sources, id=group_id)
            group_id += 1
            source_group_list.append(source_group)
        write_source_model(source_model_file, source_group_list, name=name)
    else:
        print('Warning: NRML version not specified, XML not created')

    # Write the points to a source model together with the faults
    source_model_file = filename[:-4] + '_inc_faults.xml'
    name = name + '_inc_faults'
    write_combined_faults_points(revised_point_sources, fault_sources,
                                 source_model_file, name,
                                 nrml_version=nrml_version)
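A hedged usage sketch; read_pt_source appears in the neighbouring examples,
while read_simplefault_source is a hypothetical reader for the fault model:

pt_srcs = read_pt_source('point_sources.xml')
flt_srcs = read_simplefault_source('fault_sources.xml')  # hypothetical
pt2fault_distance(pt_srcs, flt_srcs, min_distance=5.0,
                  buffer_distance=100.,
                  filename='source_model_filtered.xml',
                  name='pts_filtered_by_faults')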
Example n. 36
def build_trunc_gr_from_shp(record):
    """Build a truncGutenbergRichterMFD node from a shapefile record"""
    attribs = {"aValue": float(record["a_val"]),
               "bValue": float(record["b_val"]),
               "minMag": float(record["min_mag"]),
               "maxMag": float(record["max_mag"])}
    return Node("truncGutenbergRichterMFD", attribs)
Example n. 37
def write_source_model(dest, sources_or_groups, name=None,
                       investigation_time=None):
    """
    Writes a source model to XML.

    :param dest:
        Destination path
    :param sources_or_groups:
        Source model in different formats
    :param name:
        Name of the source model (if missing, extracted from the filename)
    :param investigation_time:
        Investigation time, written as an attribute if given
    :returns:
        the list of generated filenames
    """
    if isinstance(sources_or_groups, nrml.SourceModel):
        groups = sources_or_groups.src_groups
        attrs = dict(name=sources_or_groups.name,
                     investigation_time=sources_or_groups.investigation_time)
    elif isinstance(sources_or_groups[0], sourceconverter.SourceGroup):
        groups = sources_or_groups
        attrs = dict(investigation_time=investigation_time)
    else:  # passed a list of sources
        srcs_by_trt = groupby(
            sources_or_groups, operator.attrgetter('tectonic_region_type'))
        groups = [sourceconverter.SourceGroup(trt, srcs_by_trt[trt])
                  for trt in srcs_by_trt]
        attrs = dict(investigation_time=investigation_time)
    if name or 'name' not in attrs:
        attrs['name'] = name or os.path.splitext(os.path.basename(dest))[0]
    if attrs['investigation_time'] is None:
        del attrs['investigation_time']
    nodes = list(map(obj_to_node, groups))
    ddict = extract_ddict(groups)
    out = [dest]
    if ddict:
        # remove duplicate content from nodes
        for grp_node in nodes:
            for src_node in grp_node:
                if src_node["id"] in ddict:
                    src_node.nodes = []
        # save HDF5 file
        dest5 = os.path.splitext(dest)[0] + '.hdf5'
        with hdf5.File(dest5, 'w') as h:
            for src_id, dic in ddict.items():
                for k, v in dic.items():
                    key = '%s/%s' % (src_id, k)
                    if isinstance(v, numpy.ndarray):
                        h.create_dataset(key, v.shape, v.dtype,
                                         compression='gzip',
                                         compression_opts=9)
                        h[key][:] = v
                    else:
                        h[key] = v
        out.append(dest5)

    # produce a geometryModel if there are MultiFaultSources
    sections = {}
    for group in groups:
        for src in group:
            if hasattr(src, 'sections'):
                sections.update(src.sections)
    sections = {sid: sections[sid] for sid in sorted(sections)}  # stable order
    smodel = Node("sourceModel", attrs, nodes=nodes)
    with open(dest, 'wb') as f:
        nrml.write([smodel], f, '%s')
    if sections:
        secnodes = [obj_to_node(sec) for sec in sections.values()]
        gmodel = Node("geometryModel", attrs, nodes=secnodes)
        with open(dest[:-4] + '_sections.xml', 'wb') as f:
            nrml.write([gmodel], f, '%s')
            out.append(f.name)
    return out
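A hedged usage sketch: a flat list of sources is grouped by tectonic region
type before writing; srcs is assumed to be any list of hazardlib sources,
for instance the output of read_pt_source above:

srcs = read_pt_source('point_sources.xml')  # hypothetical input file
out = write_source_model('model.xml', srcs, name='demo model',
                         investigation_time=50.)
print(out)  # ['model.xml'], plus 'model.hdf5' when sources carry extra data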