def get_vulnerability_functions(fname):
    """
    :param fname: path of the vulnerability file
    :returns: a dictionary imt, taxonomy -> vulnerability function
    """
    # NB: the vulnerabilitySetID is not a unique ID!
    # it is right to have several vulnerability sets with the same ID
    # the IMTs can also be duplicated and with different levels, each
    # vulnerability function in a set will get its own levels
    imts = set()
    taxonomies = set()
    vf_dict = {}  # imt, taxonomy -> vulnerability function
    node = nrml.read(fname)
    if node['xmlns'] == 'http://openquake.org/xmlns/nrml/0.5':
        vmodel = node[0]
        for vfun in vmodel[1:]:  # the first node is the description
            imt = vfun.imls['imt']
            imls = numpy.array(~vfun.imls)
            taxonomy = vfun['id']
            loss_ratios, probs = [], []
            for probabilities in vfun[1:]:
                loss_ratios.append(probabilities['lr'])
                probs.append(valid.probabilities(~probabilities))
            probs = numpy.array(probs)
            assert probs.shape == (len(loss_ratios), len(imls))
            vf_dict[imt, taxonomy] = scientific.VulnerabilityFunctionWithPMF(
                taxonomy, imt, imls, numpy.array(loss_ratios), probs)
        return vf_dict
    # otherwise, read the old format (NRML 0.4)
    for vset in read_nodes(fname, filter_vset,
                           nodefactory['vulnerabilityModel']):
        imt_str, imls, min_iml, max_iml, imlUnit = ~vset.IML
        imts.add(imt_str)
        for vfun in vset.getnodes('discreteVulnerability'):
            taxonomy = vfun['vulnerabilityFunctionID']
            if taxonomy in taxonomies:
                raise InvalidFile(
                    'Duplicated vulnerabilityFunctionID: %s: %s, line %d' %
                    (taxonomy, fname, vfun.lineno))
            taxonomies.add(taxonomy)
            with context(fname, vfun):
                loss_ratios = ~vfun.lossRatio
                coefficients = ~vfun.coefficientsVariation
            if len(loss_ratios) != len(imls):
                raise InvalidFile(
                    'There are %d loss ratios, but %d imls: %s, line %d' %
                    (len(loss_ratios), len(imls), fname,
                     vfun.lossRatio.lineno))
            if len(coefficients) != len(imls):
                raise InvalidFile(
                    'There are %d coefficients, but %d imls: %s, line %d' %
                    (len(coefficients), len(imls), fname,
                     vfun.coefficientsVariation.lineno))
            with context(fname, vfun):
                vf_dict[imt_str, taxonomy] = scientific.VulnerabilityFunction(
                    taxonomy, imt_str, imls, loss_ratios, coefficients,
                    vfun['probabilisticDistribution'])
    return vf_dict
def get_consequence_model(node, fname):
    """
    :param node: a consequenceModel node
    :param fname: path of the file containing the consequence model
    :returns: a :class:`scientific.ConsequenceModel` instance
    """
    with context(fname, node):
        description = ~node.description  # make sure it is there
        limitStates = ~node.limitStates  # make sure it is there
        # ASK: is the 'id' mandatory?
        node['assetCategory']  # make sure it is there
        node['lossCategory']  # make sure it is there
        cfs = node[2:]
    functions = {}
    for cf in cfs:
        with context(fname, cf):
            params = []
            if len(limitStates) != len(cf):
                raise ValueError(
                    'Expected %d limit states, got %d' %
                    (len(limitStates), len(cf)))
            for ls, param in zip(limitStates, cf):
                with context(fname, param):
                    if param['ls'] != ls:
                        raise ValueError('Expected %r, got %r' %
                                         (ls, param['ls']))
                    params.append((param['mean'], param['stddev']))
            functions[cf['id']] = scientific.ConsequenceFunction(
                cf['id'], cf['dist'], params)
    attrs = node.attrib.copy()
    attrs.update(description=description, limitStates=limitStates)
    cmodel = scientific.ConsequenceModel(**attrs)
    cmodel.update(functions)
    return cmodel
def get_vulnerability_functions_05(node, fname):
    """
    :param node: a vulnerabilityModel node
    :param fname: path of the vulnerability file
    :returns: a dictionary imt, taxonomy -> vulnerability function
    """
    # NB: the IMTs can be duplicated and with different levels, each
    # vulnerability function in a set will get its own levels
    taxonomies = set()
    vmodel = scientific.VulnerabilityModel(**node.attrib)
    # imt, taxonomy -> vulnerability function
    for vfun in node.getnodes('vulnerabilityFunction'):
        with context(fname, vfun):
            imt = vfun.imls['imt']
            imls = numpy.array(~vfun.imls)
            taxonomy = vfun['id']
        if taxonomy in taxonomies:
            raise InvalidFile(
                'Duplicated vulnerabilityFunctionID: %s: %s, line %d' %
                (taxonomy, fname, vfun.lineno))
        taxonomies.add(taxonomy)
        if vfun['dist'] == 'PM':
            loss_ratios, probs = [], []
            for probabilities in vfun[1:]:
                loss_ratios.append(probabilities['lr'])
                probs.append(valid.probabilities(~probabilities))
            probs = numpy.array(probs)
            assert probs.shape == (len(loss_ratios), len(imls))
            vmodel[imt, taxonomy] = (
                scientific.VulnerabilityFunctionWithPMF(
                    taxonomy, imt, imls, numpy.array(loss_ratios), probs))
            # the seed will be set by readinput.get_risk_model
        else:
            with context(fname, vfun):
                loss_ratios = ~vfun.meanLRs
                coefficients = ~vfun.covLRs
            if len(loss_ratios) != len(imls):
                raise InvalidFile(
                    'There are %d loss ratios, but %d imls: %s, line %d' %
                    (len(loss_ratios), len(imls), fname,
                     vfun.meanLRs.lineno))
            if len(coefficients) != len(imls):
                raise InvalidFile(
                    'There are %d coefficients, but %d imls: %s, '
                    'line %d' % (len(coefficients), len(imls), fname,
                                 vfun.covLRs.lineno))
            with context(fname, vfun):
                vmodel[imt, taxonomy] = scientific.VulnerabilityFunction(
                    taxonomy, imt, imls, loss_ratios, coefficients,
                    vfun['dist'])
    return vmodel
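
# Usage sketch (not part of the original module): parsing a NRML 0.5
# vulnerability file and building the VulnerabilityModel. The file name and
# the single-top-level-node assumption are illustrative.
def _example_read_vmodel_05(fname='vulnerability_model.xml'):
    [vmodel_node] = nrml.read(fname)  # the vulnerabilityModel node
    return get_vulnerability_functions_05(vmodel_node, fname)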
def ffconvert(fname, limit_states, ff):
    """
    Convert a fragility function into a numpy array plus a bunch of
    attributes.

    :param fname: path to the fragility model file
    :param limit_states: expected limit states
    :param ff: fragility function node
    :returns: a pair (array, dictionary)
    """
    with context(fname, ff):
        ffs = ff[1:]
        imls = ff.imls
    with context(fname, imls):
        attrs = dict(format=ff['format'],
                     imt=imls['imt'],
                     nodamage=imls.attrib.get('noDamageLimit'))

    LS = len(limit_states)
    if LS != len(ffs):
        with context(fname, ff):
            raise InvalidFile('expected %d limit states, found %d' %
                              (LS, len(ffs)))
    if ff['format'] == 'continuous':
        attrs['minIML'] = float(imls['minIML'])
        attrs['maxIML'] = float(imls['maxIML'])
        array = numpy.zeros(LS, [('mean', F64), ('stddev', F64)])
        for i, ls, node in zip(range(LS), limit_states, ff[1:]):
            if ls != node['ls']:
                with context(fname, node):
                    raise InvalidFile('expected %s, found %s' %
                                      (ls, node['ls']))
            array['mean'][i] = node['mean']
            array['stddev'][i] = node['stddev']
    elif ff['format'] == 'discrete':
        attrs['imls'] = valid.positivefloats(~imls)
        valid.check_levels(attrs['imls'], attrs['imt'])
        num_poes = len(attrs['imls'])
        array = numpy.zeros((LS, num_poes))
        for i, ls, node in zip(range(LS), limit_states, ff[1:]):
            with context(fname, node):
                if ls != node['ls']:
                    raise InvalidFile('expected %s, found %s' %
                                      (ls, node['ls']))
                poes = (~node if isinstance(~node, list)
                        else valid.probabilities(~node))
                if len(poes) != num_poes:
                    raise InvalidFile('expected %s poes, found %s' %
                                      (num_poes, len(poes)))
                array[i, :] = poes
    # NB: the format is constrained in nrml.FragilityNode to be either
    # discrete or continuous, there is no third option
    return array, attrs
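
# Usage sketch (assumption, not in the original module): converting every
# fragilityFunction node of an already parsed NRML 0.5 fragility model,
# mirroring what get_fragility_model does; `fmodel_node` is a placeholder.
def _example_ffconvert(fname, fmodel_node):
    limit_states = ~fmodel_node.limitStates
    # the first two children are description and limitStates
    return [ffconvert(fname, limit_states, ff) for ff in fmodel_node[2:]]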
def get_vulnerability_functions_04(fname):
    """
    Parse the vulnerability model in NRML 0.4 format.

    :param fname:
        path of the vulnerability file
    :returns:
        a pair (dictionary imt, taxonomy -> vulnerability function,
        dictionary of categories)
    """
    categories = dict(assetCategory=set(), lossCategory=set(),
                      vulnerabilitySetID=set())
    imts = set()
    taxonomies = set()
    vf_dict = {}  # imt, taxonomy -> vulnerability function
    for vset in read_nodes(fname, filter_vset,
                           nodefactory['vulnerabilityModel']):
        categories['assetCategory'].add(vset['assetCategory'])
        categories['lossCategory'].add(vset['lossCategory'])
        categories['vulnerabilitySetID'].add(vset['vulnerabilitySetID'])
        imt_str, imls, min_iml, max_iml, imlUnit = ~vset.IML
        imts.add(imt_str)
        for vfun in vset.getnodes('discreteVulnerability'):
            taxonomy = vfun['vulnerabilityFunctionID']
            if taxonomy in taxonomies:
                raise InvalidFile(
                    'Duplicated vulnerabilityFunctionID: %s: %s, line %d' %
                    (taxonomy, fname, vfun.lineno))
            taxonomies.add(taxonomy)
            with context(fname, vfun):
                loss_ratios = ~vfun.lossRatio
                coefficients = ~vfun.coefficientsVariation
            if len(loss_ratios) != len(imls):
                raise InvalidFile(
                    'There are %d loss ratios, but %d imls: %s, line %d' %
                    (len(loss_ratios), len(imls), fname,
                     vfun.lossRatio.lineno))
            if len(coefficients) != len(imls):
                raise InvalidFile(
                    'There are %d coefficients, but %d imls: %s, line %d' %
                    (len(coefficients), len(imls), fname,
                     vfun.coefficientsVariation.lineno))
            with context(fname, vfun):
                vf_dict[imt_str, taxonomy] = scientific.VulnerabilityFunction(
                    taxonomy, imt_str, imls, loss_ratios, coefficients,
                    vfun['probabilisticDistribution'])
    categories['id'] = '_'.join(sorted(categories['vulnerabilitySetID']))
    del categories['vulnerabilitySetID']
    return vf_dict, categories
def get_fragility_functions(fname, continuous_fragility_discretization):
    """
    :param fname:
        path of the fragility file
    :param continuous_fragility_discretization:
        number of points used to discretize continuous fragility functions
    :returns:
        damage_states list and dictionary taxonomy -> functions
    """
    [fmodel] = read_nodes(
        fname, lambda el: el.tag.endswith('fragilityModel'),
        nodefactory['fragilityModel'])
    # ~fmodel.description is ignored
    limit_states = ~fmodel.limitStates
    tag = 'ffc' if fmodel['format'] == 'continuous' else 'ffd'
    fragility_functions = AccumDict()  # taxonomy -> functions
    for ffs in fmodel.getnodes('ffs'):
        nodamage = ffs.attrib.get('noDamageLimit')
        taxonomy = ~ffs.taxonomy
        imt_str, imls, min_iml, max_iml, imlUnit = ~ffs.IML
        if continuous_fragility_discretization and not imls:
            imls = numpy.linspace(min_iml, max_iml,
                                  continuous_fragility_discretization + 1)
        fragility_functions[taxonomy] = FragilityFunctionList(
            [], imt=imt_str, imls=imls)
        lstates = []
        for ff in ffs.getnodes(tag):
            ls = ff['ls']  # limit state
            lstates.append(ls)
            if tag == 'ffc':
                with context(fname, ff):
                    mean_stddev = ~ff.params
                fragility_functions[taxonomy].append(
                    scientific.FragilityFunctionContinuous(
                        ls, *mean_stddev))
            else:  # discrete
                with context(fname, ff):
                    poes = ~ff.poEs
                if nodamage is None:
                    fragility_functions[taxonomy].append(
                        scientific.FragilityFunctionDiscrete(
                            ls, imls, poes, imls[0]))
                else:
                    fragility_functions[taxonomy].append(
                        scientific.FragilityFunctionDiscrete(
                            ls, [nodamage] + imls, [0.0] + poes, nodamage))
        if lstates != limit_states:
            raise InvalidFile("Expected limit states %s, got %s in %s" %
                              (limit_states, lstates, fname))

    fragility_functions.damage_states = ['no_damage'] + limit_states
    return fragility_functions
def get_vulnerability_functions_04(fname):
    """
    Parse the vulnerability model in NRML 0.4 format.

    :param fname:
        path of the vulnerability file
    :returns:
        a pair (dictionary imt, taxonomy -> vulnerability function,
        dictionary of categories)
    """
    categories = dict(assetCategory=set(), lossCategory=set(),
                      vulnerabilitySetID=set())
    imts = set()
    taxonomies = set()
    vf_dict = {}  # imt, taxonomy -> vulnerability function
    for vset in nrml.read(fname).vulnerabilityModel:
        categories['assetCategory'].add(vset['assetCategory'])
        categories['lossCategory'].add(vset['lossCategory'])
        categories['vulnerabilitySetID'].add(vset['vulnerabilitySetID'])
        IML = vset.IML
        imt_str = IML['IMT']
        imls = ~IML
        imts.add(imt_str)
        for vfun in vset.getnodes('discreteVulnerability'):
            taxonomy = vfun['vulnerabilityFunctionID']
            if taxonomy in taxonomies:
                raise InvalidFile(
                    'Duplicated vulnerabilityFunctionID: %s: %s, line %d' %
                    (taxonomy, fname, vfun.lineno))
            taxonomies.add(taxonomy)
            with context(fname, vfun):
                loss_ratios = ~vfun.lossRatio
                coefficients = ~vfun.coefficientsVariation
            if len(loss_ratios) != len(imls):
                raise InvalidFile(
                    'There are %d loss ratios, but %d imls: %s, line %d' %
                    (len(loss_ratios), len(imls), fname,
                     vfun.lossRatio.lineno))
            if len(coefficients) != len(imls):
                raise InvalidFile(
                    'There are %d coefficients, but %d imls: %s, line %d' %
                    (len(coefficients), len(imls), fname,
                     vfun.coefficientsVariation.lineno))
            with context(fname, vfun):
                vf_dict[imt_str, taxonomy] = scientific.VulnerabilityFunction(
                    taxonomy, imt_str, imls, loss_ratios, coefficients,
                    vfun['probabilisticDistribution'])
    categories['id'] = '_'.join(sorted(categories['vulnerabilitySetID']))
    del categories['vulnerabilitySetID']
    return vf_dict, categories
def convert_mfdist(self, node):
    """
    Convert the given node into a Magnitude-Frequency Distribution object.

    :param node: a node of kind incrementalMFD or truncGutenbergRichterMFD
    :returns: a :class:`openquake.hazardlib.mfd.EvenlyDiscretizedMFD` or
              :class:`openquake.hazardlib.mfd.TruncatedGRMFD` instance
    """
    with context(self.fname, node):
        [mfd_node] = [
            subnode for subnode in node
            if subnode.tag.endswith(
                ("incrementalMFD", "truncGutenbergRichterMFD"))]
        if mfd_node.tag.endswith("incrementalMFD"):
            return mfd.EvenlyDiscretizedMFD(
                min_mag=mfd_node["minMag"],
                bin_width=mfd_node["binWidth"],
                occurrence_rates=~mfd_node.occurRates)
        elif mfd_node.tag.endswith("truncGutenbergRichterMFD"):
            return mfd.TruncatedGRMFD(
                a_val=mfd_node["aValue"],
                b_val=mfd_node["bValue"],
                min_mag=mfd_node["minMag"],
                max_mag=mfd_node["maxMag"],
                bin_width=self.width_of_mfd_bin)
def _get_exposure(fname, ok_cost_types, stop=None):
    """
    :param fname:
        path of the XML file containing the exposure
    :param ok_cost_types:
        a set of cost types (as strings)
    :param stop:
        node at which to stop parsing (or None)
    :returns:
        a triple (Exposure instance, list of asset nodes, CostCalculator)
    """
    [exposure] = nrml.read(fname, stop=stop)
    description = exposure.description
    try:
        conversions = exposure.conversions
    except NameError:
        conversions = Node('conversions', nodes=[Node('costTypes', [])])
    try:
        inslimit = conversions.insuranceLimit
    except NameError:
        inslimit = Node('insuranceLimit', text=True)
    try:
        deductible = conversions.deductible
    except NameError:
        deductible = Node('deductible', text=True)
    try:
        area = conversions.area
    except NameError:
        # NB: the area type cannot be an empty string because when sending
        # around the CostCalculator object we would run into this numpy bug
        # about pickling dictionaries with empty strings:
        # https://github.com/numpy/numpy/pull/5475
        area = Node('area', dict(type='?'))

    # read the cost types and make some checks
    cost_types = []
    for ct in conversions.costTypes:
        if ct['name'] in ok_cost_types:
            with context(fname, ct):
                cost_types.append(
                    (ct['name'], valid.cost_type_type(ct['type']),
                     ct['unit']))
    if 'occupants' in ok_cost_types:
        cost_types.append(('occupants', 'per_area', 'people'))
    cost_types.sort(key=operator.itemgetter(0))
    time_events = set()
    exp = Exposure(
        exposure['id'], exposure['category'], ~description,
        numpy.array(cost_types, cost_type_dt), time_events,
        inslimit.attrib.get('isAbsolute', True),
        deductible.attrib.get('isAbsolute', True),
        area.attrib, [], set(), [])
    cc = riskmodels.CostCalculator(
        {}, {}, {}, exp.deductible_is_absolute,
        exp.insurance_limit_is_absolute)
    for ct in exp.cost_types:
        name = ct['name']  # structural, nonstructural, ...
        cc.cost_types[name] = ct['type']  # aggregated, per_asset, per_area
        cc.area_types[name] = exp.area['type']
        cc.units[name] = ct['unit']
    return exp, exposure.assets, cc
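
# Usage sketch (assumption, not in the original module): reading an exposure
# with a single cost type; 'exposure_model.xml' and the cost type set are
# placeholders.
def _example_get_exposure(fname='exposure_model.xml'):
    exp, asset_nodes, cc = _get_exposure(fname, {'structural'})
    return exp, list(asset_nodes), cc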
def convert_complexFaultSource(self, node):
    """
    Convert the given node into a complex fault object.

    :param node: a node with tag complexFaultSource
    :returns: a :class:`openquake.hazardlib.source.ComplexFaultSource`
              instance
    """
    geom = node.complexFaultGeometry
    edges = self.geo_lines(geom)
    mfd = self.convert_mfdist(node)
    msr = valid.SCALEREL[~node.magScaleRel]()
    with context(self.fname, node):
        cmplx = source.ComplexFaultSource(
            source_id=node['id'],
            name=node['name'],
            tectonic_region_type=node['tectonicRegion'],
            mfd=mfd,
            rupture_mesh_spacing=self.complex_fault_mesh_spacing,
            magnitude_scaling_relationship=msr,
            rupture_aspect_ratio=~node.ruptAspectRatio,
            edges=edges,
            rake=~node.rake,
            temporal_occurrence_model=self.tom)
    return cmplx
def convert_mfdist(self, node):
    """
    Convert the given node into a Magnitude-Frequency Distribution object.

    :param node: a node of kind incrementalMFD or truncGutenbergRichterMFD
    :returns: a :class:`openquake.hazardlib.mfd.EvenlyDiscretizedMFD` or
              :class:`openquake.hazardlib.mfd.TruncatedGRMFD` instance
    """
    with context(self.fname, node):
        [mfd_node] = [
            subnode for subnode in node
            if subnode.tag.endswith(
                ('incrementalMFD', 'truncGutenbergRichterMFD'))]
        if mfd_node.tag.endswith('incrementalMFD'):
            return mfd.EvenlyDiscretizedMFD(
                min_mag=mfd_node['minMag'],
                bin_width=mfd_node['binWidth'],
                occurrence_rates=~mfd_node.occurRates)
        elif mfd_node.tag.endswith('truncGutenbergRichterMFD'):
            return mfd.TruncatedGRMFD(a_val=mfd_node['aValue'],
                                      b_val=mfd_node['bValue'],
                                      min_mag=mfd_node['minMag'],
                                      max_mag=mfd_node['maxMag'],
                                      bin_width=self.width_of_mfd_bin)
def get_fragility_model(node, fname):
    """
    :param node: a fragilityModel node
    :param fname: path to the fragility file
    :returns: a dictionary imt, taxonomy -> fragility function list
    """
    with context(fname, node):
        fid = node['id']
        asset_category = node['assetCategory']
        loss_type = node['lossCategory']
        description = ~node.description
        limit_states = ~node.limitStates
        ffs = node[2:]
    fmodel = scientific.FragilityModel(
        fid, asset_category, loss_type, description, limit_states)
    for ff in ffs:
        imt_taxo = ff.imls['imt'], ff['id']
        array, attrs = ffconvert(fname, limit_states, ff)
        ffl = scientific.FragilityFunctionList(array)
        vars(ffl).update(attrs)
        fmodel[imt_taxo] = ffl
    return fmodel
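
# Usage sketch (assumption, not in the original module): parsing a NRML 0.5
# fragility file into a FragilityModel; assumes the file contains a single
# fragilityModel node and the path is a placeholder.
def _example_get_fragility_model(fname='fragility_model.xml'):
    [fmodel_node] = nrml.read(fname)
    return get_fragility_model(fmodel_node, fname)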
def get_vulnerability_functions_04(node, fname):
    """
    :param node: a vulnerabilityModel node
    :param fname: path to the vulnerability file
    :returns: a dictionary imt, taxonomy -> vulnerability function
    """
    logging.warn('Please upgrade %s to NRML 0.5', fname)
    # NB: the IMTs can be duplicated and with different levels, each
    # vulnerability function in a set will get its own levels
    imts = set()
    taxonomies = set()
    # imt, taxonomy -> vulnerability function
    vmodel = scientific.VulnerabilityModel(**node.attrib)
    for vset in node:
        imt_str = vset.IML['IMT']
        imls = ~vset.IML
        imts.add(imt_str)
        for vfun in vset.getnodes('discreteVulnerability'):
            taxonomy = vfun['vulnerabilityFunctionID']
            if taxonomy in taxonomies:
                raise InvalidFile(
                    'Duplicated vulnerabilityFunctionID: %s: %s, line %d' %
                    (taxonomy, fname, vfun.lineno))
            taxonomies.add(taxonomy)
            with context(fname, vfun):
                loss_ratios = ~vfun.lossRatio
                coefficients = ~vfun.coefficientsVariation
            if len(loss_ratios) != len(imls):
                raise InvalidFile(
                    'There are %d loss ratios, but %d imls: %s, line %d' %
                    (len(loss_ratios), len(imls), fname,
                     vfun.lossRatio.lineno))
            if len(coefficients) != len(imls):
                raise InvalidFile(
                    'There are %d coefficients, but %d imls: %s, line %d' %
                    (len(coefficients), len(imls), fname,
                     vfun.coefficientsVariation.lineno))
            with context(fname, vfun):
                vmodel[imt_str, taxonomy] = scientific.VulnerabilityFunction(
                    taxonomy, imt_str, imls, loss_ratios, coefficients,
                    vfun['probabilisticDistribution'])
    return vmodel
def get_source_model_05(node, fname, converter):
    """
    :param node: a sourceModel node in NRML 0.5 format
    :param fname: path of the source model file
    :param converter: a source converter instance
    :returns: a sorted list of source groups
    """
    converter.fname = fname
    groups = []  # expect a sequence of sourceGroup nodes
    for src_group in node:
        with context(fname, src_group):
            if 'sourceGroup' not in src_group.tag:
                raise ValueError('expected sourceGroup')
            groups.append(converter.convert_node(src_group))
    return sorted(groups)
def get_vulnerability_functions_04(node, fname):
    """
    :param node: a vulnerabilityModel node
    :param fname: path to the vulnerability file
    :returns: a dictionary imt, taxonomy -> vulnerability function
    """
    logging.warn('Please upgrade %s to NRML 0.5', fname)
    # NB: the IMTs can be duplicated and with different levels, each
    # vulnerability function in a set will get its own levels
    imts = set()
    taxonomies = set()
    # imt, taxonomy -> vulnerability function
    vmodel = scientific.VulnerabilityModel(**node.attrib)
    for vset in node:
        imt_str, imls, min_iml, max_iml, imlUnit = ~vset.IML
        imts.add(imt_str)
        for vfun in vset.getnodes('discreteVulnerability'):
            taxonomy = vfun['vulnerabilityFunctionID']
            if taxonomy in taxonomies:
                raise InvalidFile(
                    'Duplicated vulnerabilityFunctionID: %s: %s, line %d' %
                    (taxonomy, fname, vfun.lineno))
            taxonomies.add(taxonomy)
            with context(fname, vfun):
                loss_ratios = ~vfun.lossRatio
                coefficients = ~vfun.coefficientsVariation
            if len(loss_ratios) != len(imls):
                raise InvalidFile(
                    'There are %d loss ratios, but %d imls: %s, line %d' %
                    (len(loss_ratios), len(imls), fname,
                     vfun.lossRatio.lineno))
            if len(coefficients) != len(imls):
                raise InvalidFile(
                    'There are %d coefficients, but %d imls: %s, line %d' %
                    (len(coefficients), len(imls), fname,
                     vfun.coefficientsVariation.lineno))
            with context(fname, vfun):
                vmodel[imt_str, taxonomy] = scientific.VulnerabilityFunction(
                    taxonomy, imt_str, imls, loss_ratios, coefficients,
                    vfun['probabilisticDistribution'])
    return vmodel
def geo_line(self, edge):
    """
    Utility function to convert a node of kind edge into a
    :class:`openquake.hazardlib.geo.Line` instance.

    :param edge: a node describing an edge
    """
    with context(self.fname, edge.LineString.posList) as plist:
        coords = split_coords_2d(~plist)
        return geo.Line([geo.Point(*p) for p in coords])
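
# Minimal sketch (assumption, not in the original module): split_coords_2d
# turns a flat posList such as [0.0, 0.0, 1.0, 1.0] into (lon, lat) pairs,
# from which a two-point Line is built. Values here are illustrative.
def _example_geo_line():
    coords = split_coords_2d([0.0, 0.0, 1.0, 1.0])
    return geo.Line([geo.Point(*p) for p in coords])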
def convert_node(self, node):
    """
    Convert the given node into a hazardlib source, depending on the
    node tag.

    :param node: a node representing a source
    """
    with context(self.fname, node):
        convert_source = getattr(self, 'convert_' + striptag(node.tag))
        return convert_source(node)
def convert_hpdist(self, node):
    """
    Convert the given node into a probability mass function for the
    hypo depth distribution.

    :param node: a node with a hypoDepthDist subnode
    :returns: a :class:`openquake.hazardlib.pmf.PMF` instance
    """
    with context(self.fname, node):
        return pmf.PMF([~hd for hd in node.hypoDepthDist])
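
# Minimal sketch (assumption, not in the original module): the PMF built by
# convert_hpdist pairs each probability with a hypocentral depth; here two
# equally likely depths of 5 and 10 km, with probabilities summing to 1.
_example_hpdist = pmf.PMF([(0.5, 5.0), (0.5, 10.0)])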
def get_vulnerability_functions(fname):
    """
    :param fname: path of the vulnerability file
    :returns: a dictionary imt, taxonomy -> vulnerability function
    """
    # NB: the vulnerabilitySetID is not a unique ID!
    # it is right to have several vulnerability sets with the same ID
    # the IMTs can also be duplicated and with different levels, each
    # vulnerability function in a set will get its own levels
    imts = set()
    taxonomies = set()
    vf_dict = {}  # imt, taxonomy -> vulnerability function
    for vset in read_nodes(fname, filter_vset,
                           nodefactory['vulnerabilityModel']):
        imt_str, imls, min_iml, max_iml, imlUnit = ~vset.IML
        imts.add(imt_str)
        for vfun in vset.getnodes('discreteVulnerability'):
            taxonomy = vfun['vulnerabilityFunctionID']
            if taxonomy in taxonomies:
                raise InvalidFile(
                    'Duplicated vulnerabilityFunctionID: %s: %s, line %d' %
                    (taxonomy, fname, vfun.lineno))
            taxonomies.add(taxonomy)
            with context(fname, vfun):
                loss_ratios = ~vfun.lossRatio
                coefficients = ~vfun.coefficientsVariation
            if len(loss_ratios) != len(imls):
                raise InvalidFile(
                    'There are %d loss ratios, but %d imls: %s, line %d' %
                    (len(loss_ratios), len(imls), fname,
                     vfun.lossRatio.lineno))
            if len(coefficients) != len(imls):
                raise InvalidFile(
                    'There are %d coefficients, but %d imls: %s, line %d' %
                    (len(coefficients), len(imls), fname,
                     vfun.coefficientsVariation.lineno))
            with context(fname, vfun):
                vf_dict[imt_str, taxonomy] = scientific.VulnerabilityFunction(
                    imt_str, imls, loss_ratios, coefficients,
                    vfun['probabilisticDistribution'])
    return vf_dict
def get_exposure_lazy(fname, ok_cost_types):
    """
    :param fname:
        path of the XML file containing the exposure
    :param ok_cost_types:
        a set of cost types (as strings)
    :returns:
        a triple (Exposure instance, list of asset nodes, CostCalculator)
    """
    [exposure] = nrml.read_lazy(fname, ['assets'])
    description = exposure.description
    try:
        conversions = exposure.conversions
    except NameError:
        conversions = LiteralNode('conversions',
                                  nodes=[LiteralNode('costTypes', [])])
    try:
        inslimit = conversions.insuranceLimit
    except NameError:
        inslimit = LiteralNode('insuranceLimit', text=True)
    try:
        deductible = conversions.deductible
    except NameError:
        deductible = LiteralNode('deductible', text=True)
    try:
        area = conversions.area
    except NameError:
        # NB: the area type cannot be an empty string because when sending
        # around the CostCalculator object one runs into this numpy bug on
        # pickling dictionaries with empty strings:
        # https://github.com/numpy/numpy/pull/5475
        area = LiteralNode('area', dict(type='?'))

    # read the cost types and make some checks
    cost_types = []
    for ct in conversions.costTypes:
        if ct['name'] in ok_cost_types:
            with context(fname, ct):
                cost_types.append(
                    (ct['name'], valid_cost_type(ct['type']), ct['unit']))
    if 'occupants' in ok_cost_types:
        cost_types.append(('occupants', 'per_area', 'people'))
    cost_types.sort(key=operator.itemgetter(0))
    time_events = set()
    exp = Exposure(exposure['id'], exposure['category'], ~description,
                   numpy.array(cost_types, cost_type_dt), time_events,
                   ~inslimit, ~deductible, area.attrib, [], set(), [])
    cc = riskmodels.CostCalculator(
        {}, {}, exp.deductible_is_absolute, exp.insurance_limit_is_absolute)
    for ct in exp.cost_types:
        name = ct['name']  # structural, nonstructural, ...
        cc.cost_types[name] = ct['type']  # aggregated, per_asset, per_area
        cc.area_types[name] = exp.area['type']
    return exp, exposure.assets, cc
def convert_node(self, node):
    """
    Convert the given rupture node into a hazardlib rupture, depending
    on the node tag.

    :param node: a node representing a rupture
    """
    with context(self.fname, node):
        convert_rupture = getattr(self, 'convert_' + striptag(node.tag))
        mag = ~node.magnitude
        rake = ~node.rake
        hypocenter = ~node.hypocenter
    return convert_rupture(node, mag, rake, hypocenter)
def convert_npdist(self, node):
    """
    Convert the given node into a Nodal Plane Distribution.

    :param node: a node with a nodalPlaneDist subnode
    :returns: a :class:`openquake.hazardlib.pmf.PMF` instance
    """
    with context(self.fname, node):
        npdist = []
        for np in node.nodalPlaneDist:
            prob, strike, dip, rake = ~np
            npdist.append((prob, geo.NodalPlane(strike, dip, rake)))
        return pmf.PMF(npdist)
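
# Minimal sketch (assumption, not in the original module): a nodal plane
# distribution with a single plane (strike=0, dip=90, rake=0) taken with
# probability 1, mirroring the (prob, NodalPlane) pairs built above.
_example_npdist = pmf.PMF([(1.0, geo.NodalPlane(0.0, 90.0, 0.0))])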
def geo_lines(self, edges):
    """
    Utility function to convert a list of edges into a list of
    :class:`openquake.hazardlib.geo.Line` instances.

    :param edges: a list of nodes describing the edges
    """
    lines = []
    for edge in edges:
        with context(self.fname, edge):
            coords = split_coords_3d(~edge.LineString.posList)
            lines.append(geo.Line([geo.Point(*p) for p in coords]))
    return lines
def convert_node(self, node):
    """
    Convert the given rupture node into a hazardlib rupture, depending
    on the node tag.

    :param node: a node representing a rupture
    """
    with context(self.fname, node):
        convert_rupture = getattr(self, "convert_" + striptag(node.tag))
        mag = ~node.magnitude
        rake = ~node.rake
        h = node.hypocenter
        hypocenter = geo.Point(h["lon"], h["lat"], h["depth"])
    return convert_rupture(node, mag, rake, hypocenter)
def convert_node(self, node):
    """
    Convert the given rupture node into a hazardlib rupture, depending
    on the node tag.

    :param node: a node representing a rupture
    """
    with context(self.fname, node):
        convert_rupture = getattr(self, 'convert_' + striptag(node.tag))
        mag = ~node.magnitude
        rake = ~node.rake
        h = node.hypocenter
        hypocenter = geo.Point(h['lon'], h['lat'], h['depth'])
    return convert_rupture(node, mag, rake, hypocenter)
def convert_fragility_model_04(node, fname, fmcounter=itertools.count(1)):
    """
    :param node:
        an :class:`openquake.commonlib.node.LiteralNode` in NRML 0.4
    :param fname:
        path of the fragility file
    :returns:
        an :class:`openquake.commonlib.node.LiteralNode` in NRML 0.5
    """
    convert_type = {"lognormal": "logncdf"}
    new = LiteralNode('fragilityModel',
                      dict(assetCategory='building',
                           lossCategory='structural',
                           id='fm_%d_converted_from_NRML_04' %
                           next(fmcounter)))
    with context(fname, node):
        fmt = node['format']
        descr = ~node.description
        limit_states = ~node.limitStates
    new.append(LiteralNode('description', {}, descr))
    new.append(LiteralNode('limitStates', {}, ' '.join(limit_states)))
    for ffs in node[2:]:
        IML = ffs.IML
        # NB: noDamageLimit = None is different from zero
        nodamage = ffs.attrib.get('noDamageLimit')
        ff = LiteralNode('fragilityFunction', {'format': fmt})
        ff['id'] = ~ffs.taxonomy
        ff['shape'] = convert_type[ffs.attrib.get('type', 'lognormal')]
        if fmt == 'continuous':
            with context(fname, IML):
                attr = dict(imt=IML['IMT'],
                            minIML=IML['minIML'],
                            maxIML=IML['maxIML'])
                if nodamage is not None:
                    attr['noDamageLimit'] = nodamage
                ff.append(LiteralNode('imls', attr))
            for ffc in ffs[2:]:
                with context(fname, ffc):
                    ls = ffc['ls']
                    param = ffc.params
                with context(fname, param):
                    m, s = param['mean'], param['stddev']
                ff.append(
                    LiteralNode('params', dict(ls=ls, mean=m, stddev=s)))
        else:  # discrete
            with context(fname, IML):
                imls = ' '.join(map(str, (~IML)[1]))
                attr = dict(imt=IML['IMT'])
                if nodamage is not None:
                    attr['noDamageLimit'] = nodamage
                ff.append(LiteralNode('imls', attr, imls))
            for ffd in ffs[2:]:
                ls = ffd['ls']
                with context(fname, ffd):
                    poes = ' '.join(map(str, ~ffd.poEs))
                ff.append(LiteralNode('poes', dict(ls=ls), poes))
        new.append(ff)
    return new
def convert_mfdist(self, node):
    """
    Convert the given node into a Magnitude-Frequency Distribution object.

    :param node: a node of kind incrementalMFD, truncGutenbergRichterMFD,
                 arbitraryMFD or YoungsCoppersmithMFD
    :returns: a :class:`openquake.hazardlib.mfd.EvenlyDiscretizedMFD` or
              :class:`openquake.hazardlib.mfd.TruncatedGRMFD` instance
    """
    with context(self.fname, node):
        [mfd_node] = [
            subnode for subnode in node
            if subnode.tag.endswith(
                ('incrementalMFD', 'truncGutenbergRichterMFD',
                 'arbitraryMFD', 'YoungsCoppersmithMFD'))]
        if mfd_node.tag.endswith('incrementalMFD'):
            return mfd.EvenlyDiscretizedMFD(
                min_mag=mfd_node['minMag'],
                bin_width=mfd_node['binWidth'],
                occurrence_rates=~mfd_node.occurRates)
        elif mfd_node.tag.endswith('truncGutenbergRichterMFD'):
            return mfd.TruncatedGRMFD(a_val=mfd_node['aValue'],
                                      b_val=mfd_node['bValue'],
                                      min_mag=mfd_node['minMag'],
                                      max_mag=mfd_node['maxMag'],
                                      bin_width=self.width_of_mfd_bin)
        elif mfd_node.tag.endswith('arbitraryMFD'):
            return mfd.ArbitraryMFD(magnitudes=~mfd_node.magnitudes,
                                    occurrence_rates=~mfd_node.occurRates)
        elif mfd_node.tag.endswith('YoungsCoppersmithMFD'):
            if "totalMomentRate" in mfd_node.attrib.keys():
                # build Youngs & Coppersmith from the total moment rate
                return mfd.YoungsCoppersmith1985MFD.from_total_moment_rate(
                    min_mag=mfd_node["minMag"],
                    b_val=mfd_node["bValue"],
                    char_mag=mfd_node["characteristicMag"],
                    total_moment_rate=mfd_node["totalMomentRate"],
                    bin_width=mfd_node["binWidth"])
            elif "characteristicRate" in mfd_node.attrib.keys():
                # build Youngs & Coppersmith from the characteristic rate
                return mfd.YoungsCoppersmith1985MFD.\
                    from_characteristic_rate(
                        min_mag=mfd_node["minMag"],
                        b_val=mfd_node["bValue"],
                        char_mag=mfd_node["characteristicMag"],
                        char_rate=mfd_node["characteristicRate"],
                        bin_width=mfd_node["binWidth"])
def collect_source_model_paths(smlt):
    """
    Given a path to a source model logic tree or a file-like object,
    collect all of the soft-linked path names to the source models it
    contains and yield them.

    :param smlt: source model logic tree file
    """
    for blevel in nrml.read(smlt).logicTree:
        with node.context(smlt, blevel):
            for bset in blevel:
                for br in bset:
                    smfname = br.uncertaintyModel.text
                    if smfname:
                        yield smfname
def geo_planar(self, surface):
    """
    Utility to convert a PlanarSurface node with subnodes
    topLeft, topRight, bottomLeft, bottomRight into a
    :class:`openquake.hazardlib.geo.PlanarSurface` instance.

    :param surface: PlanarSurface node
    """
    with context(self.fname, surface):
        top_left = geo.Point(*~surface.topLeft)
        top_right = geo.Point(*~surface.topRight)
        bottom_left = geo.Point(*~surface.bottomLeft)
        bottom_right = geo.Point(*~surface.bottomRight)
        return geo.PlanarSurface.from_corner_points(
            self.rupture_mesh_spacing,
            top_left, top_right, bottom_right, bottom_left)
def convert_complexFaultRupture(self, node, mag, rake, hypocenter):
    """
    Convert a complexFaultRupture node.

    :param node: the rupture node
    :param mag: the rupture magnitude
    :param rake: the rupture rake angle
    :param hypocenter: the rupture hypocenter
    """
    with context(self.fname, node):
        surfaces = [node.complexFaultGeometry]
    rupt = source.rupture.Rupture(
        mag=mag, rake=rake, tectonic_region_type=None,
        hypocenter=geo.Point(*hypocenter),
        surface=self.convert_surfaces(surfaces),
        source_typology=source.ComplexFaultSource)
    return rupt
def convert_multiPlanesRupture(self, node, mag, rake, hypocenter):
    """
    Convert a multiPlanesRupture node.

    :param node: the rupture node
    :param mag: the rupture magnitude
    :param rake: the rupture rake angle
    :param hypocenter: the rupture hypocenter
    """
    with context(self.fname, node):
        surfaces = list(node.getnodes('planarSurface'))
    hrupt = source.rupture.Rupture(
        mag=mag, rake=rake, tectonic_region_type=None,
        hypocenter=geo.Point(*hypocenter),
        surface=self.convert_surfaces(surfaces),
        source_typology=source.NonParametricSeismicSource)
    return hrupt
def convert_complexFaultRupture(self, node, mag, rake, hypocenter):
    """
    Convert a complexFaultRupture node.

    :param node: the rupture node
    :param mag: the rupture magnitude
    :param rake: the rupture rake angle
    :param hypocenter: the rupture hypocenter
    """
    with context(self.fname, node):
        surfaces = [node.complexFaultGeometry]
    rupt = source.rupture.Rupture(
        mag=mag, rake=rake, tectonic_region_type=None,
        hypocenter=hypocenter,
        surface=self.convert_surfaces(surfaces),
        source_typology=source.ComplexFaultSource,
        surface_nodes=surfaces)
    return rupt
def get_exposure_lazy(fname, ok_cost_types):
    """
    :param fname:
        path of the XML file containing the exposure
    :param ok_cost_types:
        a set of cost types (as strings)
    :returns:
        a pair (Exposure instance, list of asset nodes)
    """
    [exposure] = nrml.read_lazy(fname, ['assets'])
    description = exposure.description
    try:
        conversions = exposure.conversions
    except NameError:
        conversions = LiteralNode('conversions',
                                  nodes=[LiteralNode('costTypes', [])])
    try:
        inslimit = conversions.insuranceLimit
    except NameError:
        inslimit = LiteralNode('insuranceLimit', text=True)
    try:
        deductible = conversions.deductible
    except NameError:
        deductible = LiteralNode('deductible', text=True)
    try:
        area = conversions.area
    except NameError:
        area = LiteralNode('area', dict(type=''))

    # read the cost types and make some checks
    cost_types = []
    for ct in conversions.costTypes:
        if ct['name'] in ok_cost_types:
            with context(fname, ct):
                cost_types.append(
                    (ct['name'], valid_cost_type(ct['type']), ct['unit']))
    if 'occupants' in ok_cost_types:
        cost_types.append(('occupants', 'per_area', 'people'))
    cost_types.sort(key=operator.itemgetter(0))
    time_events = set()
    return Exposure(
        exposure['id'], exposure['category'], ~description,
        numpy.array(cost_types, cost_type_dt), time_events,
        ~inslimit, ~deductible, area.attrib, [], set()), exposure.assets
def convert_mfdist(self, node):
    """
    Convert the given node into a Magnitude-Frequency Distribution object.

    :param node: a node of kind incrementalMFD, truncGutenbergRichterMFD,
                 arbitraryMFD or YoungsCoppersmithMFD
    :returns: a :class:`openquake.hazardlib.mfd.EvenlyDiscretizedMFD` or
              :class:`openquake.hazardlib.mfd.TruncatedGRMFD` instance
    """
    with context(self.fname, node):
        [mfd_node] = [subnode for subnode in node
                      if subnode.tag.endswith(
                          ('incrementalMFD', 'truncGutenbergRichterMFD',
                           'arbitraryMFD', 'YoungsCoppersmithMFD'))]
        if mfd_node.tag.endswith('incrementalMFD'):
            return mfd.EvenlyDiscretizedMFD(
                min_mag=mfd_node['minMag'],
                bin_width=mfd_node['binWidth'],
                occurrence_rates=~mfd_node.occurRates)
        elif mfd_node.tag.endswith('truncGutenbergRichterMFD'):
            return mfd.TruncatedGRMFD(
                a_val=mfd_node['aValue'],
                b_val=mfd_node['bValue'],
                min_mag=mfd_node['minMag'],
                max_mag=mfd_node['maxMag'],
                bin_width=self.width_of_mfd_bin)
        elif mfd_node.tag.endswith('arbitraryMFD'):
            return mfd.ArbitraryMFD(
                magnitudes=~mfd_node.magnitudes,
                occurrence_rates=~mfd_node.occurRates)
        elif mfd_node.tag.endswith('YoungsCoppersmithMFD'):
            if "totalMomentRate" in mfd_node.attrib.keys():
                # build Youngs & Coppersmith from the total moment rate
                return mfd.YoungsCoppersmith1985MFD.from_total_moment_rate(
                    min_mag=mfd_node["minMag"],
                    b_val=mfd_node["bValue"],
                    char_mag=mfd_node["characteristicMag"],
                    total_moment_rate=mfd_node["totalMomentRate"],
                    bin_width=mfd_node["binWidth"])
            elif "characteristicRate" in mfd_node.attrib.keys():
                # build Youngs & Coppersmith from the characteristic rate
                return mfd.YoungsCoppersmith1985MFD.\
                    from_characteristic_rate(
                        min_mag=mfd_node["minMag"],
                        b_val=mfd_node["bValue"],
                        char_mag=mfd_node["characteristicMag"],
                        char_rate=mfd_node["characteristicRate"],
                        bin_width=mfd_node["binWidth"])
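
# Minimal sketch (assumption, not in the original module): the same kind of
# TruncatedGRMFD that convert_mfdist builds from a truncGutenbergRichterMFD
# node, with illustrative values (a=4.0, b=1.0, magnitudes 5.0-7.0, bins of
# width 0.1).
_example_gr_mfd = mfd.TruncatedGRMFD(
    a_val=4.0, b_val=1.0, min_mag=5.0, max_mag=7.0, bin_width=0.1)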
def convert_singlePlaneRupture(self, node, mag, rake, hypocenter):
    """
    Convert a singlePlaneRupture node.

    :param node: the rupture node
    :param mag: the rupture magnitude
    :param rake: the rupture rake angle
    :param hypocenter: the rupture hypocenter
    """
    with context(self.fname, node):
        surfaces = [node.planarSurface]
    rupt = source.rupture.Rupture(
        mag=mag, rake=rake, tectonic_region_type=None,
        hypocenter=hypocenter,
        surface=self.convert_surfaces(surfaces),
        source_typology=source.NonParametricSeismicSource,
        surface_nodes=surfaces)
    return rupt
def convert_multiPlanesRupture(self, node, mag, rake, hypocenter):
    """
    Convert a multiPlanesRupture node.

    :param node: the rupture node
    :param mag: the rupture magnitude
    :param rake: the rupture rake angle
    :param hypocenter: the rupture hypocenter
    """
    with context(self.fname, node):
        surfaces = list(node.getnodes('planarSurface'))
    rupt = source.rupture.Rupture(
        mag=mag, rake=rake, tectonic_region_type=None,
        hypocenter=hypocenter,
        surface=self.convert_surfaces(surfaces),
        source_typology=source.NonParametricSeismicSource,
        surface_nodes=surfaces)
    return rupt
def geo_planar(self, surface):
    """
    Utility to convert a PlanarSurface node with subnodes
    topLeft, topRight, bottomLeft, bottomRight into a
    :class:`openquake.hazardlib.geo.PlanarSurface` instance.

    :param surface: PlanarSurface node
    """
    with context(self.fname, surface):
        tl = surface.topLeft
        top_left = geo.Point(tl['lon'], tl['lat'], tl['depth'])
        tr = surface.topRight
        top_right = geo.Point(tr['lon'], tr['lat'], tr['depth'])
        bl = surface.bottomLeft
        bottom_left = geo.Point(bl['lon'], bl['lat'], bl['depth'])
        br = surface.bottomRight
        bottom_right = geo.Point(br['lon'], br['lat'], br['depth'])
        return geo.PlanarSurface.from_corner_points(
            self.rupture_mesh_spacing,
            top_left, top_right, bottom_right, bottom_left)
def geo_planar(self, surface):
    """
    Utility to convert a PlanarSurface node with subnodes
    topLeft, topRight, bottomLeft, bottomRight into a
    :class:`openquake.hazardlib.geo.PlanarSurface` instance.

    :param surface: PlanarSurface node
    """
    with context(self.fname, surface):
        tl = surface.topLeft
        top_left = geo.Point(tl['lon'], tl['lat'], tl['depth'])
        tr = surface.topRight
        top_right = geo.Point(tr['lon'], tr['lat'], tr['depth'])
        bl = surface.bottomLeft
        bottom_left = geo.Point(bl['lon'], bl['lat'], bl['depth'])
        br = surface.bottomRight
        bottom_right = geo.Point(br['lon'], br['lat'], br['depth'])
        return geo.PlanarSurface.from_corner_points(
            self.rupture_mesh_spacing,
            top_left, top_right, bottom_right, bottom_left)
def collect_source_model_paths(smlt):
    """
    Given a path to a source model logic tree or a file-like object,
    collect all of the soft-linked path names to the source models it
    contains and yield them.

    :param smlt: source model logic tree file
    """
    n = nrml.read(smlt)
    try:
        blevels = n.logicTree
    except NameError:
        raise InvalidFile('%s is not a valid source_model_logic_tree_file'
                          % smlt)
    for blevel in blevels:
        with node.context(smlt, blevel):
            for bset in blevel:
                for br in bset:
                    smfname = br.uncertaintyModel.text.strip()
                    if smfname:
                        yield smfname
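
# Usage sketch (assumption, not in the original module): collecting the
# distinct source model paths referenced by a logic tree file; 'ssmLT.xml'
# is a placeholder path.
def _example_collect(smlt='ssmLT.xml'):
    return sorted(set(collect_source_model_paths(smlt)))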
def convert_simpleFaultSource(self, node):
    """
    Convert the given node into a simple fault object.

    :param node: a node with tag simpleFaultSource
    :returns: a :class:`openquake.hazardlib.source.SimpleFaultSource`
              instance
    """
    geom = node.simpleFaultGeometry
    msr = valid.SCALEREL[~node.magScaleRel]()
    fault_trace = self.geo_line(geom)
    mfd = self.convert_mfdist(node)
    with context(self.fname, node):
        try:
            hypo_list = valid.hypo_list(node.hypoList)
        except NameError:
            hypo_list = ()
        try:
            slip_list = valid.slip_list(node.slipList)
        except NameError:
            slip_list = ()
        simple = source.SimpleFaultSource(
            source_id=node["id"],
            name=node["name"],
            tectonic_region_type=node["tectonicRegion"],
            mfd=mfd,
            rupture_mesh_spacing=self.rupture_mesh_spacing,
            magnitude_scaling_relationship=msr,
            rupture_aspect_ratio=~node.ruptAspectRatio,
            upper_seismogenic_depth=~geom.upperSeismoDepth,
            lower_seismogenic_depth=~geom.lowerSeismoDepth,
            fault_trace=fault_trace,
            dip=~geom.dip,
            rake=~node.rake,
            temporal_occurrence_model=self.tom,
            hypo_list=hypo_list,
            slip_list=slip_list,
        )
    return simple
def convert_simpleFaultSource(self, node):
    """
    Convert the given node into a simple fault object.

    :param node: a node with tag simpleFaultSource
    :returns: a :class:`openquake.hazardlib.source.SimpleFaultSource`
              instance
    """
    geom = node.simpleFaultGeometry
    msr = valid.SCALEREL[~node.magScaleRel]()
    fault_trace = self.geo_line(geom)
    mfd = self.convert_mfdist(node)
    with context(self.fname, node):
        try:
            hypo_list = valid.hypo_list(node.hypoList)
        except NameError:
            hypo_list = ()
        try:
            slip_list = valid.slip_list(node.slipList)
        except NameError:
            slip_list = ()
        simple = source.SimpleFaultSource(
            source_id=node['id'],
            name=node['name'],
            tectonic_region_type=node['tectonicRegion'],
            mfd=mfd,
            rupture_mesh_spacing=self.rupture_mesh_spacing,
            magnitude_scaling_relationship=msr,
            rupture_aspect_ratio=~node.ruptAspectRatio,
            upper_seismogenic_depth=~geom.upperSeismoDepth,
            lower_seismogenic_depth=~geom.lowerSeismoDepth,
            fault_trace=fault_trace,
            dip=~geom.dip,
            rake=~node.rake,
            temporal_occurrence_model=self.tom,
            hypo_list=hypo_list,
            slip_list=slip_list)
    return simple
def get_vulnerability_functions(fname):
    """
    :param fname: path of the vulnerability file
    :returns: a dictionary imt, taxonomy -> vulnerability function
    """
    # NB: the IMTs can be duplicated and with different levels, each
    # vulnerability function in a set will get its own levels
    imts = set()
    taxonomies = set()
    vf_dict = {}  # imt, taxonomy -> vulnerability function
    node = nrml.read(fname)
    if node['xmlns'] == nrml.NRML05:
        vmodel = node[0]
        for vfun in vmodel.getnodes('vulnerabilityFunction'):
            with context(fname, vfun):
                imt = vfun.imls['imt']
                imls = numpy.array(~vfun.imls)
                taxonomy = vfun['id']
            if taxonomy in taxonomies:
                raise InvalidFile(
                    'Duplicated vulnerabilityFunctionID: %s: %s, line %d' %
                    (taxonomy, fname, vfun.lineno))
            taxonomies.add(taxonomy)
            if vfun['dist'] == 'PM':
                loss_ratios, probs = [], []
                for probabilities in vfun[1:]:
                    loss_ratios.append(probabilities['lr'])
                    probs.append(valid.probabilities(~probabilities))
                probs = numpy.array(probs)
                assert probs.shape == (len(loss_ratios), len(imls))
                vf_dict[imt, taxonomy] = (
                    scientific.VulnerabilityFunctionWithPMF(
                        taxonomy, imt, imls,
                        numpy.array(loss_ratios), probs))
            else:
                with context(fname, vfun):
                    loss_ratios = ~vfun.meanLRs
                    coefficients = ~vfun.covLRs
                if len(loss_ratios) != len(imls):
                    raise InvalidFile(
                        'There are %d loss ratios, but %d imls: %s, line %d'
                        % (len(loss_ratios), len(imls), fname,
                           vfun.meanLRs.lineno))
                if len(coefficients) != len(imls):
                    raise InvalidFile(
                        'There are %d coefficients, but %d imls: %s, '
                        'line %d' % (len(coefficients), len(imls), fname,
                                     vfun.covLRs.lineno))
                with context(fname, vfun):
                    vf_dict[imt, taxonomy] = scientific.VulnerabilityFunction(
                        taxonomy, imt, imls, loss_ratios, coefficients,
                        vfun['dist'])
        return vf_dict
    # otherwise, read the old format (NRML 0.4)
    for vset in read_nodes(fname, filter_vset,
                           nodefactory['vulnerabilityModel']):
        imt_str, imls, min_iml, max_iml, imlUnit = ~vset.IML
        imts.add(imt_str)
        for vfun in vset.getnodes('discreteVulnerability'):
            taxonomy = vfun['vulnerabilityFunctionID']
            if taxonomy in taxonomies:
                raise InvalidFile(
                    'Duplicated vulnerabilityFunctionID: %s: %s, line %d' %
                    (taxonomy, fname, vfun.lineno))
            taxonomies.add(taxonomy)
            with context(fname, vfun):
                loss_ratios = ~vfun.lossRatio
                coefficients = ~vfun.coefficientsVariation
            if len(loss_ratios) != len(imls):
                raise InvalidFile(
                    'There are %d loss ratios, but %d imls: %s, line %d' %
                    (len(loss_ratios), len(imls), fname,
                     vfun.lossRatio.lineno))
            if len(coefficients) != len(imls):
                raise InvalidFile(
                    'There are %d coefficients, but %d imls: %s, line %d' %
                    (len(coefficients), len(imls), fname,
                     vfun.coefficientsVariation.lineno))
            with context(fname, vfun):
                vf_dict[imt_str, taxonomy] = scientific.VulnerabilityFunction(
                    taxonomy, imt_str, imls, loss_ratios, coefficients,
                    vfun['probabilisticDistribution'])
    return vf_dict
def get_exposure(oqparam):
    """
    Read the full exposure in memory and build a list of
    :class:`openquake.risklib.riskmodels.Asset` instances.
    If you don't want to keep everything in memory, use
    get_exposure_lazy instead (for experts only).

    :param oqparam:
        an :class:`openquake.commonlib.oqvalidation.OqParam` instance
    :returns:
        an :class:`Exposure` instance
    """
    out_of_region = 0
    if oqparam.region_constraint:
        region = wkt.loads(oqparam.region_constraint)
    else:
        region = None
    all_cost_types = set(oqparam.all_cost_types)
    fname = oqparam.inputs['exposure']
    exposure, assets_node, cc = get_exposure_lazy(fname, all_cost_types)
    relevant_cost_types = all_cost_types - set(['occupants'])
    asset_refs = set()
    ignore_missing_costs = set(oqparam.ignore_missing_costs)
    for idx, asset in enumerate(assets_node):
        values = {}
        deductibles = {}
        insurance_limits = {}
        retrofitteds = {}
        with context(fname, asset):
            asset_id = asset['id'].encode('utf8')
            if asset_id in asset_refs:
                raise DuplicatedID(asset_id)
            asset_refs.add(asset_id)
            exposure.asset_refs.append(asset_id)
            taxonomy = asset['taxonomy']
            if 'damage' in oqparam.calculation_mode:
                # calculators of 'damage' kind require the 'number'
                # if it is missing a KeyError is raised
                number = asset.attrib['number']
            else:
                # some calculators ignore the 'number' attribute;
                # if it is missing it is considered 1, since we are going
                # to multiply by it
                try:
                    number = asset['number']
                except KeyError:
                    number = 1
                else:
                    if 'occupants' in all_cost_types:
                        values['occupants_None'] = number
            location = asset.location['lon'], asset.location['lat']
            if region and not geometry.Point(*location).within(region):
                out_of_region += 1
                continue
        try:
            costs = asset.costs
        except NameError:
            costs = LiteralNode('costs', [])
        try:
            occupancies = asset.occupancies
        except NameError:
            occupancies = LiteralNode('occupancies', [])
        for cost in costs:
            with context(fname, cost):
                cost_type = cost['type']
                if cost_type in relevant_cost_types:
                    values[cost_type] = cost['value']
                    retrovalue = cost.attrib.get('retrofitted')
                    if retrovalue is not None:
                        retrofitteds[cost_type] = retrovalue
                    if oqparam.insured_losses:
                        deductibles[cost_type] = cost['deductible']
                        insurance_limits[cost_type] = cost['insuranceLimit']

        # check we are not missing a cost type
        missing = relevant_cost_types - set(values)
        if missing and missing <= ignore_missing_costs:
            logging.warn('Ignoring asset %s, missing cost type(s): %s',
                         asset_id, ', '.join(missing))
            for cost_type in missing:
                values[cost_type] = None
        elif missing and 'damage' not in oqparam.calculation_mode:
            # missing the costs is okay for damage calculators
            with context(fname, asset):
                raise ValueError("Invalid Exposure. "
                                 "Missing cost %s for asset %s" % (
                                     missing, asset_id))
        tot_occupants = 0
        for occupancy in occupancies:
            with context(fname, occupancy):
                exposure.time_events.add(occupancy['period'])
                occupants = 'occupants_%s' % occupancy['period']
                values[occupants] = occupancy['occupants']
                tot_occupants += values[occupants]
        if occupancies:  # store average occupants
            values['occupants_None'] = tot_occupants / len(occupancies)
        area = float(asset.attrib.get('area', 1))
        ass = riskmodels.Asset(
            idx, taxonomy, number, location, values, area,
            deductibles, insurance_limits, retrofitteds, cc)
        exposure.assets.append(ass)
        exposure.taxonomies.add(taxonomy)
    if region:
        logging.info(
            'Read %d assets within the region_constraint '
            'and discarded %d assets outside the region',
            len(exposure.assets), out_of_region)
        if len(exposure.assets) == 0:
            raise RuntimeError(
                'Could not find any asset within the region!')
    else:
        logging.info('Read %d assets', len(exposure.assets))

    # sanity check
    values = any(len(ass.values) + ass.number for ass in exposure.assets)
    assert values, 'Could not find any value??'
    return exposure
def get_exposure(oqparam):
    """
    Read the full exposure in memory and build a list of
    :class:`openquake.risklib.workflows.Asset` instances.
    If you don't want to keep everything in memory, use
    get_exposure_lazy instead (for experts only).

    :param oqparam:
        an :class:`openquake.commonlib.oqvalidation.OqParam` instance
    :returns:
        an :class:`Exposure` instance
    """
    out_of_region = 0
    if oqparam.region_constraint:
        region = wkt.loads(oqparam.region_constraint)
    else:
        region = None
    fname = oqparam.inputs['exposure']
    exposure, assets_node = get_exposure_lazy(fname)
    cc = workflows.CostCalculator(
        {}, {}, exposure.deductible_is_absolute,
        exposure.insurance_limit_is_absolute)
    for ct in exposure.cost_types:
        name = ct['name']  # structural, nonstructural, ...
        cc.cost_types[name] = ct['type']  # aggregated, per_asset, per_area
        cc.area_types[name] = exposure.area['type']
    all_cost_types = set(vulnerability_files(oqparam.inputs))
    relevant_cost_types = all_cost_types - set(['occupants'])
    asset_refs = set()
    ignore_missing_costs = set(oqparam.ignore_missing_costs)
    for asset in assets_node:
        values = {}
        deductibles = {}
        insurance_limits = {}
        retrofitting_values = {}
        with context(fname, asset):
            asset_id = asset['id'].encode('utf8')
            if asset_id in asset_refs:
                raise DuplicatedID(asset_id)
            asset_refs.add(asset_id)
            taxonomy = asset['taxonomy']
            if 'damage' in oqparam.calculation_mode:
                # calculators of 'damage' kind require the 'number'
                # if it is missing a KeyError is raised
                number = asset.attrib['number']
            else:
                # some calculators ignore the 'number' attribute;
                # if it is missing it is considered 1, since we are going
                # to multiply by it
                try:
                    number = asset['number']
                except KeyError:
                    number = 1
                else:
                    if 'occupants' in all_cost_types:
                        values['fatalities_None'] = number
            location = asset.location['lon'], asset.location['lat']
            if region and not geometry.Point(*location).within(region):
                out_of_region += 1
                continue
        try:
            costs = asset.costs
        except NameError:
            costs = LiteralNode('costs', [])
        try:
            occupancies = asset.occupancies
        except NameError:
            occupancies = LiteralNode('occupancies', [])
        with context(fname, costs):
            for cost in costs:
                cost_type = cost['type']
                if cost_type not in relevant_cost_types:
                    continue
                values[cost_type] = cost['value']
                deduct = cost.attrib.get('deductible')
                if deduct is not None:
                    deductibles[cost_type] = deduct
                limit = cost.attrib.get('insuranceLimit')
                if limit is not None:
                    insurance_limits[cost_type] = limit

        # check we are not missing a cost type
        missing = relevant_cost_types - set(values)
        if missing and missing <= ignore_missing_costs:
            logging.warn('Ignoring asset %s, missing cost type(s): %s',
                         asset_id, ', '.join(missing))
            for cost_type in missing:
                values[cost_type] = None
        elif missing:
            raise ValueError("Invalid Exposure. "
                             "Missing cost %s for asset %s" % (
                                 missing, asset_id))
        tot_fatalities = 0
        for occupancy in occupancies:
            with context(fname, occupancy):
                fatalities = 'fatalities_%s' % occupancy['period']
                values[fatalities] = occupancy['occupants']
                tot_fatalities += values[fatalities]
        if occupancies:  # store average fatalities
            values['fatalities_None'] = tot_fatalities / len(occupancies)
        area = float(asset.attrib.get('area', 1))
        ass = workflows.Asset(
            asset_id, taxonomy, number, location, values, area,
            deductibles, insurance_limits, retrofitting_values, cc)
        exposure.assets.append(ass)
        exposure.taxonomies.add(taxonomy)
    if region:
        logging.info(
            'Read %d assets within the region_constraint '
            'and discarded %d assets outside the region',
            len(exposure.assets), out_of_region)
    else:
        logging.info('Read %d assets', len(exposure.assets))

    # sanity check
    values = any(len(ass.values) + ass.number for ass in exposure.assets)
    assert values, 'Could not find any value??'
    return exposure
def asset_gen():
    # wrap the asset generation to get a nice error message
    with context(fname, assets_node):
        for asset in assets_node:
            yield asset
def get_fragility_functions(fname, continuous_fragility_discretization,
                            steps_per_interval=None):
    """
    :param fname:
        path of the fragility file
    :param continuous_fragility_discretization:
        number of points used to discretize continuous fragility functions
    :param steps_per_interval:
        number of steps used for the fine graining of the IMLs of discrete
        fragility functions
    :returns:
        damage_states list and dictionary taxonomy -> functions
    """
    [fmodel] = read_nodes(
        fname, lambda el: el.tag.endswith('fragilityModel'),
        nodefactory['fragilityModel'])
    # ~fmodel.description is ignored
    limit_states = ~fmodel.limitStates
    tag = 'ffc' if fmodel['format'] == 'continuous' else 'ffd'
    fragility_functions = AccumDict()  # taxonomy -> functions
    for ffs in fmodel.getnodes('ffs'):
        add_zero_value = False
        # NB: the noDamageLimit is only defined for discrete fragility
        # functions. It is a way to set the starting point of the functions:
        # if noDamageLimit is at the left of each IMLs, it means that the
        # function starts at zero at the given point, so we need to add
        # noDamageLimit to the list of IMLs and zero to the list of poes
        nodamage = ffs.attrib.get('noDamageLimit')
        taxonomy = ~ffs.taxonomy
        imt_str, imls, min_iml, max_iml, imlUnit = ~ffs.IML

        if fmodel['format'] == 'discrete':
            if nodamage is not None and nodamage < imls[0]:
                # discrete fragility
                imls = [nodamage] + imls
                add_zero_value = True
            if steps_per_interval:
                gen_imls = scientific.fine_graining(imls, steps_per_interval)
            else:
                gen_imls = imls
        else:  # continuous
            if min_iml is None:
                raise InvalidFile(
                    'Missing attribute minIML, line %d' % ffs.IML.lineno)
            elif max_iml is None:
                raise InvalidFile(
                    'Missing attribute maxIML, line %d' % ffs.IML.lineno)
            gen_imls = numpy.linspace(min_iml, max_iml,
                                      continuous_fragility_discretization)
        fragility_functions[taxonomy] = scientific.FragilityFunctionList(
            [], imt=imt_str, imls=list(gen_imls),
            no_damage_limit=nodamage,
            continuous_fragility_discretization=
            continuous_fragility_discretization,
            steps_per_interval=steps_per_interval)
        lstates = []
        for ff in ffs.getnodes(tag):
            ls = ff['ls']  # limit state
            lstates.append(ls)
            if tag == 'ffc':
                with context(fname, ff):
                    mean_stddev = ~ff.params
                fragility_functions[taxonomy].append(
                    scientific.FragilityFunctionContinuous(
                        ls, *mean_stddev))
            else:  # discrete
                with context(fname, ff):
                    poes = ~ff.poEs
                if add_zero_value:
                    poes = [0.] + poes
                fragility_functions[taxonomy].append(
                    scientific.FragilityFunctionDiscrete(
                        ls, imls, poes, nodamage))
        if lstates != limit_states:
            raise InvalidFile("Expected limit states %s, got %s in %s" %
                              (limit_states, lstates, fname))

    fragility_functions.damage_states = ['no_damage'] + limit_states
    return fragility_functions
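
# Usage sketch (assumption, not in the original module): loading a fragility
# model with no fine graining; the path and the discretization value are
# placeholders.
def _example_fragility(fname='fragility_model.xml'):
    ffs = get_fragility_functions(
        fname, continuous_fragility_discretization=20)
    return ffs.damage_states, sorted(ffs)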