def parse_nrml_uhs_curves(nrml_uhs_map):
    """
    Read a NRML uniform hazard spectra (UHS) file.

    :param nrml_uhs_map: path to the NRML file containing the UHS results
    :returns: a triple (metadata, periods, values) where metadata is a
        dictionary, periods is a numpy array of the spectral periods and
        values is a 2D numpy array whose rows are [lon, lat, uhs_1, ...]
    """
    node_set = read_lazy(nrml_uhs_map, "IMLs")[0]
    # Read metadata
    metadata = {
        "smlt_path": node_set.attrib["sourceModelTreePath"],
        "investigation_time": float(node_set.attrib["investigationTime"]),
        "poe": float(node_set.attrib["poE"]),
        "gsimlt_path": node_set["gsimTreePath"]}
    for option, name in OPTIONAL_PATHS:
        if name in node_set.attrib:
            metadata[option] = node_set.attrib[name]
        else:
            metadata[option] = None
    # NB: use list comprehensions, not numpy.array(map(float, ...)):
    # on Python 3 `map` returns an iterator and numpy.array would build
    # a useless 0-d object array out of it
    periods = numpy.array(
        [float(x) for x in node_set.nodes[0].text.split()])
    values = []
    for node in node_set.nodes[1:]:
        subnodes = list(node.nodes)
        # the location is stored as a "lon lat" string
        lon, lat = [float(x) for x in subnodes[0].nodes[0].text.split()]
        uhs = [lon, lat]
        uhs.extend(float(x) for x in subnodes[1].text.split())
        values.append(uhs)
    return metadata, periods, numpy.array(values)
def get_exposure_lazy(fname):
    """
    :param fname: path of the XML file containing the exposure
    :returns: a pair (Exposure instance, list of asset nodes)
    """
    [exposure] = nrml.read_lazy(fname, ['assets'])
    description = exposure.description
    # NB: the lazy reader raises NameError (not AttributeError) for a
    # missing subelement, hence the except clauses below
    try:
        conversions = exposure.conversions
    except NameError:
        conversions = LiteralNode(
            'conversions', nodes=[LiteralNode('costTypes', [])])
    try:
        inslimit = conversions.insuranceLimit
    except NameError:
        # text=True so that dereferencing with ~ yields a usable value
        # when the element is missing (consistent with the other
        # get_exposure_lazy variants in this file)
        inslimit = LiteralNode('insuranceLimit', text=True)
    try:
        deductible = conversions.deductible
    except NameError:
        deductible = LiteralNode('deductible', text=True)
    try:
        area = conversions.area
    except NameError:
        area = LiteralNode('area', dict(type=''))
    return Exposure(
        exposure['id'], exposure['category'], ~description,
        [ct.attrib for ct in conversions.costTypes],
        ~inslimit, ~deductible, area.attrib, [], set()), exposure.assets
def get_exposure_lazy(fname):
    """
    :param fname: path of the XML file containing the exposure
    :returns: a pair (Exposure instance, list of asset nodes)
    """
    [exposure] = nrml.read_lazy(fname, ['assets'])
    description = exposure.description

    def node_or(getter, make_default):
        # return the requested subnode; the lazy reader raises NameError
        # for a missing element, in which case build the default node
        try:
            return getter()
        except NameError:
            return make_default()

    conversions = node_or(
        lambda: exposure.conversions,
        lambda: LiteralNode('conversions',
                            nodes=[LiteralNode('costTypes', [])]))
    inslimit = node_or(lambda: conversions.insuranceLimit,
                       lambda: LiteralNode('insuranceLimit', text=True))
    deductible = node_or(lambda: conversions.deductible,
                         lambda: LiteralNode('deductible', text=True))
    area = node_or(lambda: conversions.area,
                   lambda: LiteralNode('area', dict(type='')))
    cost_types = [ct.attrib for ct in conversions.costTypes]
    exp = Exposure(exposure['id'], exposure['category'], ~description,
                   cost_types, ~inslimit, ~deductible, area.attrib,
                   [], set())
    return exp, exposure.assets
def read_hazard_curves(filename):
    """
    Read the hazard curves from a NRML file.

    :param filename: path to the NRML hazard curve file
    :returns: a dictionary with the hazard curve metadata plus a
        "curves" 2D array whose rows are [lon, lat, poe_1, ..., poe_n]
    """
    node_set = read_lazy(filename, "poEs")[0]
    hazard_curves = {
        "imt": node_set.attrib["IMT"],
        "investigation_time": float(node_set.attrib["investigationTime"]),
        # the IMLs come as a whitespace-separated string and must be
        # parsed into floats (see parse_nrml_uhs_curves for the same
        # reader); numpy.array on the raw string would be wrong
        "imls": numpy.array(
            [float(x) for x in node_set.nodes[0].text.split()])}
    for option, name in OPTIONAL_PATHS:
        if name in node_set.attrib:
            hazard_curves[option] = node_set.attrib[name]
        else:
            hazard_curves[option] = None
    locations = []
    poes = []
    for hc_node in node_set.nodes[1:]:
        # Get location info: a "lon lat" string inside gml:Point/gml:pos
        lon, lat = [float(x)
                    for x in hc_node.nodes[0].nodes[0].text.split()]
        locations.append([lon, lat])
        # Get PoEs, also whitespace-separated
        poes.append([float(x) for x in hc_node.nodes[1].text.split()])
    hazard_curves["curves"] = numpy.column_stack(
        [numpy.array(locations), numpy.array(poes)])
    return hazard_curves
def get_exposure_lazy(fname, ok_cost_types):
    """
    :param fname: path of the XML file containing the exposure
    :param ok_cost_types: a set of cost types (as strings)
    :returns: a triple (Exposure instance, list of asset nodes,
        CostCalculator instance)
    """
    [exposure] = nrml.read_lazy(fname, ['assets'])
    description = exposure.description
    # the lazy reader raises NameError for missing subelements
    try:
        conversions = exposure.conversions
    except NameError:
        conversions = LiteralNode(
            'conversions', nodes=[LiteralNode('costTypes', [])])
    try:
        inslimit = conversions.insuranceLimit
    except NameError:
        inslimit = LiteralNode('insuranceLimit', text=True)
    try:
        deductible = conversions.deductible
    except NameError:
        deductible = LiteralNode('deductible', text=True)
    try:
        area = conversions.area
    except NameError:
        # NB: the area type cannot be an empty string because when sending
        # around the CostCalculator object one runs into this numpy bug on
        # pickling dictionaries with empty strings:
        # https://github.com/numpy/numpy/pull/5475
        area = LiteralNode('area', dict(type='?'))
    # collect the cost types the caller asked for, with validation
    ctypes = []
    for ctype in conversions.costTypes:
        if ctype['name'] not in ok_cost_types:
            continue
        with context(fname, ctype):
            ctypes.append((ctype['name'], valid_cost_type(ctype['type']),
                           ctype['unit']))
    if 'occupants' in ok_cost_types:
        ctypes.append(('occupants', 'per_area', 'people'))
    ctypes.sort(key=operator.itemgetter(0))
    time_events = set()
    exp = Exposure(
        exposure['id'], exposure['category'], ~description,
        numpy.array(ctypes, cost_type_dt), time_events,
        ~inslimit, ~deductible, area.attrib, [], set(), [])
    cc = riskmodels.CostCalculator(
        {}, {}, exp.deductible_is_absolute, exp.insurance_limit_is_absolute)
    for ctype in exp.cost_types:
        cname = ctype['name']  # structural, nonstructural, ...
        cc.cost_types[cname] = ctype['type']  # aggregated/per_asset/per_area
        cc.area_types[cname] = exp.area['type']
    return exp, exposure.assets, cc
def get_exposure_lazy(fname, ok_cost_types):
    """
    :param fname: path of the XML file containing the exposure
    :param ok_cost_types: a set of cost types (as strings)
    :returns: a triple (Exposure instance, list of asset nodes,
        CostCalculator instance)
    """
    [exposure] = nrml.read_lazy(fname, ['assets'])
    description = exposure.description
    try:
        conversions = exposure.conversions
    except NameError:
        # no <conversions> element: fall back on an empty one
        conversions = LiteralNode(
            'conversions', nodes=[LiteralNode('costTypes', [])])

    def subnode(name, make_default):
        # the lazy reader raises NameError for a missing subelement;
        # in that case build the given default node
        try:
            return getattr(conversions, name)
        except NameError:
            return make_default()

    inslimit = subnode('insuranceLimit',
                       lambda: LiteralNode('insuranceLimit', text=True))
    deductible = subnode('deductible',
                         lambda: LiteralNode('deductible', text=True))
    # NB: the area type cannot be an empty string because when sending
    # around the CostCalculator object one runs into this numpy bug on
    # pickling dictionaries with empty strings:
    # https://github.com/numpy/numpy/pull/5475
    area = subnode('area', lambda: LiteralNode('area', dict(type='?')))
    # read the requested cost types, validating each one in context
    selected = []
    for ct in conversions.costTypes:
        if ct['name'] in ok_cost_types:
            with context(fname, ct):
                selected.append(
                    (ct['name'], valid_cost_type(ct['type']), ct['unit']))
    if 'occupants' in ok_cost_types:
        selected.append(('occupants', 'per_area', 'people'))
    selected.sort(key=operator.itemgetter(0))
    time_events = set()
    exp = Exposure(exposure['id'], exposure['category'], ~description,
                   numpy.array(selected, cost_type_dt), time_events,
                   ~inslimit, ~deductible, area.attrib, [], set(), [])
    cc = riskmodels.CostCalculator(
        {}, {}, exp.deductible_is_absolute, exp.insurance_limit_is_absolute)
    for ct in exp.cost_types:
        name = ct['name']  # structural, nonstructural, ...
        cc.cost_types[name] = ct['type']  # aggregated, per_asset, per_area
        cc.area_types[name] = exp.area['type']
    return exp, exposure.assets, cc
def parse_gmfs_file(file_name):
    """
    Parses the NRML 0.4 GMF set file.

    Returns an OrderedDict keyed by stochasticEventSetId; each value is
    an OrderedDict keyed by rupture id, holding a dictionary with the
    list of IMT names ("IMT") and a 2D array ("GMFs") whose columns are
    [lon, lat, gmv_imt1, gmv_imt2, ...].
    """
    node_set = read_lazy(file_name, "node")[0]
    gmf_set = []
    gmfs = []
    imt_list = []
    rup_id_list = []
    for gmfset_node in node_set:
        scenario_keys = []
        for gmf_node in gmfset_node:
            # Parse field
            imt = gmf_node["IMT"]
            if "SA" in imt:
                # encode the period into the IMT name, e.g. "SA(0.1)"
                imt = "SA({:s})".format(str(gmf_node["saPeriod"]))
            if not imt in imt_list:
                # a new IMT begins: flush the ruptures collected for the
                # previous IMT (imt_list[-1]) before registering this one
                if len(rup_id_list):
                    gmfs.append((imt_list[-1], OrderedDict(rup_id_list)))
                    rup_id_list = []
                imt_list.append(imt)
            rup_id = gmf_node["ruptureId"]
            if not rup_id in scenario_keys:
                scenario_keys.append(rup_id)
            # one row [lon, lat, gmv] per site in this field
            gmf = [[val["lon"], val["lat"], val["gmv"]] for val in gmf_node]
            rup_id_list.append((rup_id, numpy.array(gmf)))
        # flush the last IMT of this set, then index by IMT
        gmfs.append((imt, OrderedDict(rup_id_list)))
        gmfs = OrderedDict(gmfs)
        # Re-order to give in terms of scenario ids
        scenarios = []
        for rup_id in scenario_keys:
            # Determine the IMTs available for the scenario.
            # NOTE(review): `gmf` here is the leaked value from the last
            # inner-loop iteration; this assumes every field in the set
            # covers the same number of sites — confirm with the schema
            scenario_gmfs = {
                "IMT": ["lon", "lat"] + imt_list,
                "GMFs": numpy.zeros([len(gmf), len(imt_list) + 2])}
            for i, imt in enumerate(imt_list):
                scenario_gmfs["GMFs"][:, i + 2] = gmfs[imt][rup_id][:, 2]
            # lon/lat taken from the last IMT; assumes identical site
            # ordering across IMTs — TODO confirm
            scenario_gmfs["GMFs"][:, :2] = gmfs[imt][rup_id][:, :2]
            scenarios.append((rup_id, scenario_gmfs))
        gmf_set.append((gmfset_node["stochasticEventSetId"],
                        OrderedDict(scenarios)))
        # reset the per-set accumulators.
        # NOTE(review): rup_id_list is NOT reset here; with more than one
        # gmfSet the stale entries would make the first `imt_list[-1]`
        # flush above fail (IndexError) — verify against multi-set files
        gmfs = []
        imt_list = []
    return OrderedDict(gmf_set)
def parse_gmfc_file(file_name):
    """
    Parse a NRML 0.4 GMF collection file.

    :param file_name: path to the NRML file
    :returns: a GmfCollection instance with its ``gmfss`` attribute set
        to the list of parsed GMF sets
    :raises ValueError: if the file contains no gmfCollection element
        (previously this surfaced as an obscure UnboundLocalError)
    """
    node_set = read_lazy(file_name, "node")
    gmfc = None
    gmfss = []
    for element in node_set:
        if "gmfSet" in element.tag:
            gmfss.append(parse_gmf_set(element))
        elif "gmfCollection" in element.tag:
            gmfc = GmfCollection(element.attrib["sourceModelTreePath"],
                                 element.attrib["gsimTreePath"],
                                 None)
    if gmfc is None:
        raise ValueError(
            "No gmfCollection element found in %s" % file_name)
    gmfc.gmfss = gmfss
    return gmfc
def get_exposure_lazy(fname, ok_cost_types):
    """
    :param fname: path of the XML file containing the exposure
    :param ok_cost_types: a set of cost types (as strings)
    :returns: a pair (Exposure instance, list of asset nodes)
    """
    [exposure] = nrml.read_lazy(fname, ['assets'])
    description = exposure.description
    # the lazy reader raises NameError for missing subelements, in which
    # case we fall back on empty default nodes
    try:
        conversions = exposure.conversions
    except NameError:
        conversions = LiteralNode(
            'conversions', nodes=[LiteralNode('costTypes', [])])
    try:
        inslimit = conversions.insuranceLimit
    except NameError:
        inslimit = LiteralNode('insuranceLimit', text=True)
    try:
        deductible = conversions.deductible
    except NameError:
        deductible = LiteralNode('deductible', text=True)
    try:
        area = conversions.area
    except NameError:
        area = LiteralNode('area', dict(type=''))
    # collect and validate the requested cost types
    ctype_triples = []
    for ctype in conversions.costTypes:
        if ctype['name'] not in ok_cost_types:
            continue
        with context(fname, ctype):
            ctype_triples.append(
                (ctype['name'], valid_cost_type(ctype['type']),
                 ctype['unit']))
    if 'occupants' in ok_cost_types:
        ctype_triples.append(('occupants', 'per_area', 'people'))
    ctype_triples.sort(key=operator.itemgetter(0))
    time_events = set()
    exp = Exposure(
        exposure['id'], exposure['category'], ~description,
        numpy.array(ctype_triples, cost_type_dt), time_events,
        ~inslimit, ~deductible, area.attrib, [], set())
    return exp, exposure.assets
def parse_gmfs_file(file_name):
    """
    Parses the NRML 0.4 GMF set file.

    :returns: an OrderedDict mapping each IMT name to a list of numpy
        arrays whose rows are [lon, lat, gmv]
    """
    gmfs = OrderedDict()
    node_set = read_lazy(file_name, "node")[0]
    for gmf_node in node_set.nodes:
        # one field per gmf node: a row [lon, lat, gmv] per site
        field = numpy.array(
            [[float(val.attrib["lon"]), float(val.attrib["lat"]),
              float(val.attrib["gmv"])] for val in gmf_node.nodes])
        imt = gmf_node.attrib["IMT"]
        gmfs.setdefault(imt, []).append(field)
    return gmfs
def parse_nrml_hazard_map(nrml_hazard_map):
    """
    Reads the NRML hazard map file and returns the metadata as a
    dictionary plus the values as a numpy array of rows [lon, lat, IML].
    """
    node_set = read_lazy(nrml_hazard_map, "node")[0]
    attribs = node_set.attrib
    metadata = {"imt": attribs["IMT"],
                "investigation_time": float(attribs["investigationTime"])}
    for option, name in OPTIONAL_PATHS:
        # missing optional attributes are recorded as None
        metadata[option] = attribs[name] if name in attribs else None
    if "SA" in metadata["imt"]:
        metadata["sa_period"] = attribs['saPeriod']
        metadata['sa_damping'] = attribs['saDamping']
    rows = [[float(node.attrib["lon"]), float(node.attrib["lat"]),
             float(node.attrib["iml"])] for node in node_set.nodes]
    return metadata, numpy.array(rows)