def get_gmfs_from_txt(oqparam, fname):
    """
    Read ground motion fields from a CSV file.

    :param oqparam:
        an :class:`openquake.commonlib.oqvalidation.OqParam` instance
    :param fname:
        the full path of the CSV file
    :returns:
        a triple (sitecol, etags, gmf array of shape (N, R)) read from a
        CSV file with format `etag indices [gmv1 ... gmvN] * num_imts`
    :raises InvalidFile:
        if the header coordinates, the indices column or the GMV columns
        are malformed, if there are fewer rows than expected, or if the
        etags are not sorted
    """
    with open(fname) as csvfile:
        firstline = next(csvfile)
        try:
            coords = valid.coordinates(firstline)
        except Exception:
            # NB: the original message was missing the space between
            # 'separated' and 'ordered'
            raise InvalidFile(
                'The first line of %s is expected to contain comma separated '
                'ordered coordinates, got %s instead' % (fname, firstline))
        sitecol = sitecol_from_coords(oqparam, coords)
        if not oqparam.imtls:
            oqparam.set_risk_imtls(get_risk_models(oqparam))
        imts = list(oqparam.imtls)
        # NOTE(review): bytes() field names look Python 2 specific — on
        # Python 3 bytes(str) raises TypeError; confirm target version
        imt_dt = numpy.dtype([(bytes(imt), F32) for imt in imts])
        num_gmfs = oqparam.number_of_ground_motion_fields
        gmf_by_imt = numpy.zeros((num_gmfs, len(sitecol)), imt_dt)
        etags = []
        lineno = 1  # if the file has only the header, the count check fires
        for lineno, line in enumerate(csvfile, 2):
            row = line.split(',')
            try:
                indices = list(map(valid.positiveint, row[1].split()))
            except Exception:
                raise InvalidFile(
                    'The second column in %s is expected to contain integer '
                    'indices, got %s' % (fname, row[1]))
            r_sites = (
                sitecol if not indices else
                site.FilteredSiteCollection(indices, sitecol))
            for i in range(len(imts)):
                try:
                    # i + 2 because the first 2 fields are etag and indices
                    array = numpy.array(valid.positivefloats(row[i + 2]))
                except Exception:
                    raise InvalidFile(
                        'The column #%d in %s is expected to contain positive '
                        'floats, got %s instead' % (i + 3, fname, row[i + 2]))
                gmf_by_imt[imts[i]][lineno - 2] = r_sites.expand(array, 0)
            etags.append(row[0])
        if lineno < num_gmfs + 1:
            raise InvalidFile('%s contains %d rows, expected %d' % (
                fname, lineno, num_gmfs + 1))
        if etags != sorted(etags):
            raise InvalidFile(
                'The etags in %s are not ordered: %s' % (fname, etags))
    return sitecol, numpy.array(etags, '|S100'), gmf_by_imt.T
def get_gmfs_from_txt(oqparam, fname):
    """
    Read ground motion fields from a CSV file.

    :param oqparam:
        an :class:`openquake.commonlib.oqvalidation.OqParam` instance
    :param fname:
        the full path of the CSV file
    :returns:
        a triple (sitecol, etags, gmf array of shape (N, R)) read from a
        CSV file with format `etag indices [gmv1 ... gmvN] * num_imts`
    :raises InvalidFile:
        if the header coordinates, the indices column or the GMV columns
        are malformed, if there are fewer rows than expected, or if the
        etags are not sorted
    """
    with open(fname) as csvfile:
        firstline = next(csvfile)
        try:
            coords = valid.coordinates(firstline)
        except Exception:
            # NB: the original message was missing the space between
            # 'separated' and 'ordered'
            raise InvalidFile(
                'The first line of %s is expected to contain comma separated '
                'ordered coordinates, got %s instead' % (fname, firstline))
        sitecol = sitecol_from_coords(oqparam, coords)
        if not oqparam.imtls:
            oqparam.set_risk_imtls(get_risk_models(oqparam))
        imts = list(oqparam.imtls)
        # NOTE(review): bytes() field names look Python 2 specific — on
        # Python 3 bytes(str) raises TypeError; confirm target version
        imt_dt = numpy.dtype([(bytes(imt), F32) for imt in imts])
        num_gmfs = oqparam.number_of_ground_motion_fields
        gmf_by_imt = numpy.zeros((num_gmfs, len(sitecol)), imt_dt)
        etags = []
        lineno = 1  # if the file has only the header, the count check fires
        for lineno, line in enumerate(csvfile, 2):
            row = line.split(',')
            try:
                indices = list(map(valid.positiveint, row[1].split()))
            except Exception:
                raise InvalidFile(
                    'The second column in %s is expected to contain integer '
                    'indices, got %s' % (fname, row[1]))
            r_sites = (
                sitecol if not indices else
                site.FilteredSiteCollection(indices, sitecol))
            for i in range(len(imts)):
                try:
                    # i + 2 because the first 2 fields are etag and indices
                    array = numpy.array(valid.positivefloats(row[i + 2]))
                except Exception:
                    raise InvalidFile(
                        'The column #%d in %s is expected to contain positive '
                        'floats, got %s instead' % (i + 3, fname, row[i + 2]))
                gmf_by_imt[imts[i]][lineno - 2] = r_sites.expand(array, 0)
            etags.append(row[0])
        if lineno < num_gmfs + 1:
            raise InvalidFile('%s contains %d rows, expected %d' % (
                fname, lineno, num_gmfs + 1))
        if etags != sorted(etags):
            raise InvalidFile(
                'The etags in %s are not ordered: %s' % (fname, etags))
    return sitecol, numpy.array(etags, '|S100'), gmf_by_imt.T
def ffconvert(fname, limit_states, ff):
    """
    Convert a fragility function into a numpy array plus a bunch
    of attributes.

    :param fname: path to the fragility model file
    :param limit_states: expected limit states
    :param ff: fragility function node
    :returns: a pair (array, dictionary)
    :raises InvalidFile:
        if the limit states or the number of poes do not match the
        expectations
    """
    with context(fname, ff):
        ffs = ff[1:]
        imls = ff.imls
    with context(fname, imls):
        attrs = dict(format=ff['format'],
                     imt=imls['imt'],
                     nodamage=imls.attrib.get('noDamageLimit'))

    LS = len(limit_states)
    if LS != len(ffs):
        with context(fname, ff):
            raise InvalidFile('expected %d limit states, found %d' %
                              (LS, len(ffs)))
    if ff['format'] == 'continuous':
        attrs['minIML'] = float(imls['minIML'])
        attrs['maxIML'] = float(imls['maxIML'])
        array = numpy.zeros(LS, [('mean', F64), ('stddev', F64)])
        for i, ls, node in zip(range(LS), limit_states, ff[1:]):
            if ls != node['ls']:
                with context(fname, node):
                    # NB: the format string was missing the second %s,
                    # which made the raise itself fail with a TypeError
                    raise InvalidFile('expected %s, found %s' %
                                      (ls, node['ls']))
            array['mean'][i] = node['mean']
            array['stddev'][i] = node['stddev']
    elif ff['format'] == 'discrete':
        attrs['imls'] = valid.positivefloats(~imls)
        valid.check_levels(attrs['imls'], attrs['imt'])
        num_poes = len(attrs['imls'])
        array = numpy.zeros((LS, num_poes))
        for i, ls, node in zip(range(LS), limit_states, ff[1:]):
            with context(fname, node):
                if ls != node['ls']:
                    # NB: fixed format string, see above
                    raise InvalidFile('expected %s, found %s' %
                                      (ls, node['ls']))
                poes = (~node if isinstance(~node, list)
                        else valid.probabilities(~node))
                if len(poes) != num_poes:
                    # NB: fixed format string, see above
                    raise InvalidFile('expected %s, found %s' %
                                      (num_poes, len(poes)))
                array[i, :] = poes
    # NB: the format is constrained in nrml.FragilityNode to be either
    # discrete or continuous, there is no third option
    return array, attrs
def validate_uncertainty_value(self, node, branchset, value):
    """
    See superclass' method for description and signature specification.

    Checks that the following conditions are met:

    * For uncertainty of type "sourceModel": referenced file must exist
      and be readable. This is checked in
      :meth:`collect_source_model_data` along with saving the source
      model information.
    * For uncertainty of type "abGRAbsolute": value should be two float
      values.
    * For both absolute uncertainties: the source (only one) must
      be referenced in branchset's filter "applyToSources".
    * For all other cases: value should be a single float value.
    """
    float_re = re.compile(r'^(\+|\-)?(\d+|\d*\.\d+)$')
    utype = branchset.uncertainty_type
    if utype == 'sourceModel':
        self.collect_source_model_data(value)
        return
    if utype == 'abGRAbsolute':
        pair = value.split()
        if len(pair) == 2 and all(float_re.match(v) for v in pair):
            return
        raise ValidationError(
            node, self.filename,
            'expected a pair of floats separated by space')
    if utype == 'incrementalMFDAbsolute':
        pieces = value.split(',')
        if len(pieces) == 3:
            min_mag, bin_width, rates = pieces
            try:
                parsed = valid.positivefloats(rates)
            except ValueError:
                parsed = []
            if (float_re.match(min_mag) and float_re.match(bin_width)
                    and len(parsed)):
                return
        raise ValidationError(
            node, self.filename,
            'expected mfd in the form min_mag,bin_width,rate_1 rate_2 ...')
    if not float_re.match(value):
        raise ValidationError(node, self.filename,
                              'expected single float value')
def parse_uncertainty_value(self, node, branchset, value):
    """
    See superclass' method for description and signature specification.

    Doesn't change source model file name, converts other values to either
    pair of floats or a single float depending on uncertainty type.
    """
    utype = branchset.uncertainty_type
    if utype == "sourceModel":
        # file names are passed through untouched
        return value
    if utype == "abGRAbsolute":
        a_value, b_value = value.strip().split()
        return float(a_value), float(b_value)
    if utype == "incrementalMFDAbsolute":
        min_mag, bin_width, rates = value.strip().split(",")
        return float(min_mag), float(bin_width), valid.positivefloats(rates)
    # every remaining uncertainty type carries a single float
    return float(value)
def parse_uncertainty_value(self, node, branchset, value):
    """
    See superclass' method for description and signature specification.

    Doesn't change source model file name, converts other values to either
    pair of floats or a single float depending on uncertainty type.
    """
    kind = branchset.uncertainty_type
    if kind == 'sourceModel':
        return value  # file name, passed through untouched
    elif kind == 'abGRAbsolute':
        first, second = value.strip().split()
        return (float(first), float(second))
    elif kind == 'incrementalMFDAbsolute':
        fields = value.strip().split(',')
        min_mag, bin_width, rates = fields
        return (float(min_mag), float(bin_width),
                valid.positivefloats(rates))
    else:
        # every remaining uncertainty type carries a single float
        return float(value)
def get_gmfs(oqparam, sitecol):
    """
    Read ground motion fields from the CSV file in ``oqparam.inputs['gmfs']``.

    :param oqparam:
        an :class:`openquake.commonlib.oqvalidation.OqParam` instance
    :param sitecol:
        a SiteCollection instance with sites consistent with the CSV file
    :returns:
        a composite array of shape (N, R) read from a CSV file with format
        `tag indices [gmv1 ... gmvN] * num_imts`
    :raises InvalidFile:
        if the indices or GMV columns are malformed, there are fewer rows
        than expected, or the tags are not sorted
    """
    # NB: list() is required here: a dict keys view is not indexable
    # (imts[i] below) and matches the sibling readers in this module
    imts = list(oqparam.imtls)
    imt_dt = numpy.dtype([(imt, float) for imt in imts])
    num_gmfs = oqparam.number_of_ground_motion_fields
    gmf_by_imt = numpy.zeros((num_gmfs, len(sitecol)), imt_dt)
    tags = []
    fname = oqparam.inputs['gmfs']
    lineno = 0  # if the file is empty, the row-count check fires
    with open(fname) as csvfile:
        for lineno, line in enumerate(csvfile, 1):
            row = line.split(',')
            try:
                # list() forces eager evaluation: a lazy map would defer
                # errors outside the try and is always truthy even when
                # empty, bypassing the `if not indices` check below
                indices = list(map(valid.positiveint, row[1].split()))
            except Exception:
                raise InvalidFile(
                    'The second column in %s is expected to contain integer '
                    'indices, got %s instead' % (fname, row[1]))
            r_sites = (
                sitecol if not indices else
                site.FilteredSiteCollection(indices, sitecol))
            for i in range(len(imts)):
                try:
                    # i + 2 because the first 2 fields are tag and indices
                    array = numpy.array(valid.positivefloats(row[i + 2]))
                except Exception:
                    raise InvalidFile(
                        'The column #%d in %s is expected to contain positive '
                        'floats, got %s instead' % (i + 3, fname, row[i + 2]))
                gmf_by_imt[imts[i]][lineno - 1, :] = r_sites.expand(array, 0)
            tags.append(row[0])
        if lineno < num_gmfs:
            raise InvalidFile('%s contains %d rows, expected %d' % (
                fname, lineno, num_gmfs))
        if tags != sorted(tags):
            raise InvalidFile(
                'The tags in %s are not ordered: %s' % (fname, tags))
    return gmf_by_imt.T
def get_gmfs_from_csv(oqparam, sitecol, fname):
    """
    Read ground motion fields from a CSV file.

    :param oqparam:
        an :class:`openquake.commonlib.oqvalidation.OqParam` instance
    :param sitecol:
        a SiteCollection instance with sites consistent with the CSV file
    :param fname:
        the full path of the CSV file
    :returns:
        a triple (sitecol, tags, gmf array of shape (N, R)) read from a
        CSV file with format `tag indices [gmv1 ... gmvN] * num_imts`
    :raises InvalidFile:
        if the indices or GMV columns are malformed, there are fewer rows
        than expected, or the tags are not sorted
    """
    imts = list(oqparam.imtls)
    imt_dt = numpy.dtype([(imt, float) for imt in imts])
    num_gmfs = oqparam.number_of_ground_motion_fields
    gmf_by_imt = numpy.zeros((num_gmfs, len(sitecol)), imt_dt)
    tags = []
    lineno = 0  # if the file is empty, the row-count check fires
    with open(fname) as csvfile:
        for lineno, line in enumerate(csvfile, 1):
            row = line.split(',')
            try:
                indices = list(map(valid.positiveint, row[1].split()))
            except Exception:
                raise InvalidFile(
                    'The second column in %s is expected to contain integer '
                    'indices, got %s instead' % (fname, row[1]))
            r_sites = (
                sitecol if not indices else
                site.FilteredSiteCollection(indices, sitecol))
            for i in range(len(imts)):
                try:
                    # i + 2 because the first 2 fields are tag and indices
                    array = numpy.array(valid.positivefloats(row[i + 2]))
                except Exception:
                    raise InvalidFile(
                        'The column #%d in %s is expected to contain positive '
                        'floats, got %s instead' % (i + 3, fname, row[i + 2]))
                gmf_by_imt[imts[i]][lineno - 1] = r_sites.expand(array, 0)
            tags.append(row[0])
        if lineno < num_gmfs:
            raise InvalidFile('%s contains %d rows, expected %d' % (
                fname, lineno, num_gmfs))
        if tags != sorted(tags):
            raise InvalidFile(
                'The tags in %s are not ordered: %s' % (fname, tags))
    return sitecol, numpy.array(tags, '|S100'), gmf_by_imt.T
def validate_uncertainty_value(self, node, branchset, value):
    """
    See superclass' method for description and signature specification.

    Checks that the following conditions are met:

    * For uncertainty of type "sourceModel": referenced file must exist
      and be readable. This is checked in
      :meth:`collect_source_model_data` along with saving the source
      model information.
    * For uncertainty of type "abGRAbsolute": value should be two float
      values.
    * For both absolute uncertainties: the source (only one) must
      be referenced in branchset's filter "applyToSources".
    * For all other cases: value should be a single float value.
    """
    is_float = re.compile(r"^(\+|\-)?(\d+|\d*\.\d+)$").match
    kind = branchset.uncertainty_type
    if kind == "sourceModel":
        self.collect_source_model_data(value)
    elif kind == "abGRAbsolute":
        parts = value.split()
        if not (len(parts) == 2 and is_float(parts[0])
                and is_float(parts[1])):
            raise ValidationError(
                node, self.filename,
                "expected a pair of floats separated by space")
    elif kind == "incrementalMFDAbsolute":
        well_formed = False
        fields = value.split(",")
        if len(fields) == 3:
            min_mag, bin_width, rates = fields
            try:
                rates = valid.positivefloats(rates)
            except ValueError:
                rates = []
            well_formed = bool(is_float(min_mag) and is_float(bin_width)
                               and len(rates))
        if not well_formed:
            raise ValidationError(
                node, self.filename,
                "expected mfd in the form min_mag,bin_width,"
                "rate_1 rate_2 ...")
    elif not is_float(value):
        raise ValidationError(node, self.filename,
                              "expected single float value")
class VulnerabilityNode(LiteralNode):
    """
    Literal Node class used to validate discrete vulnerability functions
    """
    validators = {
        'vulnerabilitySetID': str,  # any ASCII string is fine
        'vulnerabilityFunctionID': str,  # any ASCII string is fine
        'assetCategory': str,  # any ASCII string is fine
        # the assetCategory here has nothing to do with the category
        # in the exposure model and it is not used by the engine
        'lossCategory': valid.utf8,  # a description field
        'IML': valid.IML,
        'imls': lambda text, imt: valid.positivefloats(text),
        'lr': valid.probability,
        'lossRatio': valid.positivefloats,
        'coefficientsVariation': valid.positivefloats,
        'probabilisticDistribution': valid.Choice('LN', 'BT'),
        'dist': valid.Choice('LN', 'BT', 'PM'),
        'meanLRs': valid.positivefloats,
        'covLRs': valid.positivefloats,
    }
class FragilityNode(LiteralNode):
    """
    Literal Node class used to validate fragility functions and
    consequence functions.
    """
    validators = {
        'id': valid.utf8,  # no constraints on the taxonomy
        'format': valid.ChoiceCI('discrete', 'continuous'),
        'assetCategory': valid.utf8,
        'dist': valid.Choice('LN'),
        'mean': valid.positivefloat,
        'stddev': valid.positivefloat,
        'lossCategory': valid.name,
        'poes': lambda text, **kw: valid.positivefloats(text),
        'IML': valid.IML,
        'minIML': valid.positivefloat,
        'maxIML': valid.positivefloat,
        'limitStates': valid.namelist,
        'description': valid.utf8_not_empty,
        'type': valid.ChoiceCI('lognormal'),
        'poEs': valid.probabilities,
        'noDamageLimit': valid.NoneOr(valid.positivefloat),
    }