def get_source_model_05(node, fname, converter=default):
    """
    Convert a NRML 0.5 ``sourceModel`` node into a :class:`SourceModel`.

    :param node: a sourceModel Node containing sourceGroup subnodes
    :param fname: path of the file being parsed (used in error messages)
    :param converter: the node converter to apply to each sourceGroup
    :returns: a SourceModel with the sorted, non-empty source groups
    :raises InvalidFile: if a subnode is not a sourceGroup, i.e. the file
        declares NRML 0.5 but actually uses the NRML 0.4 structure
    """
    converter.fname = fname
    groups = []  # expect a sequence of sourceGroup nodes
    for src_group in node:
        if 'sourceGroup' not in src_group.tag:
            raise InvalidFile(
                '%s: you have an incorrect declaration '
                'xmlns="http://openquake.org/xmlns/nrml/0.5"; it should be '
                'xmlns="http://openquake.org/xmlns/nrml/0.4"' % fname)
        sg = converter.convert_node(src_group)
        # a source group can be empty if the source_id filtering is on;
        # skip it, consistently with the other version of this function
        if len(sg):
            groups.append(sg)
    itime = node.get('investigation_time')
    if itime is not None:
        itime = valid.positivefloat(itime)
    stime = node.get('start_time')
    if stime is not None:
        stime = valid.positivefloat(stime)
    return SourceModel(sorted(groups), node.get('name'), itime, stime)
def __init__(self, src_groups, name=None, investigation_time=None,
             start_time=None):
    """
    :param src_groups: a sequence of source groups
    :param name: optional model name
    :param investigation_time: optional investigation time, validated
        as a positive float when not None
    :param start_time: optional start time, stored as given
        (NOTE(review): not validated here, unlike investigation_time —
        presumably the callers validate it; confirm)
    """
    self.src_groups = src_groups
    self.name = name
    # normalize the investigation time only when it is provided
    self.investigation_time = (
        None if investigation_time is None
        else valid.positivefloat(investigation_time))
    self.start_time = start_time
def get_source_model_05(node, fname, converter=default):
    """
    Build a :class:`SourceModel` from a NRML 0.5 ``sourceModel`` node.

    :param node: a sourceModel Node with sourceGroup subnodes
    :param fname: path of the file being parsed (used in error messages)
    :param converter: the node converter applied to each sourceGroup
    :returns: a SourceModel with the sorted, non-empty source groups
    :raises InvalidFile: if a subnode is not a sourceGroup (i.e. the file
        was declared as NRML 0.5 but has the 0.4 structure)
    """
    converter.fname = fname
    groups = []
    for subnode in node:
        if 'sourceGroup' not in subnode.tag:
            raise InvalidFile(
                '%s: you have an incorrect declaration '
                'xmlns="http://openquake.org/xmlns/nrml/0.5"; it should be '
                'xmlns="http://openquake.org/xmlns/nrml/0.4"' % fname)
        grp = converter.convert_node(subnode)
        # with source_id filtering on, a converted group may be empty
        if len(grp):
            groups.append(grp)
    itime = node.get('investigation_time')
    if itime is not None:
        itime = valid.positivefloat(itime)
    stime = node.get('start_time')
    if stime is not None:
        stime = valid.positivefloat(stime)
    return SourceModel(sorted(groups), node.get('name'), itime, stime)
def _read_csv(self, csvnames, dirname):
    """
    Read asset rows from one or more CSV files and yield them as nodes.

    :param csvnames: names of csv files, space separated
    :param dirname: the directory where the csv files are
    :yields: asset nodes
    """
    expected_header = self._csv_header()
    fnames = [os.path.join(dirname, f) for f in csvnames.split()]
    # first pass: validate the header of every file before reading any data
    for fname in fnames:
        with open(fname, encoding='utf-8') as f:
            fields = next(csv.reader(f))
            header = set(fields)
            if len(header) < len(fields):
                # a duplicated column name collapses in the set
                raise InvalidFile(
                    '%s: The header %s contains a duplicated field' %
                    (fname, header))
            elif expected_header - header:
                raise InvalidFile(
                    'Unexpected header in %s\nExpected: %s\nGot: %s' %
                    (fname, sorted(expected_header), sorted(header)))
    occupancy_periods = self.occupancy_periods.split()
    # second pass: build one asset node per data row
    for fname in fnames:
        with open(fname, encoding='utf-8') as f:
            for i, dic in enumerate(csv.DictReader(f), 1):
                asset = Node('asset', lineno=i)
                # `context` presumably enriches parsing errors with the
                # file name and line number — TODO confirm
                with context(fname, asset):
                    asset['id'] = dic['id']
                    asset['number'] = valid.positivefloat(dic['number'])
                    asset['taxonomy'] = dic['taxonomy']
                    if 'area' in dic:  # optional attribute
                        asset['area'] = dic['area']
                    loc = Node(
                        'location',
                        dict(lon=valid.longitude(dic['lon']),
                             lat=valid.latitude(dic['lat'])))
                    costs = Node('costs')
                    # one cost subnode per declared cost type
                    for cost in self.cost_types['name']:
                        a = dict(type=cost, value=dic[cost])
                        costs.append(Node('cost', a))
                    occupancies = Node('occupancies')
                    for period in occupancy_periods:
                        a = dict(occupants=float(dic[period]),
                                 period=period)
                        occupancies.append(Node('occupancy', a))
                    tags = Node('tags')
                    # taxonomy is stored as an attribute, not as a tag
                    for tagname in self.tagcol.tagnames:
                        if tagname != 'taxonomy':
                            tags.attrib[tagname] = dic[tagname]
                    asset.nodes.extend([loc, costs, occupancies, tags])
                # progress report for very large exposures
                if i % 100000 == 0:
                    logging.info('Read %d assets', i)
                yield asset
def _read_csv(self):
    """
    Read the asset rows from the CSV data files.

    :yields: asset nodes
    """
    expected_header = self._csv_header()
    # validate the header of every data file up front
    for fname in self.datafiles:
        with open(fname, encoding='utf-8') as f:
            fields = next(csv.reader(f))
            header = set(fields)
            if len(header) < len(fields):
                # a duplicated column collapses in the set
                raise InvalidFile(
                    '%s: The header %s contains a duplicated field' %
                    (fname, header))
            elif expected_header - header - {'exposure', 'country'}:
                raise InvalidFile(
                    'Unexpected header in %s\nExpected: %s\nGot: %s' %
                    (fname, sorted(expected_header), sorted(header)))
    periods = self.occupancy_periods.split()
    # then build one asset node per data row
    for fname in self.datafiles:
        with open(fname, encoding='utf-8') as f:
            for lineno, row in enumerate(csv.DictReader(f), 1):
                asset = Node('asset', lineno=lineno)
                with context(fname, asset):
                    asset['id'] = row['id']
                    asset['number'] = valid.positivefloat(row['number'])
                    asset['taxonomy'] = row['taxonomy']
                    if 'area' in row:  # optional attribute
                        asset['area'] = row['area']
                    location = Node('location', dict(
                        lon=valid.longitude(row['lon']),
                        lat=valid.latitude(row['lat'])))
                    costs = Node('costs')
                    for ctype in self.cost_types['name']:
                        attrs = dict(type=ctype, value=row[ctype])
                        if 'retrofitted' in row:  # optional column
                            attrs['retrofitted'] = row['retrofitted']
                        costs.append(Node('cost', attrs))
                    occupancies = Node('occupancies')
                    for period in periods:
                        occupancies.append(Node('occupancy', dict(
                            occupants=float(row[period]), period=period)))
                    tags = Node('tags')
                    # taxonomy/exposure/country are not stored as tags
                    for tagname in self.tagcol.tagnames:
                        if tagname not in ('taxonomy', 'exposure',
                                           'country'):
                            tags.attrib[tagname] = row[tagname]
                    asset.nodes.extend(
                        [location, costs, occupancies, tags])
                yield asset
def _read_csv(self):
    """
    Parse the exposure CSV data files.

    :yields: asset nodes, one per data row
    """
    expected = self._csv_header()
    optional = {'exposure', 'country'}
    # header validation happens before any row is read
    for csvname in self.datafiles:
        with open(csvname, encoding='utf-8') as f:
            fields = next(csv.reader(f))
            got = set(fields)
            if len(got) < len(fields):
                # duplicated column names disappear in the set
                raise InvalidFile(
                    '%s: The header %s contains a duplicated field' %
                    (csvname, got))
            elif expected - got - optional:
                raise InvalidFile(
                    'Unexpected header in %s\nExpected: %s\nGot: %s' %
                    (csvname, sorted(expected), sorted(got)))
    occupancy_periods = self.occupancy_periods.split()
    for csvname in self.datafiles:
        with open(csvname, encoding='utf-8') as f:
            for idx, dic in enumerate(csv.DictReader(f), 1):
                asset = Node('asset', lineno=idx)
                with context(csvname, asset):
                    asset['id'] = dic['id']
                    asset['number'] = valid.positivefloat(dic['number'])
                    asset['taxonomy'] = dic['taxonomy']
                    if 'area' in dic:  # optional attribute
                        asset['area'] = dic['area']
                    lonlat = dict(lon=valid.longitude(dic['lon']),
                                  lat=valid.latitude(dic['lat']))
                    loc = Node('location', lonlat)
                    costs = Node('costs')
                    for cost in self.cost_types['name']:
                        attrib = dict(type=cost, value=dic[cost])
                        if 'retrofitted' in dic:  # optional column
                            attrib['retrofitted'] = dic['retrofitted']
                        costs.append(Node('cost', attrib))
                    occupancies = Node('occupancies')
                    for period in occupancy_periods:
                        attrib = dict(occupants=float(dic[period]),
                                      period=period)
                        occupancies.append(Node('occupancy', attrib))
                    tags = Node('tags')
                    skip = ('taxonomy', 'exposure', 'country')
                    for tagname in self.tagcol.tagnames:
                        if tagname not in skip:
                            tags.attrib[tagname] = dic[tagname]
                    asset.nodes.extend([loc, costs, occupancies, tags])
                yield asset
def test_positivefloat(self):
    """Check valid.positivefloat on valid and invalid inputs."""
    # was valid.positiveint('1') — a copy-paste slip: this test targets
    # positivefloat, and '1' parses to the float 1.0 which equals 1
    self.assertEqual(valid.positivefloat('1'), 1)
    with self.assertRaises(ValueError):
        valid.positivefloat('-1')
    self.assertEqual(valid.positivefloat('1.1'), 1.1)
def damage_triple(value, ds, mean, stddev):
    """
    Parse a damage triple: the damage state is kept as-is, while the
    mean and stddev are converted to positive floats.

    :returns: a triple (ds, mean, stddev)
    """
    return (ds,
            valid.positivefloat(mean),
            valid.positivefloat(stddev))
def asset_mean_stddev(value, assetRef, mean, stdDev):
    """
    Parse an asset triple: the asset reference is kept as-is, while the
    mean and stdDev are converted to positive floats.

    :returns: a triple (assetRef, mean, stdDev)
    """
    return (assetRef,
            valid.positivefloat(mean),
            valid.positivefloat(stdDev))
def test_positivefloat(self):
    """Exercise valid.positivefloat on good and bad inputs."""
    # fixed: the original asserted valid.positiveint('1') here, which
    # belongs to a different test; this one is about positivefloat
    self.assertEqual(valid.positivefloat('1'), 1)
    with self.assertRaises(ValueError):
        valid.positivefloat('-1')
    self.assertEqual(valid.positivefloat('1.1'), 1.1)
def damage_triple(value, ds, mean, stddev):
    """
    Convert a (damage state, mean, stddev) triple; the numeric fields
    must be positive floats, the damage state passes through unchanged.
    """
    mean_ = valid.positivefloat(mean)
    stddev_ = valid.positivefloat(stddev)
    return ds, mean_, stddev_
def asset_mean_stddev(value, assetRef, mean, stdDev):
    """
    Convert an (asset reference, mean, stdDev) triple; the numeric
    fields must be positive floats, the reference passes through.
    """
    mean_ = valid.positivefloat(mean)
    stddev_ = valid.positivefloat(stdDev)
    return assetRef, mean_, stddev_