def upscale_hazardcategories(target_adminlevel_mnemonic):
    # Propagate hazard categories from child administrative divisions up to
    # their parents at the given admin level (e.g. u'PRO', u'COU').
    # identify the admin divisions matching the given level:
    admindivs = DBSession.query(AdministrativeDivision)\
        .join(AdminLevelType) \
        .filter(AdminLevelType.mnemonic == target_adminlevel_mnemonic)
    hazardtypes = DBSession.query(HazardType)

    # for each of these admin divisions and hazard types,
    # identify the highest HazardCategory among their children:
    for admindiv in admindivs:
        # identify max(level) for each hazardtype across children
        for hazardtype in hazardtypes:
            # NOTE(review): assumes HazardLevel.order ascends from most to
            # least severe, so .first() after order_by(asc) picks the most
            # severe category — confirm against the HazardLevel fixtures.
            hazardcategory = DBSession.query(HazardCategory) \
                .join((AdministrativeDivision,
                       HazardCategory.administrativedivisions)) \
                .join(HazardType) \
                .join(HazardLevel) \
                .filter(HazardType.id == hazardtype.id) \
                .filter(AdministrativeDivision.parent == admindiv) \
                .order_by(HazardLevel.order.asc()).first()
            if hazardcategory:
                # find the highest hazardlevel for all children admindivs
                print '[upscaling] admindiv {} inherits hazardlevel {} for {}'\
                    .format(admindiv.code,
                            hazardcategory.hazardlevel_id,
                            hazardcategory.hazardtype.mnemonic)
                admindiv.hazardcategories.append(hazardcategory)
                DBSession.add(admindiv)
def complete(hazardset_id=None, force=False, dry_run=False):
    """Try to complete hazardsets, one transaction per hazardset.

    :param hazardset_id: restrict the run to a single hazardset.
    :param force: reset completeness flags first and retry every hazardset.
    :param dry_run: abort instead of committing each transaction.
    """
    if force:
        try:
            clearall()
            if dry_run:
                transaction.abort()
            else:
                transaction.commit()
        except:
            # Re-raise after rollback: a failed reset must stop the run.
            transaction.abort()
            raise

    ids = DBSession.query(HazardSet.id)
    if not force:
        ids = ids.filter(HazardSet.complete.is_(False))
    if hazardset_id is not None:
        ids = ids.filter(HazardSet.id == hazardset_id)
    for id in ids:
        logger.info(' Working on hazardset {}'.format(id[0]))
        try:
            if not complete_hazardset(id[0]):
                # `id` is a one-element result row; use the scalar id[0]
                # (consistent with complete_hazardset above).
                hazardset = DBSession.query(HazardSet).get(id[0])
                if hazardset.processed:
                    # A hazardset that fails completion must not keep stale
                    # outputs from a previous processing run.
                    logger.info('Deleting {} previous outputs related '
                                'to this hazardset'.format(
                                    hazardset.outputs.count()))
                    hazardset.outputs.delete()
            if dry_run:
                transaction.abort()
            else:
                transaction.commit()
        except Exception as e:
            transaction.abort()
            logger.error(e.message)
def process_outputs(): print "Decision Tree running..." # first of all, remove all records # in the datamart table linking admin divs with hazard categories: hazardcategory_administrativedivision_table.delete() # identify the admin level for which we run the decision tree: # (REG)ion aka admin level 2 dt_level = DBSession.query(AdminLevelType)\ .filter(AdminLevelType.mnemonic == u'REG').one() # then, identify the unique (admindiv, hazardtype) tuples # contained in the Output table: admindiv_hazardtype_tuples = ( DBSession.query(AdministrativeDivision, HazardType).distinct() .join(Output) .join(HazardSet) .join(HazardType) # the following should not be necessary in production # because only the lowest admin levels should be inserted # in the Output table: .filter(AdministrativeDivision.leveltype_id == dt_level.id) # not necessary, but practical for debugging: .order_by(AdministrativeDivision.code) ) # for each tuple, identify the most relevant HazardSet # in the light of the criteria that we all agreed on (cf Decision Tree): for (admindiv, hazardtype) in admindiv_hazardtype_tuples: (hazardset, output) = DBSession.query(HazardSet, Output) \ .join(Output) \ .filter(Output.admin_id == admindiv.id) \ .filter(HazardSet.hazardtype_id == hazardtype.id) \ .order_by(HazardSet.calculation_method_quality.desc(), HazardSet.scientific_quality.desc(), HazardSet.local.desc(), HazardSet.data_lastupdated_date.desc()).first() print "[decision tree] admindiv {} gets hazardlevel {} from {} for {}"\ .format(admindiv.code, output.hazardlevel_id, hazardset.id, hazardtype.mnemonic) # find the relevant HazardCategory for the current hazardtype # and the hazardset's hazardlevel hazardcategory = DBSession.query(HazardCategory) \ .filter(HazardCategory.hazardtype_id == hazardtype.id) \ .filter(HazardCategory.hazardlevel_id == output.hazardlevel_id) \ .one() # append new hazardcategory to current admin div: admindiv.hazardcategories.append(hazardcategory) DBSession.add(admindiv) # UpScaling 
level2 (REG)ion -> level1 (PRO)vince upscale_hazardcategories(u'PRO') # UpScaling level1 (PRO)vince -> level0 (COU)ntry upscale_hazardcategories(u'COU') transaction.commit()
def populate():
    """Reset the relevant tables, then load datamart and processing fixtures."""
    # Same deletion order as before: outputs, divisions, layers, hazardsets.
    for model in (Output, AdministrativeDivision, Layer, HazardSet):
        DBSession.query(model).delete()
    populate_datamart()
    populate_processing()
    transaction.commit()
def populate():
    """Wipe fixture tables, then repopulate datamart and processing data."""
    # Deletion order: children (outputs, layers) before their hazardsets,
    # then administrative divisions.
    for model in (Output, Layer, HazardSet, AdministrativeDivision):
        DBSession.query(model).delete()
    populate_datamart()
    populate_processing()
    transaction.commit()
def download_layer(geonode_id):
    """Download one layer's data file from geonode to the local cache.

    :param geonode_id: primary key of the Layer to fetch.
    :raises Exception: if no Layer with that id exists.
    """
    layer = DBSession.query(Layer).get(geonode_id)
    if layer is None:
        raise Exception('Layer {} does not exist.'.format(geonode_id))

    logger.info('Downloading layer {}'.format(layer.name()))

    dir_path = os.path.dirname(layer_path(layer))
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)

    # NOTE(review): if the file already exists on disk, layer.downloaded is
    # intentionally left untouched — confirm this matches the caller's retry
    # logic.
    if not os.path.isfile(layer_path(layer)):
        h = Http()
        url = urlunsplit((geonode['scheme'],
                          geonode['netloc'],
                          layer.download_url,
                          '',
                          ''))
        logger.info('Retrieving {}'.format(url))
        response, content = h.request(url)

        # FIX: check the HTTP status so an error page is not written to
        # disk as if it were layer data (and downloaded wrongly set True).
        if int(response.status) != 200:
            logger.error('Download of layer {} failed with status {}'
                         .format(layer.name(), response.status))
            return

        try:
            with open(layer_path(layer), 'wb') as f:
                f.write(content)
            layer.downloaded = True
        except EnvironmentError:
            logger.error('Writing data from layer {} failed'.format(
                layer.name()))

    DBSession.flush()
def download(title=None, force=False, dry_run=False):
    """Download pending layers, committing (or aborting) one at a time."""
    # Forced runs first reset the download flags in their own transaction.
    if force:
        try:
            clearall()
            if dry_run:
                transaction.abort()
            else:
                transaction.commit()
        except:
            transaction.abort()
            raise

    query = DBSession.query(Layer.geonode_id)
    if not force:
        query = query.filter(Layer.downloaded.is_(False))
    if title is not None:
        query = query.filter(Layer.title == title)

    for geonode_id in query:
        try:
            download_layer(geonode_id)
            if dry_run:
                transaction.abort()
            else:
                transaction.commit()
        except Exception as e:
            transaction.abort()
            logger.error('{} raise an exception :\n{}'
                         .format(geonode_id, e.message))
def populate():
    """Start from clean tables, then load preprocessed fixtures."""
    # Children first (outputs, layers), then hazardsets and divisions.
    for model in (Output, Layer, HazardSet, AdministrativeDivision):
        DBSession.query(model).delete()
    populate_datamart()
    populate_notpreprocessed(notpreprocessed_type, notpreprocessed_unit)
    populate_preprocessed(preprocessed_type)
    transaction.commit()
def test_preprocessed_empty(self, open_mock):
    '''Test mask layer'''
    # A single all-nodata reader must produce no Output row at all.
    open_mock.side_effect = [
        global_reader(),
    ]
    process(hazardset_id='preprocessed')
    result = DBSession.query(Output).first()
    self.assertEqual(result, None)
def report_json(request):
    # JSON report endpoint: returns one GeoJSON-like feature per
    # administrative division (the requested division plus its direct
    # children), optionally carrying the hazard level for one hazard type.
    try:
        division_code = request.matchdict.get('divisioncode')
    except:
        # Converted to a 400 rather than a 500 for malformed requests.
        raise HTTPBadRequest(detail='incorrect value for parameter '
                             '"divisioncode"')

    try:
        # params.get() returns None when absent; float(None) raises here,
        # so a missing parameter also yields a 400.
        resolution = float(request.params.get('resolution'))
    except:
        raise HTTPBadRequest(detail='invalid value for parameter "resolution"')

    hazard_type = request.matchdict.get('hazardtype', None)

    # Match the division itself or any of its direct children:
    _filter = or_(AdministrativeDivision.code == division_code,
                  AdministrativeDivision.parent_code == division_code)

    # Simplify geometries (in web-mercator) proportionally to the
    # requested map resolution:
    simplify = func.ST_Simplify(
        func.ST_Transform(AdministrativeDivision.geom, 3857),
        resolution / 2)

    if hazard_type is not None:
        divisions = DBSession.query(AdministrativeDivision) \
            .add_columns(simplify, HazardLevel.mnemonic, HazardLevel.title) \
            .outerjoin(AdministrativeDivision.hazardcategories) \
            .join(HazardCategory) \
            .outerjoin(HazardType)\
            .outerjoin(HazardLevel) \
            .filter(and_(_filter, HazardType.mnemonic == hazard_type))
    else:
        # No hazard type requested: emit placeholder level columns so the
        # row shape matches the branch above.
        divisions = DBSession.query(AdministrativeDivision) \
            .add_columns(simplify, literal_column("'None'"),
                         literal_column("'blah'")) \
            .filter(_filter)

    return [{
        'type': 'Feature',
        'geometry': to_shape(geom_simplified),
        'properties': {
            'name': division.name,
            'code': division.code,
            'hazardLevelMnemonic': hazardlevel_mnemonic,
            'hazardLevelTitle': hazardlevel_title
        }
    } for division, geom_simplified, hazardlevel_mnemonic, hazardlevel_title
        in divisions]
def add_resource(hazard_type, resource):
    """Attach *resource* to every HazardCategory of the given hazard type."""
    matching_categories = DBSession.query(HazardCategory) \
        .join(HazardType) \
        .filter(HazardType.mnemonic == hazard_type)
    for matching_category in matching_categories:
        link = HazardCategoryFurtherResourceAssociation(order=1)
        link.hazardcategory = matching_category
        resource.hazardcategory_associations.append(link)
    DBSession.add(resource)
def test_preprocessed_hig(self, open_mock):
    '''Test mask layer'''
    # Feed the first raster value configured for level HIG and expect
    # the resulting output to carry that level.
    hazardtype = HazardType.get(preprocessed_type)
    type_settings = settings['hazard_types'][hazardtype.mnemonic]
    hig_value = type_settings['values']['HIG'][0]
    open_mock.side_effect = [
        global_reader(hig_value),
    ]
    process(hazardset_id='preprocessed')
    first_output = DBSession.query(Output).first()
    self.assertEqual(first_output.hazardlevel.mnemonic, u'HIG')
def test_process_medium(self, open_mock):
    '''Test value > threshold in MED layer'''
    # Only the MED raster (second reader) carries an above-threshold value.
    open_mock.side_effect = [
        rasterio_open(global_reader()),
        rasterio_open(global_reader(100.0)),
        rasterio_open(global_reader()),
    ]
    process(force=True)
    first_output = DBSession.query(Output).first()
    self.assertEqual(first_output.hazardlevel.mnemonic, 'MED')
def test_process_hig(self, open_mock):
    '''Test value > threshold in HIG layer'''
    # First (HIG) reader is above threshold; the three others are empty.
    side_effect = [global_reader(100.0)]
    side_effect.extend(global_reader() for _ in range(3))
    open_mock.side_effect = side_effect
    process(hazardset_id='notpreprocessed')
    first_output = DBSession.query(Output).first()
    self.assertEqual(first_output.hazardlevel.mnemonic, u'HIG')
def test_process_vlo(self, open_mock):
    '''Test value < threshold <=> hazardlevel VLO'''
    # First (HIG) reader holds data below threshold; others are empty,
    # so the division falls through to very-low.
    side_effect = [global_reader(0.0)]
    side_effect.extend(global_reader() for _ in range(3))
    open_mock.side_effect = side_effect
    process(hazardset_id='notpreprocessed')
    first_output = DBSession.query(Output).first()
    self.assertEqual(first_output.hazardlevel.mnemonic, 'VLO')
def test_process_empty(self, open_mock):
    '''Test nodata everywhere'''
    # All four readers are empty: no Output row must be created.
    open_mock.side_effect = [global_reader() for _ in range(4)]
    process(hazardset_id='notpreprocessed')
    first_output = DBSession.query(Output).first()
    self.assertEqual(first_output, None)
def process(hazardset_id=None, force=False): hazardsets = DBSession.query(HazardSet) hazardsets = hazardsets.filter(HazardSet.complete.is_(True)) if hazardset_id is not None: hazardsets = hazardsets.filter(HazardSet.id == hazardset_id) if not force: hazardsets = hazardsets.filter(HazardSet.processed.is_(False)) if hazardsets.count() == 0: print 'No hazardsets to process' return for hazardset in hazardsets: process_hazardset(hazardset, force=force)
def test_process_nodata(self, open_mock):
    '''Test nodata everywhere'''
    open_mock.side_effect = [
        rasterio_open(global_reader()),
        rasterio_open(global_reader()),
        rasterio_open(global_reader()),
    ]
    # The mask fixture reports neither return period nor value:
    rasterio_open.return_period = None
    rasterio_open.value = None
    process(force=True)
    first_output = DBSession.query(Output).first()
    self.assertEqual(first_output.hazardlevel.mnemonic, 'VLO')
def administrativedivision(request):
    """Autocomplete administrative divisions by name, mixing admin levels."""
    if 'q' not in request.params:
        raise HTTPBadRequest(detail='parameter "q" is missing')
    pattern = '%%%s%%' % request.params['q']

    def _matches(leveltype_id):
        # Up to 10 name matches for one admin level.
        return DBSession.query(AdministrativeDivision).filter(
            and_(AdministrativeDivision.leveltype_id == leveltype_id,
                 AdministrativeDivision.name.ilike(pattern))).limit(10)

    # Interleave the three levels fairly, then order by level.
    data = sorted(_roundrobin(_matches(1), _matches(2), _matches(3), limit=10),
                  key=lambda o: o.leveltype_id)
    return {'data': data}
def process(hazardset_id=None, force=False, dry_run=False):
    """Process complete hazardsets, one transaction per hazardset.

    :param hazardset_id: restrict the run to a single hazardset.
    :param force: reprocess hazardsets already flagged processed.
    :param dry_run: abort instead of committing each transaction.
    """
    ids = DBSession.query(HazardSet.id) \
        .filter(HazardSet.complete.is_(True))
    if hazardset_id is not None:
        ids = ids.filter(HazardSet.id == hazardset_id)
    if not force:
        ids = ids.filter(HazardSet.processed.is_(False))
    if ids.count() == 0:
        logger.info('No hazardset to process')
        return
    for id in ids:
        logger.info(id[0])
        try:
            process_hazardset(id[0], force=force)
            if dry_run:
                # FIX: was misspelled 'Abording transaction'
                logger.info(' Aborting transaction')
                transaction.abort()
            else:
                logger.info(' Committing transaction')
                transaction.commit()
        except Exception:
            # Roll back this hazardset only and continue with the next one.
            transaction.abort()
            logger.error(traceback.format_exc())
def process_hazardset(hazardset, force=False):
    # Compute one Output row (a hazard level) per REG administrative
    # division for the given hazardset, by reading its HIG/MED/LOW rasters
    # and comparing pixel values against per-unit thresholds.
    print hazardset.id
    chrono = datetime.datetime.now()
    last_percent = 0
    level_VLO = HazardLevel.get(u'VLO')

    # NOTE(review): hazardset.id in this message would itself raise
    # AttributeError when hazardset is None — check happens after use.
    if hazardset is None:
        raise ProcessException('HazardSet {} does not exist.'
                               .format(hazardset.id))
    if hazardset.processed:
        if force:
            hazardset.processed = False
        else:
            raise ProcessException('HazardSet {} has already been processed.'
                                   .format(hazardset.id))

    # clean previous outputs
    DBSession.query(Output) \
        .filter(Output.hazardset_id == hazardset.id) \
        .delete()
    DBSession.flush()

    hazardtype = hazardset.hazardtype
    hazardtype_settings = settings['hazard_types'][hazardtype.mnemonic]
    thresholds = hazardtype_settings['thresholds']

    # Reproject division geometries from web-mercator to lon/lat so they
    # can be compared with the raster extents:
    project = partial(
        pyproj.transform,
        pyproj.Proj(init='epsg:3857'),
        pyproj.Proj(init='epsg:4326'))

    # One layer per severity level; .one() raises if a level is missing.
    layers = {}
    for level in (u'HIG', u'MED', u'LOW'):
        hazardlevel = HazardLevel.get(level)
        layer = DBSession.query(Layer) \
            .filter(Layer.hazardset_id == hazardset.id) \
            .filter(Layer.hazardlevel_id == hazardlevel.id) \
            .one()
        layers[level] = layer

    with rasterio.drivers():
        with rasterio.open(layers['HIG'].path()) as src_hig, \
                rasterio.open(layers['MED'].path()) as src_med, \
                rasterio.open(layers['LOW'].path()) as src_low:
            readers = {}
            readers['HIG'] = src_hig
            readers['MED'] = src_med
            readers['LOW'] = src_low

            # Union of the three raster footprints:
            polygon_hig = polygonFromBounds(src_hig.bounds)
            polygon_med = polygonFromBounds(src_med.bounds)
            polygon_low = polygonFromBounds(src_low.bounds)
            polygon = cascaded_union((
                polygon_hig,
                polygon_med,
                polygon_low))

            adminlevel_REG = AdminLevelType.get(u'REG')

            admindivs = DBSession.query(AdministrativeDivision) \
                .filter(AdministrativeDivision.leveltype_id ==
                        adminlevel_REG.id)

            if hazardset.local:
                # Local hazardsets: only keep divisions intersecting the
                # raster footprint (both filters express the same test).
                admindivs = admindivs \
                    .filter(
                        func.ST_Transform(AdministrativeDivision.geom, 4326)
                        .intersects(
                            func.ST_GeomFromText(polygon.wkt, 4326))) \
                    .filter(func.ST_Intersects(
                        func.ST_Transform(AdministrativeDivision.geom, 4326),
                        func.ST_GeomFromText(polygon.wkt, 4326)))

            current = 0
            outputs = 0
            total = admindivs.count()
            for admindiv in admindivs:
                # print ' ', admindiv.id, admindiv.code, admindiv.name
                current += 1
                if admindiv.geom is None:
                    print ' ', ('{}-{} has null geometry'
                                .format(admindiv.code, admindiv.name))
                    continue

                reprojected = transform(
                    project,
                    to_shape(admindiv.geom))

                output = Output()
                output.hazardset = hazardset
                output.administrativedivision = admindiv
                output.hazardlevel = None

                # TODO: calculate coverage ratio
                output.coverage_ratio = 100

                # Walk levels from most to least severe and stop at the
                # first one whose masked pixels exceed the threshold:
                for level in (u'HIG', u'MED', u'LOW'):
                    layer = layers[level]
                    src = readers[level]

                    if not reprojected.intersects(polygon):
                        continue
                    window = src.window(*reprojected.bounds)
                    data = src.read(1, window=window, masked=True)
                    if data.shape[0] * data.shape[1] == 0:
                        continue

                    threshold = thresholds[layer.hazardunit]
                    positive_data = (data > threshold).astype(rasterio.uint8)

                    # Rasterize the division to mask pixels outside it:
                    division = features.rasterize(
                        ((g, 1) for g in [reprojected]),
                        out_shape=data.shape,
                        transform=src.window_transform(window),
                        all_touched=True)

                    masked = numpy.ma.masked_array(
                        positive_data,
                        mask=~division.astype(bool))

                    # Entirely masked window => nodata for this division:
                    if str(numpy.max(masked)) == str(numpy.ma.masked):
                        break
                    else:
                        # Division has data: at least very-low level.
                        if output.hazardlevel is None:
                            output.hazardlevel = level_VLO

                    if numpy.max(masked) > 0:
                        output.hazardlevel = layer.hazardlevel
                        break

                if output.hazardlevel is not None:
                    # print ' hazardlevel :', output.hazardlevel.mnemonic
                    DBSession.add(output)
                    outputs += 1

                percent = int(100.0 * current / total)
                if percent % 10 == 0 and percent != last_percent:
                    print ' ... processed {}%'.format(percent)
                    last_percent = percent
                pass

    hazardset.processed = True
    DBSession.flush()

    transaction.commit()

    print ('Successfully processed {} divisions, {} outputs generated in {}'
           .format(total, outputs, datetime.datetime.now() - chrono))
def complete_hazardset(hazardset_id, dry_run=False):
    # Verify a hazardset has all required, downloaded layers, then
    # aggregate layer metadata onto it and flag it complete.
    # Returns True on success, False (with a log line) on any missing piece.
    # NOTE(review): the dry_run parameter is currently unused here.
    hazardset = DBSession.query(HazardSet).get(hazardset_id)
    if hazardset is None:
        raise Exception('Hazardset {} does not exist.'
                        .format(hazardset_id))

    hazardtype = hazardset.hazardtype
    type_settings = settings['hazard_types'][hazardtype.mnemonic]
    # A 'values' mapping in the settings marks a preprocessed hazard type
    # (single layer) as opposed to one layer per severity level.
    preprocessed = 'values' in type_settings

    layers = []
    if preprocessed:
        if len(hazardset.layers) == 0:
            logger.info(' No layer found')
            return False
        layers.append(hazardset.layers[0])
    else:
        for level in (u'LOW', u'MED', u'HIG'):
            layer = hazardset.layerByLevel(level)
            if layer is None:
                logger.info(' No layer for level {}'.format(level))
                return False
            layers.append(layer)
        if ('mask_return_period' in type_settings):
            layer = DBSession.query(Layer) \
                .filter(Layer.hazardset_id == hazardset_id) \
                .filter(Layer.mask.is_(True))
            if layer.count() == 0:
                logger.info(' No mask layer')
                return False
            layers.append(layer.one())

    # Every required layer must have its data file downloaded:
    for layer in layers:
        if not layer.downloaded:
            logger.info(' No data for layer {}'.format(layer.name()))
            return False

    # Aggregate quality/date metadata, grouped by locality: more than one
    # group means local and global layers are mixed in one hazardset.
    stats = DBSession.query(
        Layer.local,
        func.min(Layer.data_lastupdated_date),
        func.min(Layer.metadata_lastupdated_date),
        func.min(Layer.calculation_method_quality),
        func.min(Layer.scientific_quality)) \
        .filter(Layer.hazardset_id == hazardset.id) \
        .group_by(Layer.local)

    if stats.count() > 1:
        logger.warning(' Mixed local and global layers')
        return False

    stat = stats.one()

    hazardset.local = stat[0]
    hazardset.data_lastupdated_date = stat[1]
    hazardset.metadata_lastupdated_date = stat[2]
    hazardset.calculation_method_quality = stat[3]
    hazardset.scientific_quality = stat[4]
    hazardset.complete = True
    DBSession.flush()

    logger.info(' Completed')
    return True
def clearall():
    """Reset the completeness flag on every hazardset.

    The log message — and the `complete()` caller, which selects hazardsets
    with `HazardSet.complete.is_(False)` — show the intent is to mark every
    hazardset incomplete; the previous code reset `processed` instead.
    """
    logger.info('Reset all hazardsets to incomplete state')
    DBSession.query(HazardSet).update({
        HazardSet.complete: False
    })
    DBSession.flush()
def layerByLevel(self, level):
    """Return this hazardset's layer for the given level mnemonic, or None."""
    hazardlevel = HazardLevel.get(level)
    query = DBSession.query(Layer) \
        .filter(Layer.hazardset_id == self.id) \
        .filter(Layer.hazardlevel_id == hazardlevel.id)
    return query.one_or_none()
def import_further_resources():
    # (Re)populate the FurtherResource table: wipe existing resources and
    # their hazard-category associations, then insert a curated list of
    # external links, each attached to one or more hazard types.
    with transaction.manager:
        DBSession.query(HazardCategoryFurtherResourceAssociation).delete()
        DBSession.query(FurtherResource).delete()
        DBSession.flush()

        further_resource = FurtherResource(**{
            'text': u'Educational web resources on earthquakes and seismic hazard',  # NOQA
            'url': u'http://earthquake.usgs.gov/learn/?source=sitemap'
        })
        add_resource(u'EQ', further_resource)
        add_resource(u'FL', further_resource)

        further_resource = FurtherResource(**{
            'text': u'Publications by GFDRR, providing information on disaster risk assessment, risk reduction and preparedness',  # NOQA
            'url': u'https://www.gfdrr.org/publications'
        })
        add_resource(u'EQ', further_resource)

        further_resource = FurtherResource(**{
            'text': u'The Aqueduct Global Flood Analyzer. Note that this tool only provides information about river flooding (not coastal, pluvial or flash flooding)',  # NOQA
            'url': u'http://www.wri.org/floods'
        })
        add_resource(u'FL', further_resource)

        further_resource = FurtherResource(**{
            'text': u'The Climate app, providing information on possible risk reducing intervention measures under site-specific conditions',  # NOQA
            'url': u'http://www.climateapp.nl/'
        })
        add_resource(u'FL', further_resource)

        further_resource = FurtherResource(**{
            'text': u'RIMAROCC, providing a risk management framework for road managers and operators dealing with climate change',  # NOQA
            'url': u'http://www.cedr.fr/home/index.php?id=251&dlpath=2008%20Call%20Road%20Owners%20Getting%20to%20Grips%20with%20Climate%20Change%2FRIMAROCC&cHash=0d3ce2ac10a4d935d9012c515d8e1dc3'  # NOQA
        })
        add_resource(u'FL', further_resource)

        further_resource = FurtherResource(**{
            'text': u'FLOOD PROBE, research on technologies for the cost-effective flood protection of the built environment',  # NOQA
            'url': u'http://www.floodprobe.eu'
        })
        add_resource(u'FL', further_resource)

        further_resource = FurtherResource(**{
            'text': u'Climate Change Knowledge Portal',
            'url': u'http://sdwebx.worldbank.org/climateportal/'
        })
        add_resource(u'CY', further_resource)
        add_resource(u'DG', further_resource)
        add_resource(u'CF', further_resource)

        further_resource = FurtherResource(**{
            'text': u'Drought Risk Reduction: Framework and Practices',
            'url': u'https://www.gfdrr.org/sites/gfdrr/files/publication/Drought%20Risk%20Reduction-Contributing%20to%20the%20Implementation%20of%20the%20Hyogo%20Framework%20for%20Action.pdf'  # NOQA
        })
        add_resource(u'DG', further_resource)

        further_resource = FurtherResource(**{
            'text': u'Learning from Megadisasters: Lessons from the Great Japan Earthquake',  # NOQA
            'url': u'https://www.gfdrr.org/sites/gfdrr/files/publication/Learning%20from%20Megadisasters%20%20Lessons%20from%20the%20Great%20East%20Japan%20Earthquake.pdf'  # NOQA
        })
        add_resource(u'TS', further_resource)

        further_resource = FurtherResource(**{
            'text': u'Weather and Climate Resilience: Effective Preparedness through National Meteorological and Hydrological Services',  # NOQA
            'url': u'https://www.gfdrr.org/sites/gfdrr/files/publication/Weather_and_Climate_Resilience_2013.pdf'  # NOQA
        })
        add_resource(u'CY', further_resource)

        further_resource = FurtherResource(**{
            'text': u'Volcano Observatory database - World Organization of Volcano Observatories (WOVO)',  # NOQA
            'url': u'http://www.wovo.org/observatories/'
        })
        add_resource(u'VA', further_resource)

        further_resource = FurtherResource(**{
            'text': u'Database of Volcanoes - Global Volcansim Program',
            'url': u'http://www.volcano.si.edu/search_volcano.cfm'
        })
        add_resource(u'VA', further_resource)

        further_resource = FurtherResource(**{
            'text': u'Tsunami Runup database - NGDC',
            'url': u'http://ngdc.noaa.gov/nndc/struts/form?t=101650&s=167&d=166'  # NOQA
        })
        add_resource(u'TS', further_resource)

        further_resource = FurtherResource(**{
            'text': u'Volcanic Ash: what it can do and how to prevent damage',
            'url': u'http://volcanoes.usgs.gov/ash/index.html'
        })
        add_resource(u'VA', further_resource)

        further_resource = FurtherResource(**{
            'text': u'Preparing your community for tsunamis',
            'url': u'http://geohaz.org/wp/wp-content/uploads/2015/05/PreparingYourCommunityforTsunamisVersion2-1.pdf'  # NOQA
        })
        add_resource(u'TS', further_resource)

        further_resource = FurtherResource(**{
            'text': u'National Drought Management Policy Guidelines: A Template for Action',  # NOQA
            'url': u'http://www.droughtmanagement.info/literature/IDMP_NDMPG_en.pdf'  # NOQA
        })
        add_resource(u'DG', further_resource)

        further_resource = FurtherResource(**{
            'text': u'World Bank Publication Series: Turn Down the Heat',
            'url': u'http://www.worldbank.org/en/topic/climatechange/publication/turn-down-the-heat'  # NOQA
        })
        add_resource(u'CF', further_resource)
def clearall():
    """Drop all download/processing data: outputs, layers and hazardsets."""
    logger.info('Cleaning previous data')
    for model in (Output, Layer, HazardSet):
        DBSession.query(model).delete()
    DBSession.flush()
def populate_processing():
    # Build the decision-tree test fixtures: six hazardsets with varying
    # quality ratings / dates / locality, plus Output rows wired so that
    # each admin division (codes 30-35) exercises one branch of the tree.
    hazardtypeEQ = HazardType.get(u'EQ')
    hazardtypeFL = HazardType.get(u'FL')

    hazardset_id = u'hazardset1'
    print 'Populating hazardset {}'.format(hazardset_id)
    hazardset1 = HazardSet()
    hazardset1.id = hazardset_id
    hazardset1.hazardtype = hazardtypeEQ
    hazardset1.local = False
    hazardset1.calculation_method_quality = 0
    hazardset1.data_lastupdated_date = datetime.now()
    hazardset1.metadata_lastupdated_date = datetime.now()
    hazardset1.complete = True
    hazardset1.layers.extend(make_layers())
    DBSession.add(hazardset1)

    hazardset_id = u'hazardset1b'
    print 'Populating hazardset {}'.format(hazardset_id)
    hazardset1b = HazardSet()
    hazardset1b.id = hazardset_id
    hazardset1b.hazardtype = hazardtypeFL
    hazardset1b.local = True
    hazardset1b.calculation_method_quality = 3
    hazardset1b.data_lastupdated_date = datetime.now()
    hazardset1b.metadata_lastupdated_date = datetime.now()
    hazardset1b.complete = True
    hazardset1b.layers.extend(make_layers())
    DBSession.add(hazardset1b)

    # create hazardsets 2, 3 ... 5
    hazardset_id = u'hazardset2'
    # calculation_method_quality = 1 / scientific_quality = 0
    print 'Populating hazardset {}'.format(hazardset_id)
    hazardset2 = HazardSet()
    hazardset2.id = hazardset_id
    hazardset2.hazardtype = hazardtypeEQ
    hazardset2.local = True
    hazardset2.calculation_method_quality = 1
    hazardset2.scientific_quality = 0
    hazardset2.data_lastupdated_date = datetime.now()
    hazardset2.metadata_lastupdated_date = datetime.now()
    hazardset2.complete = True
    hazardset2.layers.extend(make_layers())
    DBSession.add(hazardset2)

    hazardset_id = u'hazardset3'
    # date = 2015-01-01 / global /
    # calculation_method_quality = 1 / scientific_quality = 2
    print 'Populating hazardset {}'.format(hazardset_id)
    hazardset3 = HazardSet()
    hazardset3.id = hazardset_id
    hazardset3.hazardtype = hazardtypeEQ
    hazardset3.local = False
    hazardset3.calculation_method_quality = 1
    hazardset3.scientific_quality = 2
    hazardset3.data_lastupdated_date = datetime(2015, 1, 1, 0, 0)
    hazardset3.metadata_lastupdated_date = datetime.now()
    hazardset3.complete = True
    hazardset3.layers.extend(make_layers())
    DBSession.add(hazardset3)

    hazardset_id = u'hazardset4'
    # date = 2015-01-01 / local /
    # calculation_method_quality = 1 / scientific_quality = 2
    print 'Populating hazardset {}'.format(hazardset_id)
    hazardset4 = HazardSet()
    hazardset4.id = hazardset_id
    hazardset4.hazardtype = hazardtypeEQ
    hazardset4.local = True
    hazardset4.calculation_method_quality = 1
    hazardset4.scientific_quality = 2
    hazardset4.data_lastupdated_date = datetime(2015, 1, 1, 0, 0)
    hazardset4.metadata_lastupdated_date = datetime.now()
    hazardset4.complete = True
    hazardset4.layers.extend(make_layers())
    DBSession.add(hazardset4)

    hazardset_id = u'hazardset5'
    # date = now() / local /
    # calculation_method_quality = 1 / scientific_quality = 2
    print 'Populating hazardset {}'.format(hazardset_id)
    hazardset5 = HazardSet()
    hazardset5.id = hazardset_id
    hazardset5.hazardtype = hazardtypeEQ
    hazardset5.local = True
    hazardset5.calculation_method_quality = 1
    hazardset5.scientific_quality = 2
    hazardset5.data_lastupdated_date = datetime.now()
    hazardset5.metadata_lastupdated_date = datetime.now()
    hazardset5.complete = True
    hazardset5.layers.extend(make_layers())
    DBSession.add(hazardset5)

    hazardset_id = u'hazardset6'
    # date = now() / global /
    # calculation_method_quality = 1 / scientific_quality = 2
    print 'Populating hazardset {}'.format(hazardset_id)
    hazardset6 = HazardSet()
    hazardset6.id = hazardset_id
    hazardset6.hazardtype = hazardtypeEQ
    hazardset6.local = False
    hazardset6.calculation_method_quality = 1
    hazardset6.scientific_quality = 2
    hazardset6.data_lastupdated_date = datetime.now()
    hazardset6.metadata_lastupdated_date = datetime.now()
    hazardset6.complete = True
    hazardset6.layers.extend(make_layers())
    DBSession.add(hazardset6)

    # populate output table

    # admin div (code 30) has only one hazardset for EQ
    admin30 = DBSession.query(AdministrativeDivision)\
        .filter(AdministrativeDivision.code == 30).one()
    # => test outcome = hazardset1
    output1 = Output()
    output1.coverage_ratio = 10
    output1.hazardset = hazardset1
    output1.administrativedivision = admin30
    output1.hazardlevel = HazardLevel.get(u'HIG')
    DBSession.add(output1)

    # admin div (code 30) also has another hazardset
    # but this one is for FL
    # => test outcome = hazardset1b
    output1b = Output()
    output1b.coverage_ratio = 11
    output1b.hazardset = hazardset1b
    output1b.administrativedivision = admin30
    output1b.hazardlevel = HazardLevel.get(u'NPR')
    DBSession.add(output1b)

    # admin div (code 31) has 2 hazardsets,
    # one with a higher calculation_method_quality
    # => test outcome = hazardset2
    admin31 = DBSession.query(AdministrativeDivision)\
        .filter(AdministrativeDivision.code == 31).one()
    output2 = Output()
    output2.coverage_ratio = 20
    output2.hazardset = hazardset1
    # calculation_method_quality = 0
    output2.administrativedivision = admin31
    output2.hazardlevel = HazardLevel.get(u'MED')
    DBSession.add(output2)
    output3 = Output()
    output3.coverage_ratio = 30
    output3.hazardset = hazardset2
    # calculation_method_quality = 1
    output3.administrativedivision = admin31
    output3.hazardlevel = HazardLevel.get(u'LOW')
    DBSession.add(output3)

    # admin div (code 32) has 2 hazardsets,
    # both share the same calculation_method_quality,
    # one with a higher scientific_quality
    # => test outcome = hazardset3
    admin32 = DBSession.query(AdministrativeDivision)\
        .filter(AdministrativeDivision.code == 32).one()
    output4 = Output()
    output4.coverage_ratio = 40
    output4.hazardset = hazardset2
    # calculation_method_quality = 1 / scientific_quality = 0
    output4.administrativedivision = admin32
    output4.hazardlevel = HazardLevel.get(u'MED')
    DBSession.add(output4)
    output5 = Output()
    output5.coverage_ratio = 50
    output5.hazardset = hazardset3
    # calculation_method_quality = 1 / scientific_quality = 2
    output5.administrativedivision = admin32
    output5.hazardlevel = HazardLevel.get(u'LOW')
    DBSession.add(output5)

    # admin div (code 33) has 2 hazardsets,
    # both share the same ratings, one is global, one local
    # => test outcome = hazardset4
    admin33 = DBSession.query(AdministrativeDivision)\
        .filter(AdministrativeDivision.code == 33).one()
    output6 = Output()
    output6.coverage_ratio = 60
    output6.hazardset = hazardset3
    # global / calculation_method_quality = 1 / scientific_quality = 2
    output6.administrativedivision = admin33
    output6.hazardlevel = HazardLevel.get(u'MED')
    DBSession.add(output6)
    output7 = Output()
    output7.coverage_ratio = 70
    output7.hazardset = hazardset4
    # local / calculation_method_quality = 1 / scientific_quality = 2
    output7.administrativedivision = admin33
    output7.hazardlevel = HazardLevel.get(u'LOW')
    DBSession.add(output7)

    # admin div (code 34) has 2 hazardsets,
    # both share the same ratings, are local, one is more recent
    # => test outcome = hazardset5
    admin34 = DBSession.query(AdministrativeDivision)\
        .filter(AdministrativeDivision.code == 34).one()
    output8 = Output()
    output8.coverage_ratio = 80
    output8.hazardset = hazardset4
    # date = 2015-01-01 / local /
    # calculation_method_quality = 1 / scientific_quality = 2
    output8.administrativedivision = admin34
    output8.hazardlevel = HazardLevel.get(u'MED')
    DBSession.add(output8)
    output9 = Output()
    output9.coverage_ratio = 90
    output9.hazardset = hazardset5
    # date = now() / local /
    # calculation_method_quality = 1 / scientific_quality = 2
    output9.administrativedivision = admin34
    output9.hazardlevel = HazardLevel.get(u'LOW')
    DBSession.add(output9)

    # admin div (code 35) has 2 hazardsets,
    # both share the same ratings, are global, one is more recent
    # => test outcome = hazardset6
    admin35 = DBSession.query(AdministrativeDivision)\
        .filter(AdministrativeDivision.code == 35).one()
    output10 = Output()
    output10.coverage_ratio = 95
    output10.hazardset = hazardset3
    # date = 2015-01-01 / global /
    # calculation_method_quality = 1 / scientific_quality = 2
    output10.administrativedivision = admin35
    output10.hazardlevel = HazardLevel.get(u'MED')
    DBSession.add(output10)
    output11 = Output()
    output11.coverage_ratio = 99
    output11.hazardset = hazardset6
    # date = now() / global /
    # calculation_method_quality = 1 / scientific_quality = 2
    output11.administrativedivision = admin35
    output11.hazardlevel = HazardLevel.get(u'LOW')
    DBSession.add(output11)

    DBSession.flush()
def process_hazardset(hazardset_id, force=False):
    # Process a single hazardset within the caller's transaction: open its
    # raster layer(s), compute one Output per REG admin division via
    # create_outputs(), then flag the hazardset as processed.
    hazardset = DBSession.query(HazardSet).get(hazardset_id)
    if hazardset is None:
        raise ProcessException('Hazardset {} does not exist.'
                               .format(hazardset_id))

    chrono = datetime.datetime.now()

    if hazardset.processed:
        if force:
            hazardset.processed = False
        else:
            raise ProcessException('Hazardset {} has already been processed.'
                                   .format(hazardset.id))

    logger.info(" Cleaning previous outputs")
    DBSession.query(Output) \
        .filter(Output.hazardset_id == hazardset.id) \
        .delete()
    DBSession.flush()

    type_settings = settings['hazard_types'][hazardset.hazardtype.mnemonic]

    with rasterio.drivers():
        try:
            logger.info(" Opening raster files")
            # Open rasters
            layers = {}
            readers = {}
            if 'values' in type_settings.keys():
                # preprocessed layer: a single raster keyed by 0
                layer = DBSession.query(Layer) \
                    .filter(Layer.hazardset_id == hazardset.id) \
                    .one()
                reader = rasterio.open(layer_path(layer))

                layers[0] = layer
                readers[0] = reader
            else:
                # one raster per severity level, keyed by mnemonic
                for level in (u'HIG', u'MED', u'LOW'):
                    hazardlevel = HazardLevel.get(level)
                    layer = DBSession.query(Layer) \
                        .filter(Layer.hazardset_id == hazardset.id) \
                        .filter(Layer.hazardlevel_id == hazardlevel.id) \
                        .one()
                    reader = rasterio.open(layer_path(layer))

                    layers[level] = layer
                    readers[level] = reader
                if ('mask_return_period' in type_settings):
                    # optional mask raster, keyed by 'mask'
                    layer = DBSession.query(Layer) \
                        .filter(Layer.hazardset_id == hazardset.id) \
                        .filter(Layer.mask.is_(True)) \
                        .one()
                    reader = rasterio.open(layer_path(layer))

                    layers['mask'] = layer
                    readers['mask'] = reader

            outputs = create_outputs(hazardset, layers, readers)

            if outputs:
                DBSession.add_all(outputs)
        finally:
            logger.info(" Closing raster files")
            # NOTE(review): 'readers' is only bound a few lines into the
            # try block; an exception raised before that binding would
            # surface as a NameError here.
            for key, reader in readers.iteritems():
                if reader and not reader.closed:
                    reader.close()

    hazardset.processed = True
    DBSession.flush()

    logger.info(' Successfully processed {}, {} outputs generated in {}'
                .format(hazardset.id,
                        len(outputs),
                        datetime.datetime.now() - chrono))

    return True
def create_outputs(hazardset, layers, readers):
    """Build Output records for every REG-level administrative division
    intersecting the common bounding box of the hazardset's rasters.

    :param hazardset: the HazardSet being processed.
    :param layers: dict of Layer records keyed by 0 (preprocessed),
                   level mnemonic, or 'mask' — same keying as built by
                   process_hazardset().
    :param readers: dict of open rasterio readers with the same keys.
    :returns: list of Output instances (not yet added to the session).
    """
    type_settings = settings['hazard_types'][hazardset.hazardtype.mnemonic]
    adminlevel_REG = AdminLevelType.get(u'REG')

    # Intersect all raster bounding boxes: only divisions inside the
    # area covered by every layer are considered.
    bbox = None
    for reader in readers.itervalues():
        polygon = polygon_from_boundingbox(reader.bounds)
        if bbox is None:
            bbox = polygon
        else:
            bbox = bbox.intersection(polygon)

    admindivs = DBSession.query(AdministrativeDivision) \
        .filter(AdministrativeDivision.leveltype_id == adminlevel_REG.id) \
        .filter(func.ST_Intersects(AdministrativeDivision.geom,
                func.ST_GeomFromText(bbox.wkt, 4326))) \
        .order_by(AdministrativeDivision.id)  # Needed for windowed querying

    current = 0
    last_percent = 0
    outputs = []
    total = admindivs.count()
    logger.info(' Iterating over {} administrative divisions'.format(total))

    # Windowed querying to limit memory usage. Re-applying .offset()
    # replaces the previous OFFSET on the query (SQLAlchemy semantics),
    # so each pass fetches the next window of `limit` rows.
    limit = 1000  # 1000 records <=> 10 Mo
    admindivs = admindivs.limit(limit)
    for offset in xrange(0, total, limit):
        admindivs = admindivs.offset(offset)
        for admindiv in admindivs:
            current += 1
            if admindiv.geom is None:
                logger.warning(' {}-{} has null geometry'
                               .format(admindiv.code, admindiv.name))
                continue
            shape = to_shape(admindiv.geom)
            # Try block to include admindiv.code in exception message
            try:
                if 'values' in type_settings.keys():
                    # preprocessed layer
                    hazardlevel = preprocessed_hazardlevel(
                        type_settings,
                        layers[0], readers[0],
                        shape)
                else:
                    hazardlevel = notpreprocessed_hazardlevel(
                        hazardset.hazardtype.mnemonic, type_settings,
                        layers, readers,
                        shape)
            except Exception as e:
                e.message = ("{}-{} raises an exception :\n{}"
                             .format(admindiv.code, admindiv.name, e.message))
                raise
            # Create output record
            if hazardlevel is not None:
                output = Output()
                output.hazardset = hazardset
                output.admin_id = admindiv.id
                output.hazardlevel = hazardlevel
                # TODO: calculate coverage ratio
                output.coverage_ratio = 100
                outputs.append(output)
            # Remove admindiv from memory
            DBSession.expunge(admindiv)
            # Log a progress line roughly every 10%.
            percent = int(100.0 * current / total)
            if percent % 10 == 0 and percent != last_percent:
                logger.info(' ... processed {}%'.format(percent))
                last_percent = percent
    return outputs
def report(request):
    """Pyramid view building the context for a division's hazard report.

    Reads ``divisioncode`` (required) and ``hazardtype`` (optional) from
    the route matchdict. Without a hazard type it only lists hazard
    levels per hazard type; with one it additionally loads the matching
    category association, recommendations and further resources, and
    redirects to the overview page when no association exists.

    :returns: template context dict, or HTTPFound redirect.
    """
    try:
        division_code = request.matchdict.get('divisioncode')
    except:
        # NOTE(review): matchdict.get does not normally raise, so this
        # bare except looks ineffective — confirm original intent.
        raise HTTPBadRequest(detail='incorrect value for parameter '
                                    '"divisioncode"')
    hazard = request.matchdict.get('hazardtype', None)

    # Get all the hazard types.
    hazardtype_query = DBSession.query(HazardType).order_by(HazardType.order)

    # Get the hazard categories corresponding to the administrative
    # division whose code is division_code.
    hazardcategories_query = DBSession.query(HazardCategory) \
        .join(HazardCategory.administrativedivisions) \
        .join(HazardType) \
        .join(HazardLevel) \
        .filter(AdministrativeDivision.code == division_code)
    # Create a dict with the categories. Keys are the hazard type mnemonic.
    hazardcategories = {d.hazardtype.mnemonic: d
                        for d in hazardcategories_query}

    # One entry per hazard type; fall back to the "no data" level when
    # the division has no category for that type.
    hazard_types = []
    for hazardtype in hazardtype_query:
        cat = _hazardlevel_nodata
        if hazardtype.mnemonic in hazardcategories:
            cat = hazardcategories[hazardtype.mnemonic].hazardlevel
        hazard_types.append({
            'hazardtype': hazardtype,
            'hazardlevel': cat
        })

    association = None
    technical_recommendations = None
    further_resources = None
    climate_change_recommendation = None

    # Get the administrative division whose code is division_code.
    _alias = aliased(AdministrativeDivision)
    division = DBSession.query(AdministrativeDivision) \
        .outerjoin(_alias, _alias.code == AdministrativeDivision.parent_code) \
        .filter(AdministrativeDivision.code == division_code).one()

    if hazard is not None:
        try:
            association = DBSession.query(
                HazardCategoryAdministrativeDivisionAssociation) \
                .join(AdministrativeDivision) \
                .join(HazardCategory) \
                .join(HazardLevel) \
                .join(HazardType) \
                .filter(HazardType.mnemonic == hazard) \
                .filter(AdministrativeDivision.code == division_code) \
                .one()
        except NoResultFound:
            # No data for this hazard on this division: send the user
            # back to the overview report.
            url = request.route_url('report_overview',
                                    divisioncode=division_code)
            return HTTPFound(location=url)
        try:
            # get the code for level 0 division
            code = division.code
            if division.leveltype_id == 2:
                code = division.parent.code
            if division.leveltype_id == 3:
                code = division.parent.parent.code
            climate_change_recommendation = DBSession.query(
                ClimateChangeRecommendation) \
                .join(AdministrativeDivision) \
                .join(HazardType) \
                .filter(AdministrativeDivision.code == code) \
                .filter(HazardType.mnemonic == hazard) \
                .one()
        except NoResultFound:
            # Climate change advice is optional; missing is fine.
            pass

        technical_recommendations = DBSession.query(TechnicalRecommendation) \
            .join(TechnicalRecommendation.hazardcategory_associations) \
            .join(HazardCategory) \
            .filter(HazardCategory.id == association.hazardcategory.id) \
            .all()

        # Resources either global (NULL division) or specific to this
        # division.
        further_resources = DBSession.query(FurtherResource) \
            .join(FurtherResource.hazardcategory_associations) \
            .join(HazardCategory) \
            .outerjoin(AdministrativeDivision) \
            .filter(HazardCategory.id == association.hazardcategory.id) \
            .filter(or_(AdministrativeDivision.code == division_code,
                        AdministrativeDivision.code == null())) \
            .all()

    # Get the geometry for division and compute its extent
    cte = select([AdministrativeDivision.geom]) \
        .where(AdministrativeDivision.code == division_code) \
        .cte('bounds')
    bounds = list(DBSession.query(
        func.ST_XMIN(cte.c.geom), func.ST_YMIN(cte.c.geom),
        func.ST_XMAX(cte.c.geom), func.ST_YMAX(cte.c.geom))
        .one())
    division_bounds = bounds

    # compute a 0-360 version of the extent
    cte = select([
        func.ST_Shift_Longitude(AdministrativeDivision.geom).label('shift')]) \
        .where(AdministrativeDivision.code == division_code) \
        .cte('bounds')
    bounds_shifted = list(DBSession.query(
        func.ST_XMIN(cte.c.shift), func.ST_YMIN(cte.c.shift),
        func.ST_XMAX(cte.c.shift), func.ST_YMAX(cte.c.shift))
        .one())

    # Use the 0-360 if it's smaller
    if bounds_shifted[2] - bounds_shifted[0] < bounds[2] - bounds[0]:
        division_bounds = bounds_shifted

    # Breadcrumb chain of parent divisions, innermost first.
    parents = []
    if division.leveltype_id >= 2:
        parents.append(division.parent)
    if division.leveltype_id == 3:
        parents.append(division.parent.parent)

    return {'hazards': hazard_types,
            'hazards_sorted': sorted(hazard_types,
                                     key=lambda a: a['hazardlevel'].order),
            'hazard_category': association.hazardcategory
            if association else '',
            'source': association.source if association else '',
            'climate_change_recommendation': climate_change_recommendation,
            'recommendations': technical_recommendations,
            'resources': further_resources,
            'division': division,
            'bounds': division_bounds,
            'parents': parents,
            'parent_division': division.parent}
def clearall():
    """Flag every stored Layer as needing a fresh download."""
    logger.info('Reset all layer to not downloaded state.')
    all_layers = DBSession.query(Layer)
    all_layers.update({Layer.downloaded: False})
    DBSession.flush()
def index(request):
    """Landing-page view: expose hazard types in display order."""
    ordered_types = DBSession.query(HazardType).order_by(HazardType.order)
    return {"hazards": ordered_types}
def harvest_layer(object, dry_run=False):
    """Create or update a Layer (and its HazardSet) from one GeoNode
    metadata record.

    Validates the record's mandatory fields (hazard set id, hazard type,
    return period, unit, srid, quality indicators, download url) and
    skips the record with a log message when a check fails.

    :param object: dict of GeoNode layer metadata. NOTE(review): the
                   parameter name shadows the builtin ``object``.
    :param dry_run: accepted but not referenced in this function body —
                    TODO confirm whether it should gate the flush.
    :returns: True when a layer was created or updated, False when the
              record was skipped.
    """
    title = object['title']
    '''
    typename = urllib.unquote(
        object['distribution_url'].split('/').pop())
    # print title
    '''
    hazardset_id = object['hazard_set']
    if not hazardset_id:
        logger.info('{} - hazard_set is empty'.format(title))
        return False
    hazard_type = object['hazard_type']
    if not hazard_type:
        logger.warning('{} - hazard_type is empty'.format(title))
        return False
    hazardtype = hazardtype_from_geonode(hazard_type)
    if hazardtype is None:
        logger.warning('{} - hazard_type not supported: {}'
                       .format(title, hazard_type))
        return False
    type_settings = settings['hazard_types'][hazardtype.mnemonic]
    # Preprocessed hazard types provide direct values, no return periods.
    preprocessed = 'values' in type_settings
    '''
    csw_wkt_geometry = object['csw_wkt_geometry']
    bounds = wkt_loads(csw_wkt_geometry).bounds
    # print ' bounds :', bounds
    # TODO: minimum global bbox should be defined in settings
    minimum_global_bounds = (-175, -45, 175, 45)
    from shapely.geometry import box
    local = not box(bounds).contains(box(minimum_global_bounds))
    '''
    # Heuristic: global datasets carry 'GLOBAL' in their hazardset id.
    local = 'GLOBAL' not in hazardset_id
    '''
    local = (
        bounds[0] > -175 or
        bounds[1] > -45 or
        bounds[2] < 175 or
        bounds[3] < 45)
    '''
    mask = False
    if preprocessed is True:
        hazardlevel = None
        hazard_unit = None
        # Preprocessed layers must not declare a return period.
        if object['hazard_period']:
            logger.info('{} - Has a return period'.format(title))
            return False
        hazard_period = None
    else:
        hazard_period = int(object['hazard_period'])
        hazardlevel = None
        # Map the return period onto a hazard level; settings give either
        # a [min, max] range or a single exact value per level.
        for level in (u'LOW', u'MED', u'HIG'):
            return_periods = type_settings['return_periods'][level]
            if isinstance(return_periods, list):
                if (hazard_period >= return_periods[0] and
                        hazard_period <= return_periods[1]):
                    hazardlevel = HazardLevel.get(level)
                    break
            else:
                if hazard_period == return_periods:
                    hazardlevel = HazardLevel.get(level)
        if ('mask_return_period' in type_settings and
                hazard_period == type_settings['mask_return_period']):
            mask = True
        if hazardlevel is None and not mask:
            logger.info('{} - No corresponding hazard_level'.format(title))
            return False
        hazard_unit = object['hazard_unit']
        if hazard_unit == '':
            logger.info('{} - hazard_unit is empty'.format(title))
            return False
    if object['srid'] != 'EPSG:4326':
        logger.info('{} - srid is different from "EPSG:4326"'
                    .format(title))
        return False
    data_update_date = object['data_update_date']
    if not data_update_date:
        logger.warning('{} - data_update_date is empty'.format(title))
        # TODO: Restore bypassed constraint to get Volcanic data
        # return False
        data_update_date = datetime.now()
    metadata_update_date = object['metadata_update_date']
    if not metadata_update_date:
        logger.warning('{} - metadata_update_date is empty'.format(title))
        # return False
        metadata_update_date = datetime.now()
    calculation_method_quality = object['calculation_method_quality']
    if not calculation_method_quality:
        logger.warning('{} - calculation_method_quality is empty'
                       .format(title))
        return False
    calculation_method_quality = int(float(calculation_method_quality))
    scientific_quality = object['scientific_quality']
    if not scientific_quality:
        logger.warning('{} - scientific_quality is empty'.format(title))
        return False
    scientific_quality = int(float(scientific_quality))
    download_url = object['download_url']
    if not download_url:
        logger.warning('{} - download_url is empty'.format(title))
        return False
    hazardset = DBSession.query(HazardSet).get(hazardset_id)
    if hazardset is None:
        logger.info('{} - Create new hazardset {}'
                    .format(title, hazardset_id))
        hazardset = HazardSet()
        hazardset.id = hazardset_id
        hazardset.hazardtype = hazardtype
        hazardset.data_lastupdated_date = data_update_date
        hazardset.metadata_lastupdated_date = metadata_update_date
        DBSession.add(hazardset)
    else:
        # print ' Hazardset {} found'.format(hazardset_id)
        pass
    # Locate an existing layer for the same hazardset/level (or mask).
    layer = DBSession.query(Layer) \
        .filter(Layer.hazardset_id == hazardset_id)
    if hazardlevel is not None:
        layer = layer.filter(Layer.hazardlevel_id == hazardlevel.id)
    if mask:
        layer = layer.filter(Layer.mask.is_(True))
    layer = layer.first()
    if layer is None:
        layer = Layer()
        logger.info('{} - Create new Layer {}'.format(title, title))
        DBSession.add(layer)
    else:
        if object['id'] == layer.geonode_id:
            # TODO: metadata change
            return False
        # Keep the layer with the smaller (preferred) return period.
        # NOTE(review): for preprocessed sets hazard_period is None;
        # None > int only works in Python 2 — confirm before porting.
        if hazard_period > layer.return_period:
            logger.info('{} - Use preferred return period {}'
                        .format(title, layer.return_period))
            return False
        logger.info('{} - Replace layer for level {}'
                    .format(title, hazardlevel.mnemonic))
    layer.hazardset = hazardset
    layer.hazardlevel = hazardlevel
    layer.mask = mask
    layer.return_period = hazard_period
    layer.hazardunit = hazard_unit
    layer.data_lastupdated_date = data_update_date
    layer.metadata_lastupdated_date = metadata_update_date
    layer.geonode_id = object['id']
    layer.download_url = download_url
    # TODO: retrieve quality attributes
    layer.calculation_method_quality = calculation_method_quality
    layer.scientific_quality = scientific_quality
    layer.local = local
    DBSession.flush()
    return True
def import_admindivs():
    '''
    Populate datamart.administrativedivision from GAUL 2015/2014 dumps.

    Loads the g2015_2014_{0,1,2}.sql files via psql (run as the postgres
    system user through sudo), removes known duplicate rows, copies
    levels 0-2 into the datamart table, then drops the staging tables
    and a few known-bad division codes.
    '''
    engine = engine_from_config(settings, 'sqlalchemy.')
    DBSession.configure(bind=engine)
    connection = DBSession.bind.connect()
    engine_url = DBSession.bind.url
    # Load the three raw GAUL dumps (levels 0, 1, 2) into staging tables.
    for i in [0, 1, 2]:
        print "Importing GAUL data for level {}".format(i)
        print "This may take a while"
        sql_file = "g2015_2014_{}.sql".format(i)
        call(["sudo", "-u", "postgres", "psql", "-d", str(engine_url),
              "-f", sql_file])
    trans = connection.begin()
    print "Removing duplicates"
    # Hard-coded gids of known duplicate rows in the source dumps.
    connection.execute('''
        DELETE FROM g2015_2014_0 WHERE gid = 177;
        DELETE FROM g2015_2014_2 WHERE gid = 5340;
        DELETE FROM g2015_2014_2 WHERE gid = 5382;
        DELETE FROM g2015_2014_2 WHERE gid = 5719;
        DELETE FROM g2015_2014_2 WHERE gid = 20775;
        DELETE FROM g2015_2014_2 WHERE gid = 1059;
        ''')
    trans.commit()
    print "Creating administrative divs"
    # Level 0 (countries): no parent code.
    trans = connection.begin()
    connection.execute('''
        INSERT INTO datamart.administrativedivision
        (code, leveltype_id, name, parent_code, geom)
        SELECT adm0_code, 1, adm0_name, NULL, geom FROM g2015_2014_0;
        SELECT DropGeometryColumn('public', 'g2015_2014_0', 'geom');
        DROP TABLE g2015_2014_0;''')
    trans.commit()
    # Level 1: parent is the level 0 code.
    trans = connection.begin()
    connection.execute('''
        INSERT INTO datamart.administrativedivision
        (code, leveltype_id, name, parent_code, geom)
        SELECT adm1_code, 2, adm1_name, adm0_code, geom FROM g2015_2014_1;
        SELECT DropGeometryColumn('public', 'g2015_2014_1', 'geom');
        DROP TABLE g2015_2014_1;''')
    trans.commit()
    # Level 2: parent is the level 1 code.
    trans = connection.begin()
    connection.execute('''
        INSERT INTO datamart.administrativedivision
        (code, leveltype_id, name, parent_code, geom)
        SELECT adm2_code, 3, adm2_name, adm1_code, geom FROM g2015_2014_2;
        SELECT DropGeometryColumn('public', 'g2015_2014_2', 'geom');
        DROP TABLE g2015_2014_2;
        ''')
    trans.commit()
    # Remove known-bad division codes.
    trans = connection.begin()
    connection.execute('''
        DELETE from datamart.administrativedivision WHERE code in
        (4375, 426, 10);
        ''')
    trans.commit()
    print "{} administrative divisions created".format(
        DBSession.query(AdministrativeDivision).count()
    )
def import_recommendations():
    """Load recommendation CSV files into the database.

    Three imports run in one transaction:
      1. data/general_recommendations.csv -> general_recommendation text
         on each (hazard type, hazard level) HazardCategory;
      2. data/technical_recommendations.csv -> TechnicalRecommendation
         rows associated to categories, with a per-category ordering
         value taken from the matrix columns;
      3. data/climate_change_recommendations.csv -> per-country
         ClimateChangeRecommendation rows, one per hazard-type column.

    Existing technical and climate-change recommendations are deleted
    first; rows for unknown divisions or 'NA' cells are skipped.
    """
    engine = engine_from_config(settings, 'sqlalchemy.')
    DBSession.configure(bind=engine)
    with transaction.manager:
        DBSession.query(HazardCategoryTechnicalRecommendationAssociation) \
            .delete()
        DBSession.query(TechnicalRecommendation).delete()

        # First load general recommendations
        with open('data/general_recommendations.csv', 'rb') as csvfile:
            recommendations = csv.reader(csvfile, delimiter=',')
            for row in recommendations:
                hazardcategory = DBSession.query(HazardCategory) \
                    .join(HazardLevel) \
                    .join(HazardType) \
                    .filter(HazardLevel.mnemonic == row[1]) \
                    .filter(HazardType.mnemonic == row[0]) \
                    .one()
                hazardcategory.general_recommendation = row[2]
                DBSession.add(hazardcategory)

        # Category list in the same column order as the technical
        # recommendations matrix (type-major, level-minor).
        # 'hazardtype_mnemonic' avoids shadowing the builtin 'type'.
        categories = []
        for hazardtype_mnemonic in [u'EQ', u'FL', u'CY', u'TS', u'CF',
                                    u'VA', u'DG']:
            for level_mnemonic in [u'HIG', u'MED', u'LOW', u'VLO']:
                hazardcategory = DBSession.query(HazardCategory) \
                    .join(HazardLevel) \
                    .join(HazardType) \
                    .filter(HazardLevel.mnemonic == level_mnemonic) \
                    .filter(HazardType.mnemonic == hazardtype_mnemonic) \
                    .one()
                categories.append(hazardcategory)

        # Then technical recommendations
        hctra = HazardCategoryTechnicalRecommendationAssociation
        with open('data/technical_recommendations.csv', 'rb') as csvfile:
            recommendations = csv.reader(csvfile, delimiter=',')
            next(recommendations, None)  # skip the headers
            for row in recommendations:
                technical_rec = TechnicalRecommendation(**{
                    'text': row[0]
                })
                associations = technical_rec.hazardcategory_associations

                # the other columns are hazard category (type / level)
                for col_index in range(1, 28):
                    value = row[col_index]
                    # BUGFIX: was "value is not '' and value is not 'Y'".
                    # Identity comparison against string literals depends
                    # on CPython interning and is unreliable for strings
                    # produced by the csv module, so empty or 'Y' cells
                    # could wrongly create associations. Use equality.
                    if value != '' and value != 'Y':
                        association = hctra(order=value)
                        association.hazardcategory = categories[col_index - 1]
                        associations.append(association)
                DBSession.add(technical_rec)

        # Climate change recommendations
        DBSession.query(ClimateChangeRecommendation).delete()

        # hazard types and corresponding columns
        hazard_types = [
            (u'FL', 6),
            (u'EQ', 7),
            (u'CY', 8),
            (u'CF', 9),
            (u'DG', 10),
            (u'TS', 11),
            (u'VA', 12),
            (u'LS', 13),
        ]
        with open('data/climate_change_recommendations.csv', 'rb') as csvfile:
            countries = csv.reader(csvfile, delimiter=',')
            next(countries, None)  # skip the headers
            for row in countries:
                division = DBSession.query(AdministrativeDivision) \
                    .filter(AdministrativeDivision.code == row[1]) \
                    .one_or_none()
                # Skip countries not present as level-0 divisions.
                if not division:
                    continue
                for hazard_type, column in hazard_types:
                    text = row[column]
                    if text == 'NA':
                        continue
                    climate_rec = ClimateChangeRecommendation()
                    climate_rec.text = row[column]
                    climate_rec.administrativedivision = division
                    climate_rec.hazardtype = DBSession.query(HazardType) \
                        .filter(HazardType.mnemonic == hazard_type).one()
                    DBSession.add(climate_rec)
def populate_db():
    """Test fixture: reset the database and insert a small dataset.

    Builds a three-level administrative-division tree (codes 10, 20,
    30/31) with simple square geometries, an EQ/HIG and an FL/MED hazard
    category attached to the two leaf divisions, plus one climate-change
    recommendation, two technical recommendations and two further
    resources (one global, one specific to division 31).
    """
    config = ConfigParser.ConfigParser()
    config.read(local_settings_path)
    db_url = config.get('app:main', 'sqlalchemy.url')
    from sqlalchemy import create_engine
    engine = create_engine(db_url)
    from ..scripts.initializedb import populate_db as populate
    # drop_all=True: start each run from an empty schema.
    populate(engine, drop_all=True)

    # Level 1 and 2 divisions share the same unit-square geometry.
    shape = MultiPolygon([
        Polygon([(0, 0), (0, 1), (1, 1), (1, 0), (0, 0)])
    ])
    geometry = from_shape(shape, 4326)

    div_level_1 = AdministrativeDivision(**{
        'code': 10,
        'leveltype_id': 1,
        'name': u'Division level 1'
    })
    div_level_1.geom = geometry
    DBSession.add(div_level_1)

    div_level_2 = AdministrativeDivision(**{
        'code': 20,
        'leveltype_id': 2,
        'name': u'Division level 2'
    })
    div_level_2.parent_code = div_level_1.code
    div_level_2.geom = geometry
    DBSession.add(div_level_2)

    # The two level-3 divisions split the unit square into two halves.
    shape = MultiPolygon([
        Polygon([(0, 0), (0, 1), (.5, 1), (.5, 0), (0, 0)])
    ])
    geometry = from_shape(shape, 4326)
    div_level_3_1 = AdministrativeDivision(**{
        'code': 30,
        'leveltype_id': 3,
        'name': u'Division level 3 - 1'
    })
    div_level_3_1.parent_code = div_level_2.code
    div_level_3_1.geom = geometry
    div_level_3_1.hazardcategories = []

    shape = MultiPolygon([
        Polygon([(.5, 0), (.5, 1), (1, 1), (1, 0), (.5, 0)])
    ])
    geometry = from_shape(shape, 4326)
    div_level_3_2 = AdministrativeDivision(**{
        'code': 31,
        'leveltype_id': 3,
        'name': u'Division level 3 - 2'
    })
    div_level_3_2.parent_code = div_level_2.code
    div_level_3_2.geom = geometry
    div_level_3_2.hazardcategories = []

    # Earthquake / high category attached to both leaf divisions.
    category_eq_hig = HazardCategory(**{
        'general_recommendation': u'General recommendation for EQ HIG',
    })
    category_eq_hig.hazardtype = DBSession.query(HazardType) \
        .filter(HazardType.mnemonic == u'EQ').one()
    category_eq_hig.hazardlevel = DBSession.query(HazardLevel) \
        .filter(HazardLevel.mnemonic == u'HIG').one()

    associate_admindiv_hazardcategory(div_level_3_1, category_eq_hig, u'test')
    associate_admindiv_hazardcategory(div_level_3_2, category_eq_hig, u'test')

    climate_rec = ClimateChangeRecommendation()
    climate_rec.text = u'Climate change recommendation'
    climate_rec.administrativedivision = div_level_1
    climate_rec.hazardtype = DBSession.query(HazardType) \
        .filter(HazardType.mnemonic == u'EQ').one()
    DBSession.add(climate_rec)

    technical_rec = TechnicalRecommendation(**{
        'text': u'Recommendation #1 for earthquake, applied to'
                ' hazard categories HIG, MED and LOW'
    })
    association = HazardCategoryTechnicalRecommendationAssociation(order=1)
    association.hazardcategory = category_eq_hig
    technical_rec.hazardcategory_associations.append(association)
    DBSession.add(technical_rec)

    technical_rec = TechnicalRecommendation(**{
        'text': u'Educational web resources on earthquakes and'
                ' seismic hazard'
    })
    association = HazardCategoryTechnicalRecommendationAssociation(order=1)
    association.hazardcategory = category_eq_hig
    technical_rec.hazardcategory_associations.append(association)
    DBSession.add(technical_rec)

    # Flood / medium category attached to both leaf divisions.
    category_fl_med = HazardCategory(**{
        'general_recommendation': u'General recommendation for FL MED',
    })
    category_fl_med.hazardtype = DBSession.query(HazardType) \
        .filter(HazardType.mnemonic == u'FL').one()
    category_fl_med.hazardlevel = DBSession.query(HazardLevel) \
        .filter(HazardLevel.mnemonic == u'MED').one()
    associate_admindiv_hazardcategory(div_level_3_1, category_fl_med, u'test')
    DBSession.add(div_level_3_1)
    associate_admindiv_hazardcategory(div_level_3_2, category_fl_med, u'test')
    DBSession.add(div_level_3_2)

    # Further resource attached to the EQ/HIG category (no division:
    # applies everywhere).
    further_resource = FurtherResource(**{
        'text': u'Educational web resources on earthquakes and' +
                ' seismic hazard',
        'url': u'http://earthquake.usgs.gov/learn/?source=sitemap'
    })
    association = HazardCategoryFurtherResourceAssociation(order=1)
    association.hazardcategory = category_eq_hig
    further_resource.hazardcategory_associations.append(association)
    DBSession.add(further_resource)

    # Add further resource for one division only
    further_resource = FurtherResource(**{
        'text': u'Further resource specific to division level 3 - 2',
        'url': u'http://domain.com/the/document/url.txt'
    })
    association = HazardCategoryFurtherResourceAssociation(order=2)
    association.hazardcategory = category_eq_hig
    association.administrativedivision = div_level_3_2
    further_resource.hazardcategory_associations.append(association)
    DBSession.add(further_resource)
def new_geonode_id():
    """Return the next free geonode identifier (max existing id + 1)."""
    current_max = DBSession.query(func.max(Layer.geonode_id)).one_or_none()
    if current_max[0] is None:
        # Empty table: start numbering at 1.
        return 1
    return current_max[0] + 1