def get_layer_data(layer_name):
    """Read the first shapefile stored under the layer's raw media directory."""
    layer = Layer.objects.get(name=layer_name)
    layer_path = os.path.join(settings.MEDIA_ROOT, 'layers', layer.slug, 'raw')
    # Glob with the full path instead of os.chdir, which changed the
    # process-wide working directory as a side effect
    layer_file = glob.glob(os.path.join(layer_path, '*.shp'))[0]
    return read_layer(layer_file)
def calculate(hazard_filename, exposure_filename):
    """Use SAFE to calculate the impact

    Inputs:
        hazard_filename: Absolute path to hazard file
        exposure_filename: Absolute path to exposure file
    """
    H = read_layer(hazard_filename)
    E = read_layer(exposure_filename)
    IF = ModisFloodImpactFunction
    impact_layer = calculate_impact(layers=[H, E], impact_fcn=IF)
    impact_filename = impact_layer.get_filename()
    calculated_raster = read_layer(impact_filename)
    return calculated_raster
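# Hedged usage sketch for calculate(): both paths are hypothetical, and the
# files must already carry the keywords ModisFloodImpactFunction expects.
# write_to_file is the Raster method used elsewhere in this codebase.
flood_impact = calculate('/data/hazard/modis_flood.tif',
                         '/data/exposure/population.tif')
flood_impact.write_to_file('/data/impact/flood_impact.tif')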
def get_layer_data(layer_name): """ :param layer_name: :return: """ layer_file = shapefile_path(layer_name) return read_layer(layer_file)
def post(self):
    layer_type = self.get_argument("layer_type")  # TODO: change this to ID
    filename = self.get_argument("filename")
    encoding = sys.getfilesystemencoding()
    # Hazard and exposure layers are handled identically, so the two
    # duplicated branches are collapsed into one
    if "hazard" in layer_type or "exposure" in layer_type:
        layer = read_layer(filename.encode(encoding))
        json_data = layer.keywords
        json_data.update({"name": layer.name})
        self.set_header("Content-Type", "application/json")
        self.write(json.dumps(json_data))
def get_bounding_box(filename):
    """Get bounding box for specified raster or vector file

    Input:
        filename

    Output:
        bounding box as python list of numbers [West, South, East, North]
    """
    layer = read_layer(filename)
    return layer.get_bounding_box()
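# Hedged usage sketch for get_bounding_box(): the file names are
# hypothetical; bbox_intersection is the helper the handlers further
# down use to find the overlap of two layers.
hazard_bbox = get_bounding_box('flood.tif')
exposure_bbox = get_bounding_box('buildings.shp')
common_bbox = bbox_intersection(hazard_bbox, exposure_bbox)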
def flood_severity(hazard_files):
    """Accumulate the hazard level over a sequence of flood rasters."""
    # Value above which people are regarded affected.
    # For this dataset, 0 is no data, 1 is cloud, 2 is normal water level
    # and 3 is overflow.
    threshold = 2.9

    # This starts as None but will end up being a matrix
    I_sum = None
    projection = None
    geotransform = None
    total_days = len(hazard_files)
    ignored = 0

    print 'Accumulating layers'
    for hazard_filename in hazard_files:
        print ' - Processing %s' % hazard_filename
        layer = read_layer(hazard_filename)

        # Extract data as a numeric array
        D = layer.get_data(nan=0.0)  # Depth

        # Assign ones where the cell is affected
        I = numpy.where(D > threshold, 1, 0)

        # If this is the first file, use it to initialise the aggregate
        # and stop processing
        if I_sum is None:
            I_sum = I
            projection = layer.get_projection()
            geotransform = layer.get_geotransform()
            continue

        # If it is not the first one, add it up if it has the right shape,
        # otherwise ignore it
        if I_sum.shape == I.shape:
            I_sum = I_sum + I
        else:
            ignored += 1
            print 'Ignoring file %s because it is incomplete' % hazard_filename

    # Create raster object and return
    R = Raster(I_sum,
               projection=projection,
               geotransform=geotransform,
               name='People affected',
               keywords={'category': 'hazard',
                         'subcategory': 'flood',
                         'units': 'days',
                         'total_days': total_days,
                         'ignored': ignored})
    return R
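# Hedged usage sketch for flood_severity(): the file pattern and output
# name are hypothetical; write_to_file is the Raster method used
# elsewhere in this module.
hazard_files = sorted(glob.glob('modis_flood_day*.tif'))
severity = flood_severity(hazard_files)
severity.write_to_file('flood_severity.tif')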
def check_layer(layer, full=False):
    """Verify if an object is a valid Layer.

    If the check fails an exception is raised.

    Input
        layer: Layer object
        full: Optional flag controlling whether layer is to be downloaded
              as part of the check.
    """
    from geonode.maps.models import Layer

    msg = 'Was expecting layer object, got None'
    assert layer is not None, msg
    msg = 'Was expecting layer object, got %s' % type(layer)
    assert type(layer) is Layer, msg
    msg = 'The layer does not have a valid name: %s' % layer.name
    assert len(layer.name) > 0, msg
    msg = 'The layer does not have a valid workspace: %s' % layer.workspace
    assert len(layer.workspace) > 0, msg

    # Get layer metadata.
    # NOTE: an earlier revision wrapped this call in a try/except that
    # converted any exception to AssertionError for the retry loop in
    # save_file_to_geonode.
    layer_name = '%s:%s' % (layer.workspace, layer.name)
    metadata = get_metadata(INTERNAL_SERVER_URL, layer_name)

    assert 'id' in metadata
    assert 'title' in metadata
    assert 'layertype' in metadata
    assert 'keywords' in metadata
    assert 'bounding_box' in metadata

    # Get bounding box and download
    bbox = metadata['bounding_box']
    assert len(bbox) == 4

    if full:
        # Check that layer can be downloaded again
        downloaded_layer = download(INTERNAL_SERVER_URL, layer_name, bbox)
        assert os.path.exists(downloaded_layer.filename)

        # Check integrity between Django layer and file
        assert_bounding_box_matches(layer, downloaded_layer.filename)

        # Read layer back in to verify that it parses
        L = read_layer(downloaded_layer.filename)
def resample(files, population):
    """Resample the input files to the resolution of the population dataset."""
    p = read_layer(population)
    res_x, res_y = p.get_resolution()
    out = []
    for input_file in files:
        basename, ext = os.path.splitext(input_file)
        sampled_output = basename + '_resampled' + ext
        if not os.path.exists(sampled_output):
            subprocess.call(['gdalwarp', '-tr', str(res_x), str(res_y),
                             input_file, sampled_output],
                            stdout=open(os.devnull, 'w'))
        out.append(sampled_output)
    return out
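# Hedged example combining resample() and flood_severity(): resampling
# everything onto the population grid first keeps the arrays the same
# shape, so flood_severity() will not ignore any of them. File names are
# hypothetical and gdalwarp must be available on the PATH.
resampled = resample(sorted(glob.glob('flood_day*.tif')), 'population.tif')
severity = flood_severity(resampled)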
    resolution = layer_metadata['resolution']
    #resolution = (resolution, resolution) #FIXME (Ole): Make nicer

    # Download raster using specified bounding box and resolution
    template = WCS_TEMPLATE
    suffix = '.tif'
    download_url = template % (server_url, layer_name, bbox_string,
                               resolution[0], resolution[1])
    filename = get_file(download_url, suffix)

    # Write keywords file
    keywords = layer_metadata['keywords']
    write_keywords(keywords, os.path.splitext(filename)[0] + '.keywords')

    # Instantiate layer from file
    lyr = read_layer(filename)

    # FIXME (Ariel) Don't monkeypatch the layer object
    lyr.metadata = layer_metadata
    return lyr


def dummy_save(filename, title, user, metadata=''):
    """Take a file-like object and upload it to a GeoNode"""
    return 'http://dummy/data/geonode:' + filename + '_by_' + user.username


#--------------------------------------------------------------------
# Functionality to upload layers to GeoNode and check their integrity
#--------------------------------------------------------------------
def post(self):
    result = None
    purpose = self.get_argument("purpose")
    if "pdf" in purpose:
        html = self.get_argument("html")
        output = os.path.join(DATA_PATH, 'pdf', 'report.pdf')
        with open(os.path.join(ROOT, 'static', 'css', 'pdf.css')) as data:
            css = data.read()
        HTML(string=html).write_pdf(output, stylesheets=[CSS(string=css)])
        return
    elif "calculate" in purpose:
        encoding = sys.getfilesystemencoding()
        exposure = self.get_argument("exposure")
        exposure_category = self.get_argument("exposure_category")
        exposure_subcategory = self.get_argument("exposure_subcategory")
        hazard = self.get_argument("hazard")
        hazard_category = self.get_argument("hazard_category")
        hazard_subcategory = self.get_argument("hazard_subcategory")
        try:
            hazard_layer = read_layer(hazard.encode(encoding))
            exposure_layer = read_layer(exposure.encode(encoding))

            # Assign the required keywords for inasafe calculations
            exposure_layer.keywords['category'] = exposure_category
            exposure_layer.keywords['subcategory'] = exposure_subcategory
            hazard_layer.keywords['category'] = hazard_category
            hazard_layer.keywords['subcategory'] = hazard_subcategory

            # TODO: determine the correct impact function from the given
            # keywords instead of hardcoding it
            impact_function = FloodBuildingImpactFunction

            output = os.path.join(DATA_PATH, 'impact', 'impact.json')
            output_style = os.path.join(DATA_PATH, 'impact',
                                        'impact_style.json')
            output_summary = os.path.join(DATA_PATH, 'impact',
                                          'impact_summary.html')

            # NOTE: impact.json is checked here but was never written in
            # this revision (the ogr2ogr conversion was commented out)
            if os.path.exists(output) and os.path.exists(output_style) \
                    and os.path.exists(output_summary):
                # Reuse the cached summary
                with open(output_summary) as summary:
                    result = summary.read()
            else:
                impact = calculate_impact(
                    layers=[exposure_layer, hazard_layer],
                    impact_fcn=impact_function)

                # Create the style for the impact layer
                with open(output_style, 'w') as style_json:
                    json.dump(impact.style_info, style_json)

                # Create the impact summary file
                result = impact.keywords["impact_summary"]
                with open(output_summary, 'w') as summary:
                    summary.write(result)
        except:
            print 'IO Error or something else has occurred!'
            raise
        else:
            self.render("result.html", result=result)
def get(self):
    exposure_id = self.get_argument('e')
    hazard_id = self.get_argument('h')
    impact_name = 'impact-e%s-h%s' % (exposure_id, hazard_id)
    writeout = ''  # Fixes a NameError when the id check below fails

    if exposure_id and hazard_id:
        # First check if the impact already exists in the cache
        try:
            # Try to connect to the redis cache
            redis_server = redis.Redis()
            cache = True
            print 'Successfully connected to redis!'
        except:
            # This is just a flag that will be used later on
            print "I couldn't connect to redis"
            cache = False
        else:
            # If the impact exists, get it from the cache and return
            if redis_server.exists(impact_name):
                print 'Entry exists in cache!'
                writeout = redis_server.get(impact_name)
                self.set_header('Content-Type', 'application/javascript')
                self.write(writeout)
                return

        # Query the db and calculate if it doesn't
        try:
            # Try connecting to the pg database
            conn = psycopg2.connect(
                "dbname='dev' user='******' password='******'")
            print 'Successfully connected to postgres!'
        except:
            writeout = 'Could not connect to the database!'
        else:
            # Create a cursor
            cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
            try:
                # 1. Query the db for the layers. Parameterised queries
                # replace the original string interpolation, which was
                # open to SQL injection.
                query = 'SELECT shapefile FROM layers WHERE id = %s'
                cursor.execute(query, (exposure_id,))
                exposure = cursor.fetchone()
                cursor.execute(query, (hazard_id,))
                hazard = cursor.fetchone()
            except:
                writeout = 'There was something wrong with your query'
                conn.rollback()
            else:
                if exposure and hazard:
                    # 2. Pass the shapefile paths to read_layer
                    try:
                        exposure_layer = read_layer(exposure['shapefile'])
                        hazard_layer = read_layer(hazard['shapefile'])
                    except:
                        writeout = ('Something went wrong when reading '
                                    'the layers')

                    # Keywords
                    exposure_dict = exposure_layer.get_keywords()
                    hazard_dict = hazard_layer.get_keywords()
                    if exposure_layer.is_vector:
                        exposure_dict['layertype'] = 'vector'
                    else:
                        exposure_dict['layertype'] = 'raster'
                    if hazard_layer.is_vector:
                        hazard_dict['layertype'] = 'vector'
                    else:
                        # The original set exposure_dict here by mistake
                        hazard_dict['layertype'] = 'raster'

                    # 3. Get optimal bounding box
                    common_bbox = bbox_intersection(
                        exposure_layer.get_bounding_box(),
                        hazard_layer.get_bounding_box())
                    print exposure_layer.get_bounding_box()
                    print hazard_layer.get_bounding_box()

                    bbox_string = ''
                    try:
                        for val in common_bbox:
                            bbox_string += str(val) + ' '
                    except:
                        writeout = 'The layers have no intersection!'
                    else:
                        # 4. Clip both layers to the common bounding box
                        dest = 'hazard_tmp.shp'
                        src = hazard_layer.filename
                        print src
                        try:
                            call('ogr2ogr -clipsrc %s %s %s'
                                 % (bbox_string, dest, src), shell=True)
                        except:
                            print 'could not clip hazard'
                        else:
                            print 'created clipped hazard. Reading layer now.'
                            try:
                                clipped_hazard = read_layer('hazard_tmp.shp')
                            except:
                                print ('something went wrong when reading '
                                       'the clipped hazard')
                            else:
                                print clipped_hazard

                        dest = 'exposure_tmp.shp'
                        src = exposure_layer.filename
                        print src
                        try:
                            call('ogr2ogr -clipsrc %s %s %s'
                                 % (bbox_string, dest, src), shell=True)
                        except:
                            print 'could not clip exposure'
                        else:
                            print 'created clipped exposure. Reading layer now.'
                            try:
                                clipped_exposure = read_layer('exposure_tmp.shp')
                            except:
                                print ('something went wrong when reading '
                                       'the clipped exposure')
                            else:
                                print clipped_exposure

                        # Get impact function based on layer keywords
                        fncs = get_admissible_plugins([hazard_dict,
                                                       exposure_dict])
                        impact_fnc = fncs.values()[0]
                        layers = [clipped_hazard, clipped_exposure]

                        # Call calculate_impact. The original passed the
                        # undefined name 'impact_function' here.
                        impact_file = calculate_impact(layers, impact_fnc)
                        tmpfile = 'tmp%s.json' % impact_name

                        # 5. Serialize the output into json and write out.
                        # Convert the impact file into a json file
                        call(['ogr2ogr', '-f', 'GeoJSON',
                              tmpfile, impact_file.filename])

                        # FIXME: Something needs to be done about the encoding
                        with open(tmpfile) as f:
                            json_data = json.loads(f.read())
                        writeout = json.dumps(json_data)

                        # Delete temporary files
                        os.remove(tmpfile)
                        os.remove('hazard_tmp.shp')
                        os.remove('exposure_tmp.shp')

                        # 6. Cache
                        if cache:
                            # Use setex instead to add a cache expiry
                            redis_server.set(impact_name, writeout)
                else:
                    writeout = ('Sorry, your query returned one or '
                                'more empty matches')

    self.set_header('Content-Type', 'application/javascript')
    self.write(writeout)
def save_file_to_geonode(filename, user=None, title=None,
                         overwrite=True, check_metadata=True,
                         ignore=None):
    """Save a single layer file to local Risiko GeoNode

    Input
        filename: Layer filename of type as defined in LAYER_TYPES
        user: Django User object
        title: String describing the layer.
               If None or '' the filename will be used.
        overwrite: Boolean variable controlling whether existing layers
                   can be overwritten by this operation. Default is True
        check_metadata: Flag controlling whether metadata is verified.
                        If True (default), an exception will be raised if
                        metadata is not available after a number of retries.
                        If False, no check is done, making the function
                        faster.
    Output
        layer object
    """
    if ignore is not None and filename == ignore:
        return None

    # Extract fully qualified basename and extension
    basename, extension = os.path.splitext(filename)

    if extension not in LAYER_TYPES:
        msg = ('Invalid file extension "%s" in file %s. Valid extensions are '
               '%s' % (extension, filename, str(LAYER_TYPES)))
        raise RisikoException(msg)

    # Try to find a file with a .keywords extension and create a keywords
    # list from there. It is assumed that the keywords are separated by
    # new lines. Empty keyword lines are ignored (as this causes issues
    # downstream).
    keyword_list = []
    keyword_file = basename + '.keywords'
    kw_title = title
    kw_summary = None
    kw_table = None

    if os.path.exists(keyword_file):
        f = open(keyword_file, 'r')
        for line in f.readlines():
            # Ignore blank lines
            raw_keyword = line.strip()
            if raw_keyword == '':
                continue

            # Strip any spaces before or after the colons if present
            if ':' in raw_keyword:
                keyword = ':'.join([x.strip()
                                    for x in raw_keyword.split(':')])
            else:
                # Keep colon-free lines as-is (the original left 'keyword'
                # unbound in this case)
                keyword = raw_keyword

            # Grab title if present
            if 'title' in keyword and kw_title is None:
                kw_title = keyword.split(':')[1]

            if 'impact_summary' in keyword:
                kw_summary = ''.join(keyword.split(':')[1:])
                continue

            if 'impact_table' in keyword:
                kw_table = keyword.split(':')[1]
                continue

            keyword_list.append(keyword)
        f.close()

    # Take care of file types
    if extension == '.asc':
        # We assume this is an AAIGrid ASCII file such as those generated
        # by ESRI and convert it to Geotiff before uploading.

        # Create temporary tif file for upload and check the road is clear
        prefix = os.path.split(basename)[-1]
        upload_filename = unique_filename(prefix=prefix, suffix='.tif')
        upload_basename, extension = os.path.splitext(upload_filename)

        # Copy any metadata files to unique filename
        for ext in ['.sld', '.keywords']:
            if os.path.exists(basename + ext):
                cmd = 'cp %s%s %s%s' % (basename, ext, upload_basename, ext)
                run(cmd)

        # Check that projection file exists
        prjname = basename + '.prj'
        if not os.path.isfile(prjname):
            msg = ('File %s must have a projection file named '
                   '%s' % (filename, prjname))
            raise RisikoException(msg)

        # Convert ASCII file to GeoTIFF
        R = read_layer(filename)
        R.write_to_file(upload_filename)
    else:
        # The specified file is the one to upload
        upload_filename = filename

    # Use file name or keywords to derive title if not specified
    if kw_title is None:
        title = os.path.split(basename)[-1]
    else:
        title = kw_title

    # Attempt to upload the layer
    try:
        # Upload
        layer = file_upload(upload_filename,
                            user=user,
                            title=title,
                            keywords=keyword_list,
                            overwrite=overwrite)
        if kw_summary is not None:
            layer.abstract = kw_summary
        if kw_table is not None:
            layer.supplemental_information = kw_table
        if kw_title is not None:
            layer.title = kw_title
        layer.save()
    except GeoNodeException, e:
        raise

    # Return the uploaded layer object as promised by the docstring
    # (the original snippet ended without returning anything)
    return layer
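# Hedged usage sketch for save_file_to_geonode(): the shapefile path and
# username are hypothetical; check_layer() above verifies the upload,
# including a full round-trip download when full=True.
from django.contrib.auth.models import User
user = User.objects.get(username='admin')
uploaded = save_file_to_geonode('/tmp/flood_hazard.shp', user=user,
                                title='Flood hazard')
check_layer(uploaded, full=True)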
def get(self):
    data = dict()
    encoding = sys.getfilesystemencoding()
    exposure_name = ''
    hazard_name = "%s.shp" % self.get_argument("hazard_name")
    hazard_path = os.path.join(DATA_PATH, 'hazard', hazard_name)
    impact_function_keyword = self.get_argument("impact_function")

    if impact_function_keyword == 'structure':
        exposure_name = "%s.shp" % self.get_argument("exposure_name")
        #impact_function = FloodBuildingImpactFunction
        impact_function = NOAHFloodBuildingImpactFunction
    elif impact_function_keyword == 'population':
        exposure_name = "%s.tif" % self.get_argument("exposure_name")
        impact_function = FloodEvacuationFunctionVectorHazard

    exposure_path = os.path.join(DATA_PATH, 'exposure', exposure_name)

    try:
        hazard_layer = read_layer(hazard_path.encode(encoding))
        exposure_layer = read_layer(exposure_path.encode(encoding))

        # Hardcode the required keywords for inasafe calculations
        exposure_layer.keywords['category'] = 'exposure'
        hazard_layer.keywords['category'] = 'hazard'
        hazard_layer.keywords['subcategory'] = 'flood'
        if impact_function_keyword == 'structure':
            exposure_layer.keywords['subcategory'] = 'structure'
        elif impact_function_keyword == 'population':
            exposure_layer.keywords['subcategory'] = 'population'

        haz_fnam, ext = os.path.splitext(hazard_name)
        exp_fnam, ext = os.path.splitext(exposure_name)
        impact_base_name = "IMPACT_%s_%s" % (exp_fnam, haz_fnam)
        impact_filename = impact_base_name + '.shp'
        impact_summary = impact_base_name + '.html'
        output = str(os.path.join(DATA_PATH, 'impact', impact_filename))
        output_summary = str(os.path.join(DATA_PATH, 'impact summary',
                                          impact_summary))

        if os.path.exists(output) and os.path.exists(output_summary):
            print 'impact file and impact summary already exist!'
            data = {
                'return': 'success',
                'resource': impact_base_name,
            }
            with open(output_summary) as html:
                data['html'] = html.read()
            print_pdf(data['html'], impact_base_name)
        else:
            impact = calculate_impact(
                layers=[exposure_layer, hazard_layer],
                impact_fcn=impact_function)
            impact.write_to_file(output)
            data = upload_impact_vector(output)

            # Create the impact summary file
            make_data_dirs()
            result = impact.keywords["impact_summary"]
            with open(output_summary, 'w+') as summary:
                summary.write(result)

            if impact_function_keyword == 'population':
                make_style(impact_base_name, impact.style_info)
                set_style(impact_base_name, impact_base_name)
            else:
                set_style(impact_base_name, "Flood-Building")

            data['html'] = result
            print_pdf(result, impact_base_name)
    except:
        print 'IO Error or something else has occurred!'
        raise
    else:
        self.set_header("Content-Type", "application/json")
        self.write(json.dumps(data))