def get_cropped_recursively(meter_x: float, meter_y: float, zoom: int, path: str, steps: int,
                            do_epx_scale: bool, file_ending: str):
    """Recursively crops tiles until the required one has been generated.

    To prevent a stack overflow, the steps are limited to MAX_STEP_NUMBER.

    Returns the expected filename of the requested tile, or None if the step limit
    was reached without a croppable tile being found.
    """
    if steps >= MAX_STEP_NUMBER:
        # No tile that could be cropped has been found for the set MAX_STEP_NUMBER
        logger.error("{}: No tile could be found or created (tried from zoom {} down to {}) at location {}, {}!"
                     " Possible causes: Missing data, wrong path, no write permissions"
                     .format(path, zoom + steps, zoom, meter_x, meter_y))
        return None

    full_path = utils.join_path(path, FULL_PATH)

    this_point = webmercator.Point(meter_x=meter_x, meter_y=meter_y, zoom_level=zoom)
    this_point_filename = full_path.format(zoom, this_point.tile_x, this_point.tile_y, file_ending)

    # A missing or empty (0 byte, i.e. partially written) file both count as "not there yet"
    if not os.path.isfile(this_point_filename) or (os.path.getsize(this_point_filename) == 0):
        prev_point = webmercator.Point(meter_x=meter_x, meter_y=meter_y, zoom_level=zoom - 1)
        # Bugfix: the parent tile lives at zoom level (zoom - 1), so its filename must be
        # built with (zoom - 1) as well - prev_point's tile coordinates are at that level.
        # Previously this mixed zoom-level "zoom" with (zoom - 1) tile coordinates.
        prev_point_filename = full_path.format(zoom - 1, prev_point.tile_x, prev_point.tile_y, file_ending)

        if not os.path.isfile(prev_point_filename) or (os.path.getsize(prev_point_filename) == 0):
            # Parent tile is missing too - recurse one zoom level further down
            get_cropped_recursively(meter_x, meter_y, zoom - 1, path, steps + 1, do_epx_scale, file_ending)

        # Crop the (now existing) parent tile into the tile requested here
        get_cropped_for_next_tile(meter_x, meter_y, zoom - 1, path, do_epx_scale, file_ending)

    return this_point_filename
def parse_phytocoenosis_data(json_data):
    """Parses a fixture list to the correct data types for phytocoenosis.

    The fields are parsed as follows:
    "id": int, "speciesRepresentations": list, "name": string,
    "albedo_path": string, "normal_path": string, "displacement_path": string

    albedo_path, normal_path and displacement_path are built from the "texture"
    field, which is removed afterwards. The entries are modified in place.
    """
    for raw_entry in json_data:
        fields = raw_entry["fields"]

        try:
            fields["id"] = int(fields["id"])
            # An unset representation list is treated as an empty list
            fields["speciesRepresentations"] = literal_eval(fields["speciesRepresentations"] or "[]")

            # Derive the three texture map paths from the single "texture" field
            texture = fields["texture"]
            fields["albedo_path"] = utils.join_path(TEXTURE_PREFIX, texture, ALBEDO_TEXTURE_NAME)
            fields["normal_path"] = utils.join_path(TEXTURE_PREFIX, texture, NORMAL_TEXTURE_NAME)
            fields["displacement_path"] = utils.join_path(TEXTURE_PREFIX, texture, DISPLACEMENT_TEXTURE_NAME)
            del fields["texture"]

            logger.debug("Parsed phytocoenosis with ID {}".format(fields["id"]))
        except ValueError:
            logger.error(
                "One of the types in the json row {} did not have the correct type!"
                " Please check the specification.".format(fields))
def parse_species_representation_data(json_data):
    """Parses a fixture list to the correct data types for species representations.

    The fields are parsed as follows:
    "id": int, "species": int, "avg_height": float, "sigma_height": float,
    "vegetation_layer": int, "distribution_density": float
    "billboard": string

    Entries with an empty "billboard" field are removed from json_data in place.
    """
    to_be_deleted = []

    for index, raw_entry in enumerate(json_data):
        entry = raw_entry["fields"]

        try:
            # Check if the billboard field is set - the species representation is useless otherwise
            if not entry["billboard"]:
                # Bugfix: logger.warn is deprecated; use logger.warning (consistent
                # with the other warnings in this function)
                logger.warning(
                    "Field with empty billboard at ID {} will not be included!"
                    .format(entry["id"]))
                to_be_deleted.append(index)  # Can't delete from list while iterating!
                continue

            entry["id"] = int(entry["id"])
            # An unset species is kept as None; unset heights fall back to defaults
            entry["species"] = int(entry["species"]) if entry["species"] else None
            entry["avg_height"] = float(entry["avg_height"] or AVG_HEIGHT_DEFAULT)
            entry["sigma_height"] = float(entry["sigma_height"] or SIGMA_HEIGHT_DEFAULT)
            entry["billboard"] = utils.join_path(BILLBOARD_PREFIX, entry["billboard"])

            # Get the layer based on the avg_height
            layer_set = False

            for (layer, max_height) in LAYER_MAXHEIGHTS:
                # TODO: The sigma_height should be added to this calculation in the future.
                # For now, this works since we don't use the sigma_height here.
                if entry["avg_height"] <= max_height:
                    entry["vegetation_layer"] = layer
                    layer_set = True
                    break

            # If there was no fitting layer, set it to max, but issue a warning
            if not layer_set:
                entry["vegetation_layer"] = len(LAYER_MAXHEIGHTS)
                logger.warning(
                    "The plant with id {} is too high ({}) - the maximum layer has been assigned"
                    .format(entry["id"], entry["avg_height"]))

            # Convert distribution density to float
            entry["distribution_density"] = float(entry["distribution_density"])

            logger.debug("Parsed vegetation with ID {}".format(entry["id"]))
        except ValueError:
            logger.error(
                "One of the types in the json row {} did not have the correct type!"
                " Please check the specification.".format(entry))

    # Delete the items which were flagged as invalid
    # We reverse to_be_deleted since otherwise, indices in json_data change while deleting!
    for index in reversed(to_be_deleted):
        del json_data[index]
def get_cropped_for_next_tile(meter_x: float, meter_y: float, zoom: int, path: str,
                              do_epx_scale: bool, file_ending: str):
    """Takes the tile at the given parameters (which must exist!) and crops it to create
    a tile one zoom level above the given one. This new tile is then saved in the LOD pyramid.

    The quarter of the existing tile to crop to is chosen by utilizing how tile coordinates
    work in OSM:
    2x,2y    2x+1,2y
    2x,2y+1  2x+1,2y+1
    """
    p_wanted = webmercator.Point(meter_x=meter_x, meter_y=meter_y, zoom_level=zoom + 1)
    p_available = webmercator.Point(meter_x=meter_x, meter_y=meter_y, zoom_level=zoom)

    # Pick the horizontal and vertical half of the source tile from the parity of the
    # wanted tile's coordinates (see docstring); values are fractions of the image size
    if p_wanted.tile_x % 2 == 0:
        left_right = [0, 0.5]
    else:
        left_right = [0.5, 1]

    if p_wanted.tile_y % 2 == 0:
        upper_lower = [0, 0.5]
    else:
        upper_lower = [0.5, 1]

    # (removed unused zoom_path_template local)
    x_path_template = utils.join_path(path, METER_X_PATH)
    full_path_template = utils.join_path(path, FULL_PATH)

    available_filename = full_path_template.format(zoom, p_available.tile_x, p_available.tile_y, file_ending)
    wanted_filename = full_path_template.format(zoom + 1, p_wanted.tile_x, p_wanted.tile_y, file_ending)

    if not os.path.isfile(available_filename):
        # Nothing here yet - we might recurse further in get_cropped_recursively
        return

    x_path = x_path_template.format(zoom + 1, p_wanted.tile_x)
    os.makedirs(x_path, exist_ok=True)

    try:
        # Wait for access to the file to become available - in case the image is still being written to.
        # NOTE(review): this is a busy-wait with no timeout; if the file stays locked
        # permanently, this loop never terminates - consider a bounded retry with sleep.
        while True:
            try:
                os.rename(available_filename, available_filename)
                break
            except OSError:
                pass

        available_image = Image.open(available_filename)
    except OSError as error:
        logger.error("OSError while opening image: {}".format(error))
        return
    except Exception as error:
        # Bugfix: this previously caught the undefined name 'Error', which would have
        # raised a NameError instead of handling the exception
        logger.error("Other Error while opening image: {}".format(error))
        return

    # PIL needs the image to be in RGB mode for processing - convert it if necessary.
    # Bugfix: Image.convert returns a *new* image rather than converting in-place,
    # so the result must be assigned (the original call discarded it).
    original_image_mode = available_image.mode

    if original_image_mode != "RGB":
        available_image = available_image.convert('RGB')

    available_size = tuple(available_image.size)

    # If the available image is smaller than 2x2, this won't work
    if available_size[0] < 2:
        logger.warning("Image {} was too small, not proceeding!".format(available_filename))
        return

    # Crop the chosen quarter out of the source tile (box is left, upper, right, lower)
    wanted_image = available_image.crop((int(left_right[0] * available_size[0]),
                                         int(upper_lower[0] * available_size[1]),
                                         int(left_right[1] * available_size[0]),
                                         int(upper_lower[1] * available_size[1])))

    if do_epx_scale:
        wanted_image = epx.scale_epx(wanted_image)

    # If the image has been converted to RGB for processing, convert it back to the
    # original mode (again, convert returns a new image - assign the result)
    if original_image_mode != wanted_image.mode:
        wanted_image = wanted_image.convert(original_image_mode)

    # It is possible that in the time since we last checked whether the image exists,
    # the same request was handled in another thread. This means that the image already
    # exists at this point. This error doesn't matter; in any case, the image exists
    try:
        out_file = open(wanted_filename, 'wb')
    except OSError as error:
        # Bugfix: log the file we actually failed to open (wanted_filename, not
        # available_filename) and return - continuing would crash on the unbound out_file.
        # (The separate IOError clause was removed: IOError is an alias of OSError in Python 3.)
        logger.error("OSError: Could not open new image file {}. Got error: {}".format(wanted_filename, error))
        return

    try:
        wanted_image.save(out_file)
        wanted_image.close()

        # Make sure that the file is completely written and closed - otherwise the client
        # may try to open an image which is still unfinished
        out_file.flush()
        os.fsync(out_file.fileno())
        out_file.close()

        logger.debug("Done saving image {}".format(wanted_filename))
    except IOError as error:
        logger.warning("IOError: Image {} could not be saved! This could be due to another thread having saved it earlier, "
                       "in which case it is not an issue. Error: {}".format(wanted_filename, error))
    except OSError as error:
        # Bugfix: log the file that failed to save (wanted_filename, not available_filename).
        # NOTE(review): unreachable in Python 3 since IOError is OSError - kept for clarity.
        logger.error("OSError: Could not save file {} - this file does not seem valid. Got error: {}".format(wanted_filename, error))
import os import webmercator import logging from PIL import Image from landscapelab import utils from raster import epx from django.contrib.gis.geos import Point from location.models import Scenario from assetpos.models import Tile ZOOM_PATH = "{}" METER_X_PATH = utils.join_path(ZOOM_PATH, "{}") FULL_PATH = utils.join_path(METER_X_PATH, "{}.{}") MAX_STEP_NUMBER = 8 logger = logging.getLogger(__name__) # TODO: maybe add a callback to the parameters, which is called if the file could not # TODO: be found and needs to be downloaded (or generated) externally def get_tile(meter_x: float, meter_y: float, zoom: int, path: str, do_epx_scale=False, file_ending="png"): """Returns the path to the tile at the given coordinates. The given path must lead to a tile directory. This means that the content of this directory must be organized like this: zoom level folders (containing) tile x folders (containing) tile y images If such a tile does not exist, it is created by cropping lower LOD tiles. """