def expire_mbtiles(mbtiles_file, **kwargs):
    scale           = kwargs.get('tile_scale', None)
    zoom            = kwargs.get('zoom', -1)
    min_zoom        = kwargs.get('min_zoom', 0)
    max_zoom        = kwargs.get('max_zoom', 18)
    auto_commit     = kwargs.get('auto_commit', False)
    journal_mode    = kwargs.get('journal_mode', 'wal')
    synchronous_off = kwargs.get('synchronous_off', False)
    expire_days     = kwargs.get('expire', 0)

    if expire_days == 0:
        return

    if zoom >= 0:
        min_zoom = max_zoom = zoom
    elif min_zoom == max_zoom:
        zoom = min_zoom

    con = mbtiles_connect(mbtiles_file, auto_commit, journal_mode, synchronous_off, False, True)

    logger.info("Expiring tiles from %s" % (prettify_connect_string(con.connect_string)))

    expire_timestamp = (int(time.time()) - (int(expire_days) * 86400))
    logger.debug("Expiring tiles older than %s" % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(expire_timestamp))))

    con.expire_tiles(min_zoom, max_zoom, 0, expire_timestamp, scale)

    con.optimize_database(kwargs.get('skip_analyze', False), kwargs.get('skip_vacuum', False))
    con.close()

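# A minimal usage sketch for expire_mbtiles(), assuming a hypothetical
# "tiles.mbtiles" database; the keyword names mirror the kwargs.get() calls above:
#
#   expire_mbtiles("tiles.mbtiles", zoom=12, expire=7)
#
# This drops every zoom-12 tile whose timestamp is older than seven days
# (expire * 86400 seconds).
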
def check_mbtiles(mbtiles_file, **kwargs):
    logger.info("Checking database %s" % (mbtiles_file))

    result = True

    zoom     = kwargs.get('zoom', -1)
    min_zoom = kwargs.get('min_zoom', 0)
    max_zoom = kwargs.get('max_zoom', 255)

    if zoom >= 0:
        min_zoom = max_zoom = zoom

    con = mbtiles_connect(mbtiles_file)
    cur = con.cursor()
    optimize_connection(cur)

    logger.debug("Loading zoom levels")
    zoom_levels = [int(x[0]) for x in cur.execute("SELECT distinct(zoom_level) FROM tiles").fetchall()]

    missing_tiles = []

    for current_zoom_level in zoom_levels:
        if current_zoom_level < min_zoom or current_zoom_level > max_zoom:
            continue

        logger.debug("Starting zoom level %d" % (current_zoom_level))

        t = cur.execute("""SELECT min(tile_column), max(tile_column), min(tile_row), max(tile_row)
            FROM tiles WHERE zoom_level = ?""", [current_zoom_level]).fetchone()
        minX, maxX, minY, maxY = t[0], t[1], t[2], t[3]

        logger.debug(" - Checking zoom level %d, x: %d - %d, y: %d - %d" % (current_zoom_level, minX, maxX, minY, maxY))

        for current_row in range(minY, maxY+1):
            # Guard against a division by zero when the zoom level has a single row
            progress = (float(current_row - minY) / float(maxY - minY)) * 100.0 if minY != maxY else 100.0
            logger.debug(" - Row: %d (%.1f%%)" % (current_row, progress))

            mbtiles_columns = set([int(x[0]) for x in cur.execute("""SELECT tile_column FROM tiles
                WHERE zoom_level=? AND tile_row=?""", (current_zoom_level, current_row)).fetchall()])

            for current_column in range(minX, maxX+1):
                if current_column not in mbtiles_columns:
                    missing_tiles.append([current_zoom_level, current_column, current_row])

    if len(missing_tiles) > 0:
        result = False
        logger.error("(zoom, x, y)")
        for current_tile in missing_tiles:
            logger.error(current_tile)

    return result

def expire_tiles_bbox(mbtiles_file, **kwargs):
    scale           = kwargs.get('tile_scale', None)
    zoom            = kwargs.get('zoom', -1)
    min_zoom        = kwargs.get('min_zoom', 0)
    max_zoom        = kwargs.get('max_zoom', 18)
    flip_tile_y     = kwargs.get('flip_y', False)
    bbox            = kwargs.get('bbox', None)
    tile_bbox       = kwargs.get('tile_bbox', None)
    auto_commit     = kwargs.get('auto_commit', False)
    journal_mode    = kwargs.get('journal_mode', 'wal')
    synchronous_off = kwargs.get('synchronous_off', False)
    print_progress  = kwargs.get('progress', False)

    if zoom >= 0:
        min_zoom = max_zoom = zoom
    elif min_zoom == max_zoom:
        zoom = min_zoom

    if tile_bbox is None and bbox is None:
        logger.info("Either --tile-bbox or --bbox must be given, exiting...")
        return

    min_x = min_y = max_x = max_y = 0
    if tile_bbox:
        min_x, min_y, max_x, max_y = parse_and_convert_tile_bbox(tile_bbox, flip_tile_y)
    else:
        min_x, min_y, max_x, max_y = parse_bbox(bbox)

    con = mbtiles_connect(mbtiles_file, auto_commit, journal_mode, synchronous_off, False, True)

    logger.info("Expiring tiles from %s" % (prettify_connect_string(con.connect_string)))

    for current_zoom in range(min_zoom, max_zoom+1):
        for tile_z, tile_x, tile_y in tiles_for_bbox(min_x, min_y, max_x, max_y, current_zoom, flip_tile_y):
            logger.debug("Expiring tile %d/%d/%d" % (tile_z, tile_x, tile_y))
            if print_progress:
                sys.stdout.write("\rExpiring tile %d/%d/%d" % (tile_z, tile_x, tile_y))
            con.expire_tile(tile_z, tile_x, tile_y, scale)

    if print_progress:
        sys.stdout.write('\n')

    con.optimize_database(kwargs.get('skip_analyze', False), kwargs.get('skip_vacuum', False))
    con.close()

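# A minimal usage sketch; bbox takes lon/lat (left,bottom,right,top) while
# tile_bbox takes tile coordinates, both parsed by helpers defined elsewhere
# in this module:
#
#   expire_tiles_bbox("tiles.mbtiles", bbox="8.0,46.0,10.0,48.0", min_zoom=8, max_zoom=12)
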
def mbtiles_tilelist(mbtiles_file, **kwargs):
    flip_tile_y     = kwargs.get('flip_y', False)
    as_bboxes       = kwargs.get('as_bboxes', False)
    scale           = kwargs.get('tile_scale', None)
    zoom            = kwargs.get('zoom', -1)
    min_zoom        = kwargs.get('min_zoom', 0)
    max_zoom        = kwargs.get('max_zoom', 18)
    auto_commit     = kwargs.get('auto_commit', False)
    journal_mode    = kwargs.get('journal_mode', 'wal')
    synchronous_off = kwargs.get('synchronous_off', False)

    if zoom >= 0:
        min_zoom = max_zoom = zoom

    con = mbtiles_connect(mbtiles_file, auto_commit, journal_mode, synchronous_off, False, True)

    if min_zoom == max_zoom:
        zoom_level_string = "zoom level %d" % (min_zoom)
    else:
        zoom_level_string = "zoom levels %d -> %d" % (min_zoom, max_zoom)

    logger.info("Tile list for %s (%s)" % (prettify_connect_string(con.connect_string), zoom_level_string))

    for tile_z in range(min_zoom, max_zoom+1):
        logger.debug("Starting zoom level %d" % (tile_z))

        for t in con.columns_and_rows_for_zoom_level(tile_z, scale):
            tile_x, tile_y = int(t[0]), int(t[1])

            if as_bboxes:
                # Write the tile's bounding box instead of its z/x/y address
                bbox = convert_tile_to_bbox(tile_z, tile_x, tile_y, flip_tile_y)
                sys.stdout.write("%s\n" % (bbox,))
            else:
                if flip_tile_y:
                    tile_y = flip_y(tile_z, tile_y)
                sys.stdout.write("%d/%d/%d\n" % (tile_z, tile_x, tile_y))

    con.close()

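# For reference: flip_y(), used throughout this module but defined elsewhere,
# converts a tile row between the XYZ and TMS y-axis orientations. A minimal
# sketch of the standard formula, assuming integer inputs:
#
#   def flip_y(zoom, y):
#       return (2 ** int(zoom) - 1) - int(y)
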
def clean_mbtiles(mbtiles_file, **kwargs):
    result = True

    auto_commit     = kwargs.get('auto_commit', False)
    journal_mode    = kwargs.get('journal_mode', 'wal')
    synchronous_off = kwargs.get('synchronous_off', False)

    con = mbtiles_connect(mbtiles_file, auto_commit, journal_mode, synchronous_off, False, True)

    logger.info("Cleaning %s" % (prettify_connect_string(con.connect_string)))

    con.delete_orphaned_images()

    con.close()
    return result

def fill_mbtiles(mbtiles_file, image_filename, **kwargs):
    zoom            = kwargs.get('zoom', -1)
    min_zoom        = kwargs.get('min_zoom', 0)
    max_zoom        = kwargs.get('max_zoom', 18)
    flip_tile_y     = kwargs.get('flip_y', False)
    bbox            = kwargs.get('bbox', None)
    tile_bbox       = kwargs.get('tile_bbox', None)
    auto_commit     = kwargs.get('auto_commit', False)
    journal_mode    = kwargs.get('journal_mode', 'wal')
    synchronous_off = kwargs.get('synchronous_off', False)
    print_progress  = kwargs.get('progress', False)

    if zoom >= 0:
        min_zoom = max_zoom = zoom
    elif min_zoom == max_zoom:
        zoom = min_zoom

    if tile_bbox is not None and zoom < 0:
        logger.info("--tile-bbox can only be used with --zoom, exiting...")
        return

    if tile_bbox is None and bbox is None:
        logger.info("Either --tile-bbox or --bbox must be given, exiting...")
        return

    con = mbtiles_connect(mbtiles_file, auto_commit, journal_mode, synchronous_off, False)
    con.mbtiles_setup()

    if not con.is_compacted():
        con.close()
        logger.info("The mbtiles database must be compacted, exiting...")
        return

    if min_zoom == max_zoom:
        zoom_level_string = "zoom level %d" % (min_zoom)
    else:
        zoom_level_string = "zoom levels %d -> %d" % (min_zoom, max_zoom)

    logger.info("Filling %s (%s)" % (prettify_connect_string(con.connect_string), zoom_level_string))

    # Insert the fill image once; every map row will reference it by its md5 hash
    tmp_file = open(image_filename, "rb")
    tile_data = tmp_file.read()
    tmp_file.close()

    m = hashlib.md5()
    m.update(tile_data)
    tile_id = m.hexdigest()

    con.insert_tile_to_images(tile_id, tile_data)

    count = 0
    start_time = time.time()

    for tile_z in range(min_zoom, max_zoom+1):
        min_x = min_y = max_x = max_y = 0

        if tile_bbox is not None:
            match = re.match(r'(\d+),(\d+),(\d+),(\d+)', tile_bbox, re.I)
            if match:
                min_x, min_y, max_x, max_y = int(match.group(1)), int(match.group(2)), int(match.group(3)), int(match.group(4))
        elif bbox is not None:
            match = re.match(r'([-0-9\.]+),([-0-9\.]+),([-0-9\.]+),([-0-9\.]+)', bbox, re.I)
            if match:
                left, bottom, right, top = float(match.group(1)), float(match.group(2)), float(match.group(3)), float(match.group(4))
                min_x, min_y = coordinate_to_tile(left, bottom, tile_z)
                max_x, max_y = coordinate_to_tile(right, top, tile_z)
                if min_y > max_y:
                    min_y, max_y = max_y, min_y

        for tile_x in range(min_x, max_x+1):
            for tile_y in range(min_y, max_y+1):
                if flip_tile_y:
                    tile_y = flip_y(tile_z, tile_y)

                # z, x, y; don't overwrite existing tiles
                con.insert_tile_to_map(tile_z, tile_x, tile_y, tile_id, False)

                count = count + 1
                if (count % 100) == 0:
                    logger.debug("%d tiles inserted (%.1f tiles/sec)" % (count, count / (time.time() - start_time)))
                    if print_progress:
                        sys.stdout.write("\r%d tiles inserted (%.1f tiles/sec)" % (count, count / (time.time() - start_time)))
                        sys.stdout.flush()

    if print_progress:
        sys.stdout.write('\n')

    logger.info("%d tiles inserted (100.0%% @ %.1f tiles/sec)" % (count, count / (time.time() - start_time)))
    if print_progress:
        sys.stdout.write("%d tiles inserted (100.0%% @ %.1f tiles/sec)\n" % (count, count / (time.time() - start_time)))
        sys.stdout.flush()

    con.optimize_database(kwargs.get('skip_analyze', False), kwargs.get('skip_vacuum', False))
    con.close()

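# A minimal usage sketch for fill_mbtiles(), assuming hypothetical file names.
# Note the constraint enforced above: tile_bbox requires a single zoom level,
# while a lon/lat bbox works across a zoom range:
#
#   fill_mbtiles("world.mbtiles", "blank_sea.png", tile_bbox="532,340,540,348", zoom=10)
#   fill_mbtiles("world.mbtiles", "blank_sea.png", bbox="-180.0,-85.0,180.0,85.0", min_zoom=0, max_zoom=4)
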
def disk_to_mbtiles(directory_path, mbtiles_file, **kwargs):
    auto_commit     = kwargs.get('auto_commit', False)
    journal_mode    = kwargs.get('journal_mode', 'wal')
    synchronous_off = kwargs.get('synchronous_off', False)
    print_progress  = kwargs.get('progress', False)
    flip_tile_y     = kwargs.get('flip_y', False)
    scale           = kwargs.get('tile_scale', None)
    zoom            = kwargs.get('zoom', -1)
    min_zoom        = kwargs.get('min_zoom', 0)
    max_zoom        = kwargs.get('max_zoom', 18)
    tmp_dir         = kwargs.get('tmp_dir', None)

    if tmp_dir and not os.path.isdir(tmp_dir):
        os.mkdir(tmp_dir)

    if zoom >= 0:
        min_zoom = max_zoom = zoom

    con = mbtiles_connect(mbtiles_file, auto_commit, journal_mode, synchronous_off, False)
    con.mbtiles_setup()

    # if not con.is_compacted():
    #     con.close()
    #     logger.info("The mbtiles database must be compacted, exiting...")
    #     return

    if min_zoom == max_zoom:
        zoom_level_string = "zoom level %d" % (min_zoom)
    else:
        zoom_level_string = "zoom levels %d -> %d" % (min_zoom, max_zoom)

    logger.info("Importing path:'%s' --> %s (%s)" % (directory_path, prettify_connect_string(con.connect_string), zoom_level_string))

    image_format = 'png'

    try:
        metadata = json.load(open(os.path.join(directory_path, 'metadata.json'), 'r'))
        image_format = metadata.get('format', 'png')

        # Check that the old and new image formats are the same
        receiving_metadata = con.metadata()
        if receiving_metadata is not None and len(receiving_metadata) > 0:
            original_format = receiving_metadata.get('format')
            if original_format is not None and image_format != original_format:
                sys.stderr.write('The databases to merge must use the same image format (png or jpg)\n')
                sys.exit(1)

        for name, value in metadata.items():
            con.update_metadata(name, value)

        logger.info('metadata from metadata.json restored')
    except IOError:
        logger.warning('metadata.json not found')

    count = 0
    start_time = time.time()

    if print_progress:
        sys.stdout.write("0 tiles imported (0 tiles/sec)")
        sys.stdout.flush()

    known_tile_ids = set()
    tmp_images_list = []
    tmp_row_list = []
    tmp_tiles_list = []

    for r1, zs, ignore in os.walk(os.path.join(directory_path, "tiles")):
        for tile_z in zs:
            if int(tile_z) < min_zoom or int(tile_z) > max_zoom:
                continue
            for r2, xs, ignore in os.walk(os.path.join(r1, tile_z)):
                for tile_x in xs:
                    for r2, ignore, ys in os.walk(os.path.join(r1, tile_z, tile_x)):
                        for tile_y in ys:
                            tile_y, extension = tile_y.split('.')

                            f = open(os.path.join(r1, tile_z, tile_x, tile_y) + '.' + extension, 'rb')
                            tile_data = f.read()
                            f.close()

                            if flip_tile_y:
                                tile_y = str(flip_y(tile_z, tile_y))

                            # Execute commands
                            if kwargs.get('command_list'):
                                tile_data = execute_commands_on_tile(kwargs['command_list'], image_format, tile_data, tmp_dir)

                            if con.is_compacted():
                                m = hashlib.md5()
                                m.update(tile_data)
                                tile_id = m.hexdigest()

                                if tile_id not in known_tile_ids:
                                    tmp_images_list.append( (tile_id, tile_data) )
                                    known_tile_ids.add(tile_id)

                                tmp_row_list.append( (tile_z, tile_x, tile_y, 1, tile_id, int(time.time())) )
                            else:
                                tmp_tiles_list.append( (tile_z, tile_x, tile_y, 1, tile_data, int(time.time())) )

                            count = count + 1
                            if (count % 100) == 0:
                                logger.debug("%d tiles imported (%.1f tiles/sec)" % (count, count / (time.time() - start_time)))
                                if print_progress:
                                    sys.stdout.write("\r%d tiles imported (%.1f tiles/sec)" % (count, count / (time.time() - start_time)))
                                    sys.stdout.flush()

                            if len(tmp_images_list) > 250:
                                con.insert_tiles_to_images(tmp_images_list)
                                tmp_images_list = []

                            if len(tmp_row_list) > 250:
                                con.insert_tiles_to_map(tmp_row_list)
                                tmp_row_list = []

                            if len(tmp_tiles_list) > 250:
                                con.insert_tiles(tmp_tiles_list)
                                tmp_tiles_list = []

    # Push the remaining rows to the database
    if len(tmp_images_list) > 0:
        con.insert_tiles_to_images(tmp_images_list)
    if len(tmp_row_list) > 0:
        con.insert_tiles_to_map(tmp_row_list)
    if len(tmp_tiles_list) > 0:
        con.insert_tiles(tmp_tiles_list)

    if print_progress:
        sys.stdout.write('\n')

    logger.info("%d tiles imported." % (count))
    if print_progress:
        sys.stdout.write("%d tiles imported.\n" % (count))
        sys.stdout.flush()

    con.optimize_database(kwargs.get('skip_analyze', False), kwargs.get('skip_vacuum', False))
    con.close()

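# A minimal usage sketch, assuming an on-disk tile tree as produced by
# mbtiles_to_disk() below (tiles/<z>/<x>/<y>.<ext> plus metadata.json):
#
#   disk_to_mbtiles("exported_tiles/", "imported.mbtiles", min_zoom=0, max_zoom=14, progress=True)
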
def check_mbtiles(mbtiles_file, **kwargs):
    result = True

    scale           = kwargs.get('tile_scale', None)
    zoom            = kwargs.get('zoom', -1)
    min_zoom        = kwargs.get('min_zoom', 0)
    max_zoom        = kwargs.get('max_zoom', 18)
    flip_tile_y     = kwargs.get('flip_y', False)
    auto_commit     = kwargs.get('auto_commit', False)
    journal_mode    = kwargs.get('journal_mode', 'wal')
    synchronous_off = kwargs.get('synchronous_off', False)

    if zoom >= 0:
        min_zoom = max_zoom = zoom

    con = mbtiles_connect(mbtiles_file, auto_commit, journal_mode, synchronous_off, False, True)

    if min_zoom == max_zoom:
        zoom_level_string = "zoom level %d" % (min_zoom)
    else:
        zoom_level_string = "zoom levels %d -> %d" % (min_zoom, max_zoom)

    logger.info("Checking %s (%s)" % (prettify_connect_string(con.connect_string), zoom_level_string))

    logger.debug("Loading zoom levels")
    zoom_levels = con.zoom_levels(scale)

    missing_tiles = []

    for tile_z in zoom_levels:
        if tile_z < min_zoom or tile_z > max_zoom:
            continue

        logger.debug("Starting zoom level %d" % (tile_z))

        t = con.bounding_box_for_zoom_level(tile_z, scale)
        minX, maxX, minY, maxY = t[0], t[1], t[2], t[3]

        logger.debug(" - Checking zoom level %d, x: %d - %d, y: %d - %d" % (tile_z, minX, maxX, minY, maxY))

        for tile_y in range(minY, maxY+1):
            # Guard against a division by zero when the zoom level has a single row
            progress = (float(tile_y - minY) / float(maxY - minY)) * 100.0 if minY != maxY else 100.0
            logger.debug(" - Row: %d (%.1f%%)" % (tile_y, progress))

            mbtiles_columns = con.columns_for_zoom_level_and_row(tile_z, tile_y, scale)

            for tile_x in range(minX, maxX+1):
                if tile_x not in mbtiles_columns:
                    # Report the missing tile in the requested y orientation without
                    # clobbering the row loop variable
                    missing_y = flip_y(tile_z, tile_y) if flip_tile_y else tile_y
                    missing_tiles.append([tile_z, tile_x, missing_y])

    if len(missing_tiles) > 0:
        result = False
        logger.error("(zoom, x, y)")
        for current_tile in missing_tiles:
            logger.error(current_tile)

    con.close()

    if result:
        logger.info("Check succeeded")
    else:
        logger.info("Check failed")

    return result

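# A minimal usage sketch; the return value reports whether every zoom level's
# bounding box is fully populated:
#
#   if not check_mbtiles("tiles.mbtiles", min_zoom=0, max_zoom=10):
#       sys.exit(1)
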
def update_mbtiles(mbtiles_file1, mbtiles_file2, **kwargs):
    scale           = kwargs.get('tile_scale', None)
    zoom            = kwargs.get('zoom', -1)
    min_zoom        = kwargs.get('min_zoom', 0)
    max_zoom        = kwargs.get('max_zoom', 18)
    auto_commit     = kwargs.get('auto_commit', False)
    journal_mode    = kwargs.get('journal_mode', 'wal')
    synchronous_off = kwargs.get('synchronous_off', False)
    print_progress  = kwargs.get('progress', False)
    flip_tile_y     = kwargs.get('flip_y', False)

    con1 = mbtiles_connect(mbtiles_file1, auto_commit, journal_mode, synchronous_off, False, False)
    con2 = mbtiles_connect(mbtiles_file2, auto_commit, journal_mode, synchronous_off, False, True)

    con1.mbtiles_setup()

    if not con1.is_compacted() or not con2.is_compacted():
        con1.close()
        con2.close()
        sys.stderr.write('To update mbtiles databases, both databases must already be compacted\n')
        sys.exit(1)

    if min_zoom == max_zoom:
        zoom_level_string = "zoom level %d" % (min_zoom)
    else:
        zoom_level_string = "zoom levels %d -> %d" % (min_zoom, max_zoom)

    logger.info("Updating %s --> %s (%s)" % (prettify_connect_string(con2.connect_string), prettify_connect_string(con1.connect_string), zoom_level_string))

    # Check that the old and new image formats are the same
    original_format = new_format = None

    try:
        original_format = con1.metadata().get('format')
    except:
        pass
    try:
        new_format = con2.metadata().get('format')
    except:
        pass

    if new_format is None:
        logger.info("No image format found in the sending database, assuming 'png'")
        new_format = "png"

    if original_format is not None and new_format != original_format:
        con1.close()
        con2.close()
        sys.stderr.write('The files to merge must use the same image format (png or jpg)\n')
        sys.exit(1)

    if original_format is None and new_format is not None:
        con1.update_metadata("format", new_format)

    if new_format is None:
        new_format = original_format

    count = 0
    start_time = time.time()

    min_timestamp = con1.max_timestamp()
    if min_timestamp is None:
        min_timestamp = 0
    max_timestamp = int(time.time())

    total_tiles = con2.updates_count(min_zoom, max_zoom, min_timestamp, max_timestamp)
    if total_tiles == 0:
        con1.close()
        con2.close()
        sys.stderr.write('No tiles to update, exiting...\n')
        return

    logger.debug("%d tiles to update" % (total_tiles))
    if print_progress:
        sys.stdout.write("%d tiles to update\n" % (total_tiles))
        sys.stdout.write("0 tiles updated (0% @ 0 tiles/sec)")
        sys.stdout.flush()

    known_tile_ids = set()
    tmp_images_list = []
    tmp_row_list = []
    deleted_tiles_count = 0

    for t in con2.updates(min_zoom, max_zoom, min_timestamp, max_timestamp):
        tile_z = t[0]
        tile_x = t[1]
        tile_y = t[2]
        tile_scale = t[3]
        tile_data = str(t[4])
        tile_id = t[5]

        if flip_tile_y:
            tile_y = flip_y(tile_z, tile_y)

        if tile_id is None:
            deleted_tiles_count += 1

        if tile_id and tile_id not in known_tile_ids:
            tmp_images_list.append( (tile_id, tile_data) )
            known_tile_ids.add(tile_id)

        tmp_row_list.append( (tile_z, tile_x, tile_y, tile_scale, tile_id, int(time.time())) )

        count = count + 1
        if (count % 100) == 0:
            logger.debug("%d tiles merged (%.1f%% @ %.1f tiles/sec)" % (count, (float(count) / float(total_tiles)) * 100.0, count / (time.time() - start_time)))
            if print_progress:
                sys.stdout.write("\r%d tiles merged (%.1f%% @ %.1f tiles/sec)" % (count, (float(count) / float(total_tiles)) * 100.0, count / (time.time() - start_time)))
                sys.stdout.flush()

        if len(tmp_images_list) > 250:
            con1.insert_tiles_to_images(tmp_images_list)
            tmp_images_list = []

        if len(tmp_row_list) > 250:
            con1.insert_tiles_to_map(tmp_row_list)
            tmp_row_list = []

    # Push the remaining rows to the database
    if len(tmp_images_list) > 0:
        con1.insert_tiles_to_images(tmp_images_list)
    if len(tmp_row_list) > 0:
        con1.insert_tiles_to_map(tmp_row_list)

    if print_progress:
        sys.stdout.write('\n')

    logger.info("%d tiles merged (100.0%% @ %.1f tiles/sec)" % (count, count / (time.time() - start_time)))
    if print_progress:
        sys.stdout.write("%d tiles merged (100.0%% @ %.1f tiles/sec)\n" % (count, count / (time.time() - start_time)))
        sys.stdout.flush()

    if deleted_tiles_count > 0:
        logger.info("Deleting %d orphaned image(s)" % (deleted_tiles_count))
        if print_progress:
            sys.stdout.write("Deleting %d orphaned image(s)\n" % (deleted_tiles_count))
            sys.stdout.flush()
        con1.delete_orphaned_images()

    con1.close()
    con2.close()

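# A minimal usage sketch, assuming two hypothetical compacted databases. The
# update window starts at the receiver's newest timestamp (con1.max_timestamp()
# above), so only tiles changed in the sender since the last update are copied:
#
#   update_mbtiles("master.mbtiles", "incoming.mbtiles", min_zoom=0, max_zoom=14)
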
def mbtiles_to_disk(mbtiles_file, directory_path, **kwargs):
    delete_after_export = kwargs.get('delete_after_export', False)
    auto_commit     = kwargs.get('auto_commit', False)
    journal_mode    = kwargs.get('journal_mode', 'wal')
    synchronous_off = kwargs.get('synchronous_off', False)
    zoom            = kwargs.get('zoom', -1)
    min_zoom        = kwargs.get('min_zoom', 0)
    max_zoom        = kwargs.get('max_zoom', 18)
    tmp_dir         = kwargs.get('tmp_dir', None)
    print_progress  = kwargs.get('progress', False)
    flip_tile_y     = kwargs.get('flip_y', False)
    min_timestamp   = kwargs.get('min_timestamp', 0)
    max_timestamp   = kwargs.get('max_timestamp', 0)

    if tmp_dir and not os.path.isdir(tmp_dir):
        os.mkdir(tmp_dir)

    if zoom >= 0:
        min_zoom = max_zoom = zoom

    con = mbtiles_connect(mbtiles_file, auto_commit, journal_mode, synchronous_off, False, True)

    if min_zoom == max_zoom:
        zoom_level_string = "zoom level %d" % (min_zoom)
    else:
        zoom_level_string = "zoom levels %d -> %d" % (min_zoom, max_zoom)

    logger.info("Exporting %s --> path:'%s' (%s)" % (prettify_connect_string(con.connect_string), directory_path, zoom_level_string))

    if not os.path.isdir(directory_path):
        os.mkdir(directory_path)

    base_path = os.path.join(directory_path, "tiles")
    if not os.path.isdir(base_path):
        os.makedirs(base_path)

    metadata = con.metadata()
    json.dump(metadata, open(os.path.join(directory_path, 'metadata.json'), 'w'), indent=4)

    count = 0
    start_time = time.time()
    image_format = metadata.get('format', 'png')

    sending_mbtiles_is_compacted = con.is_compacted()
    if not sending_mbtiles_is_compacted and (min_timestamp != 0 or max_timestamp != 0):
        con.close()
        sys.stderr.write('min-timestamp/max-timestamp can only be used with compacted databases.\n')
        sys.exit(1)

    total_tiles = con.tiles_count(min_zoom, max_zoom, min_timestamp, max_timestamp)

    logger.debug("%d tiles to export" % (total_tiles))
    if print_progress:
        sys.stdout.write("%d tiles to export\n" % (total_tiles))
        sys.stdout.write("%d / %d tiles exported (0%% @ 0 tiles/sec)" % (count, total_tiles))
        sys.stdout.flush()

    for t in con.tiles(min_zoom, max_zoom, min_timestamp, max_timestamp):
        tile_z = t[0]
        tile_x = t[1]
        tile_y = t[2]
        tile_data = str(t[3])

        # Execute commands
        if kwargs.get('command_list'):
            tile_data = execute_commands_on_tile(kwargs['command_list'], image_format, tile_data, tmp_dir)

        if flip_tile_y:
            tile_y = flip_y(tile_z, tile_y)

        tile_dir = os.path.join(base_path, str(tile_z), str(tile_x))
        if not os.path.isdir(tile_dir):
            os.makedirs(tile_dir)

        tile_file = os.path.join(tile_dir, '%s.%s' % (tile_y, metadata.get('format', 'png')))

        f = open(tile_file, 'wb')
        f.write(tile_data)
        f.close()

        count = count + 1
        if (count % 100) == 0:
            logger.debug("%d / %d tiles exported (%.1f%% @ %.1f tiles/sec)" % (count, total_tiles, (float(count) / float(total_tiles)) * 100.0, count / (time.time() - start_time)))
            if print_progress:
                sys.stdout.write("\r%d / %d tiles exported (%.1f%% @ %.1f tiles/sec)" % (count, total_tiles, (float(count) / float(total_tiles)) * 100.0, count / (time.time() - start_time)))
                sys.stdout.flush()

    if print_progress:
        sys.stdout.write('\n')

    logger.info("%d / %d tiles exported (100.0%% @ %.1f tiles/sec)" % (count, total_tiles, count / (time.time() - start_time)))
    if print_progress:
        sys.stdout.write("%d / %d tiles exported (100.0%% @ %.1f tiles/sec)\n" % (count, total_tiles, count / (time.time() - start_time)))
        sys.stdout.flush()

    if delete_after_export:
        logger.debug("WARNING: Removing exported tiles from %s" % (mbtiles_file))
        con.delete_tiles(min_zoom, max_zoom, min_timestamp, max_timestamp)
        con.optimize_database(kwargs.get('skip_analyze', False), kwargs.get('skip_vacuum', False))

    con.close()

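# A minimal usage sketch, assuming a hypothetical database; this writes
# tiles/<z>/<x>/<y>.<format> under the target directory plus a metadata.json:
#
#   mbtiles_to_disk("tiles.mbtiles", "exported_tiles/", zoom=10, flip_y=True)
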
def execute_commands_on_mbtiles(mbtiles_file, **kwargs):
    logger.info("Executing commands on database %s" % (mbtiles_file))

    if kwargs.get('command_list') is None or len(kwargs['command_list']) == 0:
        return

    auto_commit       = kwargs.get('auto_commit', False)
    zoom              = kwargs.get('zoom', -1)
    min_zoom          = kwargs.get('min_zoom', 0)
    max_zoom          = kwargs.get('max_zoom', 255)
    default_pool_size = kwargs.get('poolsize', -1)

    if zoom >= 0:
        min_zoom = max_zoom = zoom

    con = mbtiles_connect(mbtiles_file, auto_commit)
    cur = con.cursor()
    optimize_connection(cur)

    existing_mbtiles_is_compacted = (con.execute("select count(name) from sqlite_master where type='table' AND name='images';").fetchone()[0] > 0)
    if not existing_mbtiles_is_compacted:
        logger.info("The mbtiles file must be compacted, exiting...")
        return

    image_format = 'png'
    try:
        image_format = con.execute("select value from metadata where name='format';").fetchone()[0]
    except:
        pass

    count = 0
    duplicates = 0
    chunk = 1000
    start_time = time.time()
    processed_tile_ids = set()

    max_rowid = (con.execute("select max(rowid) from map").fetchone()[0])
    total_tiles = (con.execute("""select count(distinct(tile_id)) from map
        where zoom_level>=? and zoom_level<=?""", (min_zoom, max_zoom)).fetchone()[0])

    logger.debug("%d tiles to process" % (total_tiles))

    logger.debug("Creating an index for the tile_id column...")
    con.execute("""CREATE INDEX IF NOT EXISTS tile_id_index ON map (tile_id)""")
    logger.debug("...done")

    if default_pool_size < 1:
        default_pool_size = None
        logger.debug("Using default pool size")
    else:
        logger.debug("Using pool size = %d" % (default_pool_size))

    pool = Pool(default_pool_size)
    multiprocessing.log_to_stderr(logger.level)

    for i in range((max_rowid // chunk) + 1):
        # logger.debug("Starting range %d-%d" % (i*chunk, (i+1)*chunk))
        tiles = cur.execute("""select images.tile_id, images.tile_data, map.zoom_level, map.tile_column, map.tile_row
            from map, images
            where (map.rowid > ? and map.rowid <= ?)
            and (map.zoom_level>=? and map.zoom_level<=?)
            and (images.tile_id == map.tile_id)""",
            ((i * chunk), ((i + 1) * chunk), min_zoom, max_zoom))

        tiles_to_process = []

        t = tiles.fetchone()
        while t:
            tile_id = t[0]
            tile_data = t[1]
            # tile_z = t[2]
            # tile_x = t[3]
            # tile_y = t[4]
            # logging.debug("Working on tile (%d, %d, %d)" % (tile_z, tile_x, tile_y))

            if tile_id in processed_tile_ids:
                duplicates = duplicates + 1
            else:
                processed_tile_ids.add(tile_id)

                tmp_file_fd, tmp_file_name = tempfile.mkstemp(suffix=".%s" % (image_format), prefix="tile_")
                tmp_file = os.fdopen(tmp_file_fd, "wb")
                tmp_file.write(tile_data)
                tmp_file.close()

                tiles_to_process.append({
                    'tile_id' : tile_id,
                    'filename' : tmp_file_name,
                    'format' : image_format,
                    'command_list' : kwargs.get('command_list', [])
                })

            t = tiles.fetchone()

        if len(tiles_to_process) == 0:
            continue

        # Execute commands in parallel
        # logger.debug("Starting multiprocessing...")
        processed_tiles = pool.map(process_tile, tiles_to_process)

        # logger.debug("Starting reimport...")
        for next_tile in processed_tiles:
            tile_id, tile_file_path = next_tile['tile_id'], next_tile['filename']

            tmp_file = open(tile_file_path, "rb")
            tile_data = tmp_file.read()
            tmp_file.close()
            os.remove(tile_file_path)

            if tile_data and len(tile_data) > 0:
                m = hashlib.md5()
                m.update(tile_data)
                new_tile_id = m.hexdigest()

                cur.execute("""insert or ignore into images (tile_id, tile_data) values (?, ?)""",
                    (new_tile_id, sqlite3.Binary(tile_data)))
                cur.execute("""update map set tile_id=? where tile_id=?""", (new_tile_id, tile_id))
                if tile_id != new_tile_id:
                    cur.execute("""delete from images where tile_id=?""", [tile_id])

            # logger.debug("Tile %s done\n" % (tile_id, ))

            count = count + 1
            if (count % 100) == 0:
                logger.debug("%s tiles finished (%.1f%%, %.1f tiles/sec)" % (count, (float(count) / float(total_tiles)) * 100.0, count / (time.time() - start_time)))

    logger.info("%s tiles finished, %d duplicates ignored (100.0%%, %.1f tiles/sec)" % (count, duplicates, count / (time.time() - start_time)))

    pool.close()
    con.commit()
    con.close()

def execute_commands_on_mbtiles(mbtiles_file, **kwargs):
    if kwargs.get('command_list') is None or len(kwargs['command_list']) == 0:
        return

    auto_commit       = kwargs.get('auto_commit', False)
    journal_mode      = kwargs.get('journal_mode', 'wal')
    synchronous_off   = kwargs.get('synchronous_off', False)
    zoom              = kwargs.get('zoom', -1)
    min_zoom          = kwargs.get('min_zoom', 0)
    max_zoom          = kwargs.get('max_zoom', 18)
    tmp_dir           = kwargs.get('tmp_dir', None)
    default_pool_size = kwargs.get('poolsize', -1)
    print_progress    = kwargs.get('progress', False)
    min_timestamp     = kwargs.get('min_timestamp', 0)
    max_timestamp     = kwargs.get('max_timestamp', 0)
    delete_vanished_tiles = kwargs.get('delete_vanished_tiles', False)

    if tmp_dir and not os.path.isdir(tmp_dir):
        os.mkdir(tmp_dir)

    if zoom >= 0:
        min_zoom = max_zoom = zoom

    con = mbtiles_connect(mbtiles_file, auto_commit, journal_mode, synchronous_off, False, True)

    if not con.is_compacted():
        con.close()
        logger.info("The mbtiles database must be compacted, exiting...")
        return

    con.mbtiles_setup()

    if min_zoom == max_zoom:
        zoom_level_string = "zoom level %d" % (min_zoom)
    else:
        zoom_level_string = "zoom levels %d -> %d" % (min_zoom, max_zoom)

    logger.info("Executing commands on %s (%s)" % (prettify_connect_string(con.connect_string), zoom_level_string))

    image_format = 'png'
    metadata = con.metadata()
    if 'format' in metadata:
        image_format = metadata['format']

    count = 0
    duplicates = 0
    chunk = 1000
    start_time = time.time()

    total_tiles = con.tiles_count(min_zoom, max_zoom, min_timestamp, max_timestamp)

    logger.debug("%d tiles to process" % (total_tiles))
    if print_progress:
        sys.stdout.write("%d tiles to process\n" % (total_tiles))
        sys.stdout.write("0 tiles finished (0% @ 0 tiles/sec)")
        sys.stdout.flush()

    logger.debug("Creating an index for the tile_id column...")
    con.create_map_tile_index()
    logger.debug("...done")

    if default_pool_size < 1:
        default_pool_size = None
        logger.debug("Using default pool size")
    else:
        logger.debug("Using pool size = %d" % (default_pool_size))

    pool = Pool(default_pool_size)
    multiprocessing.log_to_stderr(logger.level)

    tiles_to_process = []
    processed_tile_ids = set()

    for t in con.tiles_with_tile_id(min_zoom, max_zoom, min_timestamp, max_timestamp):
        tile_z = t[0]
        tile_x = t[1]
        tile_y = t[2]
        tile_data = str(t[3])
        tile_id = t[4]
        # logging.debug("Working on tile (%d, %d, %d)" % (tile_z, tile_x, tile_y))

        if tile_id in processed_tile_ids:
            duplicates = duplicates + 1
        else:
            processed_tile_ids.add(tile_id)

            tmp_file_fd, tmp_file_name = tempfile.mkstemp(suffix=".%s" % (image_format), prefix="tile_", dir=tmp_dir)
            tmp_file = os.fdopen(tmp_file_fd, "wb")
            tmp_file.write(tile_data)
            tmp_file.close()

            tiles_to_process.append({
                'tile_id' : tile_id,
                'tile_x' : tile_x,
                'tile_y' : tile_y,
                'tile_z' : tile_z,
                'filename' : tmp_file_name,
                'format' : image_format,
                'size' : len(tile_data),
                'command_list' : kwargs.get('command_list', [])
            })

        if len(tiles_to_process) < chunk:
            continue

        count = process_tiles(pool, tiles_to_process, con, count, total_tiles, start_time, print_progress, delete_vanished_tiles)
        tiles_to_process = []

    if len(tiles_to_process) > 0:
        count = process_tiles(pool, tiles_to_process, con, count, total_tiles, start_time, print_progress, delete_vanished_tiles)

    if print_progress:
        sys.stdout.write('\n')

    logger.info("%d tiles finished, %d duplicates ignored (100.0%% @ %.1f tiles/sec)" % (count, duplicates, count / (time.time() - start_time)))
    if print_progress:
        sys.stdout.write("%d tiles finished, %d duplicates ignored (100.0%% @ %.1f tiles/sec)\n" % (count, duplicates, count / (time.time() - start_time)))
        sys.stdout.flush()

    pool.close()

    logger.debug("Dropping index for the tile_id column...")
    con.drop_map_tile_index()
    logger.debug("...done")

    con.optimize_database(kwargs.get('skip_analyze', False), kwargs.get('skip_vacuum', False))
    con.close()

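# A minimal usage sketch, assuming command_list holds shell commands understood
# by execute_commands_on_tile() (defined elsewhere in this module), e.g. an
# image optimizer run once per unique tile image. The "%s" placeholder for the
# tile's temp file path is an assumption; the exact substitution syntax depends
# on execute_commands_on_tile():
#
#   execute_commands_on_mbtiles("tiles.mbtiles", command_list=["optipng -quiet %s"],
#                               poolsize=4, progress=True)
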
def compact_mbtiles(mbtiles_file, **kwargs):
    logger.info("Compacting database %s" % (mbtiles_file))

    con = mbtiles_connect(mbtiles_file)
    cur = con.cursor()
    optimize_connection(cur)

    existing_mbtiles_is_compacted = (con.execute("SELECT count(name) FROM sqlite_master WHERE type='table' AND name='images'").fetchone()[0] > 0)
    if existing_mbtiles_is_compacted:
        logger.info("The mbtiles file is already compacted")
        return

    overlapping = 0
    unique = 0
    count = 0
    chunk = 100
    start_time = time.time()

    total_tiles = con.execute("SELECT count(zoom_level) FROM tiles").fetchone()[0]
    max_rowid = con.execute("SELECT max(rowid) FROM tiles").fetchone()[0]

    logger.debug("%d total tiles" % total_tiles)

    compaction_prepare(cur)

    for i in range((max_rowid // chunk) + 1):
        cur.execute("""SELECT zoom_level, tile_column, tile_row, tile_data FROM tiles
            WHERE rowid > ? AND rowid <= ?""", ((i * chunk), ((i + 1) * chunk)))
        rows = cur.fetchall()

        for r in rows:
            z = r[0]
            x = r[1]
            y = r[2]
            tile_data = r[3]

            # Execute commands
            if kwargs.get('command_list'):
                tile_data = execute_commands_on_tile(kwargs['command_list'], "png", tile_data)

            m = hashlib.md5()
            m.update(tile_data)
            tile_id = m.hexdigest()

            try:
                cur.execute("""INSERT INTO images (tile_id, tile_data) VALUES (?, ?)""",
                    (tile_id, sqlite3.Binary(tile_data)))
            except:
                overlapping = overlapping + 1
            else:
                unique = unique + 1

            cur.execute("""REPLACE INTO map (zoom_level, tile_column, tile_row, tile_id) VALUES (?, ?, ?, ?)""",
                (z, x, y, tile_id))

            count = count + 1
            if (count % 100) == 0:
                logger.debug("%s tiles finished, %d unique, %d duplicates (%.1f%%, %.1f tiles/sec)" % (count, unique, overlapping, (float(count) / float(total_tiles)) * 100.0, count / (time.time() - start_time)))

    logger.info("%s tiles finished, %d unique, %d duplicates (100.0%%, %.1f tiles/sec)" % (count, unique, overlapping, count / (time.time() - start_time)))

    compaction_finalize(cur)
    con.commit()
    con.close()

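# For reference, "compacted" means the single tiles table is split into an
# images table (deduplicated blobs keyed by md5 hash) and a map table (z/x/y
# rows referencing a tile_id). The actual DDL lives in compaction_prepare();
# a sketch of its shape, inferred from the queries in this module:
#
#   CREATE TABLE images (tile_id TEXT PRIMARY KEY, tile_data BLOB);
#   CREATE TABLE map (zoom_level INTEGER, tile_column INTEGER,
#                     tile_row INTEGER, tile_id TEXT);
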
def merge_mbtiles(mbtiles_file1, mbtiles_file2, **kwargs):
    scale           = kwargs.get('tile_scale', None)
    zoom            = kwargs.get('zoom', -1)
    min_zoom        = kwargs.get('min_zoom', 0)
    max_zoom        = kwargs.get('max_zoom', 18)
    tmp_dir         = kwargs.get('tmp_dir', None)
    auto_commit     = kwargs.get('auto_commit', False)
    journal_mode    = kwargs.get('journal_mode', 'wal')
    synchronous_off = kwargs.get('synchronous_off', False)
    min_timestamp   = kwargs.get('min_timestamp', 0)
    max_timestamp   = kwargs.get('max_timestamp', 0)
    delete_after_export = kwargs.get('delete_after_export', False)
    print_progress  = kwargs.get('progress', False)
    delete_vanished_tiles = kwargs.get('delete_vanished_tiles', False)
    flip_tile_y     = kwargs.get('flip_y', False)
    debug           = kwargs.get('debug', False)

    if tmp_dir and not os.path.isdir(tmp_dir):
        os.mkdir(tmp_dir)

    if zoom >= 0:
        min_zoom = max_zoom = zoom

    check_before_merge = kwargs.get('check_before_merge', False)
    if check_before_merge and not check_mbtiles(mbtiles_file2, **kwargs):
        sys.stderr.write("The pre-merge check on %s failed\n" % (mbtiles_file2))
        sys.exit(1)

    con1 = mbtiles_connect(mbtiles_file1, auto_commit, journal_mode, synchronous_off, False, False)
    con2 = mbtiles_connect(mbtiles_file2, auto_commit, journal_mode, synchronous_off, False, True)

    con1.mbtiles_setup()

    # if not con1.is_compacted():
    #     sys.stderr.write('To merge two mbtiles databases, the receiver must already be compacted\n')
    #     con1.close()
    #     con2.close()
    #     sys.exit(1)

    if not con2.is_compacted() and (min_timestamp != 0 or max_timestamp != 0):
        con1.close()
        con2.close()
        sys.stderr.write('min-timestamp/max-timestamp can only be used with compacted databases.\n')
        sys.exit(1)

    if min_zoom == max_zoom:
        zoom_level_string = "zoom level %d" % (min_zoom)
    else:
        zoom_level_string = "zoom levels %d -> %d" % (min_zoom, max_zoom)

    logger.info("Merging %s --> %s (%s)" % (prettify_connect_string(con2.connect_string), prettify_connect_string(con1.connect_string), zoom_level_string))

    # Check that the old and new image formats are the same
    original_format = new_format = None
    try:
        original_format = con1.metadata().get('format')
    except:
        pass
    try:
        new_format = con2.metadata().get('format')
    except:
        pass

    if new_format is None:
        logger.info("No image format found in the sending database, assuming 'png'")
        new_format = "png"

    if original_format is not None and new_format != original_format:
        con1.close()
        con2.close()
        sys.stderr.write('The files to merge must use the same image format (png or jpg)\n')
        sys.exit(1)

    if original_format is None and new_format is not None:
        con1.update_metadata("format", new_format)

    if new_format is None:
        new_format = original_format

    count = 0
    start_time = time.time()
    chunk = 1000

    total_tiles = 1
    if print_progress or debug:
        total_tiles = con2.tiles_count(min_zoom, max_zoom, min_timestamp, max_timestamp, scale)
        if total_tiles == 0:
            con1.close()
            con2.close()
            sys.stderr.write('No tiles to merge, exiting...\n')
            return

        logger.debug("%d tiles to merge" % (total_tiles))
        if print_progress:
            sys.stdout.write("%d tiles to merge\n" % (total_tiles))
            sys.stdout.write("0 tiles merged (0% @ 0 tiles/sec)")
            sys.stdout.flush()

    # merge and process (--merge --execute)
    if con2.is_compacted() and kwargs.get('command_list'):
        default_pool_size = kwargs.get('poolsize', -1)
        if default_pool_size < 1:
            default_pool_size = None
            logger.debug("Using default pool size")
        else:
            logger.debug("Using pool size = %d" % (default_pool_size))

        pool = Pool(default_pool_size)
        multiprocessing.log_to_stderr(logger.level)

        tiles_to_process = []
        known_tile_ids = {}

        for t in con2.tiles_with_tile_id(min_zoom, max_zoom, min_timestamp, max_timestamp, scale):
            tile_z = t[0]
            tile_x = t[1]
            tile_y = t[2]
            tile_scale = t[3]
            tile_data = str(t[4])
            tile_id = t[5]

            if flip_tile_y:
                tile_y = flip_y(tile_z, tile_y)

            new_tile_id = known_tile_ids.get(tile_id)
            if new_tile_id is None:
                tmp_file_fd, tmp_file_name = tempfile.mkstemp(suffix=".%s" % (new_format), prefix="tile_", dir=tmp_dir)
                tmp_file = os.fdopen(tmp_file_fd, "wb")
                tmp_file.write(tile_data)
                tmp_file.close()

                tiles_to_process.append({
                    'tile_id' : tile_id,
                    'filename' : tmp_file_name,
                    'format' : new_format,
                    'size' : len(tile_data),
                    'command_list' : kwargs['command_list'],
                    'tile_x' : tile_x,
                    'tile_y' : tile_y,
                    'tile_z' : tile_z,
                    'tile_scale' : tile_scale
                })
            else:
                con1.insert_tile_to_map(tile_z, tile_x, tile_y, tile_scale, new_tile_id)

                count = count + 1
                if (count % 100) == 0:
                    logger.debug("%d tiles merged (%.1f%% @ %.1f tiles/sec)" % (count, (float(count) / float(total_tiles)) * 100.0, count / (time.time() - start_time)))
                    if print_progress:
                        sys.stdout.write("\r%d tiles merged (%.1f%% @ %.1f tiles/sec)" % (count, (float(count) / float(total_tiles)) * 100.0, count / (time.time() - start_time)))
                        sys.stdout.flush()

            if len(tiles_to_process) < chunk:
                continue

            count = process_tiles(pool, tiles_to_process, con1, count, total_tiles, start_time, print_progress, delete_vanished_tiles, known_tile_ids)
            tiles_to_process = []

        if len(tiles_to_process) > 0:
            count = process_tiles(pool, tiles_to_process, con1, count, total_tiles, start_time, print_progress, delete_vanished_tiles, known_tile_ids)

    # merge from a compacted database (--merge)
    elif con2.is_compacted():
        known_tile_ids = set()
        tmp_images_list = []
        tmp_row_list = []
        tmp_tiles_list = []

        for t in con2.tiles_with_tile_id(min_zoom, max_zoom, min_timestamp, max_timestamp, scale):
            tile_z = t[0]
            tile_x = t[1]
            tile_y = t[2]
            tile_scale = t[3]
            tile_data = str(t[4])
            tile_id = t[5]

            if flip_tile_y:
                tile_y = flip_y(tile_z, tile_y)

            if con1.is_compacted():
                if tile_id not in known_tile_ids:
                    tmp_images_list.append( (tile_id, tile_data) )
                    known_tile_ids.add(tile_id)

                tmp_row_list.append( (tile_z, tile_x, tile_y, tile_scale, tile_id, int(time.time())) )
            else:
                tmp_tiles_list.append( (tile_z, tile_x, tile_y, tile_scale, tile_data, int(time.time())) )

            count = count + 1
            if (count % 100) == 0:
                logger.debug("%d tiles merged (%.1f%% @ %.1f tiles/sec)" % (count, (float(count) / float(total_tiles)) * 100.0, count / (time.time() - start_time)))
                if print_progress:
                    sys.stdout.write("\r%d tiles merged (%.1f%% @ %.1f tiles/sec)" % (count, (float(count) / float(total_tiles)) * 100.0, count / (time.time() - start_time)))
                    sys.stdout.flush()

            if len(tmp_images_list) > 250:
                con1.insert_tiles_to_images(tmp_images_list)
                tmp_images_list = []

            if len(tmp_row_list) > 250:
                con1.insert_tiles_to_map(tmp_row_list)
                tmp_row_list = []

            if len(tmp_tiles_list) > 250:
                con1.insert_tiles(tmp_tiles_list)
                tmp_tiles_list = []

        # Push the remaining rows to the database
        if len(tmp_images_list) > 0:
            con1.insert_tiles_to_images(tmp_images_list)
        if len(tmp_row_list) > 0:
            con1.insert_tiles_to_map(tmp_row_list)
        if len(tmp_tiles_list) > 0:
            con1.insert_tiles(tmp_tiles_list)

    # merge an uncompacted database (--merge)
    else:
        known_tile_ids = set()
        tmp_images_list = []
        tmp_row_list = []
        tmp_tiles_list = []

        for t in con2.tiles(min_zoom, max_zoom, min_timestamp, max_timestamp, scale):
            tile_z = t[0]
            tile_x = t[1]
            tile_y = t[2]
            tile_scale = t[3]
            tile_data = str(t[4])

            if flip_tile_y:
                tile_y = flip_y(tile_z, tile_y)

            # Execute commands
            if kwargs.get('command_list'):
                tile_data = execute_commands_on_tile(kwargs['command_list'], new_format, tile_data, tmp_dir)

            if con1.is_compacted():
                m = hashlib.md5()
                m.update(tile_data)
                tile_id = m.hexdigest()

                if tile_id not in known_tile_ids:
                    tmp_images_list.append( (tile_id, tile_data) )
                    known_tile_ids.add(tile_id)

                tmp_row_list.append( (tile_z, tile_x, tile_y, tile_scale, tile_id, int(time.time())) )
            else:
                tmp_tiles_list.append( (tile_z, tile_x, tile_y, tile_scale, tile_data, int(time.time())) )

            count = count + 1
            if (count % 100) == 0:
                logger.debug("%d tiles merged (%.1f%% @ %.1f tiles/sec)" % (count, (float(count) / float(total_tiles)) * 100.0, count / (time.time() - start_time)))
                if print_progress:
                    sys.stdout.write("\r%d tiles merged (%.1f%% @ %.1f tiles/sec)" % (count, (float(count) / float(total_tiles)) * 100.0, count / (time.time() - start_time)))
                    sys.stdout.flush()

            if len(tmp_images_list) > 250:
                con1.insert_tiles_to_images(tmp_images_list)
                tmp_images_list = []

            if len(tmp_row_list) > 250:
                con1.insert_tiles_to_map(tmp_row_list)
                tmp_row_list = []

            if len(tmp_tiles_list) > 250:
                con1.insert_tiles(tmp_tiles_list)
                tmp_tiles_list = []

        # Push the remaining rows to the database
        if len(tmp_images_list) > 0:
            con1.insert_tiles_to_images(tmp_images_list)
        if len(tmp_row_list) > 0:
            con1.insert_tiles_to_map(tmp_row_list)
        if len(tmp_tiles_list) > 0:
            con1.insert_tiles(tmp_tiles_list)

    if print_progress:
        sys.stdout.write('\n')

    logger.info("%d tiles merged (100.0%% @ %.1f tiles/sec)" % (count, count / (time.time() - start_time)))
    if print_progress:
        sys.stdout.write("%d tiles merged (100.0%% @ %.1f tiles/sec)\n" % (count, count / (time.time() - start_time)))
        sys.stdout.flush()

    if delete_after_export:
        logger.debug("WARNING: Removing merged tiles from %s" % (mbtiles_file2))
        con2.delete_tiles(min_zoom, max_zoom, min_timestamp, max_timestamp, scale)
        con2.optimize_database(kwargs.get('skip_analyze', False), kwargs.get('skip_vacuum', False))

    con1.close()
    con2.close()

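# A minimal usage sketch, assuming hypothetical databases; tiles flow from the
# second argument into the first:
#
#   merge_mbtiles("target.mbtiles", "source.mbtiles", min_zoom=0, max_zoom=14,
#                 check_before_merge=True, progress=True)
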
def disk_to_mbtiles(directory_path, mbtiles_file, **kwargs):
    logger.info("Importing from disk to database: %s --> %s" % (directory_path, mbtiles_file))

    import_into_existing_mbtiles = os.path.isfile(mbtiles_file)
    existing_mbtiles_is_compacted = True

    no_overwrite = kwargs.get('no_overwrite', False)
    auto_commit  = kwargs.get('auto_commit', False)
    zoom         = kwargs.get('zoom', -1)
    min_zoom     = kwargs.get('min_zoom', 0)
    max_zoom     = kwargs.get('max_zoom', 255)

    if zoom >= 0:
        min_zoom = max_zoom = zoom

    con = mbtiles_connect(mbtiles_file, auto_commit)
    cur = con.cursor()
    optimize_connection(cur, False)

    if import_into_existing_mbtiles:
        existing_mbtiles_is_compacted = (con.execute("SELECT count(name) FROM sqlite_master WHERE type='table' AND name='images'").fetchone()[0] > 0)
    else:
        mbtiles_setup(cur)

    image_format = 'png'

    try:
        metadata = json.load(open(os.path.join(directory_path, 'metadata.json'), 'r'))
        image_format = metadata.get('format', 'png')

        # Check that the old and new image formats are the same
        if import_into_existing_mbtiles:
            original_format = None
            try:
                original_format = cur.execute("SELECT value FROM metadata WHERE name='format'").fetchone()[0]
            except:
                pass

            if original_format is not None and image_format != original_format:
                sys.stderr.write('The files to merge must use the same image format (png or jpg)\n')
                sys.exit(1)

        if not import_into_existing_mbtiles:
            for name, value in metadata.items():
                cur.execute('INSERT OR IGNORE INTO metadata (name, value) VALUES (?, ?)', (name, value))
            con.commit()
            logger.info('metadata from metadata.json restored')
    except IOError:
        logger.warning('metadata.json not found')

    existing_tiles = {}

    if no_overwrite:
        tiles = cur.execute("""SELECT zoom_level, tile_column, tile_row FROM tiles
            WHERE zoom_level>=? AND zoom_level<=?""", (min_zoom, max_zoom))
        t = tiles.fetchone()
        while t:
            z = str(t[0])
            x = str(t[1])
            y = str(t[2])

            zoom = existing_tiles.get(z, None)
            if not zoom:
                zoom = {}
                existing_tiles[z] = zoom

            row = zoom.get(y, None)
            if not row:
                row = set()
                zoom[y] = row

            row.add(x)
            t = tiles.fetchone()

    count = 0
    start_time = time.time()

    for r1, zs, ignore in os.walk(os.path.join(directory_path, "tiles")):
        for z in zs:
            if int(z) < min_zoom or int(z) > max_zoom:
                continue
            for r2, xs, ignore in os.walk(os.path.join(r1, z)):
                for x in xs:
                    for r2, ignore, ys in os.walk(os.path.join(r1, z, x)):
                        for y in ys:
                            y, extension = y.split('.')

                            if no_overwrite:
                                if x in existing_tiles.get(z, {}).get(y, set()):
                                    logging.debug("Ignoring tile (%s, %s, %s)" % (z, x, y))
                                    continue

                            # Read the tile at its on-disk row before flipping y
                            f = open(os.path.join(r1, z, x, y) + '.' + extension, 'rb')
                            tile_data = f.read()
                            f.close()

                            if kwargs.get('flip_y', False):
                                y = flip_y(z, y)

                            # Execute commands
                            if kwargs.get('command_list'):
                                tile_data = execute_commands_on_tile(kwargs['command_list'], image_format, tile_data)

                            if existing_mbtiles_is_compacted:
                                m = hashlib.md5()
                                m.update(tile_data)
                                tile_id = m.hexdigest()

                                cur.execute("""INSERT OR IGNORE INTO images (tile_id, tile_data) VALUES (?, ?)""",
                                    (tile_id, sqlite3.Binary(tile_data)))
                                cur.execute("""REPLACE INTO map (zoom_level, tile_column, tile_row, tile_id) VALUES (?, ?, ?, ?)""",
                                    (z, x, y, tile_id))
                            else:
                                cur.execute("""REPLACE INTO tiles (zoom_level, tile_column, tile_row, tile_data) VALUES (?, ?, ?, ?)""",
                                    (z, x, y, sqlite3.Binary(tile_data)))

                            count = count + 1
                            if (count % 100) == 0:
                                logger.debug("%s tiles imported (%d tiles/sec)" % (count, count / (time.time() - start_time)))

    logger.info("%d tiles imported." % (count))
    con.commit()
    con.close()

def test_mbtiles(mbtiles_file, **kwargs):
    scale           = kwargs.get('tile_scale', None)
    zoom            = kwargs.get('zoom', -1)
    min_zoom        = kwargs.get('min_zoom', 0)
    max_zoom        = kwargs.get('max_zoom', 18)
    tmp_dir         = kwargs.get('tmp_dir', None)
    flip_tile_y     = kwargs.get('flip_y', False)
    min_timestamp   = kwargs.get('min_timestamp', 0)
    max_timestamp   = kwargs.get('max_timestamp', 0)
    revert_test     = kwargs.get('revert_test', False)
    auto_commit     = kwargs.get('auto_commit', False)
    journal_mode    = kwargs.get('journal_mode', 'wal')
    synchronous_off = kwargs.get('synchronous_off', False)
    default_pool_size = kwargs.get('poolsize', -1)

    if zoom >= 0:
        min_zoom = max_zoom = zoom

    con = mbtiles_connect(mbtiles_file, auto_commit, journal_mode, synchronous_off, False, True)

    if min_zoom == max_zoom:
        zoom_level_string = "zoom level %d" % (min_zoom)
    else:
        zoom_level_string = "zoom levels %d -> %d" % (min_zoom, max_zoom)

    logger.info("Testing %s (%s)" % (prettify_connect_string(con.connect_string), zoom_level_string))

    image_format = 'png'
    metadata = con.metadata()
    if 'format' in metadata:
        image_format = metadata['format']

    if default_pool_size < 1:
        default_pool_size = None
        logger.debug("Using default pool size")
    else:
        logger.debug("Using pool size = %d" % (default_pool_size))

    pool = Pool(default_pool_size)
    multiprocessing.log_to_stderr(logger.level)

    chunk = 1000
    tiles_to_process = []

    for t in con.tiles(min_zoom, max_zoom, min_timestamp, max_timestamp, scale):
        tile_z = t[0]
        tile_x = t[1]
        tile_y = t[2]
        tile_scale = t[3]
        tile_data = str(t[4])

        if flip_tile_y:
            tile_y = flip_y(tile_z, tile_y)

        tmp_file_fd, tmp_file_name = tempfile.mkstemp(suffix=".%s" % (image_format), prefix="tile_", dir=tmp_dir)
        tmp_file = os.fdopen(tmp_file_fd, "wb")
        tmp_file.write(tile_data)
        tmp_file.close()

        tiles_to_process.append({
            'tile_x' : tile_x,
            'tile_y' : tile_y,
            'tile_z' : tile_z,
            'filename' : tmp_file_name,
            'format' : image_format,
            'revert_test' : revert_test,
            'command_list' : kwargs.get('command_list', [])
        })

        if len(tiles_to_process) < chunk:
            continue

        process_tiles(pool, tiles_to_process)
        tiles_to_process = []

    if len(tiles_to_process) > 0:
        process_tiles(pool, tiles_to_process)

    pool.close()
    con.close()

def mbtiles_to_disk(mbtiles_file, directory_path, **kwargs):
    logger.info("Exporting database to disk: %s --> %s" % (mbtiles_file, directory_path))

    delete_after_export = kwargs.get('delete_after_export', False)
    no_overwrite = kwargs.get('no_overwrite', False)

    zoom     = kwargs.get('zoom', -1)
    min_zoom = kwargs.get('min_zoom', 0)
    max_zoom = kwargs.get('max_zoom', 255)

    if zoom >= 0:
        min_zoom = max_zoom = zoom

    con = mbtiles_connect(mbtiles_file)
    cur = con.cursor()
    optimize_connection(cur)

    if not os.path.isdir(directory_path):
        os.mkdir(directory_path)

    base_path = os.path.join(directory_path, "tiles")
    if not os.path.isdir(base_path):
        os.makedirs(base_path)

    metadata = dict(con.execute('SELECT name, value FROM metadata').fetchall())
    json.dump(metadata, open(os.path.join(directory_path, 'metadata.json'), 'w'), indent=4)

    count = 0
    start_time = time.time()
    image_format = metadata.get('format', 'png')

    total_tiles = con.execute("""SELECT count(zoom_level) FROM tiles
        WHERE zoom_level>=? AND zoom_level<=?""", (min_zoom, max_zoom)).fetchone()[0]
    sending_mbtiles_is_compacted = (con.execute("SELECT count(name) FROM sqlite_master WHERE type='table' AND name='images'").fetchone()[0] > 0)

    tiles = cur.execute("""SELECT zoom_level, tile_column, tile_row, tile_data FROM tiles
        WHERE zoom_level>=? AND zoom_level<=?""", (min_zoom, max_zoom))
    t = tiles.fetchone()
    while t:
        z = t[0]
        x = t[1]
        y = t[2]
        tile_data = t[3]

        # Execute commands
        if kwargs.get('command_list'):
            tile_data = execute_commands_on_tile(kwargs['command_list'], image_format, tile_data)

        if kwargs.get('flip_y', False):
            y = flip_y(z, y)

        tile_dir = os.path.join(base_path, str(z), str(x))
        if not os.path.isdir(tile_dir):
            os.makedirs(tile_dir)

        tile_file = os.path.join(tile_dir, '%s.%s' % (y, metadata.get('format', 'png')))

        if not no_overwrite or not os.path.isfile(tile_file):
            f = open(tile_file, 'wb')
            f.write(tile_data)
            f.close()

            count = count + 1
            if (count % 100) == 0:
                logger.debug("%s / %s tiles exported (%.1f%%, %.1f tiles/sec)" % (count, total_tiles, (float(count) / float(total_tiles)) * 100.0, count / (time.time() - start_time)))

        t = tiles.fetchone()

    logger.info("%s / %s tiles exported (100.0%%, %.1f tiles/sec)" % (count, total_tiles, count / (time.time() - start_time)))

    if delete_after_export:
        logger.debug("WARNING: Removing exported tiles from %s" % (mbtiles_file))

        if sending_mbtiles_is_compacted:
            cur.execute("""DELETE FROM images WHERE tile_id IN (SELECT tile_id FROM map WHERE zoom_level>=? AND zoom_level<=?)""", (min_zoom, max_zoom))
            cur.execute("""DELETE FROM map WHERE zoom_level>=? AND zoom_level<=?""", (min_zoom, max_zoom))
        else:
            cur.execute("""DELETE FROM tiles WHERE zoom_level>=? AND zoom_level<=?""", (min_zoom, max_zoom))

        optimize_database(cur, kwargs.get('skip_analyze', False), kwargs.get('skip_vacuum', False))

    con.commit()
    con.close()