Example #1
def check_mbtiles(mbtiles_file, **kwargs):
    logger.info("Checking database %s" % (mbtiles_file))

    result = True


    zoom     = kwargs.get('zoom', -1)
    min_zoom = kwargs.get('min_zoom', 0)
    max_zoom = kwargs.get('max_zoom', 255)

    if zoom >= 0:
        min_zoom = max_zoom = zoom

    con = mbtiles_connect(mbtiles_file)
    cur = con.cursor()
    optimize_connection(cur)

    logger.debug("Loading zoom levels")

    zoom_levels = [int(x[0]) for x in cur.execute("SELECT distinct(zoom_level) FROM tiles").fetchall()]
    missing_tiles = []

    for current_zoom_level in zoom_levels:
        if current_zoom_level < min_zoom or current_zoom_level > max_zoom:
            continue

        logger.debug("Starting zoom level %d" % (current_zoom_level))

        t = cur.execute("""SELECT min(tile_column), max(tile_column), min(tile_row), max(tile_row) FROM tiles WHERE zoom_level = ?""",
            [current_zoom_level]).fetchone()

        minX, maxX, minY, maxY = t[0], t[1], t[2], t[3]

        logger.debug(" - Checking zoom level %d, x: %d - %d, y: %d - %d" % (current_zoom_level, minX, maxX, minY, maxY))

        for current_row in range(minY, maxY+1):
            logger.debug("   - Row: %d (%.1f%%)" %
                (current_row, (float(current_row - minY) / float(maxY - minY)) * 100.0) if minY != maxY else 100.0)

            mbtiles_columns = set([int(x[0]) for x in cur.execute("""SELECT tile_column FROM tiles WHERE zoom_level=? AND tile_row=?""",
                (current_zoom_level, current_row)).fetchall()])

            for current_column in range(minX, maxX+1):
                if current_column not in mbtiles_columns:
                    missing_tiles.append([current_zoom_level, current_column, current_row])


    if len(missing_tiles) > 0:
        result = False
        logger.error("(zoom, x, y)")
        for current_tile in missing_tiles:
            logger.error(current_tile)


    return result
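
A minimal usage sketch, assuming this function lives in the same module as mbutil's mbtiles_connect and optimize_connection helpers; the file name is hypothetical:

# Hypothetical call: verify that zoom levels 0-10 contain no missing tiles.
if not check_mbtiles('world.mbtiles', min_zoom=0, max_zoom=10):
    print('Missing tiles detected, see the error log for (zoom, x, y) entries')
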
Example #2
def disk_to_mbtiles(directory_path, mbtiles_file, **kwargs):
    logger.info("Importing from disk to database: %s --> %s" % (directory_path, mbtiles_file))


    import_into_existing_mbtiles = os.path.isfile(mbtiles_file)
    existing_mbtiles_is_compacted = True

    no_overwrite = kwargs.get('no_overwrite', False)
    auto_commit  = kwargs.get('auto_commit', False)
    zoom     = kwargs.get('zoom', -1)
    min_zoom = kwargs.get('min_zoom', 0)
    max_zoom = kwargs.get('max_zoom', 255)

    if zoom >= 0:
        min_zoom = max_zoom = zoom


    con = mbtiles_connect(mbtiles_file, auto_commit)
    cur = con.cursor()
    optimize_connection(cur, False)


    if import_into_existing_mbtiles:
        existing_mbtiles_is_compacted = (con.execute("SELECT count(name) FROM sqlite_master WHERE type='table' AND name='images'").fetchone()[0] > 0)
    else:
        mbtiles_setup(cur)


    image_format = 'png'
    try:
        with open(os.path.join(directory_path, 'metadata.json'), 'r') as metadata_file:
            metadata = json.load(metadata_file)
        image_format = metadata.get('format', 'png')

        # Check that the old and new image formats are the same
        if import_into_existing_mbtiles:
            original_format = None

            try:
                original_format = cur.execute("SELECT value FROM metadata WHERE name='format'").fetchone()[0]
            except Exception:
                pass

            if original_format is not None and image_format != original_format:
                sys.stderr.write('The files to merge must use the same image format (png or jpg)\n')
                sys.exit(1)

        if not import_into_existing_mbtiles:
            for name, value in metadata.items():
                cur.execute('INSERT OR IGNORE INTO metadata (name, value) VALUES (?, ?)',
                        (name, value))
            con.commit()
            logger.info('metadata from metadata.json restored')

    except IOError:
        logger.warning('metadata.json not found')


    existing_tiles = {}

    if no_overwrite:
        tiles = cur.execute("""SELECT zoom_level, tile_column, tile_row FROM tiles WHERE zoom_level>=? AND zoom_level<=?""",
            (min_zoom, max_zoom))

        t = tiles.fetchone()
        while t:
            z = str(t[0])
            x = str(t[1])
            y = str(t[2])

            # Build a {zoom: {row: set(columns)}} lookup of existing tiles
            zoom_rows = existing_tiles.setdefault(z, {})
            zoom_rows.setdefault(y, set()).add(x)
            t = tiles.fetchone()


    count = 0
    start_time = time.time()


    for r1, zs, ignore in os.walk(os.path.join(directory_path, "tiles")):
        for z in zs:
            if int(z) < min_zoom or int(z) > max_zoom:
                continue

            for r2, xs, ignore in os.walk(os.path.join(r1, z)):
                for x in xs:
                    for r2, ignore, ys in os.walk(os.path.join(r1, z, x)):
                        for y in ys:
                            y, extension = y.rsplit('.', 1)

                            if no_overwrite:
                                if x in existing_tiles.get(z, {}).get(y, set()):
                                    logging.debug("Ignoring tile (%s, %s, %s)" % (z, x, y))
                                    continue

                            if kwargs.get('flip_y', False):
                                y = flip_y(z, y)

                            f = open(os.path.join(r1, z, x, y) + '.' + extension, 'rb')
                            tile_data = f.read()
                            f.close()

                            # Execute commands
                            if kwargs.get('command_list'):
                                tile_data = execute_commands_on_tile(kwargs['command_list'], image_format, tile_data)

                            if existing_mbtiles_is_compacted:
                                m = hashlib.md5()
                                m.update(tile_data)
                                tile_id = m.hexdigest()

                                cur.execute("""INSERT OR IGNORE INTO images (tile_id, tile_data) VALUES (?, ?)""",
                                    (tile_id, sqlite3.Binary(tile_data)))

                                cur.execute("""REPLACE INTO map (zoom_level, tile_column, tile_row, tile_id) VALUES (?, ?, ?, ?)""",
                                    (z, x, y, tile_id))
                            else:
                                cur.execute("""REPLACE INTO tiles (zoom_level, tile_column, tile_row, tile_data) VALUES (?, ?, ?, ?)""",
                                    (z, x, y, sqlite3.Binary(tile_data)))


                            count = count + 1
                            if (count % 100) == 0:
                                logger.debug("%s tiles imported (%d tiles/sec)" %
                                    (count, count / (time.time() - start_time)))


    logger.info("%d tiles imported." % (count))

    con.commit()
    con.close()
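
A hedged usage sketch, assuming the same module scope (mbtiles_setup, flip_y, and the other helpers these examples rely on); the paths are made up. The function expects the directory to contain metadata.json and a tiles/<zoom>/<column>/<row>.<ext> tree:

# Hypothetical call: merge a tile dump into an existing database without
# overwriting tiles that are already there.
disk_to_mbtiles('./tile_dump', 'world.mbtiles', no_overwrite=True, auto_commit=False)
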
Example #3
def execute_commands_on_mbtiles(mbtiles_file, **kwargs):
    logger.info("Executing commands on database %s" % (mbtiles_file))


    if not kwargs.get('command_list'):
        return

    auto_commit = kwargs.get('auto_commit', False)
    zoom        = kwargs.get('zoom', -1)
    min_zoom    = kwargs.get('min_zoom', 0)
    max_zoom    = kwargs.get('max_zoom', 255)
    default_pool_size = kwargs.get('poolsize', -1)

    if zoom >= 0:
        min_zoom = max_zoom = zoom


    con = mbtiles_connect(mbtiles_file, auto_commit)
    cur = con.cursor()
    optimize_connection(cur)


    existing_mbtiles_is_compacted = (con.execute("select count(name) from sqlite_master where type='table' AND name='images';").fetchone()[0] > 0)
    if not existing_mbtiles_is_compacted:
        logger.info("The mbtiles file must be compacted, exiting...")
        return

    image_format = 'png'
    try:
        image_format = con.execute("select value from metadata where name='format';").fetchone()[0]
    except Exception:
        pass


    count = 0
    duplicates = 0
    chunk = 1000
    start_time = time.time()
    processed_tile_ids = set()

    max_rowid = (con.execute("select max(rowid) from map").fetchone()[0])
    total_tiles = (con.execute("""select count(distinct(tile_id)) from map where zoom_level>=? and zoom_level<=?""",
        (min_zoom, max_zoom)).fetchone()[0])

    logger.debug("%d tiles to process" % (total_tiles))


    logger.debug("Creating an index for the tile_id column...")
    con.execute("""CREATE INDEX IF NOT EXISTS tile_id_index ON map (tile_id)""")
    logger.debug("...done")


    if default_pool_size < 1:
        default_pool_size = None
        logger.debug("Using default pool size")
    else:
        logger.debug("Using pool size = %d" % (default_pool_size))

    pool = Pool(default_pool_size)
    multiprocessing.log_to_stderr(logger.level)


    for i in range((max_rowid // chunk) + 1):
        # logger.debug("Starting range %d-%d" % (i*chunk, (i+1)*chunk))
        tiles = cur.execute("""select images.tile_id, images.tile_data, map.zoom_level, map.tile_column, map.tile_row
            from map, images
            where (map.rowid > ? and map.rowid <= ?)
            and (map.zoom_level>=? and map.zoom_level<=?)
            and (images.tile_id == map.tile_id)""",
            ((i * chunk), ((i + 1) * chunk), min_zoom, max_zoom))


        tiles_to_process = []

        t = tiles.fetchone()

        while t:
            tile_id = t[0]
            tile_data = t[1]
            # tile_z = t[2]
            # tile_x = t[3]
            # tile_y = t[4]
            # logging.debug("Working on tile (%d, %d, %d)" % (tile_z, tile_x, tile_y))

            if tile_id in processed_tile_ids:
                duplicates = duplicates + 1
            else:
                processed_tile_ids.add(tile_id)

                tmp_file_fd, tmp_file_name = tempfile.mkstemp(suffix=".%s" % (image_format), prefix="tile_")
                tmp_file = os.fdopen(tmp_file_fd, "wb")  # tile data is binary
                tmp_file.write(tile_data)
                tmp_file.close()

                tiles_to_process.append({
                    'tile_id' : tile_id,
                    'filename' : tmp_file_name,
                    'format' : image_format,
                    'command_list' : kwargs.get('command_list', [])
                })

            t = tiles.fetchone()


        if len(tiles_to_process) == 0:
            continue

        # Execute commands in parallel
        # logger.debug("Starting multiprocessing...")
        processed_tiles = pool.map(process_tile, tiles_to_process)

	# logger.debug("Starting reimport...")
        for next_tile in processed_tiles:
            tile_id, tile_file_path = next_tile['tile_id'], next_tile['filename']

            tmp_file = open(tile_file_path, "r")
            tile_data = tmp_file.read()
            tmp_file.close()

            os.remove(tile_file_path)

            if tile_data:
                m = hashlib.md5()
                m.update(tile_data)
                new_tile_id = m.hexdigest()

                cur.execute("""insert or ignore into images (tile_id, tile_data) values (?, ?)""",
                    (new_tile_id, sqlite3.Binary(tile_data)))
                cur.execute("""update map set tile_id=? where tile_id=?""",
                    (new_tile_id, tile_id))
                if tile_id != new_tile_id:
                    cur.execute("""delete from images where tile_id=?""",
                    [tile_id])

                # logger.debug("Tile %s done\n" % (tile_id, ))


            count = count + 1
            if (count % 100) == 0:
                logger.debug("%s tiles finished (%.1f%%, %.1f tiles/sec)" %
                    (count, (float(count) / float(total_tiles)) * 100.0, count / (time.time() - start_time)))


    logger.info("%s tiles finished, %d duplicates ignored (100.0%%, %.1f tiles/sec)" %
        (count, duplicates, count / (time.time() - start_time)))

    pool.close()
    con.commit()
    con.close()
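
A hedged usage sketch; the exact shape of command_list is defined by execute_commands_on_tile and process_tile, which are not shown here, so the value below is only an assumption:

# Hypothetical call: post-process every unique tile with an external command,
# using 4 worker processes.
execute_commands_on_mbtiles('world.mbtiles', command_list=['optipng -o2'], poolsize=4)
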
Example #4
def compact_mbtiles(mbtiles_file, **kwargs):
    logger.info("Compacting database %s" % (mbtiles_file))


    con = mbtiles_connect(mbtiles_file)
    cur = con.cursor()
    optimize_connection(cur)

    existing_mbtiles_is_compacted = (con.execute("SELECT count(name) FROM sqlite_master WHERE type='table' AND name='images'").fetchone()[0] > 0)
    if existing_mbtiles_is_compacted:
        logger.info("The mbtiles file is already compacted")
        return


    overlapping = 0
    unique = 0
    count = 0
    chunk = 100
    start_time = time.time()
    total_tiles = con.execute("SELECT count(zoom_level) FROM tiles").fetchone()[0]
    max_rowid = con.execute("SELECT max(rowid) FROM tiles").fetchone()[0]


    logger.debug("%d total tiles" % total_tiles)

    compaction_prepare(cur)

    for i in range((max_rowid // chunk) + 1):
        cur.execute("""SELECT zoom_level, tile_column, tile_row, tile_data FROM tiles WHERE rowid > ? AND rowid <= ?""",
            ((i * chunk), ((i + 1) * chunk)))

        rows = cur.fetchall()
        for r in rows:
            z = r[0]
            x = r[1]
            y = r[2]
            tile_data = r[3]

            # Execute commands
            if kwargs.get('command_list'):
                tile_data = execute_commands_on_tile(kwargs['command_list'], "png", tile_data)

            m = hashlib.md5()
            m.update(tile_data)
            tile_id = m.hexdigest()

            try:
                cur.execute("""INSERT INTO images (tile_id, tile_data) VALUES (?, ?)""",
                    (tile_id, sqlite3.Binary(tile_data)))
            except sqlite3.IntegrityError:  # duplicate tile_id, image already stored
                overlapping = overlapping + 1
            else:
                unique = unique + 1

            cur.execute("""REPLACE INTO map (zoom_level, tile_column, tile_row, tile_id) VALUES (?, ?, ?, ?)""",
                (z, x, y, tile_id))


            count = count + 1
            if (count % 100) == 0:
                logger.debug("%s tiles finished, %d unique, %d duplicates (%.1f%%, %.1f tiles/sec)" %
                    (count, unique, overlapping, (float(count) / float(total_tiles)) * 100.0, count / (time.time() - start_time)))


    logger.info("%s tiles finished, %d unique, %d duplicates (100.0%%, %.1f tiles/sec)" % (count, unique, overlapping, count / (time.time() - start_time)))

    compaction_finalize(cur)
    con.commit()
    con.close()
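
A minimal sketch, assuming compaction_prepare and compaction_finalize create and finalize the deduplicated images/map schema as in mbutil; the file name is hypothetical:

# Hypothetical call: deduplicate identical tile blobs into the images/map layout.
compact_mbtiles('world.mbtiles')
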
Example #5
def mbtiles_to_disk(mbtiles_file, directory_path, **kwargs):
    logger.info("Exporting database to disk: %s --> %s" % (mbtiles_file, directory_path))


    delete_after_export = kwargs.get('delete_after_export', False)
    no_overwrite        = kwargs.get('no_overwrite', False)

    zoom     = kwargs.get('zoom', -1)
    min_zoom = kwargs.get('min_zoom', 0)
    max_zoom = kwargs.get('max_zoom', 255)

    if zoom >= 0:
        min_zoom = max_zoom = zoom


    con = mbtiles_connect(mbtiles_file)
    cur = con.cursor()
    optimize_connection(cur)


    if not os.path.isdir(directory_path):
        os.mkdir(directory_path)
    base_path = os.path.join(directory_path, "tiles")
    if not os.path.isdir(base_path):
        os.makedirs(base_path)


    metadata = dict(con.execute('SELECT name, value FROM metadata').fetchall())
    with open(os.path.join(directory_path, 'metadata.json'), 'w') as metadata_file:
        json.dump(metadata, metadata_file, indent=4)

    count = 0
    start_time = time.time()
    image_format = metadata.get('format', 'png')
    total_tiles = con.execute("""SELECT count(zoom_level) FROM tiles WHERE zoom_level>=? AND zoom_level<=?""",
        (min_zoom, max_zoom)).fetchone()[0]
    sending_mbtiles_is_compacted = (con.execute("SELECT count(name) FROM sqlite_master WHERE type='table' AND name='images'").fetchone()[0] > 0)


    tiles = cur.execute("""SELECT zoom_level, tile_column, tile_row, tile_data FROM tiles WHERE zoom_level>=? AND zoom_level<=?""",
        (min_zoom, max_zoom))
    t = tiles.fetchone()
    while t:
        z = t[0]
        x = t[1]
        y = t[2]
        tile_data = t[3]

        # Execute commands
        if kwargs.get('command_list'):
            tile_data = execute_commands_on_tile(kwargs['command_list'], image_format, tile_data)

        if kwargs.get('flip_y', False):
            y = flip_y(z, y)

        tile_dir = os.path.join(base_path, str(z), str(x))
        if not os.path.isdir(tile_dir):
            os.makedirs(tile_dir)

        tile_file = os.path.join(tile_dir, '%s.%s' % (y, image_format))

        if not no_overwrite or not os.path.isfile(tile_file):
            f = open(tile_file, 'wb')
            f.write(tile_data)
            f.close()


        count = count + 1
        if (count % 100) == 0:
            logger.debug("%s / %s tiles exported (%.1f%%, %.1f tiles/sec)" %
                (count, total_tiles, (float(count) / float(total_tiles)) * 100.0, count / (time.time() - start_time)))

        t = tiles.fetchone()


    logger.info("%s / %s tiles exported (100.0%%, %.1f tiles/sec)" % (count, total_tiles, count / (time.time() - start_time)))


    if delete_after_export:
        logger.debug("WARNING: Removing exported tiles from %s" % (mbtiles_file))

        if sending_mbtiles_is_compacted:
            cur.execute("""DELETE FROM images WHERE tile_id IN (SELECT tile_id FROM map WHERE zoom_level>=? AND zoom_level<=?)""",
                (min_zoom, max_zoom))
            cur.execute("""DELETE FROM map WHERE zoom_level>=? AND zoom_level<=?""", (min_zoom, max_zoom))
        else:
            cur.execute("""DELETE FROM tiles WHERE zoom_level>=? AND zoom_level<=?""", (min_zoom, max_zoom))

        optimize_database(cur, kwargs.get('skip_analyze', False), kwargs.get('skip_vacuum', False))
        con.commit()


    con.close()
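
A hedged usage sketch with made-up paths; flip_y=True converts the row numbering between the TMS and XYZ schemes during export:

# Hypothetical call: export a single zoom level to disk, then delete the
# exported tiles from the source database.
mbtiles_to_disk('world.mbtiles', './export', zoom=5, flip_y=True, delete_after_export=True)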