Esempio n. 1
0
def get_storage_path(file_name=None, storage_dir=None, config=None):
    """ Resolve a path inside the intermediary storage directory that holds
    uploaded but not yet processed files. Without a file name, the storage
    directory itself is returned.
    """

    if not config:
        config = get_ngeo_config()

    # default the storage directory from the ingest configuration section
    storage_dir = storage_dir or config.get(INGEST_SECTION, "storage_dir")

    # no file name given: resolve just the directory
    if not file_name:
        return get_project_relative_path(storage_dir)

    return get_project_relative_path(join(storage_dir, file_name))
Esempio n. 2
0
def get_storage_path(file_name=None, storage_dir=None, config=None):
    """ Build an absolute path below the intermediary storage directory used
    for uploaded, unprocessed files. When no file name is passed, the
    directory path itself is returned.
    """

    cfg = config or get_ngeo_config()

    if not storage_dir:
        storage_dir = cfg.get(INGEST_SECTION, "storage_dir")

    # append the file name only when one was actually provided
    target = join(storage_dir, file_name) if file_name else storage_dir
    return get_project_relative_path(target)
Esempio n. 3
0
def get_status_config_path(config=None):
    """ Returns the configured status configuration path, defaulting to
    'config/status' when the option is not set, resolved relative to the
    project directory.
    """

    config = config or get_ngeo_config()

    # NOTE: ConfigParser.get() accepts no default value as third positional
    # argument (it would be interpreted as the 'raw' flag and a missing
    # option would still raise NoOptionError), so use the safe_get() helper
    # which supports a fallback default — same pattern as get_failure_dir().
    return get_project_relative_path(
        safe_get(config, CTRL_SECTION, "status_config_path", "config/status")
    )
Esempio n. 4
0
def get_failure_dir(config=None):
    """ Resolve the failure directory configured in the 'control.ingest'
    section as a project relative path.
    """

    cfg = config or get_ngeo_config()

    failure_dir = safe_get(cfg, "control.ingest", "failure_dir")
    return get_project_relative_path(failure_dir)
Esempio n. 5
0
def get_failure_dir(config=None):
    """ Look up the 'failure_dir' option of the 'control.ingest' section and
    return it resolved against the project directory.
    """

    if not config:
        config = get_ngeo_config()

    return get_project_relative_path(
        safe_get(config, "control.ingest", "failure_dir"))
Esempio n. 6
0
def get_tileset_path(browse_type, config=None):
    """ Returns the path to a tileset SQLite file in the `tileset_root` dir.

    The browse type is used as the file name; a '.sqlite' extension is
    appended unless it is already present.
    """

    config = config or get_ngeo_config()

    tileset_root = config.get(MAPCACHE_SECTION, "tileset_root")

    # BUGFIX: previously a browse type that already ended in '.sqlite'
    # produced an empty tileset name, returning the tileset root itself
    # instead of the tileset file.
    if browse_type.endswith(".sqlite"):
        tileset = browse_type
    else:
        tileset = browse_type + ".sqlite"

    return join(get_project_relative_path(tileset_root), tileset)
Esempio n. 7
0
def add_browse_layer(browse_layer, config=None):
    """ Add a browse layer to the ngEO Browse Server system. This includes the
        database models, cache configuration and filesystem paths.

    Parameters:
        browse_layer -- browse layer description providing `id`,
            `related_dataset_ids`, `contains_volumes`,
            `contains_vertical_curtains` and `get_kwargs()`
        config -- optional ngEO configuration; defaults to the global one
    """
    config = config or get_ngeo_config()

    # create and validate a new browse layer model
    # (a no-op 'try: ... except Exception: raise' wrapper was removed here)
    browse_layer_model = models.BrowseLayer(**browse_layer.get_kwargs())

    browse_layer_model.full_clean()
    browse_layer_model.save()

    for related_dataset_id in browse_layer.related_dataset_ids:
        models.RelatedDataset.objects.get_or_create(
            dataset_id=related_dataset_id, browse_layer=browse_layer_model
        )

    # create EOxServer dataset series
    eoxs_models.DatasetSeries.objects.create(identifier=browse_layer.id)

    # add source to mapcache sqlite (only regular layers are tiled via
    # mapcache; the old comment wrongly said "remove")
    if not browse_layer.contains_volumes and not browse_layer.contains_vertical_curtains:
        mapcache_models.Source.objects.create(name=browse_layer.id)

        # add an XML section to the mapcache config xml
        add_mapcache_layer_xml(browse_layer, config)

    # create a base directory for optimized files
    directory = get_project_relative_path(join(
        config.get(INGEST_SECTION, "optimized_files_dir"), browse_layer.id
    ))
    if not os.path.exists(directory):
        os.makedirs(directory)
Esempio n. 8
0
def delete_browse_layer(browse_layer, config=None):
    """ Remove a browse layer from the ngEO Browse Server system: its
    database models, mapcache source/XML configuration, the tileset cache
    file and the directory of optimized files.
    """
    config = config or get_ngeo_config()

    # remove browse layer model. This should also delete all related browses
    # and browse reports
    models.BrowseLayer.objects.get(id=browse_layer.id).delete()
    eoxs_models.DatasetSeries.objects.get(identifier=browse_layer.id).delete()

    # mapcache is only configured for regular layers, so only clean it up
    # for those (mirrors the condition in add_browse_layer)
    if not browse_layer.contains_volumes and not browse_layer.contains_vertical_curtains:
        # remove source from mapcache sqlite
        mapcache_models.Source.objects.get(name=browse_layer.id).delete()

        # remove browse layer from mapcache XML
        remove_mapcache_layer_xml(browse_layer, config)

        # delete browse layer cache
        try:
            os.remove(get_tileset_path(browse_layer.browse_type))
        except OSError:
            # when no browse was ingested, the sqlite file does not exist, so just
            # issue a warning
            logger.warning(
                "Could not remove tileset '%s'." 
                % get_tileset_path(browse_layer.browse_type)
            )

    # delete all optimized files by deleting the whole directory of the layer
    optimized_dir = get_project_relative_path(join(
        config.get(INGEST_SECTION, "optimized_files_dir"), browse_layer.id
    ))
    try:
        shutil.rmtree(optimized_dir)
    except OSError:
        # best-effort cleanup: log but do not abort the deletion
        logger.error(
            "Could not remove directory for optimzed files: '%s'." 
            % optimized_dir
        )
Esempio n. 9
0
def get_optimized_path(file_name, directory=None, config=None):
    """ Compute the absolute target path for an optimized raster file.

    The base directory is taken from 'control.ingest.optimized_files_dir'
    and the optional 'control.ingest.optimized_files_postfix' is inserted
    before the file extension. Relative paths are treated relative to the
    PROJECT_DIR directory setting.
    """

    if not config:
        config = get_ngeo_config()

    # strip any path components; an optional subdirectory may be prepended
    name = basename(file_name)
    if directory:
        name = join(directory, name)

    base_dir = get_project_relative_path(
        config.get(INGEST_SECTION, "optimized_files_dir")
    )

    stem, extension = splitext(name)
    postfix = safe_get(config, INGEST_SECTION, "optimized_files_postfix", "")
    return join(base_dir, stem + postfix + extension)
Esempio n. 10
0
def get_optimized_path(file_name, directory=None, config=None):
    """ Return the absolute path of the optimized raster file corresponding
    to *file_name*.

    Uses 'control.ingest.optimized_files_dir' as base directory and inserts
    the optional 'control.ingest.optimized_files_postfix' before the file
    extension. Relative paths are resolved against PROJECT_DIR.
    """

    if not config:
        config = get_ngeo_config()

    # keep only the bare file name, optionally nested in a subdirectory
    relative_name = basename(file_name)
    if directory:
        relative_name = join(directory, relative_name)

    stem, extension = splitext(relative_name)
    postfix = safe_get(config, INGEST_SECTION, "optimized_files_postfix", "")

    return join(
        get_project_relative_path(config.get(INGEST_SECTION,
                                             "optimized_files_dir")),
        stem + postfix + extension
    )
Esempio n. 11
0
def add_browse_layer(browse_layer, config=None):
    """ Add a browse layer to the ngEO Browse Server system. This includes the
        database models, cache configuration and filesystem paths.
    """
    config = config or get_ngeo_config()

    try:
        logger.info("Adding new browse layer '%s'." % browse_layer.id)
        # create a new browse layer model
        browse_layer_model = models.BrowseLayer(
            **browse_layer.get_kwargs()
        )

        browse_layer_model.full_clean()
        browse_layer_model.save()

        # relatedDatasets are ignored (see NGEO-1508)
        # for related_dataset_id in browse_layer.related_dataset_ids:
        #     models.RelatedDataset.objects.get_or_create(
        #         dataset_id=related_dataset_id, browse_layer=browse_layer_model
        #     )

    except Exception:
        # NOTE(review): re-raising immediately makes this try/except a
        # no-op; presumably left over from removed rollback code — confirm
        # before simplifying.
        raise

    # create EOxServer dataset series
    dss_mgr = System.getRegistry().findAndBind(
        intf_id="resources.coverages.interfaces.Manager",
        params={
            "resources.coverages.interfaces.res_type": "eo.dataset_series"
        }
    )
    # (0, 0, 1, 1) bbox and 'now' timestamps look like placeholder metadata
    # for the freshly created series — TODO confirm they are updated on
    # ingestion
    dss_mgr.create(browse_layer.id,
        eo_metadata=EOMetadata(
            browse_layer.id,
            datetime.now(), datetime.now(),
            MultiPolygon(Polygon.from_bbox((0, 0, 1, 1)))
        )
    )
    # create EOxServer layer metadata
    if browse_layer.title or browse_layer.description:
        dss = System.getRegistry().getFromFactory(
            "resources.coverages.wrappers.DatasetSeriesFactory",
            {"obj_id": browse_layer.id}
        )
        if browse_layer.title:
            md_title = LayerMetadataRecord.objects.get_or_create(
                key="ows_title", value=str(browse_layer.title))[0]
            # reaches into the wrapper's name-mangled private model;
            # apparently no public API exists for adding layer metadata
            dss._DatasetSeriesWrapper__model.layer_metadata.add(md_title)
        if browse_layer.description:
            md_abstract = LayerMetadataRecord.objects.get_or_create(
                key="ows_abstract", value=str(browse_layer.description))[0]
            dss._DatasetSeriesWrapper__model.layer_metadata.add(md_abstract)

    # add source to mapcache sqlite
    mapcache_models.Source.objects.create(name=browse_layer.id)

    # add an XML section to the mapcache config xml
    add_mapcache_layer_xml(browse_layer, config)

    # create a base directory for optimized files
    directory = get_project_relative_path(join(
        config.get(INGEST_SECTION, "optimized_files_dir"), browse_layer.id
    ))
    if not os.path.exists(directory):
        os.makedirs(directory)
Esempio n. 12
0
def add_browse_layer(browse_layer, config=None):
    """ Add a browse layer to the ngEO Browse Server system. This includes the
        database models, cache configuration and filesystem paths, and —
        when enabled — the SxCat harvesting collection.

    Parameters:
        browse_layer -- browse layer description providing `id`, `title`,
            `description`, `harvesting_source` and `get_kwargs()`
        config -- optional ngEO configuration; defaults to the global one
    """
    config = config or get_ngeo_config()

    logger.info("Adding new browse layer '%s'." % browse_layer.id)
    # create and validate a new browse layer model (a no-op
    # 'try: ... except Exception: raise' wrapper was removed here)
    browse_layer_model = models.BrowseLayer(**browse_layer.get_kwargs())

    browse_layer_model.full_clean()
    browse_layer_model.save()

    # relatedDatasets are ignored (see NGEO-1508)

    # create EOxServer dataset series
    dss_mgr = System.getRegistry().findAndBind(
        intf_id="resources.coverages.interfaces.Manager",
        params={
            "resources.coverages.interfaces.res_type": "eo.dataset_series"
        })
    # (0, 0, 1, 1) bbox and 'now' timestamps serve as initial metadata for
    # the freshly created, still empty series
    dss_mgr.create(browse_layer.id,
                   eo_metadata=EOMetadata(
                       browse_layer.id, datetime.now(), datetime.now(),
                       MultiPolygon(Polygon.from_bbox((0, 0, 1, 1)))))
    # create EOxServer layer metadata
    if browse_layer.title or browse_layer.description:
        dss = System.getRegistry().getFromFactory(
            "resources.coverages.wrappers.DatasetSeriesFactory",
            {"obj_id": browse_layer.id})
        if browse_layer.title:
            md_title = LayerMetadataRecord.objects.get_or_create(
                key="ows_title", value=str(browse_layer.title))[0]
            # the wrapper exposes no public API for layer metadata, hence
            # the access to its name-mangled private model
            dss._DatasetSeriesWrapper__model.layer_metadata.add(md_title)
        if browse_layer.description:
            md_abstract = LayerMetadataRecord.objects.get_or_create(
                key="ows_abstract", value=str(browse_layer.description))[0]
            dss._DatasetSeriesWrapper__model.layer_metadata.add(md_abstract)

    # add source to mapcache sqlite
    mapcache_models.Source.objects.create(name=browse_layer.id)

    # add an XML section to the mapcache config xml
    add_mapcache_layer_xml(browse_layer, config)

    # create a base directory for optimized files
    directory = get_project_relative_path(
        join(config.get(INGEST_SECTION, "optimized_files_dir"),
             browse_layer.id))
    if not os.path.exists(directory):
        os.makedirs(directory)

    # create SxCat collection if harvesting via SxCat is enabled and source
    # is given
    harvesting_via_sxcat = False
    try:
        harvesting_via_sxcat = config.getboolean("control",
                                                 "harvesting_via_sxcat")
    except Exception:
        # option missing or malformed: harvesting stays disabled. Was a
        # bare 'except:' which would also swallow SystemExit and
        # KeyboardInterrupt.
        pass
    if harvesting_via_sxcat and browse_layer.harvesting_source:
        add_collection(browse_layer)
Esempio n. 13
0
def delete_browse_layer(browse_layer, purge=False, config=None):
    """ Disable a browse layer by removing its MapCache configuration (and
    SxCat harvesting, when enabled); with purge=True additionally delete
    the database models, EOxServer series, tileset cache and optimized
    files.

    Raises:
        Exception -- when the browse layer does not exist.
    """
    config = config or get_ngeo_config()

    # only remove MapCache configuration in order to allow a roll-back
    # without data loss
    if models.BrowseLayer.objects.filter(id=browse_layer.id).exists():
        logger.info("Starting disabling of browse layer '%s'." %
                    browse_layer.id)
    else:
        raise Exception(
            "Could not disable browse layer '%s' as it does not exist." %
            browse_layer.id)

    # remove browse layer from MapCache XML
    remove_mapcache_layer_xml(browse_layer, config)

    # disable SxCat harvesting for collection
    harvesting_via_sxcat = False
    try:
        harvesting_via_sxcat = config.getboolean("control",
                                                 "harvesting_via_sxcat")
    except Exception:
        # option missing or malformed: harvesting stays disabled. Was a
        # bare 'except:' which would also swallow SystemExit and
        # KeyboardInterrupt.
        pass
    if harvesting_via_sxcat and browse_layer.harvesting_source:
        disable_collection(browse_layer)

    logger.info("Finished disabling of browse layer '%s'." % browse_layer.id)

    if purge:
        logger.info("Starting purging of browse layer '%s'." % browse_layer.id)
        # remove browse layer model. This should also delete all related browses
        # and browse reports
        models.BrowseLayer.objects.get(id=browse_layer.id).delete()

        # delete EOxServer layer metadata
        dss = System.getRegistry().getFromFactory(
            "resources.coverages.wrappers.DatasetSeriesFactory",
            {"obj_id": browse_layer.id})
        dss._DatasetSeriesWrapper__model.layer_metadata.all().delete()
        # delete EOxServer dataset series
        dss_mgr = System.getRegistry().findAndBind(
            intf_id="resources.coverages.interfaces.Manager",
            params={
                "resources.coverages.interfaces.res_type": "eo.dataset_series"
            })
        dss_mgr.delete(browse_layer.id)

        # remove source from mapcache sqlite
        mapcache_models.Source.objects.get(name=browse_layer.id).delete()

        # delete browse layer cache
        try:
            logger.info("Deleting tileset for browse layer '%s'." %
                        browse_layer.id)
            os.remove(get_tileset_path(browse_layer.browse_type))
        except OSError:
            # when no browse was ingested, the sqlite file does not exist, so
            # just issue a warning
            logger.warning("Could not remove tileset '%s'." %
                           get_tileset_path(browse_layer.browse_type))

        # delete all optimized files by deleting the whole directory of the layer
        optimized_dir = get_project_relative_path(
            join(config.get(INGEST_SECTION, "optimized_files_dir"),
                 browse_layer.id))
        try:
            logger.info("Deleting optimized images for browse layer '%s'." %
                        browse_layer.id)
            shutil.rmtree(optimized_dir)
        except OSError:
            # best-effort cleanup: log but do not abort the purge
            logger.error(
                "Could not remove directory for optimized files: '%s'." %
                optimized_dir)

        if harvesting_via_sxcat and browse_layer.harvesting_source:
            remove_collection(browse_layer)

        logger.info("Finished purging of browse layer '%s'." % browse_layer.id)
Esempio n. 14
0
 def wrapper(*args, **kwargs):
     """ Call the wrapped function while holding the mapcache XML file
     lock, serializing concurrent modifications of mapcache.xml. """
     config = get_ngeo_config()
     # NOTE(review): mapcache_config is not used below — presumably kept
     # for its validation side effects; confirm before removing.
     mapcache_config = get_mapcache_seed_config(config)
     with FileLock(get_project_relative_path("mapcache.xml.lck")):
         return func(*args, **kwargs)
Esempio n. 15
0
def seed_mapcache(seed_command, config_file, tileset, grid,
                  minx, miny, maxx, maxy, minzoom, maxzoom,
                  start_time, end_time, threads, delete, force=True):

    # translate grid URN to mapcache grid name
    try:
        grid = URN_TO_GRID[grid]
    except KeyError:
        raise SeedException("Invalid grid '%s'." % grid)

    bounds = CRS_BOUNDS[GRID_TO_SRID[grid]]
    full = float(abs(bounds[0]) + abs(bounds[2]))

    dateline_crossed = False
    if maxx>bounds[2]:
        dateline_crossed = True
    # extent is always within [bounds[0],bounds[2]]
    # where maxx can be >bounds[2] but <=full
    if minx<bounds[0] or minx>bounds[2] or maxx<bounds[0] or maxx>full:
        raise SeedException("Invalid extent '%s,%s,%s,%s'."
                            % (minx, miny, maxx, maxy))

    if minzoom is None: minzoom = 0
    if maxzoom is None: maxzoom = 6

    # start- and end-time are expected to be UTC Zulu
    start_time = start_time.replace(tzinfo=None)
    end_time = end_time.replace(tzinfo=None)

    logger.info("Starting mapcache seed with parameters: command='%s', "
                "config_file='%s', tileset='%s', grid='%s', "
                "extent='%s,%s,%s,%s', zoom='%s,%s', nthreads='%s', "
                "mode='%s', dimension='TIME=%sZ/%sZ'."
                % (seed_command, config_file, tileset, grid,
                  minx, miny, maxx, maxy, minzoom, maxzoom, threads,
                  "seed" if not delete else "delete",
                  start_time.isoformat(), end_time.isoformat()))

    seed_args = [
        seed_command,
        "-c", config_file,
        "-t", tileset,
        "-g", grid,
        "-e", "%f,%f,%f,%f" % (minx, miny, bounds[2] if dateline_crossed else maxx, maxy),
        "-n", str(threads),
        "-z", "%d,%d" % (minzoom, maxzoom),
        "-D", "TIME=%sZ/%sZ" % (start_time.isoformat(), end_time.isoformat()),
        "-m", "seed" if not delete else "delete",
        "-q",
        "-M", "1,1",
    ]
    if not delete and force:
        seed_args.append("-f")


    try:
        config = get_ngeo_config()
        timeout = safe_get(config, "mapcache.seed", "timeout")
        timeout = float(timeout) if timeout is not None else 60.0
    except:
        timeout = 60.0


    try:
        lock = FileLock(
            get_project_relative_path("mapcache_seed.lck"), timeout=timeout
        )

        with lock:
            logger.debug("mapcache seeding command: '%s'. raw: '%s'."
                         % (" ".join(seed_args), seed_args))
            process = subprocess.Popen(seed_args, stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE)

            out, err = process.communicate()
            for string in (out, err):
                for line in string.split("\n"):
                    if line != '':
                        logger.info("MapCache output: %s" % line)

        if process.returncode != 0:
            raise SeedException("'%s' failed. Returncode '%d'."
                                % (seed_command, process.returncode))

        # seed second extent if dateline is crossed
        if dateline_crossed:
            with lock:
                index = seed_args.index("%f,%f,%f,%f" % (minx, miny, bounds[2], maxy))
                seed_args[index] = "%f,%f,%f,%f" % (bounds[0], miny, maxx-full, maxy)
                logger.debug("mapcache seeding command: '%s'. raw: '%s'."
                             % (" ".join(seed_args), seed_args))
                process = subprocess.Popen(seed_args, stdout=subprocess.PIPE,
                                           stderr=subprocess.PIPE)

                out, err = process.communicate()
                for string in (out, err):
                    for line in string.split("\n"):
                        if line != '':
                            logger.info("MapCache output: %s" % line)

            if process.returncode != 0:
                raise SeedException("'%s' failed. Returncode '%d'."
                                    % (seed_command, process.returncode))

    except LockException, e:
        raise SeedException("Seeding failed: %s" % str(e))
Esempio n. 16
0
 def wrapper(*args, **kwargs):
     """ Execute the wrapped function under the mapcache.xml file lock so
     that concurrent modifications of the mapcache XML are serialized. """
     config = get_ngeo_config()
     # NOTE(review): mapcache_config appears unused here; presumably
     # computed for its validation side effects — confirm before removing.
     mapcache_config = get_mapcache_seed_config(config)
     with FileLock(get_project_relative_path("mapcache.xml.lck")):
         return func(*args, **kwargs)
Esempio n. 17
0
def delete_browse_layer(browse_layer, purge=False, config=None):
    """ Disable a browse layer by removing its MapCache XML configuration;
    with purge=True additionally delete the database models, the EOxServer
    dataset series, the tileset cache file and the optimized files.

    Raises an Exception when the browse layer does not exist.
    """
    config = config or get_ngeo_config()

    # only remove MapCache configuration in order to allow a roll-back
    # without data loss
    if models.BrowseLayer.objects.filter(id=browse_layer.id).exists():
        logger.info("Starting disabling of browse layer '%s'." % browse_layer.id)
    else:
        raise Exception(
            "Could not disable browse layer '%s' as it does not exist."
            % browse_layer.id
        )

    # remove browse layer from MapCache XML
    remove_mapcache_layer_xml(browse_layer, config)

    logger.info("Finished disabling of browse layer '%s'." % browse_layer.id)

    if purge:
        logger.info("Starting purging of browse layer '%s'." % browse_layer.id)
        # remove browse layer model. This should also delete all related browses
        # and browse reports
        models.BrowseLayer.objects.get(id=browse_layer.id).delete()

        # delete EOxServer layer metadata
        # (uses the wrapper's name-mangled private model; apparently no
        # public API exists for this)
        dss = System.getRegistry().getFromFactory(
            "resources.coverages.wrappers.DatasetSeriesFactory",
            {"obj_id": browse_layer.id}
        )
        dss._DatasetSeriesWrapper__model.layer_metadata.all().delete()
        # delete EOxServer dataset series
        dss_mgr = System.getRegistry().findAndBind(
            intf_id="resources.coverages.interfaces.Manager",
            params={
                "resources.coverages.interfaces.res_type": "eo.dataset_series"
            }
        )
        dss_mgr.delete(browse_layer.id)

        # remove source from mapcache sqlite
        mapcache_models.Source.objects.get(name=browse_layer.id).delete()

        # delete browse layer cache
        try:
            logger.info(
                "Deleting tileset for browse layer '%s'." % browse_layer.id
            )
            os.remove(get_tileset_path(browse_layer.browse_type))
        except OSError:
            # when no browse was ingested, the sqlite file does not exist, so
            # just issue a warning
            logger.warning(
                "Could not remove tileset '%s'."
                % get_tileset_path(browse_layer.browse_type)
            )

        # delete all optimized files by deleting the whole directory of the layer
        optimized_dir = get_project_relative_path(join(
            config.get(INGEST_SECTION, "optimized_files_dir"), browse_layer.id
        ))
        try:
            logger.info(
                "Deleting optimized images for browse layer '%s'."
                % browse_layer.id
            )
            shutil.rmtree(optimized_dir)
        except OSError:
            # best-effort cleanup: log but do not abort the purge
            logger.error(
                "Could not remove directory for optimized files: '%s'."
                % optimized_dir
            )

        logger.info("Finished purging of browse layer '%s'." % browse_layer.id)
Esempio n. 18
0
def seed_mapcache(seed_command,
                  config_file,
                  tileset,
                  grid,
                  minx,
                  miny,
                  maxx,
                  maxy,
                  minzoom,
                  maxzoom,
                  start_time,
                  end_time,
                  threads,
                  delete,
                  force=True):

    # translate grid URN to mapcache grid name
    try:
        grid = URN_TO_GRID[grid]
    except KeyError:
        raise SeedException("Invalid grid '%s'." % grid)

    bounds = CRS_BOUNDS[GRID_TO_SRID[grid]]
    full = float(abs(bounds[0]) + abs(bounds[2]))

    dateline_crossed = False
    if maxx > bounds[2]:
        dateline_crossed = True
    # extent is always within [bounds[0],bounds[2]]
    # where maxx can be >bounds[2] but <=full
    if minx < bounds[0] or minx > bounds[2] or maxx < bounds[0] or maxx > full:
        raise SeedException("Invalid extent '%s,%s,%s,%s'." %
                            (minx, miny, maxx, maxy))

    if minzoom is None: minzoom = 0
    if maxzoom is None: maxzoom = 6

    # start- and end-time are expected to be UTC Zulu
    start_time = start_time.replace(tzinfo=None)
    end_time = end_time.replace(tzinfo=None)

    logger.info(
        "Starting mapcache seed with parameters: command='%s', "
        "config_file='%s', tileset='%s', grid='%s', "
        "extent='%s,%s,%s,%s', zoom='%s,%s', nthreads='%s', "
        "mode='%s', dimension='TIME=%sZ/%sZ'." %
        (seed_command, config_file, tileset, grid, minx, miny, maxx, maxy,
         minzoom, maxzoom, threads, "seed" if not delete else "delete",
         start_time.isoformat(), end_time.isoformat()))

    seed_args = [
        seed_command,
        "-c",
        config_file,
        "-t",
        tileset,
        "-g",
        grid,
        "-e",
        "%f,%f,%f,%f" %
        (minx, miny, bounds[2] if dateline_crossed else maxx, maxy),
        "-n",
        str(threads),
        "-z",
        "%d,%d" % (minzoom, maxzoom),
        "-D",
        "TIME=%sZ/%sZ" % (start_time.isoformat(), end_time.isoformat()),
        "-m",
        "seed" if not delete else "delete",
        "-q",
        "-M",
        "1,1",
    ]
    if not delete and force:
        seed_args.append("-f")

    try:
        config = get_ngeo_config()
        timeout = safe_get(config, "mapcache.seed", "timeout")
        timeout = float(timeout) if timeout is not None else 60.0
    except:
        timeout = 60.0

    try:
        lock = FileLock(get_project_relative_path("mapcache_seed.lck"),
                        timeout=timeout)

        with lock:
            logger.debug("mapcache seeding command: '%s'. raw: '%s'." %
                         (" ".join(seed_args), seed_args))
            process = subprocess.Popen(seed_args,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE)

            out, err = process.communicate()
            for string in (out, err):
                for line in string.split("\n"):
                    if line != '':
                        logger.info("MapCache output: %s" % line)

        if process.returncode != 0:
            raise SeedException("'%s' failed. Returncode '%d'." %
                                (seed_command, process.returncode))

        # seed second extent if dateline is crossed
        if dateline_crossed:
            with lock:
                index = seed_args.index("%f,%f,%f,%f" %
                                        (minx, miny, bounds[2], maxy))
                seed_args[index] = "%f,%f,%f,%f" % (bounds[0], miny,
                                                    maxx - full, maxy)
                logger.debug("mapcache seeding command: '%s'. raw: '%s'." %
                             (" ".join(seed_args), seed_args))
                process = subprocess.Popen(seed_args,
                                           stdout=subprocess.PIPE,
                                           stderr=subprocess.PIPE)

                out, err = process.communicate()
                for string in (out, err):
                    for line in string.split("\n"):
                        if line != '':
                            logger.info("MapCache output: %s" % line)

            if process.returncode != 0:
                raise SeedException("'%s' failed. Returncode '%d'." %
                                    (seed_command, process.returncode))

    except LockException, e:
        raise SeedException("Seeding failed: %s" % str(e))