Example #1
def remove_browse(browse_model, browse_layer_model, coverage_id, 
                  seed_areas, config=None):
    """ Delete all models and caches associated with browse model. Image itself 
    is not deleted.
    Returns the extent and filename of the replaced image.
    """
    
    # get previous extent to "un-seed" MapCache in that area
    coverage = eoxs_models.Coverage.objects.get(identifier=coverage_id).cast()
    replaced_extent = coverage.extent
    replaced_filename = coverage.data_items.get(
        semantic__startswith="bands"
    ).location

    # remove an associated height values file from disk if present
    try:
        height_values_item = coverage.data_items.get(
            semantic__startswith="bands"
        )
        os.remove(height_values_item.location)
    except backends_models.DataItem.DoesNotExist:
        pass

    coverage.delete()
    browse_model.delete()
    
    time_model = None
    try:
        time_model = mapcache_models.Time.objects.get(
            start_time__lte=browse_model.start_time,
            end_time__gte=browse_model.end_time,
            source__name=browse_layer_model.id
        )
    except mapcache_models.Time.DoesNotExist:
        # issue a warning if no corresponding Time object exists
        logger.warning("No MapCache Time object found for time: %s, %s"
                       % (browse_model.start_time, browse_model.end_time))

    # unseed here, but only if a matching Time entry was found
    if time_model is not None:
        try:
            seed_mapcache(tileset=browse_layer_model.id,
                          grid=browse_layer_model.grid,
                          minx=time_model.minx, miny=time_model.miny,
                          maxx=time_model.maxx, maxy=time_model.maxy,
                          minzoom=browse_layer_model.lowest_map_level,
                          maxzoom=browse_layer_model.highest_map_level,
                          start_time=time_model.start_time,
                          end_time=time_model.end_time,
                          delete=True,
                          **get_mapcache_seed_config(config))
        except Exception, e:
            logger.warning("Un-seeding failed: %s" % str(e))
Example #2
    def _handle_file(self, filename, config):
        root = etree.parse(filename)

        start_revision = root.findtext(ns_cfg("startRevision"))
        end_revision = root.findtext(ns_cfg("endRevision"))

        remove_layers_elems = root.xpath(
            "cfg:removeConfiguration/cfg:browseLayers",
            namespaces={"cfg": ns_cfg.uri})
        add_layers_elems = root.xpath("cfg:addConfiguration/cfg:browseLayers",
                                      namespaces={"cfg": ns_cfg.uri})

        add_layers = []
        for layers_elem in add_layers_elems:
            add_layers.extend(decode_browse_layers(layers_elem))

        remove_layers = []
        for layers_elem in remove_layers_elems:
            remove_layers.extend(decode_browse_layers(layers_elem))

        # get the mapcache config xml file path to make it transaction safe
        mapcache_config = get_mapcache_seed_config(config)
        mapcache_xml_filename = mapcache_config["config_file"]

        # transaction safety here
        with FileTransaction((mapcache_xml_filename, ), copy=True):
            with transaction.commit_on_success():
                with transaction.commit_on_success(using="mapcache"):
                    for browse_layer in add_layers:
                        if models.BrowseLayer.objects.filter(
                                id=browse_layer.id).exists():
                            update_browse_layer(browse_layer, config)
                        else:
                            add_browse_layer(browse_layer, config)

                    for browse_layer in remove_layers:
                        delete_browse_layer(browse_layer, config=config)

        # set the new revision
        config = config or get_ngeo_config()

        if not config.has_section("config"):
            config.add_section("config")

        revision = int(safe_get(config, "config", "revision", 0))
        config.set("config", "revision", int(end_revision))

        write_ngeo_config()
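
Note that both start_revision and the stored revision are parsed but never compared; the config view in Example #12 carries a TODO for exactly this check. A guard along the following lines is presumably intended (hypothetical, not part of the original source):

        # hypothetical revision check implied by the unused values above
        if start_revision is not None and int(start_revision) != revision:
            raise Exception("Start revision '%s' does not match the current "
                            "revision '%d'." % (start_revision, revision))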
Example #3
def wrapper(*args, **kwargs):
    config = get_ngeo_config()
    mapcache_config = get_mapcache_seed_config(config)
    with FileLock(get_project_relative_path("mapcache.xml.lck")):
        return func(*args, **kwargs)
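
This fragment is the inner function of a decorator that serializes access to the MapCache configuration. A sketch of the presumed enclosing decorator, where the name with_mapcache_lock and the use of functools.wraps are assumptions:

import functools

def with_mapcache_lock(func):  # hypothetical name for the enclosing decorator
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        config = get_ngeo_config()
        mapcache_config = get_mapcache_seed_config(config)  # kept from the fragment; unused here
        # hold a file lock while mapcache.xml may be modified so that
        # concurrent commands cannot corrupt the configuration
        with FileLock(get_project_relative_path("mapcache.xml.lck")):
            return func(*args, **kwargs)
    return wrapper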
Example #4
def read_mapcache_xml(config):
    mapcache_config = get_mapcache_seed_config(config)
    mapcache_xml_filename = mapcache_config["config_file"]
    with open(mapcache_xml_filename) as f:
        parser = etree.XMLParser(remove_blank_text=True)
        return etree.parse(f, parser).getroot()
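
read_mapcache_xml() has its counterpart in write_mapcache_xml() (Example #7). A typical round trip, assuming both helpers see the same config object:

# hypothetical round trip over the MapCache XML configuration
root = read_mapcache_xml(config)
# ... modify `root` with the usual lxml.etree API ...
write_mapcache_xml(root, config)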
Example #5
def remove_browse(browse_model, browse_layer_model, coverage_id,
                  seed_areas, unseed=True, config=None):
    """ Delete all models and caches associated with browse model. Image itself
    is not deleted.
    Returns the extent and filename of the replaced image.
    """

    # get previous extent to "un-seed" MapCache in that area
    rect_ds = System.getRegistry().getFromFactory(
        "resources.coverages.wrappers.EOCoverageFactory",
        {"obj_id": browse_model.coverage_id}
    )
    replaced_extent = rect_ds.getExtent()
    replaced_filename = rect_ds.getData().getLocation().getPath()

    # delete the EOxServer rectified dataset entry
    rect_mgr = System.getRegistry().findAndBind(
        intf_id="resources.coverages.interfaces.Manager",
        params={
            "resources.coverages.interfaces.res_type": "eo.rect_dataset"
        }
    )
    rect_mgr.delete(obj_id=browse_model.coverage_id)
    browse_model.delete()

    # search for time entries with an overlapping time span
    time_model = None
    if browse_model.start_time == browse_model.end_time:
        times_qs = mapcache_models.Time.objects.filter(
            source=browse_layer_model.id,
            start_time__lte=browse_model.end_time,
            end_time__gte=browse_model.start_time
        )
    else:
        times_qs = mapcache_models.Time.objects.filter(
            Q(source=browse_layer_model.id),
            Q(start_time__lt=browse_model.end_time,
              end_time__gt=browse_model.start_time) |
            Q(start_time=F("end_time"),
              start_time__lte=browse_model.end_time,
              end_time__gte=browse_model.start_time)
        )

    if len(times_qs) == 1:
        time_model = times_qs[0]
    elif len(times_qs) == 0:
        # issue a warning if no corresponding Time object exists
        logger.warning("No MapCache Time object found for time: %s, %s" % (
            browse_model.start_time, browse_model.end_time
        ))
    elif len(times_qs) > 1:
        # issue a warning if too many corresponding Time objects exist and
        # try to delete the redundant ones; this situation should never
        # happen, but is handled just in case
        logger.warning("Multiple MapCache Time objects found for time: %s, "
                       "%s. Trying to delete redundant ones." % (
                       browse_model.start_time, browse_model.end_time
        ))
        first = True
        with transaction.commit_manually(using="mapcache"):
            for time_model_tmp in times_qs:
                if first:
                    first = False
                    time_model = time_model_tmp
                elif (time_model_tmp.start_time <= time_model.start_time and
                      time_model_tmp.end_time >= time_model.end_time):
                    time_model.delete()
                    time_model = time_model_tmp
                else:
                    time_model_tmp.delete()
            transaction.commit(using="mapcache")

    if unseed and time_model is not None:
        # unseed here
        try:
            seed_mapcache(tileset=browse_layer_model.id,
                          grid=browse_layer_model.grid,
                          minx=time_model.minx, miny=time_model.miny,
                          maxx=time_model.maxx, maxy=time_model.maxy,
                          minzoom=browse_layer_model.lowest_map_level,
                          maxzoom=browse_layer_model.highest_map_level,
                          start_time=time_model.start_time,
                          end_time=time_model.end_time,
                          delete=True,
                          **get_mapcache_seed_config(config))

        except Exception, e:
            logger.warning("Un-seeding failed: %s" % str(e))
Example #6
def ingest_browse_report(parsed_browse_report, do_preprocessing=True, config=None):
    """ Ingests a browse report. reraise_exceptions if errors shall be handled
    externally
    """

    # initialize the EOxServer system/registry/configuration
    System.init()

    try:
        # get the according browse layer
        browse_type = parsed_browse_report.browse_type
        browse_layer = models.BrowseLayer.objects.get(browse_type=browse_type)
    except models.BrowseLayer.DoesNotExist:
        logger.warn("Browse layer with browse type '%s' does not " "exist." % parsed_browse_report.browse_type)
        raise IngestionException(
            "Browse layer with browse type '%s' does not " "exist." % parsed_browse_report.browse_type
        )

    # generate a browse report model
    browse_report = create_browse_report(parsed_browse_report, browse_layer)

    # initialize the preprocessor with configuration values
    crs = None
    if browse_layer.grid == "urn:ogc:def:wkss:OGC:1.0:GoogleMapsCompatible":
        crs = "EPSG:3857"
    elif browse_layer.grid == "urn:ogc:def:wkss:OGC:1.0:GoogleCRS84Quad":
        crs = "EPSG:4326"

    logger.debug("Using CRS '%s' ('%s')." % (crs, browse_layer.grid))

    # create the required preprocessor/format selection
    format_selection = get_format_selection("GTiff", **get_format_config(config))
    if do_preprocessing:
        # add config parameters and custom params
        params = get_optimization_config(config)

        # add radiometric interval
        rad_min = browse_layer.radiometric_interval_min
        if rad_min is not None:
            params["radiometric_interval_min"] = rad_min
        else:
            rad_min = "min"
        rad_max = browse_layer.radiometric_interval_max
        if rad_max is not None:
            params["radiometric_interval_max"] = rad_max
        else:
            rad_max = "max"

        # add band selection
        if browse_layer.r_band is not None and browse_layer.g_band is not None and browse_layer.b_band is not None:

            bands = [
                (browse_layer.r_band, rad_min, rad_max),
                (browse_layer.g_band, rad_min, rad_max),
                (browse_layer.b_band, rad_min, rad_max),
            ]

            if params["bandmode"] == RGBA:
                # RGBA
                bands.append((0, 0, 0))

            params["bands"] = bands

        preprocessor = NGEOPreProcessor(format_selection, crs=crs, **params)
    else:
        preprocessor = None  # TODO: CopyPreprocessor

    report_result = IngestBrowseReportResult()

    succeeded = []
    failed = []

    timestamp = datetime.utcnow().strftime("%Y%m%d%H%M%S%f")
    browse_dirname = _valid_path(
        "%s_%s_%s_%s"
        % (
            browse_type,
            browse_report.responsible_org_name,
            browse_report.date_time.strftime("%Y%m%d%H%M%S%f"),
            timestamp,
        )
    )
    success_dir = join(get_success_dir(config), browse_dirname)
    failure_dir = join(get_failure_dir(config), browse_dirname)

    if exists(success_dir):
        logger.warning("Success directory '%s' already exists." % success_dir)
    else:
        makedirs(success_dir)
    if exists(failure_dir):
        logger.warning("Failure directory '%s' already exists." % failure_dir)
    else:
        makedirs(failure_dir)

    # iterate over all browses in the browse report
    for parsed_browse in parsed_browse_report:
        # transaction management per browse
        with transaction.commit_manually():
            with transaction.commit_manually(using="mapcache"):
                try:
                    seed_areas = []
                    # try ingest a single browse and log success
                    result = ingest_browse(
                        parsed_browse,
                        browse_report,
                        browse_layer,
                        preprocessor,
                        crs,
                        success_dir,
                        failure_dir,
                        seed_areas,
                        config=config,
                    )

                    report_result.add(result)
                    succeeded.append(parsed_browse)

                    # commit here to allow seeding
                    transaction.commit()
                    transaction.commit(using="mapcache")

                    logger.info("Committed changes to database.")

                    for minx, miny, maxx, maxy, start_time, end_time in seed_areas:
                        try:

                            # seed MapCache synchronously
                            # TODO: maybe replace this with an async solution
                            seed_mapcache(
                                tileset=browse_layer.id,
                                grid=browse_layer.grid,
                                minx=minx,
                                miny=miny,
                                maxx=maxx,
                                maxy=maxy,
                                minzoom=browse_layer.lowest_map_level,
                                maxzoom=browse_layer.highest_map_level,
                                start_time=start_time,
                                end_time=end_time,
                                delete=False,
                                **get_mapcache_seed_config(config)
                            )
                            logger.info("Successfully finished seeding.")

                        except Exception, e:
                            logger.warning("Seeding failed: %s" % str(e))

                    # log ingestions for report generation
                    # date/browseType/browseLayerId/start/end
                    report_logger.info(
                        "/\\/\\".join(
                            (
                                datetime.utcnow().isoformat("T") + "Z",
                                parsed_browse_report.browse_type,
                                browse_layer.id,
                                (
                                    parsed_browse.start_time.replace(tzinfo=None) - parsed_browse.start_time.utcoffset()
                                ).isoformat("T")
                                + "Z",
                                (
                                    parsed_browse.end_time.replace(tzinfo=None) - parsed_browse.end_time.utcoffset()
                                ).isoformat("T")
                                + "Z",
                            )
                        )
                    )

                except Exception, e:
                    # report error
                    logger.error("Failure during ingestion of browse '%s'." % parsed_browse.browse_identifier)
                    logger.error("Exception was '%s': %s" % (type(e).__name__, str(e)))
                    logger.debug(traceback.format_exc() + "\n")

                    # undo latest changes, append the failure and continue
                    report_result.add(
                        IngestBrowseFailureResult(
                            parsed_browse.browse_identifier, getattr(e, "code", None) or type(e).__name__, str(e)
                        )
                    )
                    failed.append(parsed_browse)

                    transaction.rollback()
                    transaction.rollback(using="mapcache")
Example #7
def write_mapcache_xml(root, config):
    mapcache_config = get_mapcache_seed_config(config)
    mapcache_xml_filename = mapcache_config["config_file"]
    with open(mapcache_xml_filename, "w") as f:
        f.write(etree.tostring(root, pretty_print=True))
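
Because write_mapcache_xml() overwrites the configuration file in place, the synchronization handlers in these examples (e.g. Example #2) wrap such writes in a FileTransaction alongside the database transactions, so a failure rolls back both. A condensed sketch of that pattern, assuming root and config are already defined:

# sketch of the guard used by the synchronization handlers above
mapcache_xml_filename = get_mapcache_seed_config(config)["config_file"]
with FileTransaction((mapcache_xml_filename,), copy=True):
    with transaction.commit_on_success():
        with transaction.commit_on_success(using="mapcache"):
            write_mapcache_xml(root, config)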
Example #8
    def _handle(self, start, end, browse_layer_id, browse_type):
        from ngeo_browse_server.control.queries import remove_browse

        # query the browse layer
        if browse_layer_id:
            try:
                browse_layer_model = BrowseLayer.objects.get(
                    id=browse_layer_id)
            except BrowseLayer.DoesNotExist:
                logger.error("Browse layer '%s' does not exist" %
                             browse_layer_id)
                raise CommandError("Browse layer '%s' does not exist" %
                                   browse_layer_id)
        else:
            try:
                browse_layer_model = BrowseLayer.objects.get(
                    browse_type=browse_type)
            except BrowseLayer.DoesNotExist:
                logger.error("Browse layer with browse type'%s' does "
                             "not exist" % browse_type)
                raise CommandError("Browse layer with browse type'%s' does "
                                   "not exist" % browse_type)

        # get all browses of browse layer
        browses_qs = Browse.objects.filter(browse_layer=browse_layer_model)

        # apply start/end filter
        if start and not end:
            browses_qs = browses_qs.filter(start_time__gte=start)
        elif end and not start:
            browses_qs = browses_qs.filter(end_time__lte=end)
        elif start and end:
            browses_qs = browses_qs.filter(start_time__gte=start,
                                           end_time__lte=end)

        paths_to_delete = []
        seed_areas = []

        with transaction.commit_on_success():
            with transaction.commit_on_success(using="mapcache"):
                logger.info("Deleting '%d' browse%s from database." %
                            (browses_qs.count(),
                             "s" if browses_qs.count() > 1 else ""))
                # go through all browses to be deleted
                for browse_model in browses_qs:

                    _, filename = remove_browse(browse_model,
                                                browse_layer_model,
                                                browse_model.coverage_id,
                                                seed_areas)

                    paths_to_delete.append(filename)

        # loop through optimized browse images and delete them
        # This is done at this point to make sure a rollback is possible
        # if there is an error while deleting the browses and coverages
        for file_path in paths_to_delete:
            if exists(file_path):
                remove(file_path)
                logger.info("Optimized browse image deleted: %s" % file_path)
            else:
                logger.warning("Optimized browse image to be deleted not found"
                               " in path: %s" % file_path)

        # if only part of the time range was deleted, browses may remain in
        # some seed areas; re-seed the areas that extend beyond the deletion
        if start or end:
            if start:
                if end:
                    seed_areas = [
                        area for area in seed_areas
                        if not (area[4] >= start and area[5] <= end)
                    ]
                else:
                    seed_areas = [
                        area for area in seed_areas if not (area[4] >= start)
                    ]
            else:
                seed_areas = [
                    area for area in seed_areas if not (area[5] <= end)
                ]

            for minx, miny, maxx, maxy, start_time, end_time in seed_areas:
                try:

                    # seed MapCache synchronously
                    # TODO: maybe replace this with an async solution
                    seed_mapcache(tileset=browse_layer_model.id,
                                  grid=browse_layer_model.grid,
                                  minx=minx,
                                  miny=miny,
                                  maxx=maxx,
                                  maxy=maxy,
                                  minzoom=browse_layer_model.lowest_map_level,
                                  maxzoom=browse_layer_model.highest_map_level,
                                  start_time=start_time,
                                  end_time=end_time,
                                  delete=False,
                                  **get_mapcache_seed_config())
                    logger.info("Successfully finished seeding.")

                except Exception, e:
                    logger.warning("Seeding failed: %s" % str(e))
Example #9
            miny = min(miny, time_model.miny)
            maxx = max(maxx, time_model.maxx)
            maxy = max(maxy, time_model.maxy)
            start_time = min(start_time, time_model.start_time)
            end_time = max(end_time, time_model.end_time)

            seed_mapcache(tileset=browse_layer_model.id,
                          grid=browse_layer_model.grid,
                          minx=time_model.minx, miny=time_model.miny,
                          maxx=time_model.maxx, maxy=time_model.maxy,
                          minzoom=browse_layer_model.lowest_map_level,
                          maxzoom=browse_layer_model.highest_map_level,
                          start_time=time_model.start_time,
                          end_time=time_model.end_time,
                          delete=True,
                          **get_mapcache_seed_config(config))

        logger.info("Result time span is %s/%s." % (isoformat(start_time),
                                                    isoformat(end_time)))
        times_qs.delete()

    time_model = mapcache_models.Time(start_time=start_time, end_time=end_time,
                                      minx=minx, miny=miny,
                                      maxx=maxx, maxy=maxy,
                                      source=source)

    time_model.full_clean()
    time_model.save()

    seed_areas.append((minx, miny, maxx, maxy, start_time, end_time))
Example #10
def import_browse_report(p, browse_report_file, browse_layer_model, crs,
                         seed_cache_levels, import_cache_levels, config):
    """
    """

    seed_areas = []

    report_result = IngestBrowseReportResult()

    browse_report = decode_browse_report(etree.parse(browse_report_file))
    browse_report_model = create_browse_report(browse_report,
                                               browse_layer_model)
    for browse in browse_report:
        with transaction.commit_manually():
            with transaction.commit_manually(using="mapcache"):
                try:

                    result = import_browse(p, browse, browse_report_model,
                                           browse_layer_model, crs, seed_areas,
                                           config)
                    report_result.add(result)

                    transaction.commit()
                    transaction.commit(using="mapcache")

                except Exception, e:
                    logger.error("Failure during import of browse '%s'." %
                                 browse.browse_identifier)
                    logger.debug(traceback.format_exc() + "\n")
                    transaction.rollback()
                    transaction.rollback(using="mapcache")

                    report_result.add(IngestBrowseFailureResult(
                        browse.browse_identifier,
                        type(e).__name__, str(e))
                    )

                    continue

        tileset_name = browse_layer_model.id
        dim = isotime(browse.start_time) + "/" + isotime(browse.end_time)
        ts = tileset.open(get_tileset_path(browse_layer_model.browse_type, config), mode="w")

        grid = URN_TO_GRID[browse_layer_model.grid]
        tile_num = 0

        # import cache
        for minzoom, maxzoom in import_cache_levels:
            logger.info("Importing cached tiles from zoom level %d to %d."
                        % (minzoom, maxzoom))

            for x, y, z, f in p.get_cache_files(tileset_name, grid, dim):
                if z < minzoom or z > maxzoom:
                    continue

                ts.add_tile(tileset_name, grid, dim, x, y, z, f)
                tile_num += 1

        logger.info("Imported %d cached tiles." % tile_num)

        # seed cache
        for minzoom, maxzoom in seed_cache_levels:
            logger.info("Re-seeding tile cache from zoom level %d to %d."
                        % (minzoom, maxzoom))

            seed_mapcache(tileset=browse_layer_model.id,
                          grid=browse_layer_model.grid,
                          minx=result.extent[0], miny=result.extent[1],
                          maxx=result.extent[2], maxy=result.extent[3],
                          minzoom=minzoom,
                          maxzoom=maxzoom,
                          start_time=result.time_interval[0],
                          end_time=result.time_interval[1],
                          delete=False,
                          **get_mapcache_seed_config(config))

            logger.info("Successfully finished seeding.")
Example #11
    def handle(self, *browse_layer_id, **kwargs):
        # parse command arguments
        self.verbosity = int(kwargs.get("verbosity", 1))
        traceback = kwargs.get("traceback", False)
        self.set_up_logging(["ngeo_browse_server"], self.verbosity, traceback)

        # check consistency
        if not len(browse_layer_id):
            logger.error("No browse layer given.")
            raise CommandError("No browse layer given.")
        elif len(browse_layer_id) > 1:
            logger.error("Too many browse layers given.")
            raise CommandError("Too many browse layers given.")
        else:
            browse_layer_id = browse_layer_id[0]

        try:
            # get the according browse layer
            browse_layer = models.BrowseLayer.objects.get(id=browse_layer_id)
        except models.BrowseLayer.DoesNotExist:
            logger.error("Browse layer '%s' does not exist." % browse_layer_id)
            raise CommandError("Browse layer '%s' does not exist." %
                               browse_layer_id)

        start = kwargs.get("start")
        end = kwargs.get("end")
        dry_run = kwargs.get("dry_run")
        force = kwargs.get("force")

        # parse start/end if given
        if start:
            start = getDateTime(start)
        if end:
            end = getDateTime(end)

        if force:
            logger.info("Starting reseeding browse layer '%s'." %
                        browse_layer_id)
        else:
            logger.info("Starting seeding browse layer '%s'." %
                        browse_layer_id)

        times_qs = mapcache_models.Time.objects.filter(source=browse_layer.id)

        # apply start/end filter
        if start and not end:
            times_qs = times_qs.filter(start_time__gte=start)
        elif end and not start:
            times_qs = times_qs.filter(end_time__lte=end)
        elif start and end:
            times_qs = times_qs.filter(start_time__gte=start,
                                       end_time__lte=end)

        for time_model in times_qs:

            if dry_run:
                logger.info("Time span to (re)seed is %s/%s." % (isotime(
                    time_model.start_time), isotime(time_model.end_time)))
            else:
                try:
                    logger.info("(Re)seeding time span %s/%s." % (isotime(
                        time_model.start_time), isotime(time_model.end_time)))
                    seed_mapcache(tileset=browse_layer.id,
                                  grid=browse_layer.grid,
                                  minx=time_model.minx,
                                  miny=time_model.miny,
                                  maxx=time_model.maxx,
                                  maxy=time_model.maxy,
                                  minzoom=browse_layer.lowest_map_level,
                                  maxzoom=browse_layer.highest_map_level,
                                  start_time=time_model.start_time,
                                  end_time=time_model.end_time,
                                  delete=False,
                                  force=force,
                                  **get_mapcache_seed_config())
                    logger.info("Successfully finished (re)seeding time span.")
                except Exception, e:
                    logger.warning("(Re)seeding failed: %s" % str(e))
Example #12
def config(request):
    try:
        status = get_status()
        config = get_ngeo_config()

        if request.method not in ("PUT", "POST"):
            raise Exception("Invalid request method '%s'." % request.method)

        if request.method == "POST":
            # "setting" new configuration, which means removing the previous one.
            action = "set"
        else:
            action = "update"

        root = etree.parse(request)

        start_revision = root.findtext(ns_cfg("startRevision"))
        end_revision = root.findtext(ns_cfg("endRevision"))

        # TODO: check current and last revision

        remove_layers_elems = root.xpath(
            "cfg:removeConfiguration/cfg:browseLayers",
            namespaces={"cfg": ns_cfg.uri})
        add_layers_elems = root.xpath("cfg:addConfiguration/cfg:browseLayers",
                                      namespaces={"cfg": ns_cfg.uri})

        add_layers = []
        for layers_elem in add_layers_elems:
            add_layers.extend(decode_browse_layers(layers_elem))

        remove_layers = []
        for layers_elem in remove_layers_elems:
            remove_layers.extend(decode_browse_layers(layers_elem))

        # get the mapcache config xml file path to make it transaction safe

        mapcache_config = get_mapcache_seed_config(config)
        mapcache_xml_filename = mapcache_config["config_file"]

        # transaction safety here
        with FileTransaction((mapcache_xml_filename, ), copy=True):
            with transaction.commit_on_success():
                with transaction.commit_on_success(using="mapcache"):
                    for browse_layer in add_layers:
                        if models.BrowseLayer.objects.filter(
                                id=browse_layer.id).exists():
                            update_browse_layer(browse_layer, config)
                        else:
                            add_browse_layer(browse_layer, config)

                    for browse_layer in remove_layers:
                        delete_browse_layer(browse_layer, config=config)

        # set the new revision
        config = get_ngeo_config()

        if not config.has_section("config"):
            config.add_section("config")

        revision = int(safe_get(config, "config", "revision", 0))
        config.set("config", "revision", int(end_revision))

        write_ngeo_config()

        # return with the new revision
        return HttpResponse(
            '<?xml version="1.0"?>\n'
            '<synchronizeConfigurationResponse>%s</synchronizeConfigurationResponse>'
            % end_revision)

    except Exception, e:
        logger.error("%s: %s" % (type(e).__name__, str(e)))
        logger.debug(traceback.format_exc())

        return HttpResponse('<faultcode>ConfigurationError</faultcode>\n'
                            '<faultstring>%s</faultstring>' % str(e),
                            status=400)
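
The view parses a synchronizeConfiguration document whose shape can be inferred from the XPath expressions above. A self-contained sketch of that shape (the root element name and the urn:example:cfg namespace are placeholders; the real URI is ns_cfg.uri):

from io import BytesIO
from lxml import etree

CFG = "urn:example:cfg"  # placeholder for ns_cfg.uri

doc = b"""<synchronizeConfiguration xmlns:cfg="urn:example:cfg">
  <cfg:startRevision>1</cfg:startRevision>
  <cfg:endRevision>2</cfg:endRevision>
  <cfg:addConfiguration><cfg:browseLayers/></cfg:addConfiguration>
  <cfg:removeConfiguration><cfg:browseLayers/></cfg:removeConfiguration>
</synchronizeConfiguration>"""

tree = etree.parse(BytesIO(doc))
print(tree.findtext("{%s}endRevision" % CFG))        # -> 2
print(tree.xpath("cfg:addConfiguration/cfg:browseLayers",
                 namespaces={"cfg": CFG}))           # -> [<Element ...>]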
Example #13
def config(request):
    try:
        status = get_status()
        config = get_ngeo_config()

        if request.method not in ("PUT", "POST"):
            raise Exception("Invalid request method '%s'." % request.method)

        if request.method == "POST":
            # "setting" new configuration, which means removing the previous one.
            action = "set"
        else:
            action = "update"

        root = etree.parse(request)

        start_revision = root.findtext(ns_cfg("startRevision"))
        end_revision = root.findtext(ns_cfg("endRevision"))

        # TODO: check current and last revision

        remove_layers_elems = root.xpath("cfg:removeConfiguration/cfg:browseLayers", namespaces={"cfg": ns_cfg.uri})
        add_layers_elems = root.xpath("cfg:addConfiguration/cfg:browseLayers", namespaces={"cfg": ns_cfg.uri})

        add_layers = []
        for layers_elem in add_layers_elems:
            add_layers.extend(decode_browse_layers(layers_elem))

        remove_layers = []
        for layers_elem in remove_layers_elems:
            remove_layers.extend(decode_browse_layers(layers_elem))

        # get the mapcache config xml file path to make it transaction safe

        mapcache_config = get_mapcache_seed_config(config)
        mapcache_xml_filename = mapcache_config["config_file"]

        # transaction safety here
        with FileTransaction((mapcache_xml_filename,), copy=True):
            with transaction.commit_on_success():
                with transaction.commit_on_success(using="mapcache"):
                    for browse_layer in add_layers:
                        if models.BrowseLayer.objects.filter(id=browse_layer.id).exists():
                            update_browse_layer(browse_layer, config)
                        else:
                            add_browse_layer(browse_layer, config)

                    for browse_layer in remove_layers:
                        delete_browse_layer(browse_layer, config)

        # set the new revision
        config = get_ngeo_config()

        if not config.has_section("config"):
            config.add_section("config")

        revision = int(safe_get(config, "config", "revision", 0))
        config.set("config", "revision", end_revision)

        write_ngeo_config()

        # return with the new revision
        return HttpResponse('<?xml version="1.0"?>\n'
            '<synchronizeConfigurationResponse>%s</synchronizeConfigurationResponse>'
            % end_revision
        )

    except Exception, e:
        logger.error("%s: %s" % (type(e).__name__, str(e)))
        logger.debug(traceback.format_exc())

        return HttpResponse(
            '<faultcode>ConfigurationError</faultcode>\n'
            '<faultstring>%s</faultstring>' % str(e), status=400
        )
Example #14
def ingest_browse_report(parsed_browse_report, do_preprocessing=True, config=None):
    """ Ingests a browse report. reraise_exceptions if errors shall be handled 
    externally
    """
    
    try:
        # get the according browse layer
        browse_type = parsed_browse_report.browse_type
        browse_layer = models.BrowseLayer.objects.get(browse_type=browse_type)
    except models.BrowseLayer.DoesNotExist:
        raise IngestionException("Browse layer with browse type '%s' does not "
                                 "exist." % parsed_browse_report.browse_type)
    
    # generate a browse report model
    browse_report = create_browse_report(parsed_browse_report, browse_layer)
    
    # initialize the preprocessor with configuration values
    crs = None
    if browse_layer.grid == "urn:ogc:def:wkss:OGC:1.0:GoogleMapsCompatible":
        crs = "EPSG:3857"
    elif browse_layer.grid == "urn:ogc:def:wkss:OGC:1.0:GoogleCRS84Quad":
        crs = "EPSG:4326"
        
    logger.debug("Using CRS '%s' ('%s')." % (crs, browse_layer.grid))
    
    # create the required preprocessor/format selection
    format_selection = get_format_selection("GTiff",
                                            **get_format_config(config))

    if (do_preprocessing and not browse_layer.contains_vertical_curtains
            and not browse_layer.contains_volumes):
        # add config parameters and custom params
        params = get_optimization_config(config)
        
        # add radiometric interval
        rad_min = browse_layer.radiometric_interval_min
        if rad_min is not None:
            params["radiometric_interval_min"] = rad_min
        else:
            rad_min = "min"
        rad_max = browse_layer.radiometric_interval_max
        if rad_max is not None:
            params["radiometric_interval_max"] = rad_max
        else:
            rad_max = "max"
        
        # add band selection
        if (browse_layer.r_band is not None and 
            browse_layer.g_band is not None and 
            browse_layer.b_band is not None):
            
            bands = [(browse_layer.r_band, rad_min, rad_max), 
                     (browse_layer.g_band, rad_min, rad_max), 
                     (browse_layer.b_band, rad_min, rad_max)]
            
            if params["bandmode"] == RGBA:
                # RGBA output: append a fourth band entry (band 0 with a
                # zero interval)
                bands.append((0, 0, 0))
            
            params["bands"] = bands
        
        preprocessor = NGEOPreProcessor(format_selection, crs=crs, **params)

    elif browse_layer.contains_vertical_curtains:

        logger.info("Preparing Vertical Curtain Pre-Processor")

        params = {}

        # add radiometric interval
        rad_min = browse_layer.radiometric_interval_min
        if rad_min is not None:
            params["radiometric_interval_min"] = rad_min
        else:
            rad_min = "min"
        rad_max = browse_layer.radiometric_interval_max
        if rad_max is not None:
            params["radiometric_interval_max"] = rad_max
        else:
            rad_max = "max"

        preprocessor = VerticalCurtainPreprocessor(**params)

    elif browse_layer.contains_volumes:
        preprocessor = VolumePreProcessor()

    else:
        preprocessor = None # TODO: CopyPreprocessor
    
    report_result = IngestBrowseReportResult()
    
    succeeded = []
    failed = []
    
    timestamp = datetime.utcnow().strftime("%Y%m%d%H%M%S%f")
    browse_dirname = _valid_path("%s_%s_%s_%s" % (
        browse_type, browse_report.responsible_org_name,
        browse_report.date_time.strftime("%Y%m%d%H%M%S%f"),
        timestamp
    ))
    success_dir = join(get_success_dir(config), browse_dirname)
    failure_dir = join(get_failure_dir(config), browse_dirname)
    
    if exists(success_dir):
        logger.warning("Success directory '%s' already exists." % success_dir)
    else:
        makedirs(success_dir)
    if exists(failure_dir):
        logger.warning("Failure directory '%s' already exists." % failure_dir)
    else:
        makedirs(failure_dir)
    
    # iterate over all browses in the browse report
    for parsed_browse in parsed_browse_report:
        # transaction management per browse
        with transaction.commit_manually():
            with transaction.commit_manually(using="mapcache"):
                try:
                    seed_areas = []
                    # try to ingest a single browse and log success
                    result = ingest_browse(parsed_browse, browse_report,
                                           browse_layer, preprocessor, crs,
                                           success_dir, failure_dir,
                                           seed_areas, config=config)
                    
                    report_result.add(result)
                    succeeded.append(parsed_browse)
                    
                    # commit here to allow seeding
                    transaction.commit()
                    transaction.commit(using="mapcache")

                    logger.info("Committed changes to database.")

                    if not browse_layer.contains_vertical_curtains and not browse_layer.contains_volumes:
                    
                        for minx, miny, maxx, maxy, start_time, end_time in seed_areas:
                            try:
                                
                                # seed MapCache synchronously
                                # TODO: maybe replace this with an async solution
                                seed_mapcache(tileset=browse_layer.id, 
                                              grid=browse_layer.grid, 
                                              minx=minx, miny=miny, 
                                              maxx=maxx, maxy=maxy, 
                                              minzoom=browse_layer.lowest_map_level, 
                                              maxzoom=browse_layer.highest_map_level,
                                              start_time=start_time,
                                              end_time=end_time,
                                              delete=False,
                                              **get_mapcache_seed_config(config))
                                logger.info("Successfully finished seeding.")
                                
                            except Exception as e:
                                logger.warning("Seeding failed: %s" % str(e))
                    
                    elif not browse_layer.contains_volumes:

                        host = "http://localhost/browse/ows"

                        level_0_num_tiles_y = 2  # rows
                        level_0_num_tiles_x = 4  # cols

                        seed_levels = range(browse_layer.lowest_map_level,
                                            browse_layer.highest_map_level)

                        for tile_level in seed_levels:

                            tiles_x = level_0_num_tiles_x * 2 ** tile_level
                            tiles_y = level_0_num_tiles_y * 2 ** tile_level

                            # find which tiles are crossed by the extent; use
                            # float division to avoid integer truncation
                            tile_width = 360.0 / tiles_x
                            tile_height = 180.0 / tiles_y

                            coverage = eoxs_models.Coverage.objects.get(identifier=result.identifier)

                            # cycle through all tiles of this level
                            for col in range(tiles_x):
                                for row in range(tiles_y):

                                    west = -180 + (col * tile_width)
                                    east = west + tile_width
                                    north = 90 - (row * tile_height)
                                    south = north - tile_height

                                    if coverage.footprint.intersects(Polygon.from_bbox((west, south, east, north))):

                                        try:
                                            # NOTE: The MeshFactory ignores time
                                            time = "%s/%s" % (isoformat(result.time_interval[0]),
                                                              isoformat(result.time_interval[1]))
                                            
                                            baseurl = host + '?service=W3DS&request=GetTile&version=1.0.0&crs=EPSG:4326&layer={0}&style=default&format=model/gltf'.format(browse_layer.id)
                                            url = '{0}&tileLevel={1}&tilecol={2}&tilerow={3}&time={4}'.format(baseurl, tile_level, col, row, time)

                                            logger.info('Seeding call to URL: %s' % (url,))

                                            response = urllib2.urlopen(url)
                                            response.close()

                                        except Exception as e:
                                            logger.warning("Seeding failed: %s" % str(e))

                        transaction.commit()

                    else:
                        pass
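The W3DS seeding loop in the vertical-curtain branch above walks a fixed WGS84 tile grid with a 4 x 2 layout at level 0, doubling in each dimension per level. A small helper, sketched here under the same assumptions (hypothetical, not part of the original code), shows how a longitude/latitude pair maps to a tile column and row:

# Sketch of the tile arithmetic used above; assumes the same 4 x 2 level-0
# WGS84 grid (hypothetical helper, not original code).
def tile_for(lon, lat, tile_level, level_0_cols=4, level_0_rows=2):
    cols = level_0_cols * 2 ** tile_level
    rows = level_0_rows * 2 ** tile_level
    tile_width = 360.0 / cols    # degrees per tile, east-west
    tile_height = 180.0 / rows   # degrees per tile, north-south
    col = int((lon + 180) // tile_width)
    row = int((90 - lat) // tile_height)
    return col, row

# tile_for(16.37, 48.21, 3) returns (17, 3): the level-3 tile covering Vienna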
Example #21
def wrapper(*args, **kwargs):
    config = get_ngeo_config()
    mapcache_config = get_mapcache_seed_config(config)
    with FileLock(get_project_relative_path("mapcache.xml.lck")):
        return func(*args, **kwargs)
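Example #21 is only the inner function of a file-locking decorator; func comes from an enclosing scope that the excerpt does not show. A hedged reconstruction of that enclosing decorator (the name with_mapcache_lock is an assumption) might look like:

# Hypothetical enclosing decorator; only the inner wrapper appears in the
# original excerpt.
from functools import wraps

def with_mapcache_lock(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        config = get_ngeo_config()
        # kept from the original wrapper, though unused in the excerpt
        mapcache_config = get_mapcache_seed_config(config)
        with FileLock(get_project_relative_path("mapcache.xml.lck")):
            return func(*args, **kwargs)
    return wrapper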
Example #22
            maxy = max(maxy, time_model.maxy)
            start_time = min(start_time, time_model.start_time)
            end_time = max(end_time, time_model.end_time)

            seed_mapcache(tileset=browse_layer_model.id,
                          grid=browse_layer_model.grid,
                          minx=time_model.minx,
                          miny=time_model.miny,
                          maxx=time_model.maxx,
                          maxy=time_model.maxy,
                          minzoom=browse_layer_model.lowest_map_level,
                          maxzoom=browse_layer_model.highest_map_level,
                          start_time=time_model.start_time,
                          end_time=time_model.end_time,
                          delete=True,
                          **get_mapcache_seed_config(config))

        logger.info("Result time span is %s/%s." %
                    (isotime(start_time), isotime(end_time)))
        times_qs.delete()

    time_model = mapcache_models.Time(start_time=start_time,
                                      end_time=end_time,
                                      minx=minx,
                                      miny=miny,
                                      maxx=maxx,
                                      maxy=maxy,
                                      source=source)

    time_model.full_clean()
    time_model.save()
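The fragment above folds overlapping MapCache Time rows into one envelope: each old row is un-seeded with delete=True, the rows are deleted, and a single merged row is saved. The envelope computation on its own, as a hypothetical standalone helper (names assumed, not original code):

# Standalone sketch of the envelope merge above.
def merge_envelope(time_models):
    """Return the combined bbox/time envelope of overlapping Time rows."""
    minx = min(t.minx for t in time_models)
    miny = min(t.miny for t in time_models)
    maxx = max(t.maxx for t in time_models)
    maxy = max(t.maxy for t in time_models)
    start_time = min(t.start_time for t in time_models)
    end_time = max(t.end_time for t in time_models)
    return minx, miny, maxx, maxy, start_time, end_time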
Example #23
def read_mapcache_xml(config):
    mapcache_config = get_mapcache_seed_config(config)
    mapcache_xml_filename = mapcache_config["config_file"]
    with open(mapcache_xml_filename) as f:
        parser = etree.XMLParser(remove_blank_text=True)
        return etree.parse(f, parser).getroot()
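A short usage sketch for read_mapcache_xml; the <tileset name="..."> children are an assumption based on a typical MapCache configuration file:

# Usage sketch; assumes a valid ngEO config and a MapCache XML whose root
# has <tileset name="..."> children (typical MapCache layout).
config = get_ngeo_config()
root = read_mapcache_xml(config)
tileset_names = [ts.get("name") for ts in root.findall("tileset")]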
Example #24
    def handle(self, *browse_layer_id, **kwargs):
        # parse command arguments
        self.verbosity = int(kwargs.get("verbosity", 1))
        traceback = kwargs.get("traceback", False)
        self.set_up_logging(["ngeo_browse_server"], self.verbosity, traceback)

        # check consistency
        if not browse_layer_id:
            logger.error("No browse layer given.")
            raise CommandError("No browse layer given.")
        elif len(browse_layer_id) > 1:
            logger.error("Too many browse layers given.")
            raise CommandError("Too many browse layers given.")
        else:
            browse_layer_id = browse_layer_id[0]

        try:
            # get the according browse layer
            browse_layer = models.BrowseLayer.objects.get(id=browse_layer_id)
        except models.BrowseLayer.DoesNotExist:
            logger.error("Browse layer '%s' does not exist."
                         % browse_layer_id)
            raise CommandError("Browse layer '%s' does not exist."
                               % browse_layer_id)

        start = kwargs.get("start")
        end = kwargs.get("end")
        dry_run = kwargs.get("dry_run")
        force = kwargs.get("force")

        # parse start/end if given
        if start:
            start = getDateTime(start)
        if end:
            end = getDateTime(end)

        if force:
            logger.info("Starting reseeding browse layer '%s'." % browse_layer_id)
        else:
            logger.info("Starting seeding browse layer '%s'." % browse_layer_id)

        times_qs = mapcache_models.Time.objects.filter(
            source=browse_layer.id
        )

        # apply start/end filter
        if start and not end:
            times_qs = times_qs.filter(start_time__gte=start)
        elif end and not start:
            times_qs = times_qs.filter(end_time__lte=end)
        elif start and end:
            times_qs = times_qs.filter(start_time__gte=start,
                                       end_time__lte=end)

        for time_model in times_qs:

            if dry_run:
                logger.info("Time span to (re)seed is %s/%s."
                            % (isotime(time_model.start_time),
                               isotime(time_model.end_time)))
            else:
                try:
                    logger.info("(Re)seeding time span %s/%s."
                                % (isotime(time_model.start_time),
                                   isotime(time_model.end_time)))
                    seed_mapcache(tileset=browse_layer.id,
                                  grid=browse_layer.grid,
                                  minx=time_model.minx, miny=time_model.miny,
                                  maxx=time_model.maxx, maxy=time_model.maxy,
                                  minzoom=browse_layer.lowest_map_level,
                                  maxzoom=browse_layer.highest_map_level,
                                  start_time=time_model.start_time,
                                  end_time=time_model.end_time,
                                  delete=False, force=force,
                                  **get_mapcache_seed_config())
                    logger.info("Successfully finished (re)seeding time span.")
                except Exception as e:
                    logger.warning("(Re)seeding failed: %s" % str(e))
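Invoking this management command from the shell could look like the following; the command name ngeo_mapcache_seed and the option spellings are assumptions derived from the keyword arguments parsed above:

# Hypothetical invocation (command and option names assumed):
#   python manage.py ngeo_mapcache_seed MyLayer \
#       --start 2014-01-01T00:00:00Z --end 2014-02-01T00:00:00Z --force
# add --dry-run to only log the affected time spans without seeding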