Example #1
    def handle(self, *filenames, **kwargs):
        System.init()

        # parse command arguments
        self.verbosity = int(kwargs.get("verbosity", 1))
        traceback = kwargs.get("traceback", False)
        self.set_up_logging(["ngeo_browse_server"], self.verbosity, traceback)

        logger.info("Starting browse layer configuration from command line.")

        if not filenames:
            raise CommandError("No input files provided.")

        on_error = kwargs["on_error"]

        config = get_ngeo_config()

        no_files_handled_success = 0
        no_files_handled_error = 0
        # handle each file separately
        for filename in filenames:
            try:
                # handle each browse layer xml
                self._handle_file(filename, config)
                no_files_handled_success += 1
            except Exception as e:
                # handle exceptions
                no_files_handled_error += 1
                logger.error("%s: %s" % (type(e).__name__, str(e)))
                if on_error == "continue":
                    # continue the execution with the next file
                    continue
                elif on_error == "stop":
                    # wrap the exception in a CommandError to stop execution
                    raise CommandError(e)
Example #2
 def _get_containers(self, params):
     containers = params.get("container_ids", [])
     wrappers = []
     
     for obj_id in containers:
         wrapper = System.getRegistry().getFromFactory(
             "resources.coverages.wrappers.DatasetSeriesFactory",
             {"obj_id": obj_id}
         )
         
         if not wrapper:
             wrapper = System.getRegistry().getFromFactory(
                 "resources.coverages.wrappers.EOCoverageFactory", {
                     "impl_id":"resources.coverages.wrappers.RectifiedStitchedMosaicWrapper",
                     "obj_id": obj_id
                 }
             )
         
         if not wrapper:
             raise InternalError(
                 "Dataset Series or Rectified Stitched Mosaic with ID %s not found." % obj_id
             ) 
         
         wrappers.append(wrapper)
     
     return wrappers 
Example #3
    def handle(self, *args, **kwargs):
        System.init()

        # parse command arguments
        self.verbosity = int(kwargs.get("verbosity", 1))
        traceback = kwargs.get("traceback", False)
        self.set_up_logging(["ngeo_browse_server"], self.verbosity, traceback)

        logger.info("Starting browse deletion from command line.")

        browse_layer_id = kwargs.get("browse_layer_id")
        browse_type = kwargs.get("browse_type")
        if not browse_layer_id and not browse_type:
            logger.error("No browse layer or browse type was specified.")
            raise CommandError("No browse layer or browse type was specified.")
        elif browse_layer_id and browse_type:
            logger.error("Both browse layer and browse type were specified.")
            raise CommandError(
                "Both browse layer and browse type were specified."
            )

        start = kwargs.get("start")
        end = kwargs.get("end")

        # parse start/end if given
        if start:
            start = getDateTime(start)
        if end:
            end = getDateTime(end)

        self._handle(start, end, browse_layer_id, browse_type)

        logger.info("Successfully finished browse deletion from command line.")
Example #4
    def handle(self, *args, **kwargs):
        System.init()

        # parse command arguments
        self.verbosity = int(kwargs.get("verbosity", 1))
        traceback = kwargs.get("traceback", False)
        self.set_up_logging(["ngeo_browse_server"], self.verbosity, traceback)

        logger.info("Starting browse deletion from command line.")

        browse_layer_id = kwargs.get("browse_layer_id")
        browse_type = kwargs.get("browse_type")
        if not browse_layer_id and not browse_type:
            logger.error("No browse layer or browse type was specified.")
            raise CommandError("No browse layer or browse type was specified.")
        elif browse_layer_id and browse_type:
            logger.error("Both browse layer and browse type were specified.")
            raise CommandError(
                "Both browse layer and browse type were specified.")

        start = kwargs.get("start")
        end = kwargs.get("end")

        # parse start/end if given
        if start:
            start = getDateTime(start)
        if end:
            end = getDateTime(end)

        self._handle(start, end, browse_layer_id, browse_type)

        logger.info("Successfully finished browse deletion from command line.")
Example #5
    def handle(self, *filenames, **kwargs):
        System.init()

        # parse command arguments
        self.verbosity = int(kwargs.get("verbosity", 1))
        traceback = kwargs.get("traceback", False)
        self.set_up_logging(["ngeo_browse_server"], self.verbosity, traceback)

        logger.info("Starting browse layer configuration from command line.")

        if not filenames:
            raise CommandError("No input files provided.")

        on_error = kwargs["on_error"]

        config = get_ngeo_config()

        no_files_handled_success = 0
        no_files_handled_error = 0
        # handle each file separately
        for filename in filenames:
            try:
                # handle each browse layer xml
                self._handle_file(filename, config)
                no_files_handled_success += 1
            except Exception as e:
                # handle exceptions
                no_files_handled_error += 1
                logger.error("%s: %s" % (type(e).__name__, str(e)))
                if on_error == "continue":
                    # continue the execution with the next file
                    continue
                elif on_error == "stop":
                    # wrap the exception in a CommandError to stop execution
                    raise CommandError(e)
Example #6
 def __init__(self):
     super(RectifiedStitchedMosaicManager, self).__init__()
     
     self.data_source_factory = System.getRegistry().bind(
         "resources.coverages.data.DataSourceFactory"
     )
     
     self.tile_index_factory = System.getRegistry().bind(
         "resources.coverages.data.TileIndexFactory"
     )
Example #7
 def __init__(self):
     self.id_factory = self._get_id_factory()
     
     self.location_factory = System.getRegistry().bind(
         "backends.factories.LocationFactory"
     )
     
     self.data_package_factory = System.getRegistry().bind(
         "resources.coverages.data.DataPackageFactory"
     )
Example #8
 def getDatasetById(self, cid):
     """ Convenience method to get a coverage by its ID.
     """
     return System.getRegistry().getFromFactory(
         "resources.coverages.wrappers.EOCoverageFactory",
         {"obj_id": cid}
     )
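
A minimal usage sketch for the convenience method above, assuming System.init() has already been run and self is an object exposing getDatasetById; the coverage ID and the module-level logger are illustrative. The factory returns None when no coverage matches:

    # hypothetical caller code
    dataset = self.getDatasetById("browse_coverage_1")
    if dataset is not None:
        logger.info("Found coverage '%s'." % dataset.getCoverageId())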
Example #9
 def isRequestConfigEnabled(self, config_key, default=False):
     value = System.getConfig().getConfigValue("testing", config_key)
     if value is None:
         return default
     elif value.lower() in ("yes", "y", "true", "on"):
         return True
     elif value.lower() in ("no", "n", "false", "off"):
         return False
     else:
         return default
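
The helper normalizes the configuration string case-insensitively; anything outside the recognized yes/no spellings falls back to the default. A hedged sketch of the expected behavior (the key names and the [testing] section values are assumed):

    # with [testing] containing: enable_feature = Yes
    self.isRequestConfigEnabled("enable_feature")               # True
    # with [testing] containing: enable_feature = OFF
    self.isRequestConfigEnabled("enable_feature")               # False
    # missing or unrecognized values yield the default
    self.isRequestConfigEnabled("missing_key", default=True)    # True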
Example #10
 def findDatasetsByFilters(self, *filters):
     """ Convenience method to get a list of coverages by given filter
     expressions.
     """
     filter_exprs = [
         System.getRegistry().getFromFactory(
             factory_id = "resources.coverages.filters.CoverageExpressionFactory",
             params = filter_expr
         )
         for filter_expr in filters
     ]
     
     return System.getRegistry().bind(
         "resources.coverages.wrappers.EOCoverageFactory"
     ).find(
         impl_ids = [
             "resources.coverages.wrappers.RectifiedDatasetWrapper",
             "resources.coverages.wrappers.ReferenceableDatasetWrapper"
         ],
         filter_exprs = filter_exprs
     )
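
Each positional argument is a parameter dict handed to the CoverageExpressionFactory. A hedged usage sketch reusing the "referenced_by" expression that appears in Example #29 (the location object is assumed to come from a data source):

    datasets = self.findDatasetsByFilters(
        {"op_name": "referenced_by", "operands": (location,)}
    )
    ids = [dataset.getCoverageId() for dataset in datasets]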
Example #11
 def getManager(self, mgrtype=None, intf_id=None):
     if mgrtype is None:
         mgrtype = self.getType()
     if intf_id is None:
         intf_id = self.getInterfaceID()
     
     return System.getRegistry().findAndBind(
         intf_id=intf_id,
         params={
             "resources.coverages.interfaces.res_type": mgrtype
         }
     )
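
A short usage sketch, assuming the resource type identifiers used elsewhere in this document ("eo.rect_dataset", "eo.dataset_series"):

    # resolve the manager matching this instance's own type ...
    mgr = self.getManager()
    # ... or explicitly request the rectified dataset manager
    rect_mgr = self.getManager(mgrtype="eo.rect_dataset")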
Example #12
 def _get_coverages(self, params):
     coverages = params.get("coverages", [])
     
     coverage_factory = System.getRegistry().bind(
         "resources.coverages.wrappers.EOCoverageFactory"
     )
     for cid in params.get("coverage_ids", []):
         coverage = coverage_factory.get(obj_id=cid)
         if not coverage:
             raise NoSuchCoverageException(cid)
         coverages.append(coverage)
     
     return coverages
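
A hedged sketch of the expected params shape; the IDs are illustrative. Already-resolved coverage wrappers and coverage IDs may be mixed, and a NoSuchCoverageException is raised for unknown IDs:

    coverages = self._get_coverages({
        "coverages": [],                     # already-resolved wrappers
        "coverage_ids": ["cov_a", "cov_b"],  # looked up via the factory
    })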
Example #13
    def handle(self, *args, **kwargs):
        System.init()

        # parse command arguments
        self.verbosity = int(kwargs.get("verbosity", 1))
        traceback = kwargs.get("traceback", False)
        self.set_up_logging(["ngeo_browse_server"], self.verbosity, traceback)

        logger.info("Starting browse layer purging from command line.")

        browse_layer_id = kwargs.get("browse_layer_id")
        browse_type = kwargs.get("browse_type")
        if not browse_layer_id and not browse_type:
            logger.error("No browse layer or browse type was specified.")
            raise CommandError("No browse layer or browse type was specified.")
        elif browse_layer_id and browse_type:
            logger.error("Both browse layer and browse type were specified.")
            raise CommandError(
                "Both browse layer and browse type were specified.")

        self._handle(browse_layer_id, browse_type)
        logger.info(
            "Successfully finished browse layer purging from command line.")
Example #14
    def handle(self, *args, **kwargs):
        System.init()
        
        # parse command arguments
        self.verbosity = int(kwargs.get("verbosity", 1))
        traceback = kwargs.get("traceback", False)
        self.set_up_logging(["ngeo_browse_server"], self.verbosity, traceback)

        logger.info("Starting browse import from command line.")

        package_paths = args
        if not package_paths:
            logger.error("No packages given.")
            raise CommandError("No packages given.")
        
        ignore_cache = kwargs["ignore_cache"]
        
        config = get_ngeo_config()

        for package_path in package_paths:
            result = import_package(package_path, ignore_cache, config)

        logger.info("Successfully finished browse import from command line.")
Example #15
    def handle(self, *args, **kwargs):
        System.init()

        # parse command arguments
        self.verbosity = int(kwargs.get("verbosity", 1))
        traceback = kwargs.get("traceback", False)
        self.set_up_logging(["ngeo_browse_server"], self.verbosity, traceback)

        logger.info("Starting browse layer purging from command line.")

        browse_layer_id = kwargs.get("browse_layer_id")
        browse_type = kwargs.get("browse_type")
        if not browse_layer_id and not browse_type:
            logger.error("No browse layer or browse type was specified.")
            raise CommandError("No browse layer or browse type was specified.")
        elif browse_layer_id and browse_type:
            logger.error("Both browse layer and browse type were specified.")
            raise CommandError(
                "Both browse layer and browse type were specified."
            )

        self._handle(browse_layer_id, browse_type)
        logger.info("Successfully finished browse layer purging from command line.")
Example #16
 def reserve(self, coverage_id, request_id=None, until=None):
     """
     Tries to reserve a ``coverage_id`` until a given datetime. If ``until``
     is omitted, the configuration value 
     ``resources.coverages.coverage_id.reservation_time`` is used.
     
     If the ID is already reserved and the ``request_id`` is not equal to
     the reserved ``request_id``, a :class:`~.CoverageIdReservedError` is
     raised. If the ID is already taken by an existing coverage a 
     :class:`~.CoverageIdInUseError` is raised.
     These exceptions are sub-classes of :exc:`~.CoverageIdError`.
     """
     
     obj, _ = ReservedCoverageIdRecord.objects.get_or_create(
         coverage_id=coverage_id,
         defaults={
             "until": timezone.now()
         }
     )
     
     if not until:
         values = System.getConfig().getConfigValue(
             "resources.coverages.coverage_id", "reservation_time"
         ).split(":")
         
         # pad with leading zeros so the value always has the form
         # days:hours:minutes:seconds
         for _ in xrange(4 - len(values)):
             values.insert(0, 0)
         
         dt = timedelta(days=int(values[0]), hours=int(values[1]),
                        minutes=int(values[2]), seconds=int(values[3]))
         until = timezone.now() + dt
     
     if timezone.now() < obj.until:
         if not (obj.request_id == request_id and obj.request_id is not None):
             raise CoverageIdReservedError(
                 "Coverage ID '%s' is reserved until %s" % (coverage_id, obj.until)
             )
     elif CoverageRecord.objects.filter(coverage_id=coverage_id).count() > 0:
         raise CoverageIdInUseError("Coverage ID '%s' is already in use."
             % coverage_id
         )
     
     obj.request_id = request_id
     obj.until = until
     obj.save()
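
A hedged sketch of the reservation workflow, assuming the method lives on the CoverageIdManager instantiated in Example #29 and that the release() counterpart exists as used there:

    mgr = CoverageIdManager()
    try:
        mgr.reserve("my_coverage", request_id="request_1")
    except CoverageIdReservedError:
        pass  # another request currently holds the ID
    except CoverageIdInUseError:
        pass  # a coverage with this ID is already registered
    else:
        try:
            pass  # create the coverage under the reserved ID
        finally:
            mgr.release("my_coverage")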
Example #17
 def _get_containers(self, params):
     containers = params.get("container_ids", [])
     wrappers = []
     
     for obj_id in containers:
         wrapper = System.getRegistry().getFromFactory(
             "resources.coverages.wrappers.DatasetSeriesFactory",
             {"obj_id": obj_id}
         )
         
         if not wrapper:
             raise InternalError(
                 "Dataset Series or ID %s not found." % obj_id
             ) 
         
         wrappers.append(wrapper)
     
     return wrappers 
Example #18
def create_temporary_rectified_vrt(path_or_ds, srid=None,
    resample=gdal.GRA_NearestNeighbour, memory_limit=0.0,
    max_error=APPROX_ERR_TOL, method=METHOD_GCP, order=0):

    try:
        from eoxserver.core.system import System
        vrt_tmp_dir = System.getConfig().getConfigValue(
            "processing.gdal.reftools", "vrt_tmp_dir"
        )
    except Exception:
        vrt_tmp_dir = None
    
    _, vrt_path = mkstemp(dir=vrt_tmp_dir, suffix=".vrt")

    create_rectified_vrt(path_or_ds, vrt_path, srid, resample,
                         memory_limit, max_error, method, order)
    
    return vrt_path
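
The function hands ownership of the temporary VRT to the caller; mkstemp does not schedule deletion. A usage sketch with explicit cleanup (the input path is illustrative):

    import os

    vrt_path = create_temporary_rectified_vrt("input_with_gcps.tif", srid=4326)
    try:
        pass  # open vrt_path with GDAL and read the rectified view
    finally:
        os.remove(vrt_path)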
Example #19
def getAllowedActions():

    global ALLOWED_ACTIONS

    if (ALLOWED_ACTIONS is not None): return ALLOWED_ACTIONS

    ACTIONS_UPPER = map(lambda s: s.upper(), ACTIONS)
    ACTIONS_U2N = dict(zip(ACTIONS_UPPER, ACTIONS))

    # unpack the allowed actions
    conf = System.getConfig()
    tmp = conf.getConfigValue("services.ows.wcst11", "allowed_actions")
    tmp = map(lambda s: s.strip().upper(), tmp.split(","))
    tmp = filter(lambda s: s in ACTIONS_UPPER, tmp)
    tmp = map(lambda s: ACTIONS_U2N[s], tmp)

    ALLOWED_ACTIONS = set(tmp)

    # always allow Add action
    #ALLOWED_ACTIONS.add( "Add" )

    return ALLOWED_ACTIONS
Example #20
def getAllowedActions() :

    global ALLOWED_ACTIONS

    if ( ALLOWED_ACTIONS is not None ) : return ALLOWED_ACTIONS

    ACTIONS_UPPER = map( lambda s : s.upper() , ACTIONS )
    ACTIONS_U2N = dict( zip( ACTIONS_UPPER , ACTIONS ) ) 

    # unpack the allowed actions 
    conf = System.getConfig()
    tmp  = conf.getConfigValue("services.ows.wcst11","allowed_actions")
    tmp  = map( lambda s : s.strip().upper() , tmp.split(",") )
    tmp  = filter( lambda s : s in ACTIONS_UPPER , tmp ) 
    tmp  = map( lambda s : ACTIONS_U2N[s] , tmp ) 

    ALLOWED_ACTIONS = set( tmp ) 

    # always allow Add action  
    #ALLOWED_ACTIONS.add( "Add" ) 

    return ALLOWED_ACTIONS 
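
Both variants above normalize the comma-separated allowed_actions configuration value case-insensitively against the known ACTIONS catalogue. A standalone sketch of that normalization, with an assumed catalogue and raw value:

    ACTIONS = ["Add", "Delete"]         # assumed action catalogue
    raw = " add , DELETE , bogus "      # assumed raw config value

    upper_to_name = dict((a.upper(), a) for a in ACTIONS)
    allowed = set(
        upper_to_name[s.strip().upper()]
        for s in raw.split(",")
        if s.strip().upper() in upper_to_name
    )
    # allowed == set(["Add", "Delete"]); "bogus" is silently dropped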
Example #21
def contextCreate(requestId=None, reponseHandler=None, maxAttempts=3):

    conf = System.getConfig()

    log_file = conf.getConfigValue("core.system", "logging_filename")
    log_level = conf.getConfigValue("core.system", "logging_level")
    log_format = conf.getConfigValue("core.system", "logging_format")

    path_temp0 = conf.getConfigValue("services.ows.wcst11", "path_wcst_temp")
    path_perm0 = conf.getConfigValue("services.ows.wcst11", "path_wcst_perm")
    muti_act = ("True" == conf.getConfigValue("services.ows.wcst11",
                                              "allow_multiple_actions"))

    context = {}
    context['responseHandler'] = checkResponseHandler(reponseHandler)
    context['isAsync'] = reponseHandler is not None
    context['mutipleActionsAllowed'] = muti_act
    context['loggingFilename'] = log_file
    context['loggingLevel'] = log_level
    context['loggingFormat'] = log_format

    logging.debug("WCSt11: loggingFilename %s " % str(log_file))
    logging.debug("WCSt11: loggingformat %s " % str(log_format))
    logging.debug("WCSt11: loggingLevel %s " % str(log_level))

    for i in xrange(maxAttempts):

        # internal transaction ID (used as request ID if not provided by the client)
        tid = getNewRequestID()

        path_temp = os.path.join(path_temp0, tid)
        path_perm = os.path.join(path_perm0, tid)

        # create the request directories; retry with a new ID on failure
        try:
            os.mkdir(path_temp)
        except Exception as e:
            logging.warning(
                "Failed to create the temporary storage directory! %s" %
                str(path_temp))
            continue  # try another process ID

        try:
            os.mkdir(path_perm)
        except Exception as e:
            os.rmdir(path_temp)
            logging.warning(
                "Failed to create the permanent storage directory! %s" %
                str(path_perm))
            continue  # try another process ID

        # store the values
        context['requestId'] = tid if (requestId is None) else requestId
        context['tid'] = tid
        context['pathTemp'] = path_temp
        context['pathPerm'] = path_perm

        # store the current working directory
        context['pathOrig'] = os.getcwd()

        # change to the temporary storage dir
        os.chdir(path_temp)

        #---------------------------------------------

        logging.debug("WCSt11: Request ID:        %s %s" %
                      (tid, "" if
                       (requestId is None) else "(%s)" % (requestId)))
        logging.debug("WCSt11: Permanent Storage:  %s" % path_perm)
        logging.debug("WCSt11: Temporary Storage:  %s" % path_temp)

        break

    else:
        msg = "WCSt11: Failed to create an unique WCS transaction's context!"
        logging.error(msg)
        raise OSError, msg

    return context
Example #22
def update_browse_layer(browse_layer, config=None):
    config = config or get_ngeo_config()

    try:
        logger.info("Fetching browse layer '%s' for update." % browse_layer.id)
        browse_layer_model = models.BrowseLayer.objects.get(id=browse_layer.id)
    except models.BrowseLayer.DoesNotExist:
        raise Exception(
            "Could not update browse layer '%s' as it does not exist." %
            browse_layer.id)

    immutable_values = ("id", "browse_type", "contains_vertical_curtains",
                        "r_band", "g_band", "b_band",
                        "radiometric_interval_min", "radiometric_interval_max",
                        "grid", "lowest_map_level", "highest_map_level",
                        "harvesting_source")
    for key in immutable_values:
        if getattr(browse_layer_model, key) != getattr(browse_layer, key):
            raise Exception("Cannot change immutable property '%s'." % key)

    mutable_values = [
        "title", "description", "browse_access_policy",
        "timedimension_default", "tile_query_limit", "strategy"
    ]

    refresh_mapcache_xml = False
    refresh_metadata = False
    for key in mutable_values:
        setattr(browse_layer_model, key, getattr(browse_layer, key))
        if key in ("timedimension_default", "tile_query_limit"):
            refresh_mapcache_xml = True
        if key in ("title", "description"):
            refresh_metadata = True

    # relatedDatasets are ignored (see NGEO-1508)
    # for related_dataset_id in browse_layer.related_dataset_ids:
    #     models.RelatedDataset.objects.get_or_create(
    #         dataset_id=related_dataset_id, browse_layer=browse_layer_model
    #     )

    # # remove all related datasets that are not referenced any more
    # models.RelatedDataset.objects.filter(
    #     browse_layer=browse_layer_model
    # ).exclude(
    #     dataset_id__in=browse_layer.related_dataset_ids
    # ).delete()

    browse_layer_model.full_clean()
    browse_layer_model.save()

    # update EOxServer layer metadata
    if refresh_metadata:
        dss = System.getRegistry().getFromFactory(
            "resources.coverages.wrappers.DatasetSeriesFactory",
            {"obj_id": browse_layer.id})
        dss._DatasetSeriesWrapper__model.layer_metadata.all().delete()
        if browse_layer.title:
            md_title = LayerMetadataRecord.objects.get_or_create(
                key="ows_title", value=str(browse_layer.title))[0]
            dss._DatasetSeriesWrapper__model.layer_metadata.add(md_title)
        if browse_layer.description:
            md_abstract = LayerMetadataRecord.objects.get_or_create(
                key="ows_abstract", value=str(browse_layer.description))[0]
            dss._DatasetSeriesWrapper__model.layer_metadata.add(md_abstract)

    if refresh_mapcache_xml:
        try:
            remove_mapcache_layer_xml(browse_layer, config)
        except LayerException:
            logger.info("Nothing to be removed. Layer disabled?")
        add_mapcache_layer_xml(browse_layer, config)

    # re-configure SxCat harvesting for collection
    harvesting_via_sxcat = False
    try:
        harvesting_via_sxcat = config.getboolean("control",
                                                 "harvesting_via_sxcat")
    except Exception:
        pass
    if (harvesting_via_sxcat and browse_layer.harvesting_source
            and browse_layer.harvesting_source
            == browse_layer_model.harvesting_source):
        add_collection(browse_layer)

    logger.info("Finished updating browse layer '%s'." % browse_layer.id)
Example #23
def ingest_browse_report(parsed_browse_report, do_preprocessing=True, config=None):
    """ Ingests a browse report. reraise_exceptions if errors shall be handled
    externally
    """

    # initialize the EOxServer system/registry/configuration
    System.init()

    try:
        # get the according browse layer
        browse_type = parsed_browse_report.browse_type
        browse_layer = models.BrowseLayer.objects.get(browse_type=browse_type)
    except models.BrowseLayer.DoesNotExist:
        logger.warn("Browse layer with browse type '%s' does not " "exist." % parsed_browse_report.browse_type)
        raise IngestionException(
            "Browse layer with browse type '%s' does not " "exist." % parsed_browse_report.browse_type
        )

    # generate a browse report model
    browse_report = create_browse_report(parsed_browse_report, browse_layer)

    # initialize the preprocessor with configuration values
    crs = None
    if browse_layer.grid == "urn:ogc:def:wkss:OGC:1.0:GoogleMapsCompatible":
        crs = "EPSG:3857"
    elif browse_layer.grid == "urn:ogc:def:wkss:OGC:1.0:GoogleCRS84Quad":
        crs = "EPSG:4326"

    logger.debug("Using CRS '%s' ('%s')." % (crs, browse_layer.grid))

    # create the required preprocessor/format selection
    format_selection = get_format_selection("GTiff", **get_format_config(config))
    if do_preprocessing:
        # add config parameters and custom params
        params = get_optimization_config(config)

        # add radiometric interval
        rad_min = browse_layer.radiometric_interval_min
        if rad_min is not None:
            params["radiometric_interval_min"] = rad_min
        else:
            rad_min = "min"
        rad_max = browse_layer.radiometric_interval_max
        if rad_max is not None:
            params["radiometric_interval_max"] = rad_max
        else:
            rad_max = "max"

        # add band selection
        if browse_layer.r_band is not None and browse_layer.g_band is not None and browse_layer.b_band is not None:

            bands = [
                (browse_layer.r_band, rad_min, rad_max),
                (browse_layer.g_band, rad_min, rad_max),
                (browse_layer.b_band, rad_min, rad_max),
            ]

            if params["bandmode"] == RGBA:
                # RGBA
                bands.append((0, 0, 0))

            params["bands"] = bands

        preprocessor = NGEOPreProcessor(format_selection, crs=crs, **params)
    else:
        preprocessor = None  # TODO: CopyPreprocessor

    report_result = IngestBrowseReportResult()

    succeeded = []
    failed = []

    timestamp = datetime.utcnow().strftime("%Y%m%d%H%M%S%f")
    browse_dirname = _valid_path(
        "%s_%s_%s_%s"
        % (
            browse_type,
            browse_report.responsible_org_name,
            browse_report.date_time.strftime("%Y%m%d%H%M%S%f"),
            timestamp,
        )
    )
    success_dir = join(get_success_dir(config), browse_dirname)
    failure_dir = join(get_failure_dir(config), browse_dirname)

    if exists(success_dir):
        logger.warn("Success directory '%s' already exists.")
    else:
        makedirs(success_dir)
    if exists(failure_dir):
        logger.warn("Failure directory '%s' already exists.")
    else:
        makedirs(failure_dir)

    # iterate over all browses in the browse report
    for parsed_browse in parsed_browse_report:
        # transaction management per browse
        with transaction.commit_manually():
            with transaction.commit_manually(using="mapcache"):
                try:
                    seed_areas = []
                    # try ingest a single browse and log success
                    result = ingest_browse(
                        parsed_browse,
                        browse_report,
                        browse_layer,
                        preprocessor,
                        crs,
                        success_dir,
                        failure_dir,
                        seed_areas,
                        config=config,
                    )

                    report_result.add(result)
                    succeeded.append(parsed_browse)

                    # commit here to allow seeding
                    transaction.commit()
                    transaction.commit(using="mapcache")

                    logger.info("Committed changes to database.")

                    for minx, miny, maxx, maxy, start_time, end_time in seed_areas:
                        try:

                            # seed MapCache synchronously
                            # TODO: maybe replace this with an async solution
                            seed_mapcache(
                                tileset=browse_layer.id,
                                grid=browse_layer.grid,
                                minx=minx,
                                miny=miny,
                                maxx=maxx,
                                maxy=maxy,
                                minzoom=browse_layer.lowest_map_level,
                                maxzoom=browse_layer.highest_map_level,
                                start_time=start_time,
                                end_time=end_time,
                                delete=False,
                                **get_mapcache_seed_config(config)
                            )
                            logger.info("Successfully finished seeding.")

                        except Exception as e:
                            logger.warn("Seeding failed: %s" % str(e))

                    # log ingestions for report generation
                    # date/browseType/browseLayerId/start/end
                    report_logger.info(
                        "/\\/\\".join(
                            (
                                datetime.utcnow().isoformat("T") + "Z",
                                parsed_browse_report.browse_type,
                                browse_layer.id,
                                (
                                    parsed_browse.start_time.replace(tzinfo=None) - parsed_browse.start_time.utcoffset()
                                ).isoformat("T")
                                + "Z",
                                (
                                    parsed_browse.end_time.replace(tzinfo=None) - parsed_browse.end_time.utcoffset()
                                ).isoformat("T")
                                + "Z",
                            )
                        )
                    )

                except Exception as e:
                    # report error
                    logger.error("Failure during ingestion of browse '%s'." % parsed_browse.browse_identifier)
                    logger.error("Exception was '%s': %s" % (type(e).__name__, str(e)))
                    logger.debug(traceback.format_exc() + "\n")

                    # undo latest changes, append the failure and continue
                    report_result.add(
                        IngestBrowseFailureResult(
                            parsed_browse.browse_identifier, getattr(e, "code", None) or type(e).__name__, str(e)
                        )
                    )
                    failed.append(parsed_browse)

                    transaction.rollback()
                    transaction.rollback(using="mapcache")
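
A hedged sketch of calling the ingestion entry point, assuming a parsed report produced elsewhere (the report parser is not shown in this document):

    report_result = ingest_browse_report(parsed_browse_report,
                                         do_preprocessing=True)
    # report_result is an IngestBrowseReportResult with one entry per
    # browse, including IngestBrowseFailureResult entries for failures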
Example #24
def delete_browse_layer(browse_layer, purge=False, config=None):
    config = config or get_ngeo_config()

    # only remove MapCache configuration in order to allow a roll-back
    # without data loss
    if models.BrowseLayer.objects.filter(id=browse_layer.id).exists():
        logger.info("Starting disabling of browse layer '%s'." %
                    browse_layer.id)
    else:
        raise Exception(
            "Could not disable browse layer '%s' as it does not exist." %
            browse_layer.id)

    # remove browse layer from MapCache XML
    remove_mapcache_layer_xml(browse_layer, config)

    # disable SxCat harvesting for collection
    harvesting_via_sxcat = False
    try:
        harvesting_via_sxcat = config.getboolean("control",
                                                 "harvesting_via_sxcat")
    except Exception:
        pass
    if harvesting_via_sxcat and browse_layer.harvesting_source:
        disable_collection(browse_layer)

    logger.info("Finished disabling of browse layer '%s'." % browse_layer.id)

    if purge:
        logger.info("Starting purging of browse layer '%s'." % browse_layer.id)
        # remove browse layer model. This should also delete all related browses
        # and browse reports
        models.BrowseLayer.objects.get(id=browse_layer.id).delete()

        # delete EOxServer layer metadata
        dss = System.getRegistry().getFromFactory(
            "resources.coverages.wrappers.DatasetSeriesFactory",
            {"obj_id": browse_layer.id})
        dss._DatasetSeriesWrapper__model.layer_metadata.all().delete()
        # delete EOxServer dataset series
        dss_mgr = System.getRegistry().findAndBind(
            intf_id="resources.coverages.interfaces.Manager",
            params={
                "resources.coverages.interfaces.res_type": "eo.dataset_series"
            })
        dss_mgr.delete(browse_layer.id)

        # remove source from mapcache sqlite
        mapcache_models.Source.objects.get(name=browse_layer.id).delete()

        # delete browse layer cache
        try:
            logger.info("Deleting tileset for browse layer '%s'." %
                        browse_layer.id)
            os.remove(get_tileset_path(browse_layer.browse_type))
        except OSError:
            # when no browse was ingested, the sqlite file does not exist, so
            # just issue a warning
            logger.warning("Could not remove tileset '%s'." %
                           get_tileset_path(browse_layer.browse_type))

        # delete all optimized files by deleting the whole directory of the layer
        optimized_dir = get_project_relative_path(
            join(config.get(INGEST_SECTION, "optimized_files_dir"),
                 browse_layer.id))
        try:
            logger.info("Deleting optimized images for browse layer '%s'." %
                        browse_layer.id)
            shutil.rmtree(optimized_dir)
        except OSError:
            logger.error(
                "Could not remove directory for optimized files: '%s'." %
                optimized_dir)

        if harvesting_via_sxcat and browse_layer.harvesting_source:
            remove_collection(browse_layer)

        logger.info("Finished purging of browse layer '%s'." % browse_layer.id)
Example #25
def delete_browse_layer(browse_layer, purge=False, config=None):
    config = config or get_ngeo_config()

    # only remove MapCache configuration in order to allow a roll-back
    # without data loss
    if models.BrowseLayer.objects.filter(id=browse_layer.id).exists():
        logger.info("Starting disabling of browse layer '%s'." % browse_layer.id)
    else:
        raise Exception(
            "Could not disable browse layer '%s' as it does not exist."
            % browse_layer.id
        )

    # remove browse layer from MapCache XML
    remove_mapcache_layer_xml(browse_layer, config)

    logger.info("Finished disabling of browse layer '%s'." % browse_layer.id)

    if purge:
        logger.info("Starting purging of browse layer '%s'." % browse_layer.id)
        # remove browse layer model. This should also delete all related browses
        # and browse reports
        models.BrowseLayer.objects.get(id=browse_layer.id).delete()

        # delete EOxServer layer metadata
        dss = System.getRegistry().getFromFactory(
            "resources.coverages.wrappers.DatasetSeriesFactory",
            {"obj_id": browse_layer.id}
        )
        dss._DatasetSeriesWrapper__model.layer_metadata.all().delete()
        # delete EOxServer dataset series
        dss_mgr = System.getRegistry().findAndBind(
            intf_id="resources.coverages.interfaces.Manager",
            params={
                "resources.coverages.interfaces.res_type": "eo.dataset_series"
            }
        )
        dss_mgr.delete(browse_layer.id)

        # remove source from mapcache sqlite
        mapcache_models.Source.objects.get(name=browse_layer.id).delete()

        # delete browse layer cache
        try:
            logger.info(
                "Deleting tileset for browse layer '%s'." % browse_layer.id
            )
            os.remove(get_tileset_path(browse_layer.browse_type))
        except OSError:
            # when no browse was ingested, the sqlite file does not exist, so
            # just issue a warning
            logger.warning(
                "Could not remove tileset '%s'."
                % get_tileset_path(browse_layer.browse_type)
            )

        # delete all optimized files by deleting the whole directory of the layer
        optimized_dir = get_project_relative_path(join(
            config.get(INGEST_SECTION, "optimized_files_dir"), browse_layer.id
        ))
        try:
            logger.info(
                "Deleting optimized images for browse layer '%s'."
                % browse_layer.id
            )
            shutil.rmtree(optimized_dir)
        except OSError:
            logger.error(
                "Could not remove directory for optimized files: '%s'."
                % optimized_dir
            )

        logger.info("Finished purging of browse layer '%s'." % browse_layer.id)
Example #26
            # get strategy and merge threshold
            threshold = ingest_config["merge_threshold"]
            if browse_layer.strategy != "inherit":
                strategy = browse_layer.strategy
            else:
                strategy = ingest_config["strategy"]

            if strategy == "merge" and timedelta < threshold:

                if previous_time > current_time:
                    # TODO: raise exception?
                    pass

                rect_ds = System.getRegistry().getFromFactory(
                    "resources.coverages.wrappers.EOCoverageFactory", {"obj_id": existing_browse_model.coverage_id}
                )
                merge_footprint = rect_ds.getFootprint()
                merge_with = rect_ds.getData().getLocation().getPath()

                replaced_time_interval = (existing_browse_model.start_time, existing_browse_model.end_time)

                _, _ = remove_browse(existing_browse_model, browse_layer, coverage_id, seed_areas, config=config)
                replaced = False
                logger.debug("Existing browse found, merging it.")
            else:
                # perform replacement

                replaced_time_interval = (existing_browse_model.start_time, existing_browse_model.end_time)

                replaced_extent, replaced_filename = remove_browse(
Example #27
def contextCreate( requestId = None , reponseHandler = None , maxAttempts = 3 ) : 

    conf = System.getConfig()

    log_file   = conf.getConfigValue("core.system","logging_filename") 
    log_level  = conf.getConfigValue("core.system","logging_level" ) 
    log_format = conf.getConfigValue("core.system","logging_format" ) 

    path_temp0 = conf.getConfigValue("services.ows.wcst11","path_wcst_temp") 
    path_perm0 = conf.getConfigValue("services.ows.wcst11","path_wcst_perm") 
    muti_act   = ( "True" == conf.getConfigValue("services.ows.wcst11","allow_multiple_actions") ) 

    context = {} 
    context['responseHandler'] = checkResponseHandler( reponseHandler ) 
    context['isAsync']         = reponseHandler is not None 
    context['mutipleActionsAllowed'] = muti_act
    context['loggingFilename'] = log_file 
    context['loggingLevel']    = log_level
    context['loggingFormat']   = log_format 

    logging.debug("WCSt11: loggingFilename %s "  % str(log_file) )
    logging.debug("WCSt11: loggingformat %s "  % str(log_format) )
    logging.debug("WCSt11: loggingLevel %s "  % str(log_level) )

    for i in xrange( maxAttempts ) :
    
        # internal transaction ID (used as request ID if not provided by the client) 
        tid = getNewRequestID() 

        path_temp = os.path.join( path_temp0 , tid ) 
        path_perm = os.path.join( path_perm0 , tid ) 

        # create the request directories; retry with a new ID on failure
        try : 
            os.mkdir( path_temp )
        except Exception as e : 
            logging.warning( "Failed to create the temporary storage directory! %s" % str(path_temp) ) 
            continue # try another process ID

        try : 
            os.mkdir( path_perm )
        except Exception as e : 
            os.rmdir( path_temp ) 
            logging.warning( "Failed to create the permanent storage directory! %s" % str(path_perm) ) 
            continue # try another process ID

        # store the values 
        context['requestId'] = tid if ( requestId is None ) else requestId
        context['tid']       = tid 
        context['pathTemp']  = path_temp
        context['pathPerm']  = path_perm

        # store the current working directory
        context['pathOrig']  = os.getcwd() 

        # change to the temporary storage dir 
        os.chdir( path_temp ) 

        #---------------------------------------------

        logging.debug("WCSt11: Request ID:        %s %s" % ( tid , "" if ( requestId is None ) else "(%s)" %(requestId) ) )
        logging.debug("WCSt11: Permanent Storage:  %s" % path_perm )
        logging.debug("WCSt11: Temporary Storage:  %s" % path_temp )

        break 

    else : 
        msg = "WCSt11: Failed to create an unique WCS transaction's context!" 
        logging.error( msg ) 
        raise OSError , msg 

    return context 
Example #28
            # enqueue task for execution
            enqueueTask(PROCESS_CLASS, tid, param)

            print "ENQUEUE: %s" % tid, param

            break

        except QueueFull:  # retry if queue full

            print " --- QueueFull #%i - sleep: %g s" % (cnt + 1,
                                                        getWaitTime(cnt))

            time.sleep(getWaitTime(cnt))

            cnt += 1

            continue


if __name__ == "__main__":

    from eoxserver.core.system import System
    # initialize the system
    System.init()

    for i in xrange(N):

        testEnqueue("%6.6u" % i)

        #time.sleep(0.1)
Example #29
 def _create_contained(self, container, data_sources):
     # TODO: make this more efficient by using updateModel()
     
     new_datasets = []
     for data_source in data_sources:
         locations = data_source.detect()
         
         logger.info("Detected locations: %s"%[location.getPath() for location in locations])
         
         for location in locations:
             md_location = self._guess_metadata_location(location)
         
             data_package = self._create_data_package(location, md_location)
             
             coverage_factory = System.getRegistry().bind(
                 "resources.coverages.wrappers.EOCoverageFactory"
             )
             
             filter_exprs = [System.getRegistry().getFromFactory(
                 "resources.coverages.filters.CoverageExpressionFactory", {
                     "op_name": "referenced_by",
                     "operands": (location,)
                 }
             )]
             
             existing_coverages = coverage_factory.find(
                 impl_ids=["resources.coverages.wrappers.RectifiedDatasetWrapper",
                           "resources.coverages.wrappers.ReferenceableDatasetWrapper"],
                 filter_exprs=filter_exprs
             )
             
             if len(existing_coverages) == 1:
                 coverage = existing_coverages[0]
                 logger.info("Add %s (%s) to %s."%(
                         coverage.getCoverageId(), coverage.getType(),
                         container.getType()
                     )
                 )
                 container.addCoverage(coverage)
                 new_datasets.append(coverage)
                 
             else:
                 eo_metadata = data_package.readEOMetadata()
                 
                 coverage_id_mgr = CoverageIdManager()
                 
                 coverage_id = coverage_id_mgr.reserve(
                     eo_metadata.getEOID()
                 )
                 
                 try:
                     range_type_name = self._get_contained_range_type_name(
                         container, location
                     )
                     
                     if container.getType() == "eo.rect_stitched_mosaic":
                         default_srid = container.getSRID()
                     else:
                         default_srid = None
                     
                     logger.info("Creating new coverage with ID %s." % coverage_id)
                     # TODO: implement creation of ReferenceableDatasets,
                     # RectifiedStitchedMosaics for DatasetSeriesManager
                     new_dataset = self.rect_dataset_mgr.create(
                         coverage_id,
                         location=location,
                         md_location=md_location,
                         range_type_name=range_type_name,
                         data_source=data_source,
                         container=container,
                         default_srid=default_srid
                     )
                     
                     logger.info("Done creating new coverage with ID %s." % coverage_id)
                     
                     new_datasets.append(new_dataset)
                     
                 finally:
                     coverage_id_mgr.release(coverage_id)
                 
     
     return new_datasets
Example #30
 def _get_id_factory(self):
     return System.getRegistry().bind(
         "resources.coverages.wrappers.EOCoverageFactory"
     )
Example #31
 def __init__(self):
     super(BaseManagerContainerMixIn, self).__init__()
     
     self.rect_dataset_mgr = System.getRegistry().bind(
         "resources.coverages.managers.RectifiedDatasetManager"
     )
Example #32
            # get strategy and merge threshold
            threshold = ingest_config["merge_threshold"]
            if browse_layer.strategy != "inherit":
                strategy = browse_layer.strategy
            else:
                strategy = ingest_config["strategy"]

            if strategy == "merge" and timedelta < threshold:

                if previous_time > current_time:
                    # TODO: raise exception?
                    pass

                rect_ds = System.getRegistry().getFromFactory(
                    "resources.coverages.wrappers.EOCoverageFactory",
                    {"obj_id": existing_browse_model.coverage_id}
                )
                merge_footprint = rect_ds.getFootprint()
                merge_with = rect_ds.getData().getLocation().getPath()

                replaced_time_interval = (existing_browse_model.start_time,
                                          existing_browse_model.end_time)

                _, _ = remove_browse(
                    existing_browse_model, browse_layer, coverage_id,
                    seed_areas, config=config
                )
                replaced = False
                logger.debug("Existing browse found, merging it.")

            elif strategy == "skip" and current_time <= previous_time:
Example #33
            raise NGEOException(
                "Browse Identifier '%s' not valid: '%s'." %
                (browse.browse_identifier, str(e.messages[0])),
                "ValidationError")

        browse_identifier_model = models.BrowseIdentifier(
            value=browse.browse_identifier,
            browse=browse_model,
            browse_layer=browse_layer_model)
        browse_identifier_model.full_clean()
        browse_identifier_model.save()

    # initialize the Coverage Manager for Rectified Datasets to register the
    # datasets in the database
    rect_mgr = System.getRegistry().findAndBind(
        intf_id="resources.coverages.interfaces.Manager",
        params={"resources.coverages.interfaces.res_type": "eo.rect_dataset"})

    # create EO metadata necessary for registration
    eo_metadata = EOMetadata(coverage_id, browse.start_time, browse.end_time,
                             footprint)

    # get dataset series ID from browse layer, if available
    container_ids = []
    if browse_layer_model:
        container_ids.append(browse_layer_model.id)

    range_type_name = "RGB" if num_bands == 3 else "RGBA"

    # register the optimized dataset
    logger.info("Creating Rectified Dataset.")
Example #34
    # django settings module 
    os.environ["DJANGO_SETTINGS_MODULE"] = DJANGO_SETTINGS 
    info("'%s' ... is set as the Django settings module " % DJANGO_SETTINGS )
    info("'%s' ... is set as the Django database " % DJANGO_DB )

    #usingDB = DJANGO_DB # ???

    # once the search path is set -> load the required modules
    from eoxserver.core.system import System
    from eoxserver.resources.processes.tracker import TaskStatus, QueueEmpty, \
            dequeueTask, startTask, reenqueueTask, stopTaskSuccessIfNotFinished, \
            reenqueueZombieTasks, deleteRetiredTasks, dbLocker 

    # initialize the system 
    System.init()
    #-------------------------------------------------------------------

    info( "ATPD Asynchronous Task Processing Daemon has just been started!")  
    info( "ATPD: id=%s (%i)" % ( SERVER_ID_STR , SERVER_ID ) ) 
    info( "ATPD: hostname=%s" % socket.getfqdn() ) 
    info( "ATPD: pid=%i " % os.getpid() )

    #-------------------------------------------------------------------
    # start the worker pool 
    
    GWP = WorkerPool( NTHREAD ) 

    # install the GWP terminating signal handlers
    signal.signal( signal.SIGINT,  signal_handler_sigint )
    signal.signal( signal.SIGTERM, signal_handler_sigterm )
Example #35
def ingest_browse_report(parsed_browse_report, do_preprocessing=True, config=None):
    """ Ingests a browse report. reraise_exceptions if errors shall be handled
    externally
    """

    # initialize the EOxServer system/registry/configuration
    System.init()

    try:
        # get the according browse layer
        browse_type = parsed_browse_report.browse_type
        browse_layer = models.BrowseLayer.objects.get(browse_type=browse_type)
    except models.BrowseLayer.DoesNotExist:
        logger.warn("Browse layer with browse type '%s' does not "
                    "exist." % parsed_browse_report.browse_type)
        raise IngestionException("Browse layer with browse type '%s' does not "
                                 "exist." % parsed_browse_report.browse_type)

    # generate a browse report model
    browse_report = create_browse_report(parsed_browse_report, browse_layer)

    # initialize the preprocessor with configuration values
    crs = None
    if browse_layer.grid == "urn:ogc:def:wkss:OGC:1.0:GoogleMapsCompatible":
        crs = "EPSG:3857"
    elif browse_layer.grid == "urn:ogc:def:wkss:OGC:1.0:GoogleCRS84Quad":
        crs = "EPSG:4326"

    logger.debug("Using CRS '%s' ('%s')." % (crs, browse_layer.grid))

    # create the required preprocessor/format selection
    format_selection = get_format_selection("GTiff",
                                            **get_format_config(config))
    if do_preprocessing:
        # add config parameters and custom params
        params = get_optimization_config(config)

        # add radiometric interval
        rad_min = browse_layer.radiometric_interval_min
        if rad_min is not None:
            params["radiometric_interval_min"] = rad_min
        else:
            rad_min = "min"
        rad_max = browse_layer.radiometric_interval_max
        if rad_max is not None:
            params["radiometric_interval_max"] = rad_max
        else:
            rad_max = "max"

        # add band selection
        if (browse_layer.r_band is not None and
            browse_layer.g_band is not None and
            browse_layer.b_band is not None):

            bands = [(browse_layer.r_band, rad_min, rad_max),
                     (browse_layer.g_band, rad_min, rad_max),
                     (browse_layer.b_band, rad_min, rad_max)]

            if params["bandmode"] == RGBA:
                # RGBA
                bands.append((0, 0, 0))

            params["bands"] = bands

        preprocessor = NGEOPreProcessor(format_selection, crs=crs, **params)
    else:
        preprocessor = None # TODO: CopyPreprocessor

    report_result = IngestBrowseReportResult()

    succeeded = []
    failed = []

    timestamp = datetime.utcnow().strftime("%Y%m%d%H%M%S%f")
    browse_dirname = _valid_path("%s_%s_%s_%s" % (
        browse_type, browse_report.responsible_org_name,
        browse_report.date_time.strftime("%Y%m%d%H%M%S%f"),
        timestamp
    ))
    success_dir = join(get_success_dir(config), browse_dirname)
    failure_dir = join(get_failure_dir(config), browse_dirname)

    if exists(success_dir):
        logger.warn("Success directory '%s' already exists.")
    else:
        makedirs(success_dir)
    if exists(failure_dir):
        logger.warn("Failure directory '%s' already exists.")
    else:
        makedirs(failure_dir)

    # iterate over all browses in the browse report
    for parsed_browse in parsed_browse_report:
        # transaction management per browse
        with transaction.commit_manually():
            with transaction.commit_manually(using="mapcache"):
                try:
                    seed_areas = []
                    # try ingest a single browse and log success
                    result = ingest_browse(parsed_browse, browse_report,
                                           browse_layer, preprocessor, crs,
                                           success_dir, failure_dir,
                                           seed_areas, config=config)

                    report_result.add(result)
                    succeeded.append(parsed_browse)

                    # commit here to allow seeding
                    transaction.commit()
                    transaction.commit(using="mapcache")

                    logger.info("Committed changes to database.")

                    for minx, miny, maxx, maxy, start_time, end_time in seed_areas:
                        try:

                            # seed MapCache synchronously
                            # TODO: maybe replace this with an async solution
                            seed_mapcache(tileset=browse_layer.id,
                                          grid=browse_layer.grid,
                                          minx=minx, miny=miny,
                                          maxx=maxx, maxy=maxy,
                                          minzoom=browse_layer.lowest_map_level,
                                          maxzoom=browse_layer.highest_map_level,
                                          start_time=start_time,
                                          end_time=end_time,
                                          delete=False,
                                          **get_mapcache_seed_config(config))
                            logger.info("Successfully finished seeding.")

                        except Exception as e:
                            logger.warn("Seeding failed: %s" % str(e))

                    # log ingestions for report generation
                    # date/browseType/browseLayerId/start/end
                    report_logger.info("/\\/\\".join((
                        datetime.utcnow().isoformat("T") + "Z",
                        parsed_browse_report.browse_type,
                        browse_layer.id,
                        (parsed_browse.start_time.replace(tzinfo=None)-parsed_browse.start_time.utcoffset()).isoformat("T") + "Z",
                        (parsed_browse.end_time.replace(tzinfo=None)-parsed_browse.end_time.utcoffset()).isoformat("T") + "Z"
                    )))

                except Exception as e:
                    # report error
                    logger.error("Failure during ingestion of browse '%s'." %
                                 parsed_browse.browse_identifier)
                    logger.error("Exception was '%s': %s" % (type(e).__name__, str(e)))
                    logger.debug(traceback.format_exc() + "\n")

                    # undo latest changes, append the failure and continue
                    report_result.add(IngestBrowseFailureResult(
                        parsed_browse.browse_identifier,
                        getattr(e, "code", None) or type(e).__name__, str(e))
                    )
                    failed.append(parsed_browse)

                    transaction.rollback()
                    transaction.rollback(using="mapcache")
Example #36
def add_browse_layer(browse_layer, config=None):
    """ Add a browse layer to the ngEO Browse Server system. This includes the
        database models, cache configuration and filesystem paths.
    """
    config = config or get_ngeo_config()

    try:
        logger.info("Adding new browse layer '%s'." % browse_layer.id)
        # create a new browse layer model
        browse_layer_model = models.BrowseLayer(**browse_layer.get_kwargs())

        browse_layer_model.full_clean()
        browse_layer_model.save()

        # relatedDatasets are ignored (see NGEO-1508)
        # for related_dataset_id in browse_layer.related_dataset_ids:
        #     models.RelatedDataset.objects.get_or_create(
        #         dataset_id=related_dataset_id, browse_layer=browse_layer_model
        #     )

    except Exception:
        raise

    # create EOxServer dataset series
    dss_mgr = System.getRegistry().findAndBind(
        intf_id="resources.coverages.interfaces.Manager",
        params={
            "resources.coverages.interfaces.res_type": "eo.dataset_series"
        })
    dss_mgr.create(browse_layer.id,
                   eo_metadata=EOMetadata(
                       browse_layer.id, datetime.now(), datetime.now(),
                       MultiPolygon(Polygon.from_bbox((0, 0, 1, 1)))))
    # create EOxServer layer metadata
    if browse_layer.title or browse_layer.description:
        dss = System.getRegistry().getFromFactory(
            "resources.coverages.wrappers.DatasetSeriesFactory",
            {"obj_id": browse_layer.id})
        if browse_layer.title:
            md_title = LayerMetadataRecord.objects.get_or_create(
                key="ows_title", value=str(browse_layer.title))[0]
            dss._DatasetSeriesWrapper__model.layer_metadata.add(md_title)
        if browse_layer.description:
            md_abstract = LayerMetadataRecord.objects.get_or_create(
                key="ows_abstract", value=str(browse_layer.description))[0]
            dss._DatasetSeriesWrapper__model.layer_metadata.add(md_abstract)

    # add source to mapcache sqlite
    mapcache_models.Source.objects.create(name=browse_layer.id)

    # add an XML section to the mapcache config xml
    add_mapcache_layer_xml(browse_layer, config)

    # create a base directory for optimized files
    directory = get_project_relative_path(
        join(config.get(INGEST_SECTION, "optimized_files_dir"),
             browse_layer.id))
    if not os.path.exists(directory):
        os.makedirs(directory)

    # create SxCat collection if harvesting via SxCat is enabled and source
    # is given
    harvesting_via_sxcat = False
    try:
        harvesting_via_sxcat = config.getboolean("control",
                                                 "harvesting_via_sxcat")
    except Exception:
        # option not present in the configuration; keep the default
        pass
    if harvesting_via_sxcat and browse_layer.harvesting_source:
        add_collection(browse_layer)
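
The try/except around config.getboolean above silently falls back to False
when the "harvesting_via_sxcat" option is absent. A small hypothetical helper
(the name get_boolean_option is an assumption, not part of the original code)
that makes this fallback explicit and reusable:

def get_boolean_option(config, section, option, default=False):
    # return the parsed boolean, or the default when the section or option
    # is missing or the value cannot be parsed as a boolean
    try:
        return config.getboolean(section, option)
    except Exception:
        return default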
Example #37
0
            raise NGEOException("Browse Identifier '%s' not valid: '%s'." %
                                (browse.browse_identifier, str(e.messages[0])),
                                "ValidationError")

        browse_identifier_model = models.BrowseIdentifier(
            value=browse.browse_identifier, browse=browse_model,
            browse_layer=browse_layer_model
        )
        browse_identifier_model.full_clean()
        browse_identifier_model.save()

    # initialize the Coverage Manager for Rectified Datasets to register the
    # datasets in the database
    rect_mgr = System.getRegistry().findAndBind(
        intf_id="resources.coverages.interfaces.Manager",
        params={
            "resources.coverages.interfaces.res_type": "eo.rect_dataset"
        }
    )

    # create EO metadata necessary for registration
    eo_metadata = EOMetadata(
        coverage_id, browse.start_time, browse.end_time, footprint
    )

    # get dataset series ID from browse layer, if available
    container_ids = []
    if browse_layer_model:
        container_ids.append(browse_layer_model.id)

    range_type_name = "RGB" if num_bands == 3 else "RGBA"
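
The conditional above treats any band count other than three as "RGBA". A
slightly stricter standalone sketch (hypothetical, not part of the original
function) that rejects unsupported band counts outright:

def range_type_for_bands(num_bands):
    # map the number of bands to an EOxServer range type name
    if num_bands == 3:
        return "RGB"
    if num_bands == 4:
        return "RGBA"
    raise ValueError("Unsupported number of bands: %d" % num_bands)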
Example #38
0
def remove_browse(browse_model,
                  browse_layer_model,
                  coverage_id,
                  seed_areas,
                  unseed=True,
                  config=None):
    """ Delete all models and caches associated with browse model. Image itself
    is not deleted.
    Returns the extent and filename of the replaced image.
    """

    # get previous extent to "un-seed" MapCache in that area
    rect_ds = System.getRegistry().getFromFactory(
        "resources.coverages.wrappers.EOCoverageFactory",
        {"obj_id": browse_model.coverage_id})
    replaced_extent = rect_ds.getExtent()
    replaced_filename = rect_ds.getData().getLocation().getPath()

    # delete the EOxServer rectified dataset entry
    rect_mgr = System.getRegistry().findAndBind(
        intf_id="resources.coverages.interfaces.Manager",
        params={"resources.coverages.interfaces.res_type": "eo.rect_dataset"})
    rect_mgr.delete(obj_id=browse_model.coverage_id)
    browse_model.delete()

    # search for time entries with an overlapping time span
    time_model = None
    if browse_model.start_time == browse_model.end_time:
        times_qs = mapcache_models.Time.objects.filter(
            source=browse_layer_model.id,
            start_time__lte=browse_model.end_time,
            end_time__gte=browse_model.start_time)
    else:
        times_qs = mapcache_models.Time.objects.filter(
            Q(source=browse_layer_model.id),
            Q(start_time__lt=browse_model.end_time,
              end_time__gt=browse_model.start_time)
            | Q(start_time=F("end_time"),
                start_time__lte=browse_model.end_time,
                end_time__gte=browse_model.start_time))

    if len(times_qs) == 1:
        time_model = times_qs[0]
    elif len(times_qs) == 0:
        # issue a warning if no corresponding Time object exists
        logger.warning("No MapCache Time object found for time: %s, %s" %
                       (browse_model.start_time, browse_model.end_time))
    elif len(times_qs) > 1:
        # issue a warning if too many corresponding Time objects exist and
        # try to delete the redundant ones; this situation should never
        # happen, but just in case...
        logger.warning("Multiple MapCache Time objects found for time: %s, "
                       "%s. Trying to delete redundant ones." %
                       (browse_model.start_time, browse_model.end_time))
        first = True
        with transaction.commit_manually(using="mapcache"):
            for time_model_tmp in times_qs:
                if first:
                    first = False
                    time_model = time_model_tmp
                elif (time_model_tmp.start_time <= time_model.start_time
                      and time_model_tmp.end_time >= time_model.end_time):
                    time_model.delete()
                    time_model = time_model_tmp
                else:
                    time_model_tmp.delete()
            transaction.commit(using="mapcache")

    if unseed and time_model is not None:
        # un-seed the area of the found time entry
        try:
            seed_mapcache(tileset=browse_layer_model.id,
                          grid=browse_layer_model.grid,
                          minx=time_model.minx,
                          miny=time_model.miny,
                          maxx=time_model.maxx,
                          maxy=time_model.maxy,
                          minzoom=browse_layer_model.lowest_map_level,
                          maxzoom=browse_layer_model.highest_map_level,
                          start_time=time_model.start_time,
                          end_time=time_model.end_time,
                          delete=True,
                          **get_mapcache_seed_config(config))

        except Exception as e:
            logger.warning("Un-seeding failed: %s" % str(e))

    return replaced_extent, replaced_filename
Example #40
0
    def handle(self, *args, **kwargs):
        System.init()
        
        # parse command arguments
        self.verbosity = int(kwargs.get("verbosity", 1))
        traceback = kwargs.get("traceback", False)
        self.set_up_logging(["ngeo_browse_server"], self.verbosity, traceback)

        logger.info("Starting browse export from command line.")

        browse_layer_id = kwargs.get("browse_layer_id")
        browse_type = kwargs.get("browse_type")
        if not browse_layer_id and not browse_type:
            logger.error("No browse layer or browse type was specified.")
            raise CommandError("No browse layer or browse type was specified.")
        elif browse_layer_id and browse_type:
            logger.error("Both browse layer and browse type were specified.")
            raise CommandError("Both browse layer and browse type were specified.")
        
        start = kwargs.get("start")
        end = kwargs.get("end")
        compression = kwargs.get("compression")
        export_cache = kwargs["export_cache"]
        output_path = kwargs.get("output_path")
        
        # parse start/end if given
        if start: 
            start = getDateTime(start)
        if end:
            end = getDateTime(end)
        
        if not output_path:
            output_path = package.generate_filename(compression)
        
        with package.create(output_path, compression) as p:
            # query the browse layer
            if browse_layer_id:
                try:
                    browse_layer_model = BrowseLayer.objects.get(id=browse_layer_id)
                except BrowseLayer.DoesNotExist:
                    logger.error("Browse layer '%s' does not exist" 
                                 % browse_layer_id)
                    raise CommandError("Browse layer '%s' does not exist" 
                                       % browse_layer_id)
            else:
                try:
                    browse_layer_model = BrowseLayer.objects.get(browse_type=browse_type)
                except BrowseLayer.DoesNotExist:
                    logger.error("Browse layer with browse type '%s' does "
                                 "not exist" % browse_type)
                    raise CommandError("Browse layer with browse type '%s' does "
                                       "not exist" % browse_type)
            
            browse_layer = browselayer_data.BrowseLayer.from_model(browse_layer_model)
            p.set_browse_layer(
                serialize_browse_layers((browse_layer,), pretty_print=True)
            )
            
            # query browse reports; optionally filter for start/end time
            browse_reports_qs = BrowseReport.objects.all()
            
            # apply start/end filter
            if start and not end:
                browse_reports_qs = browse_reports_qs.filter(browses__start_time__gte=start)
            elif end and not start:
                browse_reports_qs = browse_reports_qs.filter(browses__end_time__lte=end)
            elif start and end:
                browse_reports_qs = browse_reports_qs.filter(browses__start_time__gte=start, 
                                                             browses__end_time__lte=end)
            
            # use count annotation to exclude all browse reports with no browses
            browse_reports_qs = browse_reports_qs.annotate(
                browse_count=Count('browses')
            ).filter(browse_layer=browse_layer_model, browse_count__gt=0)
            
            # iterate over all browse reports
            for browse_report_model in browse_reports_qs:
                browses_qs = Browse.objects.filter(
                    browse_report=browse_report_model
                )
                if start:
                    browses_qs = browses_qs.filter(start_time__gte=start)
                if end:
                    browses_qs = browses_qs.filter(end_time__lte=end)
                
                browse_report = browsereport_data.BrowseReport.from_model(
                    browse_report_model, browses_qs
                )
                
                # iterate over all browses in the query
                for browse, browse_model in izip(browse_report, browses_qs):
                    coverage_wrapper = System.getRegistry().getFromFactory(
                        "resources.coverages.wrappers.EOCoverageFactory",
                        {"obj_id": browse_model.coverage_id}
                    )
                    
                    # derive the filenames from the coverage ID
                    base_filename = browse_model.coverage_id
                    data_filename = base_filename + ".tif"
                    md_filename = base_filename + ".xml"
                    footprint_filename = base_filename + ".wkb"
                    
                    browse._file_name = data_filename
                    
                    # add optimized browse image to package
                    data_package = coverage_wrapper.getData()
                    data_package.prepareAccess()
                    browse_file_path = data_package.getGDALDatasetIdentifier()
                    with open(browse_file_path, "rb") as f:
                        p.add_browse(f, data_filename)
                        wkb = coverage_wrapper.getFootprint().wkb
                        p.add_footprint(footprint_filename, wkb)
                    
                    if export_cache:
                        time_model = mapcache_models.Time.objects.get(
                            start_time__lte=browse_model.start_time,
                            end_time__gte=browse_model.end_time,
                            source__name=browse_layer_model.id
                        )
                        
                        # get "dim" parameter
                        dim = (isotime(time_model.start_time) + "/" +
                               isotime(time_model.end_time))
                        
                        # exit if a merged browse is found
                        if dim != (isotime(browse_model.start_time) + "/" +
                                   isotime(browse_model.end_time)):
                            logger.error("Browse layer '%s' contains "
                                         "merged browses and exporting "
                                         "of cache is requested. Try "
                                         "without exporting the cache."
                                         % browse_layer_model.id)
                            raise CommandError("Browse layer '%s' contains "
                                               "merged browses and exporting "
                                               "of cache is requested. Try "
                                               "without exporting the cache."
                                               % browse_layer_model.id)
                        
                        # get path to sqlite tileset and open it
                        ts = tileset.open(
                            get_tileset_path(browse_layer.browse_type)
                        )
                        
                        for tile_desc in ts.get_tiles(
                            browse_layer.id, 
                            URN_TO_GRID[browse_layer.grid], dim=dim,
                            minzoom=browse_layer.highest_map_level,
                            maxzoom=browse_layer.lowest_map_level
                        ):
                            p.add_cache_file(*tile_desc)

                # save browse report xml and add it to the package
                p.add_browse_report(
                    serialize_browse_report(browse_report, pretty_print=True),
                    name="%s_%s_%s_%s.xml" % (
                        browse_report.browse_type,
                        browse_report.responsible_org_name,
                        browse_report.date_time.strftime("%Y%m%d%H%M%S%f"),
                        uuid.uuid4().hex
                    )
                )

        logger.info("Successfully finished browse export from command line.")
Example #42
0
    def getStitchedMosaicById(self, cid):
        return System.getRegistry().getFromFactory(
            "resources.coverages.wrappers.EOCoverageFactory",
            {"obj_id": cid}
        )
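
getFromFactory appears to return a falsy value when no coverage with the
given ID exists, so callers of this method receive None on a miss. A sketch
of a stricter variant (hypothetical; the method name, error type, and message
are assumptions, not part of the original class):

    def getStitchedMosaicByIdOrFail(self, cid):
        wrapper = System.getRegistry().getFromFactory(
            "resources.coverages.wrappers.EOCoverageFactory",
            {"obj_id": cid}
        )
        if not wrapper:
            # fail loudly instead of propagating None
            raise InternalError(
                "Rectified Stitched Mosaic with ID %s not found." % cid)
        return wrapper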
Example #43
0
def update_browse_layer(browse_layer, config=None):
    config = config or get_ngeo_config()

    try:
        logger.info("Fetching browse layer '%s' for update." % browse_layer.id)
        browse_layer_model = models.BrowseLayer.objects.get(id=browse_layer.id)
    except models.BrowseLayer.DoesNotExist:
        raise Exception(
            "Could not update browse layer '%s' as it does not exist."
            % browse_layer.id
        )

    immutable_values = (
        "id", "browse_type", "contains_vertical_curtains", "r_band", "g_band",
        "b_band", "radiometric_interval_min", "radiometric_interval_max",
        "grid", "lowest_map_level", "highest_map_level"
    )
    for key in immutable_values:
        if getattr(browse_layer_model, key) != getattr(browse_layer, key):
            raise Exception("Cannot change immutable property '%s'." % key)

    mutable_values = [
        "title", "description", "browse_access_policy",
        "timedimension_default", "tile_query_limit", "strategy"
    ]

    refresh_mapcache_xml = False
    refresh_metadata = False
    for key in mutable_values:
        # only trigger a refresh for values that actually changed
        if getattr(browse_layer_model, key) != getattr(browse_layer, key):
            if key in ("timedimension_default", "tile_query_limit"):
                refresh_mapcache_xml = True
            if key in ("title", "description"):
                refresh_metadata = True
        setattr(browse_layer_model, key, getattr(browse_layer, key))

    # relatedDatasets are ignored (see NGEO-1508)
    # for related_dataset_id in browse_layer.related_dataset_ids:
    #     models.RelatedDataset.objects.get_or_create(
    #         dataset_id=related_dataset_id, browse_layer=browse_layer_model
    #     )

    # # remove all related datasets that are not referenced any more
    # models.RelatedDataset.objects.filter(
    #     browse_layer=browse_layer_model
    # ).exclude(
    #     dataset_id__in=browse_layer.related_dataset_ids
    # ).delete()

    browse_layer_model.full_clean()
    browse_layer_model.save()

    # update EOxServer layer metadata
    if refresh_metadata:
        dss = System.getRegistry().getFromFactory(
            "resources.coverages.wrappers.DatasetSeriesFactory",
            {"obj_id": browse_layer.id}
        )
        dss._DatasetSeriesWrapper__model.layer_metadata.all().delete()
        if browse_layer.title:
            md_title = LayerMetadataRecord.objects.get_or_create(
                key="ows_title", value=str(browse_layer.title))[0]
            dss._DatasetSeriesWrapper__model.layer_metadata.add(md_title)
        if browse_layer.description:
            md_abstract = LayerMetadataRecord.objects.get_or_create(
                key="ows_abstract", value=str(browse_layer.description))[0]
            dss._DatasetSeriesWrapper__model.layer_metadata.add(md_abstract)

    if refresh_mapcache_xml:
        try:
            remove_mapcache_layer_xml(browse_layer, config)
        except LayerException:
            logger.info("Nothing to be removed. Layer disabled?")
        add_mapcache_layer_xml(browse_layer, config)
    logger.info("Finished updating browse layer '%s'." % browse_layer.id)
Example #44
0
    def getDatasetSeriesById(self, eoid):
        return System.getRegistry().getFromFactory(
            "resources.coverages.wrappers.DatasetSeriesFactory",
            {"obj_id": eoid}
        )
Example #45
0
def _wcst11AlterCapabilities(respSrc, OWS):

    conf = System.getConfig()
    regs = System.getRegistry()

    # get the service URL
    base_url = conf.getConfigValue("services.owscommon", "http_service_url")

    # =====================================================================
    # check the content

    try:
        # check if the WCST11 request handler is registered and enabled
        if not regs.getImplementationStatus(
                "services.ows.wcs11Transaction.WCS11TransactionHandler"):
            raise Exception("Operation handler is not enabled!")

        # check if payload contains XML content
        if respSrc.content_type not in ("text/xml", "application/xml"):
            raise Exception, "Not XML!"

        # parse the original payload
        root = etree.fromstring(respSrc.content)

        # check the root element
        if splitQN(root.tag)[1] != "Capabilities":
            raise Exception, "Not Capabilities!"

        # check version
        if not root.get('version', '').startswith(OWS.version):
            raise Exception, "Not Capabilities version %s!" % OWS.version

        # look for OperationsMetadata
        eOM = root.find(OWS.E_OperationsMetadata)

        # look for ServiceIdentification
        eSI = root.find(OWS.E_ServiceIdentification)

        if (eOM is None) and (eSI is None):
            raise Exception, "No element to be altered has been found!"

    except Exception as e:

        # keep track of the failures
        logger.debug(
            "_wcst11AlterCapabilities(): version %s : Content not altered! reason: %s "
            % (OWS.version, str(e)))

        # return the unaffected original response
        return respSrc

    # =====================================================================
    # insert new Profile element into ServiceIdentification

    if eSI is not None:

        # insert a sub-element before the selected anchor elements
        def insertBefore(dst, src, before):

            # get the anchor subelements that actually exist
            elements = filter(lambda e: (e is not None),
                              map(lambda tag: dst.find(tag), before))

            try:
                # locate the first anchor subelement
                dl = list(dst)
                idx = min(map(lambda e: dl.index(e), elements))

                # create the new element
                e = etree.Element(src)

                # insert the element at the desired position
                dst.insert(idx, e)

            except ValueError:
                # no anchor element found; simply append at the end
                e = etree.SubElement(dst, src)

            return e

        before = (OWS11.E_Fees, OWS11.E_AccessConstraints)

        # ows:Profile - WCSt >>Multiple Actions<<
        if ("True" == conf.getConfigValue("services.ows.wcst11",
                                          "allow_multiple_actions")):
            #etree.SubElement( eSI , OWS.E_Profile ).text = "urn:ogc:extension:WCS:1.1:TransactionMultipleActions"
            insertBefore(
                eSI, OWS.E_Profile, before
            ).text = "urn:ogc:extension:WCS:1.1:TransactionMultipleActions"

        # unpack the allowed actions
        allowedActions = conf.getConfigValue("services.ows.wcst11",
                                             "allowed_actions")
        allowedActions = set(
            filter(lambda s: s in ACTIONS_UPPER,
                   map(lambda s: s.strip().upper(),
                       allowedActions.split(","))))

        # advertise the allowed actions
        for action in allowedActions:
            # ows:Profile - WCSt allowed action action
            #etree.SubElement( eSI , OWS.E_Profile ).text = "urn:ogc:extension:WCS:1.1:Transaction%s" % ACTIONS_U2N[action]
            insertBefore(
                eSI, OWS.E_Profile, before
            ).text = "urn:ogc:extension:WCS:1.1:Transaction%s" % ACTIONS_U2N[
                action]

    # =====================================================================
    # insert new Operation element into OperationsMetadata

    if eOM is not None:

        # ows:Operation
        eOp = etree.SubElement(eOM, OWS.E_Operation, {A_name: "transaction"})

        # ows:DCP
        tmp = etree.SubElement(eOp, OWS.E_DCP)
        tmp = etree.SubElement(tmp, OWS.E_HTTP)
        tmp = etree.SubElement(tmp, OWS.E_Post, {
            A_href: base_url,
            A_type: "simple"
        })

        # ows:Constraint
        if int(OWS.version[0]) > 1:
            tmp = etree.SubElement(tmp, OWS.E_Constraint,
                                   {A_name: "PostEncoding"})
            tmp = etree.SubElement(tmp, OWS.E_AllowedValues)
            tmp = etree.SubElement(tmp, OWS.E_Value)
            tmp.text = "XML"

        # ows:Parameter
        tmp = etree.SubElement(eOp, OWS.E_Parameter, {A_name: "service"})
        tmp = etree.SubElement(tmp, OWS.E_AllowedValues)
        tmp = etree.SubElement(tmp, OWS.E_Value)
        tmp.text = "WCS"

        # ows:Parameter
        tmp = etree.SubElement(eOp, OWS.E_Parameter, {A_name: "version"})
        tmp = etree.SubElement(tmp, OWS.E_AllowedValues)
        tmp = etree.SubElement(tmp, OWS.E_Value)
        tmp.text = "1.1"

    # =====================================================================
    # return the altered payload

    return Response(content=etree.tostring(root, encoding="UTF-8"),
                    content_type=respSrc.content_type,
                    status=respSrc.status)
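
The insertBefore helper above places the new ows:Profile ahead of ows:Fees /
ows:AccessConstraints when those anchors exist, and appends it otherwise. A
minimal standalone demonstration of the same pattern (a sketch using the
standard-library ElementTree instead of the module's etree import; tag names
are simplified placeholders):

import xml.etree.ElementTree as ET

def insert_before(dst, tag, before):
    # insert a new child ahead of the first existing anchor element,
    # or append it at the end when no anchor is present
    anchors = [e for e in (dst.find(t) for t in before) if e is not None]
    elem = ET.Element(tag)
    if anchors:
        children = list(dst)
        dst.insert(min(children.index(e) for e in anchors), elem)
    else:
        dst.append(elem)
    return elem

root = ET.fromstring("<SI><Title/><Fees/><AccessConstraints/></SI>")
insert_before(root, "Profile", ("Fees", "AccessConstraints")).text = "urn:demo"
print(ET.tostring(root))  # the Profile element now precedes Fees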