Пример #1
0
    def __init__(self):
        """Set up the FDSNWS server application with its default settings."""
        Application.__init__(self, len(sys.argv), sys.argv)

        # application capabilities: no messaging, read/write DB, record stream
        self.setMessagingEnabled(False)
        self.setDatabaseEnabled(True, True)
        self.setRecordStreamEnabled(True)

        # server binding defaults
        self._serverRoot = os.path.dirname(__file__)
        self._listenAddress = '0.0.0.0'  # all interfaces
        self._port = 8080
        self._connections = 5

        # per-request limits
        self._queryObjects = 100000  # maximum number of objects per query
        self._realtimeGap = None  # minimum data age: 5min
        self._samplesM = None  # maximum number of samples per query

        # authentication and logging
        self._htpasswd = '@CONFIGDIR@/fdsnws.htpasswd'
        self._accessLogFile = ''
        self._accessLog = None

        # which sub-services to expose
        self._allowRestricted = True
        self._serveDataSelect = True
        self._serveEvent = True
        self._serveStation = True

        # event filter defaults
        self._hideAuthor = False
        self._evaluationMode = None
        self._eventTypeWhitelist = None
        self._eventTypeBlacklist = None

        # inventory cache and output file naming
        self._inv = None
        self._fileNamePrefix = 'fdsnws'

        # Leave signal handling to us
        Application.HandleSignals(False, False)
Пример #2
0
	def __init__(self):
		"""Configure the FDSNWS application with its built-in defaults."""
		Application.__init__(self, len(sys.argv), sys.argv)

		# capabilities
		self.setMessagingEnabled(False)
		self.setDatabaseEnabled(True, True)
		self.setRecordStreamEnabled(True)

		# server defaults
		self._serverRoot = os.path.dirname(__file__)
		self._listenAddress = '0.0.0.0' # all interfaces
		self._port = 8080
		self._connections = 5

		# request limits
		self._queryObjects = 100000 # maximum number of objects per query
		self._realtimeGap = None # minimum data age: 5min
		self._samplesM = None # maximum number of samples per query

		# auth file and access log
		self._htpasswd = '@CONFIGDIR@/fdsnws.htpasswd'
		self._accessLogFile = ''
		self._accessLog = None

		# enabled services
		self._allowRestricted = True
		self._serveDataSelect = True
		self._serveEvent = True
		self._serveStation = True

		# event filter defaults
		self._hideAuthor = False
		self._evaluationMode = None
		self._eventTypeWhitelist = None
		self._eventTypeBlacklist = None

		# inventory cache and output file prefix
		self._inv = None
		self._fileNamePrefix = 'fdsnws'

		# Leave signal handling to us
		Application.HandleSignals(False, False)
Пример #3
0
    def __init__(self, argc, argv):
        """Create the application, tee all log output into a temporary file
        destined for S3, and set up messaging defaults.
        """
        # Log all messages to a file for S3
        self._logfile_for_s3 = NamedTemporaryFile()
        self._logger_for_s3 = Logging.FileOutput(self._logfile_for_s3.name)
        for channel in ('notice', 'error', 'warning', 'info', 'debug'):
            self._logger_for_s3.subscribe(Logging.getGlobalChannel(channel))

        Application.__init__(self, argc, argv)

        # default location to write outputs to
        self.output = settings.OUTPUT_DIR

        # event/request metadata defaults
        self.filename = None
        self.mag_type = None
        self.mag_value = None
        self.server = 'IRIS'
        self.fdsn_client = None
        self.networks = 'ALL'
        self.region = 'not specified'
        self.evid = None
        self.resultid = None

        # e-mail notification settings
        self.notificationemail = None
        self.fromemail = None
        self.email_aws_region = None
        self.email_method = 'ses'
        self.email_subject_postfix = ''
        self.email_subject_prefix = ''

        # SMTP transport settings
        self.smtp_server = None
        self.smtp_port = 25
        self.smtp_ssl = False
        self.smtp_tls = False
        self.smtp_user = None
        self.smtp_password = None

        # output / storage options
        self.write_s3 = False
        self.bucket_name = None
        self.agency = 'GA'
        self.make_maps = True
        self.overwrite = False
        self.save_waveforms = None
        self.save_inventory = None
        self.waveforms = None
        self.inventory = None

        # event information, populated later
        self.eqinfo: Optional[model.Event] = None

        # enable messaging support
        self.setMessagingEnabled(True)
        # disable database access
        self.setDatabaseEnabled(False, False)
        # default spread username
        self.setMessagingUsername("gawphase")
        # send all objects to the focal mechanism group
        self.setPrimaryMessagingGroup("FOCMECH")
Пример #4
0
    def __init__(self):
        """Set up the full FDSNWS server application and all option defaults."""
        Application.__init__(self, len(sys.argv), sys.argv)

        # capabilities: messaging, database, record stream, inventory
        self.setMessagingEnabled(True)
        self.setDatabaseEnabled(True, True)
        self.setRecordStreamEnabled(True)
        self.setLoadInventoryEnabled(True)

        # server binding defaults
        self._serverRoot = os.path.dirname(__file__)
        self._listenAddress = '0.0.0.0'  # all interfaces
        self._port = 8080
        self._connections = 5

        # per-request limits
        self._queryObjects = 100000    # maximum number of objects per query
        self._realtimeGap = None      # minimum data age: 5min
        self._samplesM = None      # maximum number of samples per query
        self._recordBulkSize = 102400    # desired record bulk size

        # authentication file and log locations
        self._htpasswd = '@CONFIGDIR@/fdsnws.htpasswd'
        self._accessLogFile = ''
        self._requestLogFile = ''
        self._accessLog = None

        # service switches
        self._allowRestricted = True
        self._useArclinkAccess = False
        self._serveDataSelect = True
        self._serveEvent = True
        self._serveStation = True
        self._serveAvailability = False

        # data availability settings
        self._daEnabled = False
        self._daCacheDuration = 300
        self._daCache = None
        self._openStreams = None
        self._daRepositoryName = 'primary'
        self._daDCCName = 'DCC'

        # event and station filter defaults
        self._hideAuthor = False
        self._evaluationMode = None
        self._eventTypeWhitelist = None
        self._eventTypeBlacklist = None
        self._eventFormats = None
        self._stationFilter = None
        self._dataSelectFilter = None
        self._debugFilter = False

        # output file prefix
        self._fileNamePrefix = 'fdsnws'

        # request tracking database
        self._trackdbEnabled = False
        self._trackdbDefaultUser = '******'

        # authentication extension
        self._authEnabled = False
        self._authGnupgHome = '@ROOTDIR@/var/lib/gpg'
        self._authBlacklist = []

        # user database and access-control helpers
        self._userdb = UserDB()
        self._access = Access()

        # Leave signal handling to us
        Application.HandleSignals(False, False)
Пример #5
0
	def __init__(self):
		"""Initialize the FDSNWS server application and option defaults."""
		Application.__init__(self, len(sys.argv), sys.argv)

		# capabilities
		self.setMessagingEnabled(True)
		self.setDatabaseEnabled(True, True)
		self.setRecordStreamEnabled(True)
		self.setLoadInventoryEnabled(True)

		# server defaults
		self._serverRoot = os.path.dirname(__file__)
		self._listenAddress = '0.0.0.0' # all interfaces
		self._port = 8080
		self._connections = 5

		# request limits
		self._queryObjects = 100000 # maximum number of objects per query
		self._realtimeGap = None # minimum data age: 5min
		self._samplesM = None # maximum number of samples per query
		self._recordBulkSize = 102400 # desired record bulk size

		# auth file and access log
		self._htpasswd = '@CONFIGDIR@/fdsnws.htpasswd'
		self._accessLogFile = ''
		self._accessLog = None

		# enabled services
		self._allowRestricted = True
		self._useArclinkAccess = False
		self._serveDataSelect = True
		self._serveEvent = True
		self._serveStation = True

		# event and station filter defaults
		self._hideAuthor = False
		self._evaluationMode = None
		self._eventTypeWhitelist = None
		self._eventTypeBlacklist = None
		self._eventFormats = None
		self._stationFilter = None
		self._dataSelectFilter = None
		self._debugFilter = False

		# output file prefix
		self._fileNamePrefix = 'fdsnws'

		# request tracking database
		self._trackdbEnabled = False
		self._trackdbDefaultUser = '******'

		# authentication extension
		self._authEnabled = False
		self._authGnupgHome = '@ROOTDIR@/var/lib/gpg'
		self._authBlacklist = []

		# user database and access-control helpers
		self._userdb = UserDB()
		self._access = Access()

		# Leave signal handling to us
		Application.HandleSignals(False, False)
Пример #6
0
 def __init__(self, inv, bufferSize, access=None, user=None):
     """Keep references to inventory, access control, user and buffer size."""
     BaseResource.__init__(self, VERSION)
     self.__inv = inv
     self.__access = access
     self.__user = user
     self.__bufferSize = bufferSize
     # record stream URL resolved once from the running application
     self._rsURL = Application.Instance().recordStreamURL()
Пример #7
0
class FDSNEvent(resource.Resource):
    """FDSNWS event service resource (leaf); serves event queries."""

    isLeaf = True

    #---------------------------------------------------------------------------
    def __init__(self,
                 hideAuthor=False,
                 evaluationMode=None,
                 eventTypeWhitelist=None,
                 eventTypeBlacklist=None):
        # filter options applied when events are serialized
        self._hideAuthor = hideAuthor
        self._evaluationMode = evaluationMode
        self._eventTypeWhitelist = eventTypeWhitelist
        self._eventTypeBlacklist = eventTypeBlacklist

    #---------------------------------------------------------------------------
    def render_GET(self, req):
        """Validate GET parameters and defer processing to a worker thread.

        Returns an error page for invalid/unsupported parameters,
        otherwise server.NOT_DONE_YET while the deferred runs.
        """
        # Parse and validate GET parameters
        ro = _EventRequestOptions(req.args)
        try:
            ro.parse()
        except ValueError as e:  # fixed Python-2-only 'except ValueError, e'
            Logging.warning(str(e))
            return HTTP.renderErrorPage(req, http.BAD_REQUEST, str(e), ro)

        # Catalog filter is not supported, any filter value will result in 204
        if ro.catalogs:
            msg = "no matching events found"
            return HTTP.renderErrorPage(req, http.NO_CONTENT, msg, ro)

        # updateafter not implemented
        if ro.updatedAfter:
            msg = "filtering based on update time not supported"
            return HTTP.renderErrorPage(req, http.SERVICE_UNAVAILABLE, msg, ro)

        # Exporter, 'None' is used for text output
        if ro.format in ro.VText:
            exp = None
        else:
            exp = Exporter.Create(ro.Exporters[ro.format])
            if exp:
                exp.setFormattedOutput(bool(ro.formatted))
            else:
                # fixed typo in user-facing message: "no available"
                msg = "output format '%s' not available, export module '%s' could " \
                      "not be loaded." % (ro.format, ro.Exporters[ro.format])
                return HTTP.renderErrorPage(req, http.SERVICE_UNAVAILABLE, msg,
                                            ro)

        # Create database query
        dbq = DataModel.DatabaseQuery(Application.Instance().database())
        if dbq.hasError():
            msg = "could not connect to database: %s" % dbq.errorMsg()
            return HTTP.renderErrorPage(req, http.SERVICE_UNAVAILABLE, msg, ro)

        # Process request in separate thread
        d = deferToThread(self._processRequest, req, ro, dbq, exp)
        req.notifyFinish().addErrback(utils.onCancel, d)
        d.addBoth(utils.onFinish, req)

        # The request is handled by the deferred object
        return server.NOT_DONE_YET
Пример #8
0
    def __init__(self, argc, argv):
        """Enable messaging and database support, subscribe to event-related
        message groups and reset the internal object buffers."""
        Application.__init__(self, argc, argv)
        self.setMessagingEnabled(True)
        self.setDatabaseEnabled(True, True)
        for group in ("EVENT", "LOCATION", "MAGNITUDE", "FOCMECH"):
            self.addMessagingSubscription(group)
        self.setAutoApplyNotifierEnabled(True)

        # object buffers
        self._state = {}
        self._origin = {}
        self._magnitude = {}
        self._focalmechanism = {}

        # housekeeping counters and flags
        self._cleanupCounter = 0
        self._xdebug = False
        self._cleanup_interval = 3600.0
Пример #9
0
    def __init__(self, argc, argv):
        """Initialize messaging, database access, subscriptions and the
        internal object buffers."""
        Application.__init__(self, argc, argv)
        self.setMessagingEnabled(True)
        self.setDatabaseEnabled(True, True)

        # receive every message group that may carry event objects
        subscriptions = ("EVENT", "LOCATION", "MAGNITUDE", "FOCMECH")
        for sub in subscriptions:
            self.addMessagingSubscription(sub)
        self.setAutoApplyNotifierEnabled(True)

        # object buffers
        self._state, self._origin = {}, {}
        self._magnitude, self._focalmechanism = {}, {}

        # housekeeping
        self._cleanupCounter = 0
        self._xdebug = False
        self._cleanup_interval = 60 * 60.0  # one hour, in seconds
Пример #10
0
    def render_GET(self, req):
        """Validate the GET request and defer processing to a worker thread.

        Returns an error page for invalid or unsupported parameters,
        otherwise server.NOT_DONE_YET while a deferred produces the result.
        """
        # Parse and validate GET parameters
        ro = _EventRequestOptions()
        try:
            ro.parseGET(req.args)
            ro.parse()
        except ValueError as e:
            Logging.warning(str(e))
            return self.renderErrorPage(req, http.BAD_REQUEST, str(e), ro)

        # Catalog filter is not supported
        if ro.catalogs:
            msg = "catalog filter not supported"
            return self.renderErrorPage(req, http.BAD_REQUEST, msg, ro)

        if ro.comments and self._hideComments:
            msg = "including of comments not supported"
            return self.renderErrorPage(req, http.BAD_REQUEST, msg, ro)

        # updateafter not implemented
        if ro.updatedAfter:
            msg = "filtering based on update time not supported"
            return self.renderErrorPage(req, http.BAD_REQUEST, msg, ro)

        # reject formats outside the configured list (None = allow all)
        if self._formatList is not None and ro.format not in self._formatList:
            msg = "output format '%s' not available" % ro.format
            return self.renderErrorPage(req, http.BAD_REQUEST, msg, ro)

        # Exporter, 'None' is used for text output
        if ro.format in ro.VText:
            exp = None
        else:
            exp = Exporter.Create(ro.Exporters[ro.format])
            if exp:
                exp.setFormattedOutput(bool(ro.formatted))
            else:
                msg = "output format '%s' not available, export module '%s' could " \
                      "not be loaded." % (ro.format, ro.Exporters[ro.format])
                return self.renderErrorPage(req, http.BAD_REQUEST, msg, ro)

        # Create database query
        db = DatabaseInterface.Open(Application.Instance().databaseURI())
        if db is None:
            msg = "could not connect to database"
            return self.renderErrorPage(req, http.SERVICE_UNAVAILABLE, msg, ro)

        dbq = DataModel.DatabaseQuery(db)

        # Process request in separate thread
        d = deferToThread(self._processRequest, req, ro, dbq, exp)
        # cancel the worker if the client disconnects before completion
        req.notifyFinish().addErrback(utils.onCancel, d)
        d.addBoth(utils.onFinish, req)

        # The request is handled by the deferred object
        return server.NOT_DONE_YET
Пример #11
0
    def _prepareRequest(self, req, ro):
        """Validate request options and kick off asynchronous processing.

        Returns an error page for unsupported options, otherwise defers the
        actual work to a thread and returns server.NOT_DONE_YET.
        """
        if ro.availability and not self._daEnabled:
            msg = "including of availability information not supported"
            return self.renderErrorPage(req, http.BAD_REQUEST, msg, ro)

        if ro.updatedAfter:
            msg = "filtering based on update time not supported"
            return self.renderErrorPage(req, http.BAD_REQUEST, msg, ro)

        if ro.matchTimeSeries and not self._daEnabled:
            msg = "filtering based on available time series not supported"
            return self.renderErrorPage(req, http.BAD_REQUEST, msg, ro)

        # load data availability if requested
        dac = None
        if ro.availability or ro.matchTimeSeries:
            dac = Application.Instance().getDACache()
            if dac is None or len(dac.extents()) == 0:
                # fixed typo in user-facing message: "availabiltiy"
                msg = "no data availability extent information found"
                return self.renderErrorPage(req, http.NO_CONTENT, msg, ro)

        # Exporter, 'None' is used for text output
        if ro.format in ro.VText:
            if ro.includeRes:
                msg = "response level output not available in text format"
                return self.renderErrorPage(req, http.BAD_REQUEST, msg, ro)
            req.setHeader('Content-Type', 'text/plain')
            d = deferToThread(self._processRequestText, req, ro, dac)
        else:
            exp = Exporter.Create(ro.Exporters[ro.format])
            if exp is None:
                # fixed typo in user-facing message: "no available"
                msg = "output format '%s' not available, export module '%s' " \
                      "could not be loaded." % (
                          ro.format, ro.Exporters[ro.format])
                return self.renderErrorPage(req, http.BAD_REQUEST, msg, ro)

            req.setHeader('Content-Type', 'application/xml')
            exp.setFormattedOutput(bool(ro.formatted))
            d = deferToThread(self._processRequestExp, req, ro, exp, dac)

        # cancel the worker if the client disconnects before completion
        req.notifyFinish().addErrback(utils.onCancel, d)
        d.addBoth(utils.onFinish, req)

        # The request is handled by the deferred object
        return server.NOT_DONE_YET
Пример #12
0
	def initConfiguration(self):
		"""Read service configuration options; returns False on fatal error.

		Every option is optional — a missing value keeps its default.
		Only an invalid 'evaluationMode' string aborts startup.
		"""
		if not Application.initConfiguration(self):
			return False

		cfg = self.configuration()

		# bind address and port
		try: self._listenAddress = cfg.getString('listenAddress')
		except ConfigException: pass
		try: self._port = cfg.getInt('port')
		except ConfigException: pass

		# maximum number of connections
		try: self._connections = cfg.getInt('connections')
		except ConfigException: pass

		# maximum number of objects per query, used in fdsnws-station and
		# fdsnws-event to limit main memory consumption
		try: self._queryObjects = cfg.getInt('queryObjects')
		except ConfigException: pass

		# restrict end time of request to now-realtimeGap seconds, used in
		# fdsnws-dataselect
		try: self._realtimeGap = cfg.getInt('realtimeGap')
		except ConfigException: pass

		# maximum number of samples (in units of million) per query, used in
		# fdsnws-dataselect to limit bandwidth
		try: self._samplesM = cfg.getDouble('samplesM')
		except ConfigException: pass

		# location of htpasswd file
		try:
			self._htpasswd = cfg.getString('htpasswd')
		except ConfigException: pass
		self._htpasswd = Environment.Instance().absolutePath(self._htpasswd)

		# location of access log file
		try:
			self._accessLogFile = Environment.Instance().absolutePath(
			                      cfg.getString('accessLog'))
		except ConfigException: pass

		# access to restricted inventory information
		try: self._allowRestricted = cfg.getBool('allowRestricted')
		except: pass

		# services to enable
		try: self._serveDataSelect = cfg.getBool('serveDataSelect')
		except: pass
		try: self._serveEvent = cfg.getBool('serveEvent')
		except: pass
		try: self._serveStation = cfg.getBool('serveStation')
		except: pass

		# event filter
		try: self._hideAuthor = cfg.getBool('hideAuthor')
		except: pass
		try:
			name = cfg.getString('evaluationMode')
			if name.lower() == DataModel.EEvaluationModeNames.name(DataModel.MANUAL):
				self._evaluationMode = DataModel.MANUAL
			elif name.lower() == DataModel.EEvaluationModeNames.name(DataModel.AUTOMATIC):
				self._evaluationMode = DataModel.AUTOMATIC
			else:
				# replaced Python-2-only 'print >>' statement with an
				# equivalent write that works on Python 2 and 3
				sys.stderr.write("invalid evaluation mode string: %s\n" % name)
				return False
		except: pass
		try:
			strings = cfg.getStrings('eventType.whitelist')
			if len(strings) > 1 or len(strings[0]):
				self._eventTypeWhitelist = [ s.lower() for s in strings ]
		except: pass
		try:
			strings = cfg.getStrings('eventType.blacklist')
			# was 'len(strings) > 0', which wrongly accepted a single empty
			# string; aligned with the whitelist condition above
			if len(strings) > 1 or len(strings[0]):
				self._eventTypeBlacklist = [ s.lower() for s in strings ]
		except: pass

		# prefix to be used as default for output filenames
		try: self._fileNamePrefix = cfg.getString('fileNamePrefix')
		except ConfigException: pass

		return True
Пример #13
0
    def _processRequest(self, req, ro):
        """Resolve the requested streams against the inventory and stream the
        matching waveform records back to the client.

        Returns an error page on unsupported options or exceeded sample
        limits, otherwise server.NOT_DONE_YET (asynchronous production).
        """
        if ro.quality != 'B' and ro.quality != 'M':
            msg = "quality other than 'B' or 'M' not supported"
            return HTTP.renderErrorPage(req, http.SERVICE_UNAVAILABLE, msg, ro)

        if ro.minimumLength:
            msg = "enforcing of minimum record length not supported"
            return HTTP.renderErrorPage(req, http.SERVICE_UNAVAILABLE, msg, ro)

        if ro.longestOnly:
            msg = "limitation to longest segment not supported"
            return HTTP.renderErrorPage(req, http.SERVICE_UNAVAILABLE, msg, ro)

        app = Application.Instance()
        ro._checkTimes(app._realtimeGap)

        maxSamples = None
        if app._samplesM is not None:
            maxSamples = app._samplesM * 1000000
            samples = 0

        # removed a redundant second 'app = Application.Instance()' here
        if app._trackdbEnabled:
            userid = ro.userName or app._trackdbDefaultUser
            reqid = 'ws' + str(int(round(time.time() * 1000) - 1420070400000))
            xff = req.requestHeaders.getRawHeaders("x-forwarded-for")
            if xff:
                userIP = xff[0].split(",")[0].strip()
            else:
                userIP = req.getClientIP()

            tracker = RequestTrackerDB("fdsnws", app.connection(), reqid,
                                       "WAVEFORM", userid,
                                       "REQUEST WAVEFORM " + reqid, "fdsnws",
                                       userIP, req.getClientIP())

        else:
            tracker = None

        # Open record stream
        rs = _MyRecordStream(self._rsURL, tracker, self.__bufferSize)

        # Add request streams
        # iterate over inventory networks
        for s in ro.streams:
            for net in self._networkIter(s):
                for sta in self._stationIter(net, s):
                    for loc in self._locationIter(sta, s):
                        for cha in self._streamIter(loc, s):
                            # clamp the requested window to the channel epoch;
                            # fall back to the request times if the epoch is open
                            try:
                                start_time = max(cha.start(), s.time.start)

                            except Exception:
                                start_time = s.time.start

                            try:
                                end_time = min(cha.end(), s.time.end)

                            except Exception:
                                end_time = s.time.end

                            # skip restricted channels the user may not access
                            if utils.isRestricted(cha) and \
                                (not self.__user or (self.__access and
                                 not self.__access.authorize(self.__user,
                                     net.code(), sta.code(), loc.code(),
                                     cha.code(), start_time, end_time))):
                                continue

                            # enforce maximum sample per request restriction
                            if maxSamples is not None:
                                try:
                                    n = cha.sampleRateNumerator()
                                    d = cha.sampleRateDenominator()
                                except ValueError:
                                    msg = "skipping stream without sampling " \
                                          "rate definition: %s.%s.%s.%s" % (
                                          net.code(), sta.code(), loc.code(),
                                          cha.code())
                                    Logging.warning(msg)
                                    continue

                                # calculate number of samples for requested
                                # time window
                                diffSec = (end_time - start_time).length()
                                samples += int(diffSec * n / d)
                                if samples > maxSamples:
                                    msg = "maximum number of %sM samples " \
                                          "exceeded" % str(app._samplesM)
                                    return HTTP.renderErrorPage(
                                        req, http.REQUEST_ENTITY_TOO_LARGE,
                                        msg, ro)

                            Logging.debug("adding stream: %s.%s.%s.%s %s - %s" \
                                          % (net.code(), sta.code(), loc.code(),
                                             cha.code(), start_time.iso(),
                                             end_time.iso()))
                            rs.addStream(net.code(), sta.code(), loc.code(),
                                         cha.code(), start_time, end_time,
                                         utils.isRestricted(cha),
                                         sta.archiveNetworkCode())

        # Build output filename
        fileName = Application.Instance()._fileNamePrefix.replace(
            "%time", time.strftime('%Y-%m-%dT%H:%M:%S')) + '.mseed'

        # Create producer for async IO
        prod = _WaveformProducer(req, ro, rs, fileName, tracker)
        req.registerProducer(prod, True)
        prod.resumeProducing()

        # The request is handled by the deferred object
        return server.NOT_DONE_YET
Пример #14
0
    def initConfiguration(self):
        """Read all service configuration options; returns False on error.

        Every option is optional — a missing value keeps its default.
        Only an invalid 'evaluationMode' string aborts startup.
        """
        if not Application.initConfiguration(self):
            return False

        # bind address and port
        try:
            self._listenAddress = self.configGetString('listenAddress')
        except ConfigException:
            pass
        try:
            self._port = self.configGetInt('port')
        except ConfigException:
            pass

        # maximum number of connections
        try:
            self._connections = self.configGetInt('connections')
        except ConfigException:
            pass

        # maximum number of objects per query, used in fdsnws-station and
        # fdsnws-event to limit main memory consumption
        try:
            self._queryObjects = self.configGetInt('queryObjects')
        except ConfigException:
            pass

        # restrict end time of request to now-realtimeGap seconds, used in
        # fdsnws-dataselect
        try:
            self._realtimeGap = self.configGetInt('realtimeGap')
        except ConfigException:
            pass

        # maximum number of samples (in units of million) per query, used in
        # fdsnws-dataselect to limit bandwidth
        try:
            self._samplesM = self.configGetDouble('samplesM')
        except ConfigException:
            pass

        # location of htpasswd file
        try:
            self._htpasswd = self.configGetString('htpasswd')
        except ConfigException:
            pass
        self._htpasswd = Environment.Instance().absolutePath(self._htpasswd)

        # location of access log file
        try:
            self._accessLogFile = Environment.Instance().absolutePath(
                self.configGetString('accessLog'))
        except ConfigException:
            pass

        # access to restricted inventory information
        try:
            self._allowRestricted = self.configGetBool('allowRestricted')
        except:
            pass

        # use arclink-access bindings
        try:
            self._useArclinkAccess = self.configGetBool('useArclinkAccess')
        except:
            pass

        # services to enable
        try:
            self._serveDataSelect = self.configGetBool('serveDataSelect')
        except:
            pass
        try:
            self._serveEvent = self.configGetBool('serveEvent')
        except:
            pass
        try:
            self._serveStation = self.configGetBool('serveStation')
        except:
            pass

        # event filter
        try:
            self._hideAuthor = self.configGetBool('hideAuthor')
        except:
            pass
        try:
            name = self.configGetString('evaluationMode')
            if name.lower() == DataModel.EEvaluationModeNames.name(
                    DataModel.MANUAL):
                self._evaluationMode = DataModel.MANUAL
            elif name.lower() == DataModel.EEvaluationModeNames.name(
                    DataModel.AUTOMATIC):
                self._evaluationMode = DataModel.AUTOMATIC
            else:
                # replaced Python-2-only 'print >>' statement with an
                # equivalent write that works on Python 2 and 3
                sys.stderr.write(
                    "invalid evaluation mode string: %s\n" % name)
                return False
        except:
            pass
        try:
            strings = self.configGetStrings('eventType.whitelist')
            if len(strings) > 1 or len(strings[0]):
                self._eventTypeWhitelist = [s.lower() for s in strings]
        except:
            pass
        try:
            strings = self.configGetStrings('eventType.blacklist')
            # was 'len(strings) > 0', which wrongly accepted a single empty
            # string; aligned with the whitelist condition above
            if len(strings) > 1 or len(strings[0]):
                self._eventTypeBlacklist = [s.lower() for s in strings]
        except:
            pass

        # station filter
        try:
            self._stationFilter = Environment.Instance().absolutePath(
                self.configGetString('stationFilter'))
        except ConfigException:
            pass

        # dataSelect filter
        try:
            self._dataSelectFilter = Environment.Instance().absolutePath(
                self.configGetString('dataSelectFilter'))
        except ConfigException:
            pass

        # output filter debug information
        try:
            self._debugFilter = self.configGetBool('debugFilter')
        except ConfigException:
            pass

        # prefix to be used as default for output filenames
        try:
            self._fileNamePrefix = self.configGetString('fileNamePrefix')
        except ConfigException:
            pass

        # save request logs in database?
        try:
            self._trackdbEnabled = self.configGetBool('trackdb.enable')
        except ConfigException:
            pass

        # default user
        try:
            self._trackdbDefaultUser = self.configGetString(
                'trackdb.defaultUser')
        except ConfigException:
            pass

        # enable authentication extension?
        try:
            self._authEnabled = self.configGetBool('auth.enable')
        except ConfigException:
            pass

        # GnuPG home directory
        try:
            self._authGnupgHome = self.configGetString('auth.gnupgHome')
        except ConfigException:
            pass
        self._authGnupgHome = Environment.Instance().absolutePath(
            self._authGnupgHome)

        return True
Пример #15
0
    def _processRequestExp(self, req, ro, dbq, exp, ep):
        objCount = ep.eventCount()
        maxObj = Application.Instance()._queryObjects

        if not HTTP.checkObjects(req, objCount, maxObj):
            return False

        pickIDs = set()
        if ro.picks is None:
            ro.picks = True

        # add related information
        for iEvent in xrange(ep.eventCount()):
            if req._disconnected:
                return False
            e = ep.event(iEvent)
            if self._hideAuthor:
                self._removeAuthor(e)

            # eventDescriptions and comments
            objCount += dbq.loadEventDescriptions(e)
            if ro.comments:
                objCount += self._loadComments(dbq, e)
            if not HTTP.checkObjects(req, objCount, maxObj):
                return False

            # origin references: either all or preferred only
            dbIter = dbq.getObjects(e, DataModel.OriginReference.TypeInfo())
            for obj in dbIter:
                oRef = DataModel.OriginReference.Cast(obj)
                if oRef is None:
                    continue
                if ro.allOrigins:
                    e.add(oRef)
                elif oRef.originID() == e.preferredOriginID():
                    e.add(oRef)
                    dbIter.close()
                # TODO: if focal mechanisms are added make sure derived
                # origin is loaded

            objCount += e.originReferenceCount()

            if not HTTP.checkObjects(req, objCount, maxObj):
                return False

            # TODO: add focal mechanisms

            # origins
            for iORef in xrange(e.originReferenceCount()):
                if req._disconnected:
                    return False
                oID = e.originReference(iORef).originID()
                obj = dbq.getObject(DataModel.Origin.TypeInfo(), oID)
                o = DataModel.Origin.Cast(obj)
                if o is None:
                    continue

                ep.add(o)
                objCount += 1
                if self._hideAuthor:
                    self._removeAuthor(o)

                # comments
                if ro.comments:
                    objCount += self._loadComments(dbq, o)
                if not HTTP.checkObjects(req, objCount, maxObj):
                    return False

                # magnitudes
                dbIter = dbq.getObjects(oID, DataModel.Magnitude.TypeInfo())
                for obj in dbIter:
                    mag = DataModel.Magnitude.Cast(obj)
                    if mag is None:
                        continue
                    if ro.allMags:
                        o.add(mag)
                    elif mag.publicID() == e.preferredMagnitudeID():
                        o.add(mag)
                        dbIter.close()

                    if self._hideAuthor:
                        self._removeAuthor(mag)

                objCount += o.magnitudeCount()
                if ro.comments:
                    for iMag in xrange(o.magnitudeCount()):
                        objCount += self._loadComments(dbq, o.magnitude(iMag))
                if not HTTP.checkObjects(req, objCount, maxObj):
                    return False

                # TODO station magnitudes, amplitudes
                # - added pick id for each pick referenced by amplitude

                # arrivals
                if ro.arrivals:
                    objCount += dbq.loadArrivals(o)
                    if self._removeAuthor:
                        for iArrival in xrange(o.arrivalCount()):
                            self._removeAuthor(o.arrival(iArrival))

                    # collect pick IDs if requested
                    if ro.picks:
                        for iArrival in xrange(o.arrivalCount()):
                            pickIDs.add(o.arrival(iArrival).pickID())

                if not HTTP.checkObjects(req, objCount, maxObj):
                    return False

        # picks
        if pickIDs:
            objCount += len(pickIDs)
            if not HTTP.checkObjects(req, objCount, maxObj):
                return False

            for pickID in pickIDs:
                obj = dbq.getObject(DataModel.Pick.TypeInfo(), pickID)
                pick = DataModel.Pick.Cast(obj)
                if pick is not None:
                    if self._hideAuthor:
                        self._removeAuthor(pick)
                    if ro.comments:
                        objCount += self._loadComments(dbq, pick)
                    ep.add(pick)
                if not HTTP.checkObjects(req, objCount, maxObj):
                    return False

        # write response
        sink = utils.Sink(req)
        if not exp.write(sink, ep):
            return False
        Logging.debug("%s: returned %i events and %i origins (total " \
                       "objects/bytes: %i/%i)" % (ro.service, ep.eventCount(),
                       ep.originCount(), objCount, sink.written))
        utils.accessLog(req, ro, http.OK, sink.written, None)
        return True
Example #16
0
    def _findEvents(self, ep, ro, dbq):
        """Query the database for events matching the request options and
        add them to the given event parameters container.

        Parameters:
            ep:  DataModel.EventParameters instance the matching events
                 are added to
            ro:  parsed event request options (time window, geographic
                 constraints, depth, magnitude, order, limit/offset, ...)
            dbq: database query interface used to iterate the result set

        The SQL statement is assembled incrementally in SELECT / FROM /
        WHERE / ORDER BY / LIMIT order; every active filter appends its
        own " AND ..." clause to the WHERE part.

        NOTE(review): filter values (e.g. ro.contributors, bounding box
        and magnitude limits) are interpolated directly into the SQL
        string. They appear to originate from validated request options,
        but this should be confirmed -- otherwise this is an SQL
        injection risk.
        """
        db = Application.Instance().database()

        # Shorthands mapping schema column names / time values to the
        # convention of the active database backend.
        def _T(name):
            return db.convertColumnName(name)

        def _time(time):
            return db.timeToString(time)

        # The Magnitude table is only joined if a magnitude filter or a
        # magnitude ordering was requested.
        orderByMag = ro.orderBy and ro.orderBy.startswith('magnitude')
        reqMag = ro.mag or orderByMag
        reqDist = ro.geo and ro.geo.bCircle
        colPID = _T('publicID')
        colTime = _T('time_value')
        colMag = _T('magnitude_value')
        if orderByMag:
            colOrderBy = "m.%s" % colMag
        else:
            colOrderBy = "o.%s" % colTime

        # A circular search region is pre-filtered by its bounding box;
        # the exact distance constraint is applied in a subquery below.
        bBox = None
        if ro.geo:
            colLat, colLon = _T('latitude_value'), _T('longitude_value')
            if ro.geo.bBox:
                bBox = ro.geo.bBox
            else:
                bBox = ro.geo.bCircle.calculateBBox()

        # SELECT --------------------------------
        q = "SELECT DISTINCT pe.%s, e.*, %s" % (colPID, colOrderBy)
        if reqDist:  # great circle distance via the spherical law of cosines
            c = ro.geo.bCircle
            q += ", DEGREES(ACOS(" \
                 "COS(RADIANS(o.%s)) * COS(RADIANS(%s)) * " \
                 "COS(RADIANS(o.%s) - RADIANS(%s)) + SIN(RADIANS(o.%s)) * " \
                 "SIN(RADIANS(%s)))) AS distance" % (
                     colLat, c.lat, colLon, c.lon, colLat, c.lat)

        # FROM ----------------------------------
        q += " FROM Event AS e, PublicObject AS pe" \
             ", Origin AS o, PublicObject AS po"
        if reqMag:
            q += ", Magnitude AS m, PublicObject AS pm"

        # WHERE ---------------------------------
        q += " WHERE e._oid = pe._oid"

        # event type white list filter, defined via configuration and/or request
        # parameters
        types = None
        if self._eventTypeWhitelist and ro.eventTypes:
            types = self._eventTypeWhitelist.intersection(ro.eventTypes)
            if not types:
                Logging.debug('all requested event types filtered by '
                              'configured event type white list')
                return
        elif self._eventTypeWhitelist:
            types = self._eventTypeWhitelist
        elif ro.eventTypes:
            types = ro.eventTypes
        if types is not None:
            # -1 encodes an unset event type, matched via IS NULL
            allowNull = -1 in types
            types = [x for x in types if x >= 0]

            etqIn = "e.%s IN ('%s')" % (_T('type'), "', '".join(
                DataModel.EEventTypeNames.name(x) for x in types))
            if allowNull:
                etqNull = "e.%s is NULL" % _T('type')
                if types:
                    q += " AND (%s OR %s)" % (etqNull, etqIn)
                else:
                    q += " AND %s" % etqNull
            else:
                q += " AND %s" % etqIn

        # event type black list filter, defined in configuration
        if self._eventTypeBlacklist:
            # blacklisting -1 excludes events without a type
            allowNull = -1 not in self._eventTypeBlacklist
            types = [x for x in self._eventTypeBlacklist if x >= 0]

            etqNotIn = "e.%s NOT IN ('%s')" % (_T('type'), "', '".join(
                DataModel.EEventTypeNames.name(x) for x in types))
            if allowNull:
                etqNull = "e.%s is NULL" % _T('type')
                if types:
                    q += " AND (%s OR %s)" % (etqNull, etqNotIn)
                else:
                    q += " AND %s" % etqNull
            else:
                q += " AND %s" % etqNotIn

        # event agency id filter (case-insensitive match)
        if ro.contributors:
            q += " AND e.%s AND upper(e.%s) IN('%s')" % (
                _T('creationinfo_used'), _T('creationinfo_agencyid'),
                "', '".join(ro.contributors).upper())

        # origin information filter: join the preferred origin of each event
        q += " AND o._oid = po._oid AND po.%s = e.%s" % (
            colPID, _T('preferredOriginID'))

        # evaluation mode config parameter
        if self._evaluationMode is not None:
            colEvalMode = _T('evaluationMode')
            q += " AND o.%s = '%s'" % (colEvalMode,
                                       DataModel.EEvaluationModeNames.name(
                                           self._evaluationMode))

        # time: compare seconds first, microseconds only on the boundary
        if ro.time:
            colTimeMS = _T('time_value_ms')
            if ro.time.start is not None:
                t = _time(ro.time.start)
                ms = ro.time.start.microseconds()
                q += " AND (o.%s > '%s' OR (o.%s = '%s' AND o.%s >= %i))" % (
                    colTime, t, colTime, t, colTimeMS, ms)
            if ro.time.end is not None:
                t = _time(ro.time.end)
                ms = ro.time.end.microseconds()
                q += " AND (o.%s < '%s' OR (o.%s = '%s' AND o.%s <= %i))" % (
                    colTime, t, colTime, t, colTimeMS, ms)

        # bounding box
        if bBox:
            if bBox.minLat is not None:
                q += " AND o.%s >= %s" % (colLat, bBox.minLat)
            if bBox.maxLat is not None:
                q += " AND o.%s <= %s" % (colLat, bBox.maxLat)
            # a box crossing the date line selects lon >= min OR lon <= max
            if bBox.dateLineCrossing():
                q += " AND (o.%s >= %s OR o.%s <= %s)" % (colLon, bBox.minLon,
                                                          colLon, bBox.maxLon)
            else:
                if bBox.minLon is not None:
                    q += " AND o.%s >= %s" % (colLon, bBox.minLon)
                if bBox.maxLon is not None:
                    q += " AND o.%s <= %s" % (colLon, bBox.maxLon)

        # depth (only origins with a used depth value qualify)
        if ro.depth:
            q += " AND o.%s" % _T("depth_used")
            colDepth = _T('depth_value')
            if ro.depth.min is not None:
                q += " AND o.%s >= %s" % (colDepth, ro.depth.min)
            if ro.depth.max is not None:
                q += " AND o.%s <= %s" % (colDepth, ro.depth.max)

        # updated after: match on either creation or modification time
        if ro.updatedAfter:
            t = _time(ro.updatedAfter)
            ms = ro.updatedAfter.microseconds()
            colCTime = _T('creationinfo_creationtime')
            colCTimeMS = _T('creationinfo_creationtime_ms')
            colMTime = _T('creationinfo_modificationtime')
            colMTimeMS = _T('creationinfo_modificationtime_ms')
            tFilter = "(o.%s > '%s' OR (o.%s = '%s' AND o.%s > %i))"

            q += " AND ("
            q += tFilter % (colCTime, t, colCTime, t, colCTimeMS, ms) + " OR "
            q += tFilter % (colMTime, t, colMTime, t, colMTimeMS, ms) + ")"

        # magnitude information filter
        if reqMag:
            q += " AND m._oid = pm._oid AND "
            if ro.mag and ro.mag.type:
                # join magnitude table on oID of origin and magnitude type
                q += "m._parent_oid = o._oid AND m.%s = '%s'" % (
                    _T('type'), dbq.toString(ro.mag.type))
            else:
                # join magnitude table on preferred magnitude id of event
                q += "pm.%s = e.%s" % (colPID, _T('preferredMagnitudeID'))

            if ro.mag and ro.mag.min is not None:
                q += " AND m.%s >= %s" % (colMag, ro.mag.min)
            if ro.mag and ro.mag.max is not None:
                q += " AND m.%s <= %s" % (colMag, ro.mag.max)

        # ORDER BY ------------------------------
        q += " ORDER BY %s" % colOrderBy
        if ro.orderBy and ro.orderBy.endswith('-asc'):
            q += " ASC"
        else:
            q += " DESC"

        # SUBQUERY distance (optional) ----------
        if reqDist:
            q = "SELECT * FROM (%s) AS subquery WHERE distance " % q
            c = ro.geo.bCircle
            if c.minRad is not None:
                q += ">= %s" % c.minRad
            if c.maxRad is not None:
                if c.minRad is not None:
                    q += " AND distance "
                q += "<= %s" % c.maxRad

        # LIMIT/OFFSET --------------------------
        if ro.limit is not None or ro.offset is not None:
            # Postgres allows to omit the LIMIT parameter for offsets, MySQL
            # does not. According to the MySQL manual a very large number should
            # be used for this case.
            l = DBMaxUInt
            if ro.limit is not None:
                l = ro.limit
            q += " LIMIT %i" % l
            if ro.offset is not None:
                q += " OFFSET %i" % ro.offset

        Logging.debug("event query: %s" % q)

        for e in dbq.getObjectIterator(q, DataModel.Event.TypeInfo()):
            ep.add(DataModel.Event.Cast(e))
Example #17
0
 def __init__(self, userName=None):
     """Set up the resource and cache the record stream URL.

     userName -- authenticated user name, or None for anonymous access
     """
     resource.Resource.__init__(self)
     self.userName = userName
     app = Application.Instance()
     self._rsURL = app.recordStreamURL()
Example #18
0
    def _processRequest(self, req, ro):
        """Handle a dataselect request and stream matching waveform data.

        Parameters:
            req: Twisted HTTP request object
            ro:  parsed dataselect request options

        Returns server.NOT_DONE_YET after handing the transfer over to an
        asynchronous waveform producer, or renders an error page for
        unsupported options, denied access or an empty stream selection.
        """

        # Unsupported request options are rejected up front.
        if ro.quality != 'B' and ro.quality != 'M':
            msg = "quality other than 'B' or 'M' not supported"
            return self.renderErrorPage(req, http.BAD_REQUEST, msg, ro)

        if ro.minimumLength:
            msg = "enforcing of minimum record length not supported"
            return self.renderErrorPage(req, http.BAD_REQUEST, msg, ro)

        if ro.longestOnly:
            msg = "limitation to longest segment not supported"
            return self.renderErrorPage(req, http.BAD_REQUEST, msg, ro)

        app = Application.Instance()
        ro._checkTimes(app._realtimeGap)

        # Optional global limit on the total number of samples per request
        # (configured in millions of samples).
        maxSamples = None
        if app._samplesM is not None:
            maxSamples = app._samplesM * 1000000
            samples = 0

        trackerList = []

        if app._trackdbEnabled or app._requestLog:
            # Prefer the client address reported by a proxy, if present.
            xff = req.requestHeaders.getRawHeaders("x-forwarded-for")
            if xff:
                userIP = xff[0].split(",")[0].strip()
            else:
                userIP = req.getClientIP()

            clientID = req.getHeader("User-Agent")
            if clientID:
                clientID = clientID[:80]
            else:
                clientID = "fdsnws"

        if app._trackdbEnabled:
            if ro.userName:
                userID = ro.userName
            else:
                userID = app._trackdbDefaultUser

            # request ID: milliseconds since 2015-01-01 (epoch 1420070400000)
            reqID = 'ws' + str(int(round(time.time() * 1000) - 1420070400000))
            tracker = RequestTrackerDB(clientID, app.connection(), reqID,
                                       "WAVEFORM", userID,
                                       "REQUEST WAVEFORM " + reqID, "fdsnws",
                                       userIP, req.getClientIP())

            trackerList.append(tracker)

        if app._requestLog:
            tracker = app._requestLog.tracker(ro.service, ro.userName, userIP,
                                              clientID)
            trackerList.append(tracker)

        # Open record stream
        rs = _MyRecordStream(self._rsURL, trackerList, self.__bufferSize)

        # Tri-state access flag: None = no stream matched at all,
        # True = only restricted/denied streams matched,
        # False = at least one accessible stream was added.
        forbidden = None

        # Add request streams
        # iterate over inventory networks
        for s in ro.streams:
            for net in self._networkIter(s):
                netRestricted = utils.isRestricted(net)
                # NOTE(review): restricted networks/stations are only skipped
                # early when no tracker is active; with trackers, denials are
                # recorded per channel below -- confirm this is intended.
                if not trackerList and netRestricted and not self.__user:
                    forbidden = forbidden or (forbidden is None)
                    continue
                for sta in self._stationIter(net, s):
                    staRestricted = utils.isRestricted(sta)
                    if not trackerList and staRestricted and not self.__user:
                        forbidden = forbidden or (forbidden is None)
                        continue
                    for loc in self._locationIter(sta, s):
                        for cha in self._streamIter(loc, s):
                            # clip the requested window to the channel epoch
                            start_time = max(cha.start(), s.time.start)

                            try:
                                end_time = min(cha.end(), s.time.end)
                            except ValueError:
                                # cha.end() apparently raises for open
                                # epochs -- fall back to the requested end
                                end_time = s.time.end

                            # deny restricted channels unless an
                            # authenticated user is authorized for them
                            if (netRestricted or staRestricted
                                    or utils.isRestricted(cha)
                                ) and (not self.__user or
                                       (self.__access
                                        and not self.__access.authorize(
                                            self.__user, net.code(),
                                            sta.code(), loc.code(), cha.code(),
                                            start_time, end_time))):

                                for tracker in trackerList:
                                    net_class = 't' if net.code()[0] \
                                        in "0123456789XYZ" else 'p'
                                    tracker.line_status(
                                        start_time, end_time, net.code(),
                                        sta.code(), cha.code(), loc.code(),
                                        True, net_class, True, [], "fdsnws",
                                        "DENIED", 0, "")

                                forbidden = forbidden or (forbidden is None)
                                continue

                            forbidden = False

                            # enforce maximum sample per request restriction
                            if maxSamples is not None:
                                try:
                                    n = cha.sampleRateNumerator()
                                    d = cha.sampleRateDenominator()
                                except ValueError:
                                    msg = "skipping stream without sampling " \
                                          "rate definition: %s.%s.%s.%s" % (
                                              net.code(), sta.code(),
                                              loc.code(), cha.code())
                                    Logging.warning(msg)
                                    continue

                                # calculate number of samples for requested
                                # time window
                                diffSec = (end_time - start_time).length()
                                samples += int(diffSec * n / d)
                                if samples > maxSamples:
                                    msg = "maximum number of %sM samples " \
                                          "exceeded" % str(app._samplesM)
                                    return self.renderErrorPage(
                                        req, http.REQUEST_ENTITY_TOO_LARGE,
                                        msg, ro)

                            Logging.debug(
                                "adding stream: %s.%s.%s.%s %s - %s" %
                                (net.code(), sta.code(), loc.code(),
                                 cha.code(), start_time.iso(), end_time.iso()))
                            rs.addStream(net.code(), sta.code(), loc.code(),
                                         cha.code(), start_time, end_time,
                                         utils.isRestricted(cha),
                                         sta.archiveNetworkCode())

        if forbidden:
            for tracker in trackerList:
                tracker.volume_status("fdsnws", "DENIED", 0, "")
                tracker.request_status("END", "")

            msg = "access denied"
            return self.renderErrorPage(req, http.FORBIDDEN, msg, ro)

        elif forbidden is None:
            for tracker in trackerList:
                tracker.volume_status("fdsnws", "NODATA", 0, "")
                tracker.request_status("END", "")

            msg = "no metadata found"
            return self.renderErrorPage(req, http.NO_CONTENT, msg, ro)

        # Build output filename
        fileName = Application.Instance()._fileNamePrefix.replace(
            "%time", time.strftime('%Y-%m-%dT%H:%M:%S')) + '.mseed'

        # Create producer for async IO
        prod = _WaveformProducer(req, ro, rs, fileName, trackerList)
        req.registerProducer(prod, True)
        prod.resumeProducing()

        # The request is handled by the deferred object
        return server.NOT_DONE_YET
Example #19
0
    def _processRequest(self, req, ro):
        """Handle a dataselect request and stream matching waveform data.

        Parameters:
            req: Twisted HTTP request object
            ro:  parsed dataselect request options

        Returns server.NOT_DONE_YET after registering an asynchronous
        waveform producer, or renders an error page for unsupported
        options and failures.
        """

        # Unsupported request options are client errors, hence BAD_REQUEST
        # (was SERVICE_UNAVAILABLE; the sibling implementation uses 400)
        if ro.quality != 'B' and ro.quality != 'M':
            msg = "quality other than 'B' or 'M' not supported"
            return HTTP.renderErrorPage(req, http.BAD_REQUEST, msg, ro)

        if ro.minimumLength:
            msg = "enforcing of minimum record length not supported"
            return HTTP.renderErrorPage(req, http.BAD_REQUEST, msg, ro)

        if ro.longestOnly:
            msg = "limitation to longest segment not supported"
            return HTTP.renderErrorPage(req, http.BAD_REQUEST, msg, ro)

        app = Application.Instance()
        ro._checkTimes(app._realtimeGap)

        # Open record stream
        rs = RecordStream.Open(self._rsURL)
        if rs is None:
            msg = "could not open record stream"
            return HTTP.renderErrorPage(req, http.SERVICE_UNAVAILABLE, msg, ro)

        # Optional global limit on the total number of samples per request
        # (configured in millions of samples).
        maxSamples = None
        if app._samplesM is not None:
            maxSamples = app._samplesM * 1000000
            samples = 0

        # Add request streams
        # iterate over inventory networks; restricted data is only
        # delivered to authenticated users
        inv = Application.Instance()._inv
        for s in ro.streams:
            for net in self._networkIter(inv, s):
                if ro.userName is None and utils.isRestricted(net):
                    continue
                for sta in self._stationIter(net, s):
                    if ro.userName is None and utils.isRestricted(sta):
                        continue
                    for loc in self._locationIter(sta, s):
                        for cha in self._streamIter(loc, s):
                            # enforce maximum sample per request restriction
                            if maxSamples is not None:
                                try:
                                    n = cha.sampleRateNumerator()
                                    d = cha.sampleRateDenominator()
                                except ValueError:
                                    # bug fix: was 'ValueException', an
                                    # undefined name which raised NameError
                                    msg = "skipping stream without sampling " \
                                          "rate definition: %s.%s.%s.%s" % (
                                          net.code(), sta.code(), loc.code(),
                                          cha.code())
                                    Logging.warning(msg)
                                    continue

                                # calculate number of samples for requested
                                # time window
                                diffSec = (s.time.end - s.time.start).length()
                                samples += int(diffSec * n / d)
                                if samples > maxSamples:
                                    msg = "maximum number of %sM samples " \
                                          "exceeded" % str(app._samplesM)
                                    return HTTP.renderErrorPage(
                                        req, http.REQUEST_ENTITY_TOO_LARGE,
                                        msg, ro)

                            Logging.debug("adding stream: %s.%s.%s.%s %s - %s" \
                                          % (net.code(), sta.code(), loc.code(),
                                             cha.code(), s.time.start.iso(),
                                             s.time.end.iso()))
                            rs.addStream(net.code(), sta.code(), loc.code(),
                                         cha.code(), s.time.start, s.time.end)

        # Build output filename
        fileName = Application.Instance()._fileNamePrefix + '.mseed'

        # Create producer for async IO
        req.registerProducer(_WaveformProducer(req, ro, rs, fileName), False)

        # The request is handled by the deferred object
        return server.NOT_DONE_YET
Example #20
0
    def init(self):
        """
        SC3 specific method.

        Returning False means that we do not enter the SeisComP3 run loop.
        Note that on the success path this method never returns: it runs
        the whole W-Phase pipeline and terminates the process via
        sys.exit().
        """

        # idiomatic truthiness test instead of the former '== False'
        if not Application.init(self):
            return False

        result: Optional[model.WPhaseResult] = None

        Logging.enableConsoleLogging(Logging.getGlobalChannel("error"))
        wphase_failed = False

        if self.filename is not None:
            # Output JSON was provided, so we just send it to messaging.
            logger.info("Parsing W-Phase result from file.")
            result = model.WPhaseResult.parse_file(self.filename)
        else:
            # Run the inversion.
            try:
                logger.info("Starting W-Phase.")
                result = runwphase(
                    output_dir=self.output,
                    server=self.fdsn_client,
                    eqinfo=self.eqinfo,
                    networks=self.networks,
                    make_maps=self.make_maps,
                    output_dir_can_exist=self.overwrite,
                    save_waveforms=self.save_waveforms,
                    save_inventory=self.save_inventory,
                    waveforms=self.waveforms,
                    inventory=self.inventory,
                )
            except Exception:
                logger.exception("W-Phase run failed.")
                wphase_failed = True

        if result is not None:
            # Push the result to the SC3 messaging system and keep an
            # SCML copy on disk next to the other outputs.
            try:
                objs = createAndSendObjects(result,
                                            self.connection(),
                                            evid=self.evid,
                                            agency=self.agency)
            except Exception:
                logger.exception("Failed to create objects for SC3.")
            else:
                filename = os.path.join(str(self.output), "sc3.xml")
                try:
                    # write output to file
                    writeSCML(filename, objs)
                    logger.info("Stored results in SCML as %s", filename)
                except Exception:
                    logger.exception("Failed to write SCML to %s.", filename)

        if self.write_s3:
            # We always want to write to S3, even in the event of failure as
            # the JSON output may explain the failure. We also want to do it
            # at the very end since we keep the sc3 log file.

            try:
                from wphase.aws import write_to_s3
                write_to_s3(self.output, self.bucket_name, self.evid,
                            self.resultid,
                            [(self._logfile_for_s3.name, 'sc3.log')],
                            logger.error)
            except Exception as e:
                logger.error('failed write to S3: %s', e)
            finally:
                self._logfile_for_s3.close()

        if self.notificationemail:
            # must be done after writing to S3
            success = result is not None and result.MomentTensor is not None
            subject, body = self.createEmail(
                event_id=self.evid,
                result_id=self.resultid,
                result=result,
                call_succeeded=success,
            )
            send_email(
                recipients=self.notificationemail,
                subject=self.email_subject_prefix + str(subject) +
                self.email_subject_postfix,
                message=body,
                from_email=self.fromemail,
                method=self.email_method,
                email_aws_region=self.email_aws_region,
                server=self.smtp_server,
                port=self.smtp_port,
                user=self.smtp_user,
                password=self.smtp_password,
                ssl=self.smtp_ssl,
                tls=self.smtp_tls,
            )

        # Exit instead of entering the run loop; non-zero on failure.
        sys.exit(1 if wphase_failed else 0)
Example #21
0
    def validateParameters(self):
        """Called by the seiscomp client Application setup.

        Returns False (aborting startup) on invalid or insufficient
        options, True otherwise.  The command line either names a
        pre-computed W-Phase JSON result file or describes the event to
        run the inversion for.
        """
        if not Application.validateParameters(self):
            return False

        # processArg copies a command line option onto an attribute of
        # self; getflag stores True when the flag is present.
        getter = self.processArg

        def getflag(name, to=None):
            getter(name, to=to, conv=lambda x: True)

        try:
            # If there is an unrecognized option it must be a JSON file
            # of wphase outputs. In this case, the file is parsed and pushed
            # to the messaging system and written to disk.
            self.filename = self.commandline().unrecognizedOptions()[0]
        except Exception:
            # Otherwise we expect a description of the location. Wphase is
            # then run and results pushed to the messaging system
            # and written to disk.

            # depth only needed for BoM XML
            try:
                depth = float(self.commandline().optionDouble("depth"))
            except Exception:
                depth = 0.

            getter('save-waveforms', 'save_waveforms')
            getter('save-inventory', 'save_inventory')
            getter('waveforms', 'waveforms')
            getter('inventory', 'inventory')
            getter('server')

            # data source: an FDSN server or local waveforms + inventory
            if self.server is not None:
                self.fdsn_client = Client(self.server)
            elif not (self.waveforms and self.inventory):
                logger.error(
                    'Must provide either server or waveforms+inventory to run inversion.'
                )
                return False

            getter('evid')

            # event description: explicit lat/lon/time, or fetched from
            # FDSN by event id as a fallback
            try:
                self.eqinfo = model.Event(
                    latitude=float(self.commandline().optionDouble("lat")),
                    longitude=float(self.commandline().optionDouble("lon")),
                    depth=depth,
                    time=self.commandline().optionString("time"),
                )
            except Exception:
                if not (self.fdsn_client and self.evid):
                    logger.error(
                        'You must provide a JSON payload, lat/lon/time or an evid to fetch from FDSN'
                    )
                    return False
                try:
                    cat = self.fdsn_client.get_events(eventid=self.evid)
                    origin = cat.events[0].preferred_origin()
                except Exception:
                    logger.exception(
                        "Could not retrieve event %s from FDSN server at %s",
                        self.evid, self.server)
                    return False
                self.eqinfo = model.Event(
                    longitude=origin.longitude,
                    latitude=origin.latitude,
                    depth=origin.depth,
                    time=origin.time,
                )

            getter('sourcezone')
            getter('magtype', 'mag_type')
            getter('magvalue', 'mag_value', conv=float)
            getter('outputs', 'output')
            getter('networks')
            getter('region')
            getter('resultid')
            getter('notificationemail')
            getter('emailmethod', 'email_method')
            getter('fromemail')
            getter('email-subject-prefix', 'email_subject_prefix')
            getter('email-subject-postfix', 'email_subject_postfix')
            # NOTE(review): the extra positional arguments below
            # ('us-west-2', False, lambda ...) assume a
            # processArg(name, to, default, conv) signature -- confirm.
            getter('emailawsregion', 'email_aws_region', 'us-west-2')
            getter('writeS3', 'write_s3', False, lambda x: True)
            getter('bucketname', 'bucket_name')
            getter('agency')
            getflag('make-maps', 'make_maps')
            getflag('overwrite', 'overwrite')

            getter('smtp-server', 'smtp_server')
            getter('smtp-port', 'smtp_port')
            getter('smtp-user', 'smtp_user')
            getter('smtp-password', 'smtp_password')
            getflag('smtp-ssl', 'smtp_ssl')
            getflag('smtp-tls', 'smtp_tls')

            # outputs are nested per event id / result id
            if self.evid is not None:
                self.output = os.path.join(self.output, self.evid)
                self.eqinfo.id = self.evid

            if self.resultid is not None:
                self.output = os.path.join(self.output, self.resultid)

            if self.notificationemail is not None:
                self.notificationemail = str(self.notificationemail).split(',')

            # S3 upload requires both an event id and a bucket name
            if self.write_s3 and (
                    self.evid is None or \
                    self.bucket_name is None):
                logger.error(
                    'attempt to write to s3, but did not provide bucket name.')
                return False

            # email notification requires full result identification
            if self.notificationemail is not None and (
                    self.mag_type is None or \
                    self.mag_value is None or \
                    self.evid is None or \
                    self.resultid is None
                    ):
                logger.error('cannot send email.')
                return False

        return True
Example #22
0
class FDSNEvent(resource.Resource):
	"""FDSN Web Service 'event' resource.

	Serves event information from the SeisComP database. Results may be
	restricted by author visibility, evaluation mode, event type white/black
	lists and the set of allowed output formats.
	"""
	isLeaf = True

	#---------------------------------------------------------------------------
	def __init__(self, hideAuthor = False, evaluationMode = None,
	             eventTypeWhitelist = None, eventTypeBlacklist = None,
	             formatList = None):
		# hideAuthor: strip author information from returned objects
		# evaluationMode: if set, only deliver origins with this mode
		# eventTypeWhitelist/-Blacklist: lowercase event type filters
		# formatList: allowed output formats; None means all formats
		self._hideAuthor = hideAuthor
		self._evaluationMode = evaluationMode
		self._eventTypeWhitelist = eventTypeWhitelist
		self._eventTypeBlacklist = eventTypeBlacklist
		self._formatList = formatList


	#---------------------------------------------------------------------------
	def render_OPTIONS(self, req):
		"""Answer CORS pre-flight requests with the allowed methods/headers."""
		req.setHeader('Access-Control-Allow-Origin', '*')
		req.setHeader('Access-Control-Allow-Methods', 'GET, POST, OPTIONS')
		req.setHeader('Access-Control-Allow-Headers',
		              'Accept, Content-Type, X-Requested-With, Origin')
		req.setHeader('Content-Type', 'text/plain')
		return ""


	#---------------------------------------------------------------------------
	def render_GET(self, req):
		"""Validate the GET request and defer the database work to a thread."""
		# Parse and validate GET parameters
		ro = _EventRequestOptions(req.args)
		try:
			ro.parse()
		except ValueError as e:
			Logging.warning(str(e))
			return HTTP.renderErrorPage(req, http.BAD_REQUEST, str(e), ro)

		# Catalog filter is not supported
		if ro.catalogs:
			msg = "catalog filter not supported"
			return HTTP.renderErrorPage(req, http.BAD_REQUEST, msg, ro)

		# updateafter not implemented
		if ro.updatedAfter:
			msg = "filtering based on update time not supported"
			return HTTP.renderErrorPage(req, http.BAD_REQUEST, msg, ro)

		if self._formatList is not None and ro.format not in self._formatList:
			msg = "output format '%s' not available" % ro.format
			return HTTP.renderErrorPage(req, http.BAD_REQUEST, msg, ro)

		# Exporter, 'None' is used for text output
		if ro.format in ro.VText:
			exp = None
		else:
			exp = Exporter.Create(ro.Exporters[ro.format])
			if exp:
				exp.setFormattedOutput(bool(ro.formatted))
			else:
				msg = "output format '%s' not available, export module '%s' could " \
				      "not be loaded." % (ro.format, ro.Exporters[ro.format])
				return HTTP.renderErrorPage(req, http.BAD_REQUEST, msg, ro)

		# Create database query
		db = DatabaseInterface.Open(Application.Instance().databaseURI())
		if db is None:
			# BUG FIX: the original interpolated dbq.errorMsg() here, but
			# 'dbq' is only assigned below -- that raised a NameError on
			# every failed connection attempt.
			msg = "could not connect to database"
			return HTTP.renderErrorPage(req, http.SERVICE_UNAVAILABLE, msg, ro)

		dbq = DataModel.DatabaseQuery(db)

		# Process request in separate thread so the reactor is not blocked
		d = deferToThread(self._processRequest, req, ro, dbq, exp)
		req.notifyFinish().addErrback(utils.onCancel, d)
		d.addBoth(utils.onFinish, req)

		# The request is handled by the deferred object
		return server.NOT_DONE_YET
Пример #23
0
    def _processRequestExp(self, req, ro, dbq, exp, ep):
        """Populate the EventParameters instance *ep* with the events matching
        the request options *ro*, load all related objects (origins,
        magnitudes, focal mechanisms, arrivals, picks, comments) via the
        database query *dbq* and stream the result through exporter *exp*.

        Returns True on success, False when the client disconnected or the
        configured object limit was exceeded.
        """
        objCount = ep.eventCount()
        maxObj = Application.Instance()._queryObjects

        if not self.checkObjects(req, objCount, maxObj):
            return False

        pickIDs = set()
        # picks default to enabled unless explicitly requested otherwise
        if ro.picks is None:
            ro.picks = True

        # add related information
        for iEvent in range(ep.eventCount()):
            if req._disconnected:
                return False
            e = ep.event(iEvent)
            if self._hideAuthor:
                self._removeAuthor(e)

            originIDs = set()
            magIDs = set()
            magIDs.add(e.preferredMagnitudeID())

            # eventDescriptions and comments
            objCount += dbq.loadEventDescriptions(e)
            if ro.comments:
                objCount += self._loadComments(dbq, e)
            if not self.checkObjects(req, objCount, maxObj):
                return False

            # origin references: either all or preferred only
            dbIter = dbq.getObjects(e, DataModel.OriginReference.TypeInfo())
            for obj in dbIter:
                oRef = DataModel.OriginReference.Cast(obj)
                if oRef is None:
                    continue
                if ro.allOrigins:
                    e.add(oRef)
                    originIDs.add(oRef.originID())
                elif oRef.originID() == e.preferredOriginID():
                    e.add(oRef)
                    originIDs.add(oRef.originID())
                    dbIter.close()

            objCount += e.originReferenceCount()

            # focalMechanism references: either none, preferred only or all
            if ro.fm or ro.allFMs:
                dbIter = dbq.getObjects(
                    e, DataModel.FocalMechanismReference.TypeInfo())
                for obj in dbIter:
                    fmRef = DataModel.FocalMechanismReference.Cast(obj)
                    if fmRef is None:
                        continue
                    if ro.allFMs:
                        e.add(fmRef)
                    elif fmRef.focalMechanismID(
                    ) == e.preferredFocalMechanismID():
                        e.add(fmRef)
                        dbIter.close()

            objCount += e.focalMechanismReferenceCount()

            if not self.checkObjects(req, objCount, maxObj):
                return False

            # focal mechanisms: process before origins to add derived origin to
            # originID list since it may be missing from origin reference list
            for iFMRef in range(e.focalMechanismReferenceCount()):
                if req._disconnected:
                    return False
                fmID = e.focalMechanismReference(iFMRef).focalMechanismID()
                obj = dbq.getObject(DataModel.FocalMechanism.TypeInfo(), fmID)
                fm = DataModel.FocalMechanism.Cast(obj)
                if fm is None:
                    continue

                ep.add(fm)
                objCount += 1
                if self._hideAuthor:
                    self._removeAuthor(fm)

                # comments
                if ro.comments:
                    objCount += self._loadComments(dbq, fm)

                # momentTensors
                objCount += dbq.loadMomentTensors(fm)

                if not self.checkObjects(req, objCount, maxObj):
                    return False

                for iMT in range(fm.momentTensorCount()):
                    mt = fm.momentTensor(iMT)

                    originIDs.add(mt.derivedOriginID())
                    magIDs.add(mt.momentMagnitudeID())

                    if self._hideAuthor:
                        self._removeAuthor(mt)

                    # BUG FIX: the original wrapped this in a second
                    # 'for iMT in range(fm.momentTensorCount())' loop, which
                    # shadowed the outer loop variable and loaded the same
                    # moment tensor's comments momentTensorCount() times.
                    if ro.comments:
                        objCount += self._loadComments(dbq, mt)

                    objCount += dbq.loadDataUseds(mt)
                    objCount += dbq.loadMomentTensorPhaseSettings(mt)
                    if ro.staMTs:
                        objCount += dbq.loadMomentTensorStationContributions(
                            mt)
                        for iStaMT in range(
                                mt.momentTensorStationContributionCount()):
                            objCount += dbq.load(
                                mt.momentTensorStationContribution(iStaMT))

                    if not self.checkObjects(req, objCount, maxObj):
                        return False

            # find ID of origin containing preferred Magnitude
            if e.preferredMagnitudeID():
                obj = dbq.getObject(DataModel.Magnitude.TypeInfo(),
                                    e.preferredMagnitudeID())
                m = DataModel.Magnitude.Cast(obj)
                if m is not None:
                    oID = dbq.parentPublicID(m)
                    if oID:
                        originIDs.add(oID)

            # origins
            for oID in sorted(originIDs):
                if req._disconnected:
                    return False
                obj = dbq.getObject(DataModel.Origin.TypeInfo(), oID)
                o = DataModel.Origin.Cast(obj)
                if o is None:
                    continue

                ep.add(o)
                objCount += 1
                if self._hideAuthor:
                    self._removeAuthor(o)

                # comments
                if ro.comments:
                    objCount += self._loadComments(dbq, o)
                if not self.checkObjects(req, objCount, maxObj):
                    return False

                # magnitudes
                dbIter = dbq.getObjects(oID, DataModel.Magnitude.TypeInfo())
                for obj in dbIter:
                    mag = DataModel.Magnitude.Cast(obj)
                    if mag is None:
                        continue
                    if ro.allMags:
                        o.add(mag)
                    elif mag.publicID() in magIDs:
                        o.add(mag)
                        dbIter.close()

                    if self._hideAuthor:
                        self._removeAuthor(mag)

                objCount += o.magnitudeCount()
                if ro.comments:
                    for iMag in range(o.magnitudeCount()):
                        objCount += self._loadComments(dbq, o.magnitude(iMag))
                if not self.checkObjects(req, objCount, maxObj):
                    return False

                # TODO station magnitudes, amplitudes
                # - added pick id for each pick referenced by amplitude

                # arrivals
                if ro.arrivals:
                    objCount += dbq.loadArrivals(o)
                    # BUG FIX: the original tested 'self._removeAuthor' (a
                    # bound method, always truthy) instead of the
                    # '_hideAuthor' flag, so arrival authors were stripped
                    # unconditionally.
                    if self._hideAuthor:
                        for iArrival in range(o.arrivalCount()):
                            self._removeAuthor(o.arrival(iArrival))

                    # collect pick IDs if requested
                    if ro.picks:
                        for iArrival in range(o.arrivalCount()):
                            pickIDs.add(o.arrival(iArrival).pickID())

                if not self.checkObjects(req, objCount, maxObj):
                    return False

        # picks
        if pickIDs:
            objCount += len(pickIDs)
            if not self.checkObjects(req, objCount, maxObj):
                return False

            for pickID in sorted(pickIDs):
                obj = dbq.getObject(DataModel.Pick.TypeInfo(), pickID)
                pick = DataModel.Pick.Cast(obj)
                if pick is not None:
                    if self._hideAuthor:
                        self._removeAuthor(pick)
                    if ro.comments:
                        objCount += self._loadComments(dbq, pick)
                    ep.add(pick)
                if not self.checkObjects(req, objCount, maxObj):
                    return False

        # write response
        sink = utils.Sink(req)
        if not exp.write(sink, ep):
            return False
        Logging.debug("%s: returned %i events and %i origins (total "
                      "objects/chars: %i/%i)" %
                      (ro.service, ep.eventCount(), ep.originCount(), objCount,
                       sink.written))
        utils.accessLog(req, ro, http.OK, sink.written, None)
        return True
Пример #24
0
def accessLog(req, ro, code, length, err):
    """Record a finished request in the application-wide access log.

    Silently does nothing when no access log has been configured.
    """
    sink = Application.Instance()._accessLog
    if sink is not None:
        sink.log(AccessLogEntry(req, ro, code, length, err))
Пример #25
0
 def __init__(self, inv, access=None, user=None):
     """Store the inventory plus optional access-control and user context,
     and cache the application's record stream URL."""
     resource.Resource.__init__(self)
     self.__inv = inv
     self.__access = access
     self.__user = user
     self._rsURL = Application.Instance().recordStreamURL()
Пример #26
0
	def initConfiguration(self):
		"""Read the fdsnws configuration.

		Every option is optional: a missing value leaves the default set in
		the constructor untouched. Returns False on invalid values, True
		otherwise.
		"""
		if not Application.initConfiguration(self):
			return False

		# bind address and port
		try: self._listenAddress = self.configGetString('listenAddress')
		except ConfigException: pass
		try: self._port = self.configGetInt('port')
		except ConfigException: pass

		# maximum number of connections
		try: self._connections = self.configGetInt('connections')
		except ConfigException: pass

		# maximum number of objects per query, used in fdsnws-station and
		# fdsnws-event to limit main memory consumption
		try: self._queryObjects = self.configGetInt('queryObjects')
		except ConfigException: pass

		# restrict end time of request to now-realtimeGap seconds, used in
		# fdsnws-dataselect
		try: self._realtimeGap = self.configGetInt('realtimeGap')
		except ConfigException: pass

		# maximum number of samples (in units of million) per query, used in
		# fdsnws-dataselect to limit bandwidth
		try: self._samplesM = self.configGetDouble('samplesM')
		except ConfigException: pass

		try: self._recordBulkSize = self.configGetInt('recordBulkSize')
		except ConfigException: pass

		# NOTE(review): assumes _recordBulkSize got a default in __init__;
		# confirm it is initialised when the option is absent
		if self._recordBulkSize < 1:
			print >> sys.stderr, "Invalid recordBulkSize, must be larger than 0"
			return False

		# location of htpasswd file
		try:
			self._htpasswd = self.configGetString('htpasswd')
		except ConfigException: pass
		self._htpasswd = Environment.Instance().absolutePath(self._htpasswd)

		# location of access log file
		try:
			self._accessLogFile = Environment.Instance().absolutePath(
			                      self.configGetString('accessLog'))
		except ConfigException: pass

		# access to restricted inventory information
		# (bare 'except:' clauses below narrowed to 'except Exception:' so
		# SystemExit/KeyboardInterrupt are no longer swallowed)
		try: self._allowRestricted = self.configGetBool('allowRestricted')
		except Exception: pass

		# use arclink-access bindings
		try: self._useArclinkAccess = self.configGetBool('useArclinkAccess')
		except Exception: pass

		# services to enable
		try: self._serveDataSelect = self.configGetBool('serveDataSelect')
		except Exception: pass
		try: self._serveEvent = self.configGetBool('serveEvent')
		except Exception: pass
		try: self._serveStation = self.configGetBool('serveStation')
		except Exception: pass

		# event filter
		try: self._hideAuthor = self.configGetBool('hideAuthor')
		except Exception: pass
		try:
			name = self.configGetString('evaluationMode')
			if name.lower() == DataModel.EEvaluationModeNames.name(DataModel.MANUAL):
				self._evaluationMode = DataModel.MANUAL
			elif name.lower() == DataModel.EEvaluationModeNames.name(DataModel.AUTOMATIC):
				self._evaluationMode = DataModel.AUTOMATIC
			else:
				print >> sys.stderr, "invalid evaluation mode string: %s" % name
				return False
		except Exception: pass
		try:
			strings = self.configGetStrings('eventType.whitelist')
			# a single empty entry is ignored; an empty list raises
			# IndexError which is caught below
			if len(strings) > 1 or len(strings[0]):
				self._eventTypeWhitelist = [ s.lower() for s in strings ]
		except Exception: pass
		try:
			strings = self.configGetStrings('eventType.blacklist')
			# BUG FIX: was '> 0', which installed an active blacklist even
			# for a single empty string; now consistent with the whitelist
			if len(strings) > 1 or len(strings[0]):
				self._eventTypeBlacklist = [ s.lower() for s in strings ]
		except Exception: pass

		# station filter
		try: self._stationFilter = Environment.Instance().absolutePath(self.configGetString('stationFilter'))
		except ConfigException: pass

		# dataSelect filter
		try: self._dataSelectFilter = Environment.Instance().absolutePath(self.configGetString('dataSelectFilter'))
		except ConfigException: pass

		# output filter debug information
		try: self._debugFilter = self.configGetBool('debugFilter')
		except ConfigException: pass

		# prefix to be used as default for output filenames
		try: self._fileNamePrefix = self.configGetString('fileNamePrefix')
		except ConfigException: pass

		# save request logs in database?
		try: self._trackdbEnabled = self.configGetBool('trackdb.enable')
		except ConfigException: pass

		# default user
		try: self._trackdbDefaultUser = self.configGetString('trackdb.defaultUser')
		except ConfigException: pass

		# enable authentication extension?
		try: self._authEnabled = self.configGetBool('auth.enable')
		except ConfigException: pass

		# GnuPG home directory
		try: self._authGnupgHome = self.configGetString('auth.gnupgHome')
		except ConfigException: pass
		self._authGnupgHome = Environment.Instance().absolutePath(self._authGnupgHome)

		# If the database connection is passed via command line or configuration
		# file then messaging is disabled. Messaging is only used to get
		# the configured database connection URI.
		if self.databaseURI() != "":
			self.setMessagingEnabled(False)
		else:
			# Without the event service, event a database connection is not
			# required if the inventory is loaded from file
			if not self._serveEvent and not self._useArclinkAccess and not self.isInventoryDatabaseEnabled():
				self.setMessagingEnabled(False)
				self.setDatabaseEnabled(False, False)

		return True
Пример #27
0
    def initConfiguration(self):
        """Read the fdsnws configuration.

        Every option is optional: a missing value leaves the default set in
        the constructor untouched. Returns False on invalid values, True
        otherwise.
        """
        if not Application.initConfiguration(self):
            return False

        # bind address and port
        try:
            self._listenAddress = self.configGetString('listenAddress')
        except Exception:
            pass
        try:
            self._port = self.configGetInt('port')
        except Exception:
            pass

        # maximum number of connections
        try:
            self._connections = self.configGetInt('connections')
        except Exception:
            pass

        # maximum number of objects per query, used in fdsnws-station and
        # fdsnws-event to limit main memory consumption
        try:
            self._queryObjects = self.configGetInt('queryObjects')
        except Exception:
            pass

        # restrict end time of request to now-realtimeGap seconds, used in
        # fdsnws-dataselect
        try:
            self._realtimeGap = self.configGetInt('realtimeGap')
        except Exception:
            pass

        # maximum number of samples (in units of million) per query, used in
        # fdsnws-dataselect to limit bandwidth
        try:
            self._samplesM = self.configGetDouble('samplesM')
        except Exception:
            pass

        try:
            self._recordBulkSize = self.configGetInt('recordBulkSize')
        except Exception:
            pass

        # NOTE(review): assumes _recordBulkSize got a default in __init__;
        # confirm it is initialised when the option is absent
        if self._recordBulkSize < 1:
            print >> sys.stderr, "Invalid recordBulkSize, must be larger than 0"
            return False

        # location of htpasswd file
        try:
            self._htpasswd = self.configGetString('htpasswd')
        except Exception:
            pass
        self._htpasswd = Environment.Instance().absolutePath(self._htpasswd)

        # location of access log file
        try:
            self._accessLogFile = Environment.Instance().absolutePath(
                self.configGetString('accessLog'))
        except Exception:
            pass

        # location of request log file
        try:
            self._requestLogFile = Environment.Instance().absolutePath(
                self.configGetString('requestLog'))
        except Exception:
            pass

        # access to restricted inventory information
        try:
            self._allowRestricted = self.configGetBool('allowRestricted')
        except Exception:
            pass

        # use arclink-access bindings
        try:
            self._useArclinkAccess = self.configGetBool('useArclinkAccess')
        except Exception:
            pass

        # services to enable
        try:
            self._serveDataSelect = self.configGetBool('serveDataSelect')
        except Exception:
            pass
        try:
            self._serveEvent = self.configGetBool('serveEvent')
        except Exception:
            pass
        try:
            self._serveStation = self.configGetBool('serveStation')
        except Exception:
            pass
        try:
            self._serveAvailability = self.configGetBool('serveAvailability')
        except Exception:
            pass

        # data availability
        try:
            self._daEnabled = self.configGetBool('dataAvailability.enable')
        except Exception:
            pass
        try:
            self._daCacheDuration = self.configGetInt(
                'dataAvailability.cacheDuration')
        except Exception:
            pass
        try:
            self._daRepositoryName = self.configGetString(
                'dataAvailability.repositoryName')
        except Exception:
            pass
        try:
            self._daDCCName = self.configGetString('dataAvailability.dccName')
        except Exception:
            pass

        # availability requires the data availability engine to be enabled
        if self._serveAvailability and not self._daEnabled:
            print >> sys.stderr, "can't serve availabilty without " \
                                 "dataAvailability.enable set to true"
            return False
        # repository and DCC names end up in file names / response headers,
        # so restrict them to a safe character set
        if not bool(re.match(r'^[a-zA-Z0-9_\ -]*$', self._daRepositoryName)):
            print >> sys.stderr, "invalid characters in dataAvailability.repositoryName"
            return False
        if not bool(re.match(r'^[a-zA-Z0-9_\ -]*$', self._daDCCName)):
            print >> sys.stderr, "invalid characters in dataAvailability.dccName"
            return False

        # event filter
        try:
            self._hideAuthor = self.configGetBool('hideAuthor')
        except Exception:
            pass
        try:
            name = self.configGetString('evaluationMode')
            if name.lower() == DataModel.EEvaluationModeNames.name(DataModel.MANUAL):
                self._evaluationMode = DataModel.MANUAL
            elif name.lower() == DataModel.EEvaluationModeNames.name(DataModel.AUTOMATIC):
                self._evaluationMode = DataModel.AUTOMATIC
            else:
                print >> sys.stderr, "invalid evaluation mode string: %s" % name
                return False
        except Exception:
            pass
        try:
            strings = self.configGetStrings('eventType.whitelist')
            # a single empty entry is ignored; an empty list raises
            # IndexError which is caught below
            if len(strings) > 1 or len(strings[0]):
                self._eventTypeWhitelist = [s.lower() for s in strings]
        except Exception:
            pass
        try:
            strings = self.configGetStrings('eventType.blacklist')
            # BUG FIX: was '> 0', which installed an active blacklist even
            # for a single empty string; now consistent with the whitelist
            if len(strings) > 1 or len(strings[0]):
                self._eventTypeBlacklist = [s.lower() for s in strings]
        except Exception:
            pass
        try:
            strings = self.configGetStrings('eventFormats')
            if len(strings) > 1 or len(strings[0]):
                self._eventFormats = [s.lower() for s in strings]
        except Exception:
            pass

        # station filter
        try:
            self._stationFilter = Environment.Instance().absolutePath(
                self.configGetString('stationFilter'))
        except Exception:
            pass

        # dataSelect filter
        try:
            self._dataSelectFilter = Environment.Instance().absolutePath(
                self.configGetString('dataSelectFilter'))
        except Exception:
            pass

        # output filter debug information
        try:
            self._debugFilter = self.configGetBool('debugFilter')
        except Exception:
            pass

        # prefix to be used as default for output filenames
        try:
            self._fileNamePrefix = self.configGetString('fileNamePrefix')
        except Exception:
            pass

        # save request logs in database?
        try:
            self._trackdbEnabled = self.configGetBool('trackdb.enable')
        except Exception:
            pass

        # default user
        try:
            self._trackdbDefaultUser = self.configGetString(
                'trackdb.defaultUser')
        except Exception:
            pass

        # enable authentication extension?
        try:
            self._authEnabled = self.configGetBool('auth.enable')
        except Exception:
            pass

        # GnuPG home directory
        try:
            self._authGnupgHome = self.configGetString('auth.gnupgHome')
        except Exception:
            pass
        self._authGnupgHome = Environment.Instance().absolutePath(self._authGnupgHome)

        # blacklist of users/tokens
        try:
            strings = self.configGetStrings('auth.blacklist')
            if len(strings) > 1 or len(strings[0]):
                self._authBlacklist = strings
        except Exception:
            pass

        # If the database connection is passed via command line or configuration
        # file then messaging is disabled. Messaging is only used to get
        # the configured database connection URI.
        if self.databaseURI() != "":
            self.setMessagingEnabled(self._trackdbEnabled)
        else:
            # Without the event service, a database connection is not
            # required if the inventory is loaded from file and no data
            # availability information should be processed
            if not self._serveEvent and not self._useArclinkAccess and \
               (not self._serveStation or (
                   not self.isInventoryDatabaseEnabled() and not self._daEnabled)):
                self.setMessagingEnabled(self._trackdbEnabled)
                self.setDatabaseEnabled(False, False)

        return True
Пример #28
0
def accessLog(req, ro, code, length, err):
    """Forward one completed-request record to the application's access log.

    A no-op when no access log has been configured.
    """
    log = Application.Instance()._accessLog  # pylint: disable=W0212
    if log is not None:
        log.log(AccessLogEntry(req, ro, code, length, err))